go-tangerine Git repository
Diffstat (limited to 'vendor/github.com/tangerine-network/mcl')
-rw-r--r--  vendor/github.com/tangerine-network/mcl/.gitignore | 13
-rw-r--r--  vendor/github.com/tangerine-network/mcl/.travis.yml | 17
-rw-r--r--  vendor/github.com/tangerine-network/mcl/CMakeLists.txt | 119
-rw-r--r--  vendor/github.com/tangerine-network/mcl/COPYRIGHT | 47
-rw-r--r--  vendor/github.com/tangerine-network/mcl/Makefile | 373
-rw-r--r--  vendor/github.com/tangerine-network/mcl/bench.txt | 114
-rw-r--r--  vendor/github.com/tangerine-network/mcl/common.mk | 117
-rw-r--r--  vendor/github.com/tangerine-network/mcl/common.props | 26
-rw-r--r--  vendor/github.com/tangerine-network/mcl/debug.props | 14
-rw-r--r--  vendor/github.com/tangerine-network/mcl/ffi/cs/App.config | 6
-rw-r--r--  vendor/github.com/tangerine-network/mcl/ffi/cs/Properties/AssemblyInfo.cs | 36
-rw-r--r--  vendor/github.com/tangerine-network/mcl/ffi/cs/bn256.cs | 475
-rw-r--r--  vendor/github.com/tangerine-network/mcl/ffi/cs/bn256.csproj | 62
-rw-r--r--  vendor/github.com/tangerine-network/mcl/ffi/cs/bn256.sln | 22
-rw-r--r--  vendor/github.com/tangerine-network/mcl/ffi/cs/bn256_test.cs | 149
-rw-r--r--  vendor/github.com/tangerine-network/mcl/ffi/go/mcl/mcl.go | 659
-rw-r--r--  vendor/github.com/tangerine-network/mcl/ffi/go/mcl/mcl_test.go | 157
-rw-r--r--  vendor/github.com/tangerine-network/mcl/ffi/java/Bn256Test.java | 104
-rw-r--r--  vendor/github.com/tangerine-network/mcl/ffi/java/ElgamalTest.java | 144
-rw-r--r--  vendor/github.com/tangerine-network/mcl/ffi/java/Makefile | 64
-rw-r--r--  vendor/github.com/tangerine-network/mcl/ffi/java/bn256.i | 31
-rw-r--r--  vendor/github.com/tangerine-network/mcl/ffi/java/bn256_impl.hpp | 249
-rw-r--r--  vendor/github.com/tangerine-network/mcl/ffi/java/bn256_wrap.cxx | 1542
-rw-r--r--  vendor/github.com/tangerine-network/mcl/ffi/java/elgamal.i | 28
-rw-r--r--  vendor/github.com/tangerine-network/mcl/ffi/java/elgamal_impl.hpp | 147
-rw-r--r--  vendor/github.com/tangerine-network/mcl/ffi/java/elgamal_wrap.cxx | 1129
-rw-r--r--  vendor/github.com/tangerine-network/mcl/ffi/java/java.md | 95
-rw-r--r--  vendor/github.com/tangerine-network/mcl/ffi/java/make_wrap.bat | 23
-rw-r--r--  vendor/github.com/tangerine-network/mcl/ffi/java/run-bn256.bat | 9
-rw-r--r--  vendor/github.com/tangerine-network/mcl/ffi/java/run-elgamal.bat | 9
-rw-r--r--  vendor/github.com/tangerine-network/mcl/ffi/java/set-java-path.bat | 8
-rw-r--r--  vendor/github.com/tangerine-network/mcl/ffi/js/export-functions.py | 73
-rw-r--r--  vendor/github.com/tangerine-network/mcl/ffi/js/pre-mcl.js | 5
-rw-r--r--  vendor/github.com/tangerine-network/mcl/ffi/python/pairing.py | 80
-rw-r--r--  vendor/github.com/tangerine-network/mcl/ffi/python/she.py | 298
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/cybozu/array.hpp | 197
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/cybozu/atoi.hpp | 239
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/cybozu/benchmark.hpp | 212
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/cybozu/bit_operation.hpp | 139
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/cybozu/critical_section.hpp | 60
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/cybozu/crypto.hpp | 321
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/cybozu/endian.hpp | 224
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/cybozu/exception.hpp | 252
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/cybozu/hash.hpp | 67
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/cybozu/inttype.hpp | 163
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/cybozu/itoa.hpp | 337
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/cybozu/link_libeay32.hpp | 21
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/cybozu/link_mpir.hpp | 18
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/cybozu/link_ssleay32.hpp | 19
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/cybozu/mutex.hpp | 141
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/cybozu/option.hpp | 723
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/cybozu/random_generator.hpp | 153
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/cybozu/serializer.hpp | 363
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/cybozu/sha2.hpp | 467
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/cybozu/stream.hpp | 267
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/cybozu/test.hpp | 373
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/cybozu/unordered_map.hpp | 13
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/cybozu/xorshift.hpp | 189
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/mcl/aggregate_sig.hpp | 265
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/mcl/ahe.hpp | 76
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/mcl/array.hpp | 167
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/mcl/bls12_381.hpp | 15
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/mcl/bn.h | 428
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/mcl/bn.hpp | 2261
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/mcl/bn256.hpp | 15
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/mcl/bn384.hpp | 15
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/mcl/bn512.hpp | 14
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/mcl/conversion.hpp | 495
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/mcl/curve_type.h | 35
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/mcl/ec.hpp | 1045
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/mcl/ecdsa.h | 105
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/mcl/ecdsa.hpp | 257
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/mcl/ecparam.hpp | 191
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/mcl/elgamal.hpp | 612
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/mcl/fp.hpp | 661
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/mcl/fp_tower.hpp | 1364
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/mcl/gmp_util.hpp | 954
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/mcl/impl/bn_c_impl.hpp | 643
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/mcl/lagrange.hpp | 107
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/mcl/op.hpp | 389
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/mcl/operator.hpp | 177
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/mcl/paillier.hpp | 84
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/mcl/randgen.hpp | 156
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/mcl/she.h | 270
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/mcl/she.hpp | 1939
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/mcl/util.hpp | 285
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/mcl/vint.hpp | 1987
-rw-r--r--  vendor/github.com/tangerine-network/mcl/include/mcl/window_method.hpp | 175
-rw-r--r--  vendor/github.com/tangerine-network/mcl/lib/.emptydir | 0
-rw-r--r--  vendor/github.com/tangerine-network/mcl/mcl.sln | 57
-rw-r--r--  vendor/github.com/tangerine-network/mcl/misc/bench.txt | 21
-rw-r--r--  vendor/github.com/tangerine-network/mcl/misc/karatsuba.cpp | 75
-rw-r--r--  vendor/github.com/tangerine-network/mcl/misc/mul.cpp | 58
-rw-r--r--  vendor/github.com/tangerine-network/mcl/misc/precompute.cpp | 30
-rw-r--r--  vendor/github.com/tangerine-network/mcl/misc/she/bench.sh | 6
-rw-r--r--  vendor/github.com/tangerine-network/mcl/misc/she/bench4.txt | 99
-rw-r--r--  vendor/github.com/tangerine-network/mcl/misc/she/bench6.txt | 99
-rw-r--r--  vendor/github.com/tangerine-network/mcl/misc/she/bench8.txt | 99
-rw-r--r--  vendor/github.com/tangerine-network/mcl/misc/she/nizkp.pdf | bin 0 -> 28787 bytes
-rw-r--r--  vendor/github.com/tangerine-network/mcl/misc/she/she-api-ja.md | 314
-rw-r--r--  vendor/github.com/tangerine-network/mcl/misc/she/she-api.md | 322
-rw-r--r--  vendor/github.com/tangerine-network/mcl/misc/she/she.pdf | bin 0 -> 25716 bytes
-rw-r--r--  vendor/github.com/tangerine-network/mcl/mk.bat | 20
-rw-r--r--  vendor/github.com/tangerine-network/mcl/mklib.bat | 39
-rw-r--r--  vendor/github.com/tangerine-network/mcl/obj/.emptydir | 0
-rw-r--r--  vendor/github.com/tangerine-network/mcl/readme.md | 457
-rw-r--r--  vendor/github.com/tangerine-network/mcl/release.props | 12
-rw-r--r--  vendor/github.com/tangerine-network/mcl/sample/bench.cpp | 233
-rw-r--r--  vendor/github.com/tangerine-network/mcl/sample/bls_sig.cpp | 70
-rw-r--r--  vendor/github.com/tangerine-network/mcl/sample/ecdh.cpp | 64
-rw-r--r--  vendor/github.com/tangerine-network/mcl/sample/large.cpp | 125
-rw-r--r--  vendor/github.com/tangerine-network/mcl/sample/pairing.cpp | 56
-rw-r--r--  vendor/github.com/tangerine-network/mcl/sample/pairing_c.c | 52
-rw-r--r--  vendor/github.com/tangerine-network/mcl/sample/random.cpp | 29
-rw-r--r--  vendor/github.com/tangerine-network/mcl/sample/rawbench.cpp | 180
-rw-r--r--  vendor/github.com/tangerine-network/mcl/sample/she_make_dlp_table.cpp | 69
-rw-r--r--  vendor/github.com/tangerine-network/mcl/sample/she_smpl.cpp | 125
-rw-r--r--  vendor/github.com/tangerine-network/mcl/sample/tri-dh.cpp | 97
-rw-r--r--  vendor/github.com/tangerine-network/mcl/sample/vote.cpp | 206
-rw-r--r--  vendor/github.com/tangerine-network/mcl/setvar.bat | 2
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/asm/aarch64.s | 13197
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/asm/arm.s | 84189
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/asm/low_arm.s | 154
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/asm/low_x86-64.asm | 153
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/asm/low_x86.asm | 0
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/asm/x86-64.bmi2.s | 14155
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/asm/x86-64.s | 16652
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/asm/x86-64mac.bmi2.s | 13830
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/asm/x86-64mac.s | 16313
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/asm/x86.bmi2.s | 71547
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/asm/x86.s | 73785
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/bn_c256.cpp | 6
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/bn_c384.cpp | 7
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/bn_c384_256.cpp | 7
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/bn_c512.cpp | 6
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/bn_c_impl.hpp | 517
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/ecdsa_c.cpp | 110
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/fp.cpp | 646
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/fp_generator.hpp | 3885
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/gen.cpp | 999
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/llvm_gen.hpp | 616
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/low_func.hpp | 706
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/low_func_llvm.hpp | 94
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/proj/mcl.vcxproj | 92
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/proto.hpp | 81
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/she_c256.cpp | 2
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/she_c384.cpp | 2
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/she_c_impl.hpp | 681
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/xbyak/xbyak.h | 2611
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/xbyak/xbyak_mnemonic.h | 1972
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/xbyak/xbyak_util.h | 653
-rw-r--r--  vendor/github.com/tangerine-network/mcl/test/aggregate_sig_test.cpp | 74
-rw-r--r--  vendor/github.com/tangerine-network/mcl/test/array_test.cpp | 104
-rw-r--r--  vendor/github.com/tangerine-network/mcl/test/base_test.cpp | 392
-rw-r--r--  vendor/github.com/tangerine-network/mcl/test/bench.hpp | 192
-rw-r--r--  vendor/github.com/tangerine-network/mcl/test/bls12_test.cpp | 720
-rw-r--r--  vendor/github.com/tangerine-network/mcl/test/bn384_test.cpp | 83
-rw-r--r--  vendor/github.com/tangerine-network/mcl/test/bn512_test.cpp | 68
-rw-r--r--  vendor/github.com/tangerine-network/mcl/test/bn_c256_test.cpp | 6
-rw-r--r--  vendor/github.com/tangerine-network/mcl/test/bn_c384_256_test.cpp | 7
-rw-r--r--  vendor/github.com/tangerine-network/mcl/test/bn_c384_test.cpp | 6
-rw-r--r--  vendor/github.com/tangerine-network/mcl/test/bn_c512_test.cpp | 6
-rw-r--r--  vendor/github.com/tangerine-network/mcl/test/bn_c_test.hpp | 699
-rw-r--r--  vendor/github.com/tangerine-network/mcl/test/bn_test.cpp | 408
-rw-r--r--  vendor/github.com/tangerine-network/mcl/test/conversion_test.cpp | 96
-rw-r--r--  vendor/github.com/tangerine-network/mcl/test/ec_test.cpp | 573
-rw-r--r--  vendor/github.com/tangerine-network/mcl/test/ecdsa_c_test.cpp | 51
-rw-r--r--  vendor/github.com/tangerine-network/mcl/test/ecdsa_test.cpp | 69
-rw-r--r--  vendor/github.com/tangerine-network/mcl/test/elgamal_test.cpp | 155
-rw-r--r--  vendor/github.com/tangerine-network/mcl/test/fp_generator_test.cpp | 207
-rw-r--r--  vendor/github.com/tangerine-network/mcl/test/fp_test.cpp | 1046
-rw-r--r--  vendor/github.com/tangerine-network/mcl/test/fp_tower_test.cpp | 477
-rw-r--r--  vendor/github.com/tangerine-network/mcl/test/fp_util_test.cpp | 270
-rw-r--r--  vendor/github.com/tangerine-network/mcl/test/glv_test.cpp | 209
-rw-r--r--  vendor/github.com/tangerine-network/mcl/test/gmp_test.cpp | 70
-rw-r--r--  vendor/github.com/tangerine-network/mcl/test/low_test.cpp | 73
-rw-r--r--  vendor/github.com/tangerine-network/mcl/test/mk32.sh | 1
-rw-r--r--  vendor/github.com/tangerine-network/mcl/test/modp_test.cpp | 37
-rw-r--r--  vendor/github.com/tangerine-network/mcl/test/mont_fp_test.cpp | 332
-rw-r--r--  vendor/github.com/tangerine-network/mcl/test/paillier_test.cpp | 24
-rw-r--r--  vendor/github.com/tangerine-network/mcl/test/proj/bn_test/bn_test.vcxproj | 88
-rw-r--r--  vendor/github.com/tangerine-network/mcl/test/proj/ec_test/ec_test.vcxproj | 88
-rw-r--r--  vendor/github.com/tangerine-network/mcl/test/proj/fp_test/fp_test.vcxproj | 88
-rw-r--r--  vendor/github.com/tangerine-network/mcl/test/proj/fp_tower_test/fp_tower_test.vcxproj | 88
-rw-r--r--  vendor/github.com/tangerine-network/mcl/test/she_c256_test.cpp | 2
-rw-r--r--  vendor/github.com/tangerine-network/mcl/test/she_c384_test.cpp | 2
-rw-r--r--  vendor/github.com/tangerine-network/mcl/test/she_c_test.hpp | 535
-rw-r--r--  vendor/github.com/tangerine-network/mcl/test/she_test.cpp | 756
-rw-r--r--  vendor/github.com/tangerine-network/mcl/test/sq_test.cpp | 21
-rw-r--r--  vendor/github.com/tangerine-network/mcl/test/vint_test.cpp | 1353
-rw-r--r--  vendor/github.com/tangerine-network/mcl/test/window_method_test.cpp | 70
191 files changed, 356819 insertions, 0 deletions
diff --git a/vendor/github.com/tangerine-network/mcl/.gitignore b/vendor/github.com/tangerine-network/mcl/.gitignore
new file mode 100644
index 000000000..f5edb3706
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/.gitignore
@@ -0,0 +1,13 @@
+CVS
+java/*_wrap.cxx
+lib/*.so
+lib/*.a
+*.class
+GPATH
+GRTAGS
+GTAGS
+*.o
+*.d
+*.exe
+*.swp
+.cvsignore
diff --git a/vendor/github.com/tangerine-network/mcl/.travis.yml b/vendor/github.com/tangerine-network/mcl/.travis.yml
new file mode 100644
index 000000000..73a97e6aa
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/.travis.yml
@@ -0,0 +1,17 @@
+sudo: true
+dist: trusty
+language: cpp
+compiler:
+ - gcc
+ - clang
+addons:
+ apt:
+ packages:
+ - libgmp-dev
+script:
+ - make test_ci DEBUG=1 -j3
+ - make clean
+ - make test_ci CFLAGS_USER=-DMCL_DONT_USE_XBYAK -j3
+ - make clean
+ - make test_go
+
diff --git a/vendor/github.com/tangerine-network/mcl/CMakeLists.txt b/vendor/github.com/tangerine-network/mcl/CMakeLists.txt
new file mode 100644
index 000000000..aaa0a8cf2
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/CMakeLists.txt
@@ -0,0 +1,119 @@
+cmake_minimum_required (VERSION 2.6)
+project(mcl CXX ASM)
+set(SRCS src/fp.cpp)
+
+option(
+ MCL_MAX_BIT_SIZE
+ "max bit size for Fp"
+ 0
+)
+option(
+ DOWNLOAD_SOURCE
+ "download cybozulib_ext"
+ OFF
+)
+
+set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
+set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
+set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
+
+if(MSVC)
+ set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS} /MT /W4 /Oy /Ox /EHsc /GS- /Zi /DNDEBUG /DNOMINMAX")
+ set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS} /MTd /W4 /DNOMINMAX")
+ link_directories(${CMAKE_SOURCE_DIR}/../cybozulib_ext/lib)
+ link_directories(${CMAKE_SOURCE_DIR}/lib)
+else()
+ if("${CFLAGS_OPT_USER}" STREQUAL "")
+ set(CFLAGS_OPT_USER "-O3 -DNDEBUG -march=native")
+ endif()
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall -Wextra -Wformat=2 -Wcast-qual -Wcast-align -Wwrite-strings -Wfloat-equal -Wpointer-arith ${CFLAGS_OPT_USER}")
+
+ if(${MCL_MAX_BIT_SIZE} GREATER 0)
+ add_definitions(-DMCL_MAX_BIT_SIZE=${MCL_MAX_BIT_SIZE})
+ endif()
+
+ if(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "aarch64")
+ add_definitions(-DMCL_USE_LLVM=1)
+ set(SRCS ${SRCS} src/asm/aarch64.s)
+ set(CPU arch64)
+ elseif(${CMAKE_SYSTEM_PROCESSOR} MATCHES "^arm")
+ add_definitions(-DMCL_USE_LLVM=1)
+ set(SRCS ${SRCS} src/asm/arm.s)
+ set(CPU arm)
+ elseif(APPLE)
+ add_definitions(-DMCL_USE_LLVM=1)
+ set(SRCS ${SRCS} src/asm/x86-64mac.s src/asm/x86-64mac.bmi2.s)
+ set(CPU x86-64)
+ elseif(UNIX)
+ add_definitions(-DMCL_USE_LLVM=1)
+ set(SRCS ${SRCS} src/asm/x86-64.s src/asm/x86-64.bmi2.s)
+ set(CPU x86-64)
+ endif()
+ set(LIBS mcl gmp gmpxx crypto)
+endif()
+
+if(DOWNLOAD_SOURCE)
+ if(MSVC)
+ set(CYBOZULIB_EXT_TAG release20170521)
+ set(FILES config.h gmp-impl.h gmp-mparam.h gmp.h gmpxx.h longlong.h mpir.h mpirxx.h)
+ foreach(file IN ITEMS ${FILES})
+ file(DOWNLOAD https://raw.githubusercontent.com/herumi/cybozulib_ext/${CYBOZULIB_EXT_TAG}/include/${file} ${mcl_SOURCE_DIR}/include/cybozulib_ext/${file})
+ message("download cybozulib_ext/" ${file})
+ endforeach()
+ set(FILES aes.h applink.c asn1.h asn1_mac.h asn1t.h bio.h blowfish.h bn.h buffer.h camellia.h cast.h cmac.h cms.h comp.h conf.h conf_api.h crypto.h des.h des_old.h dh.h dsa.h dso.h dtls1.h e_os2.h ebcdic.h ec.h ecdh.h ecdsa.h engine.h err.h evp.h hmac.h idea.h krb5_asn.h kssl.h lhash.h md4.h md5.h mdc2.h modes.h obj_mac.h objects.h ocsp.h opensslconf.h opensslv.h ossl_typ.h pem.h pem2.h pkcs12.h pkcs7.h pqueue.h rand.h rc2.h rc4.h ripemd.h rsa.h safestack.h seed.h sha.h srp.h srtp.h ssl.h ssl2.h ssl23.h ssl3.h stack.h symhacks.h tls1.h ts.h txt_db.h ui.h ui_compat.h whrlpool.h x509.h x509_vfy.h x509v3.h)
+ foreach(file IN ITEMS ${FILES})
+ file(DOWNLOAD https://raw.githubusercontent.com/herumi/cybozulib_ext/${CYBOZULIB_EXT_TAG}/include/openssl/${file} ${mcl_SOURCE_DIR}/include/cybozulib_ext/openssl/${file})
+ message("download cybozulib_ext/openssl/" ${file})
+ endforeach()
+ set(FILES mpir.lib mpirxx.lib mpirxx.pdb ssleay32.lib libeay32.lib mpir.pdb)
+ foreach(file IN ITEMS ${FILES})
+ file(DOWNLOAD https://raw.githubusercontent.com/herumi/cybozulib_ext/${CYBOZULIB_EXT_TAG}/lib/mt/14/${file} ${mcl_SOURCE_DIR}/lib/mt/14/${file})
+ message("download lib/mt/14/" ${file})
+ endforeach()
+ if(MSVC)
+ include_directories(
+ ${mcl_SOURCE_DIR}/include/cybozulib_ext
+ )
+ endif()
+ endif()
+else()
+ if(MSVC)
+ include_directories(
+ ${mcl_SOURCE_DIR}/../cybozulib_ext/include
+ )
+ endif()
+endif()
+
+include_directories(
+ ${mcl_SOURCE_DIR}/include
+)
+
+add_library(mcl STATIC ${SRCS})
+add_library(mcl_dy SHARED ${SRCS})
+target_link_libraries(mcl_dy ${LIBS})
+set_target_properties(mcl_dy PROPERTIES OUTPUT_NAME mcl)
+#set_target_properties(mcl_dy PROPERTIES OUTPUT_NAME mcl VERSION 1.0.0 SOVERSION 1)
+# For semantics of ABI compatibility including when you must bump SOVERSION, see:
+# https://community.kde.org/Policies/Binary_Compatibility_Issues_With_C%2B%2B#The_Do.27s_and_Don.27ts
+
+file(GLOB MCL_HEADERS include/mcl/*.hpp include/mcl/bn.h include/mcl/curve_type.h)
+file(GLOB CYBOZULIB_HEADERS include/cybozu/*.hpp)
+
+install(TARGETS mcl DESTINATION lib)
+install(TARGETS mcl_dy DESTINATION lib)
+install(FILES ${MCL_HEADERS} DESTINATION include/mcl)
+install(FILES include/mcl/impl/bn_c_impl.hpp DESTINATION include/mcl/impl)
+install(FILES ${CYBOZULIB_HEADERS} DESTINATION include/cybozu)
+
+set(TEST_BASE fp_test ec_test fp_util_test window_method_test elgamal_test fp_tower_test gmp_test bn_test glv_test)
+#set(TEST_BASE bn_test)
+foreach(base IN ITEMS ${TEST_BASE})
+ add_executable(
+ ${base}
+ test/${base}.cpp
+ )
+ target_link_libraries(
+ ${base}
+ ${LIBS}
+ )
+endforeach()
diff --git a/vendor/github.com/tangerine-network/mcl/COPYRIGHT b/vendor/github.com/tangerine-network/mcl/COPYRIGHT
new file mode 100644
index 000000000..90e49b4bc
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/COPYRIGHT
@@ -0,0 +1,47 @@
+
+Copyright (c) 2015 MITSUNARI Shigeo
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+Neither the name of the copyright owner nor the names of its contributors may
+be used to endorse or promote products derived from this software without
+specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+THE POSSIBILITY OF SUCH DAMAGE.
+-----------------------------------------------------------------------------
+ソースコード形式かバイナリ形式か、変更するかしないかを問わず、以下の条件を満た
+す場合に限り、再頒布および使用が許可されます。
+
+ソースコードを再頒布する場合、上記の著作権表示、本条件一覧、および下記免責条項
+を含めること。
+バイナリ形式で再頒布する場合、頒布物に付属のドキュメント等の資料に、上記の著作
+権表示、本条件一覧、および下記免責条項を含めること。
+書面による特別の許可なしに、本ソフトウェアから派生した製品の宣伝または販売促進
+に、著作権者の名前またはコントリビューターの名前を使用してはならない。
+本ソフトウェアは、著作権者およびコントリビューターによって「現状のまま」提供さ
+れており、明示黙示を問わず、商業的な使用可能性、および特定の目的に対する適合性
+に関する暗黙の保証も含め、またそれに限定されない、いかなる保証もありません。
+著作権者もコントリビューターも、事由のいかんを問わず、 損害発生の原因いかんを
+問わず、かつ責任の根拠が契約であるか厳格責任であるか(過失その他の)不法行為で
+あるかを問わず、仮にそのような損害が発生する可能性を知らされていたとしても、
+本ソフトウェアの使用によって発生した(代替品または代用サービスの調達、使用の
+喪失、データの喪失、利益の喪失、業務の中断も含め、またそれに限定されない)直接
+損害、間接損害、偶発的な損害、特別損害、懲罰的損害、または結果損害について、
+一切責任を負わないものとします。
diff --git a/vendor/github.com/tangerine-network/mcl/Makefile b/vendor/github.com/tangerine-network/mcl/Makefile
new file mode 100644
index 000000000..7df1dd300
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/Makefile
@@ -0,0 +1,373 @@
+include common.mk
+LIB_DIR=lib
+OBJ_DIR=obj
+EXE_DIR=bin
+SRC_SRC=fp.cpp bn_c256.cpp bn_c384.cpp bn_c512.cpp she_c256.cpp
+TEST_SRC=fp_test.cpp ec_test.cpp fp_util_test.cpp window_method_test.cpp elgamal_test.cpp fp_tower_test.cpp gmp_test.cpp bn_test.cpp bn384_test.cpp glv_test.cpp paillier_test.cpp she_test.cpp vint_test.cpp bn512_test.cpp ecdsa_test.cpp conversion_test.cpp
+TEST_SRC+=bn_c256_test.cpp bn_c384_test.cpp bn_c384_256_test.cpp bn_c512_test.cpp she_c256_test.cpp she_c384_test.cpp
+TEST_SRC+=aggregate_sig_test.cpp array_test.cpp
+TEST_SRC+=bls12_test.cpp
+TEST_SRC+=ecdsa_c_test.cpp
+TEST_SRC+=modp_test.cpp
+ifeq ($(CPU),x86-64)
+ MCL_USE_XBYAK?=1
+ TEST_SRC+=mont_fp_test.cpp sq_test.cpp
+ ifeq ($(USE_LOW_ASM),1)
+ TEST_SRC+=low_test.cpp
+ endif
+ ifeq ($(MCL_USE_XBYAK),1)
+ TEST_SRC+=fp_generator_test.cpp
+ endif
+endif
+SAMPLE_SRC=bench.cpp ecdh.cpp random.cpp rawbench.cpp vote.cpp pairing.cpp large.cpp tri-dh.cpp bls_sig.cpp pairing_c.c she_smpl.cpp
+
+ifneq ($(MCL_MAX_BIT_SIZE),)
+ CFLAGS+=-DMCL_MAX_BIT_SIZE=$(MCL_MAX_BIT_SIZE)
+endif
+ifeq ($(MCL_USE_XBYAK),0)
+ CFLAGS+=-DMCL_DONT_USE_XBYAK
+endif
+##################################################################
+MCL_LIB=$(LIB_DIR)/libmcl.a
+MCL_SNAME=mcl
+BN256_SNAME=mclbn256
+BN384_SNAME=mclbn384
+BN384_256_SNAME=mclbn384_256
+BN512_SNAME=mclbn512
+SHE256_SNAME=mclshe256
+MCL_SLIB=$(LIB_DIR)/lib$(MCL_SNAME).$(LIB_SUF)
+BN256_LIB=$(LIB_DIR)/libmclbn256.a
+BN256_SLIB=$(LIB_DIR)/lib$(BN256_SNAME).$(LIB_SUF)
+BN384_LIB=$(LIB_DIR)/libmclbn384.a
+BN384_SLIB=$(LIB_DIR)/lib$(BN384_SNAME).$(LIB_SUF)
+BN384_256_LIB=$(LIB_DIR)/libmclbn384_256.a
+BN384_256_SLIB=$(LIB_DIR)/lib$(BN384_256_SNAME).$(LIB_SUF)
+BN512_LIB=$(LIB_DIR)/libmclbn512.a
+BN512_SLIB=$(LIB_DIR)/lib$(BN512_SNAME).$(LIB_SUF)
+SHE256_LIB=$(LIB_DIR)/libmclshe256.a
+SHE256_SLIB=$(LIB_DIR)/lib$(SHE256_SNAME).$(LIB_SUF)
+SHE384_LIB=$(LIB_DIR)/libmclshe384.a
+ECDSA_LIB=$(LIB_DIR)/libmclecdsa.a
+all: $(MCL_LIB) $(MCL_SLIB) $(BN256_LIB) $(BN256_SLIB) $(BN384_LIB) $(BN384_SLIB) $(BN384_256_LIB) $(BN384_256_SLIB) $(BN512_LIB) $(BN512_SLIB) $(SHE256_LIB) $(SHE256_SLIB) $(SHE384_lib) $(ECDSA_LIB)
+
+#LLVM_VER=-3.8
+LLVM_LLC=llc$(LLVM_VER)
+LLVM_OPT=opt$(LLVM_VER)
+LLVM_OPT_VERSION=$(shell $(LLVM_OPT) --version 2>/dev/null | awk '/version/ {print $$3}')
+GEN_EXE=src/gen
+# incompatibility between llvm 3.4 and the later version
+ifneq ($(LLVM_OPT_VERSION),)
+ifeq ($(shell expr $(LLVM_OPT_VERSION) \< 3.5.0),1)
+ GEN_EXE_OPT=-old
+endif
+endif
+ifeq ($(OS),mac)
+ ASM_SRC_PATH_NAME=src/asm/$(CPU)mac
+else
+ ASM_SRC_PATH_NAME=src/asm/$(CPU)
+endif
+ifneq ($(CPU),)
+ ASM_SRC=$(ASM_SRC_PATH_NAME).s
+endif
+ASM_OBJ=$(OBJ_DIR)/$(CPU).o
+LIB_OBJ=$(OBJ_DIR)/fp.o
+BN256_OBJ=$(OBJ_DIR)/bn_c256.o
+BN384_OBJ=$(OBJ_DIR)/bn_c384.o
+BN384_256_OBJ=$(OBJ_DIR)/bn_c384_256.o
+BN512_OBJ=$(OBJ_DIR)/bn_c512.o
+SHE256_OBJ=$(OBJ_DIR)/she_c256.o
+SHE384_OBJ=$(OBJ_DIR)/she_c384.o
+ECDSA_OBJ=$(OBJ_DIR)/ecdsa_c.o
+FUNC_LIST=src/func.list
+ifeq ($(findstring $(OS),mingw64/cygwin),)
+ MCL_USE_LLVM?=1
+else
+ MCL_USE_LLVM=0
+endif
+ifeq ($(MCL_USE_LLVM),1)
+ CFLAGS+=-DMCL_USE_LLVM=1
+ LIB_OBJ+=$(ASM_OBJ)
+ # special case for intel with bmi2
+ ifeq ($(INTEL),1)
+ LIB_OBJ+=$(OBJ_DIR)/$(CPU).bmi2.o
+ endif
+endif
+LLVM_SRC=src/base$(BIT).ll
+
+# CPU is used for llvm
+# see $(LLVM_LLC) --version
+LLVM_FLAGS=-march=$(CPU) -relocation-model=pic #-misched=ilpmax
+LLVM_FLAGS+=-pre-RA-sched=list-ilp -max-sched-reorder=128 -mattr=-sse
+
+#HAS_BMI2=$(shell cat "/proc/cpuinfo" | grep bmi2 >/dev/null && echo "1")
+#ifeq ($(HAS_BMI2),1)
+# LLVM_FLAGS+=-mattr=bmi2
+#endif
+
+ifeq ($(USE_LOW_ASM),1)
+ LOW_ASM_OBJ=$(LOW_ASM_SRC:.asm=.o)
+ LIB_OBJ+=$(LOW_ASM_OBJ)
+endif
+
+ifeq ($(UPDATE_ASM),1)
+ ASM_SRC_DEP=$(LLVM_SRC)
+ ASM_BMI2_SRC_DEP=src/base$(BIT).bmi2.ll
+else
+ ASM_SRC_DEP=
+ ASM_BMI2_SRC_DEP=
+endif
+
+ifneq ($(findstring $(OS),mac/mingw64),)
+ BN256_SLIB_LDFLAGS+=-l$(MCL_SNAME) -L./lib
+ BN384_SLIB_LDFLAGS+=-l$(MCL_SNAME) -L./lib
+ BN384_256_SLIB_LDFLAGS+=-l$(MCL_SNAME) -L./lib
+ BN512_SLIB_LDFLAGS+=-l$(MCL_SNAME) -L./lib
+ SHE256_SLIB_LDFLAGS+=-l$(MCL_SNAME) -L./lib
+endif
+ifeq ($(OS),mingw64)
+ MCL_SLIB_LDFLAGS+=-Wl,--out-implib,$(LIB_DIR)/lib$(MCL_SNAME).a
+ BN256_SLIB_LDFLAGS+=-Wl,--out-implib,$(LIB_DIR)/lib$(BN256_SNAME).a
+ BN384_SLIB_LDFLAGS+=-Wl,--out-implib,$(LIB_DIR)/lib$(BN384_SNAME).a
+ BN384_256_SLIB_LDFLAGS+=-Wl,--out-implib,$(LIB_DIR)/lib$(BN384_256_SNAME).a
+ BN512_SLIB_LDFLAGS+=-Wl,--out-implib,$(LIB_DIR)/lib$(BN512_SNAME).a
+ SHE256_SLIB_LDFLAGS+=-Wl,--out-implib,$(LIB_DIR)/lib$(SHE256_SNAME).a
+endif
+
+$(MCL_LIB): $(LIB_OBJ)
+ $(AR) $@ $(LIB_OBJ)
+
+$(MCL_SLIB): $(LIB_OBJ)
+ $(PRE)$(CXX) -o $@ $(LIB_OBJ) -shared $(LDFLAGS) $(MCL_SLIB_LDFLAGS)
+
+$(BN256_LIB): $(BN256_OBJ)
+ $(AR) $@ $(BN256_OBJ)
+
+$(SHE256_LIB): $(SHE256_OBJ)
+ $(AR) $@ $(SHE256_OBJ)
+
+$(SHE256_SLIB): $(SHE256_OBJ) $(MCL_LIB)
+ $(PRE)$(CXX) -o $@ $(SHE256_OBJ) $(MCL_LIB) -shared $(LDFLAGS) $(SHE256_SLIB_LDFLAGS)
+
+$(SHE384_LIB): $(SHE384_OBJ)
+ $(AR) $@ $(SHE384_OBJ)
+
+$(ECDSA_LIB): $(ECDSA_OBJ)
+ $(AR) $@ $(ECDSA_OBJ)
+
+$(BN256_SLIB): $(BN256_OBJ) $(MCL_SLIB)
+ $(PRE)$(CXX) -o $@ $(BN256_OBJ) -shared $(LDFLAGS) $(BN256_SLIB_LDFLAGS)
+
+$(BN384_LIB): $(BN384_OBJ)
+ $(AR) $@ $(BN384_OBJ)
+
+$(BN384_256_LIB): $(BN384_256_OBJ)
+ $(AR) $@ $(BN384_256_OBJ)
+
+$(BN512_LIB): $(BN512_OBJ)
+ $(AR) $@ $(BN512_OBJ)
+
+$(BN384_SLIB): $(BN384_OBJ) $(MCL_SLIB)
+ $(PRE)$(CXX) -o $@ $(BN384_OBJ) -shared $(LDFLAGS) $(BN384_SLIB_LDFLAGS)
+
+$(BN384_256_SLIB): $(BN384_256_OBJ) $(MCL_SLIB)
+ $(PRE)$(CXX) -o $@ $(BN384_256_OBJ) -shared $(LDFLAGS) $(BN384_256_SLIB_LDFLAGS)
+
+$(BN512_SLIB): $(BN512_OBJ) $(MCL_SLIB)
+ $(PRE)$(CXX) -o $@ $(BN512_OBJ) -shared $(LDFLAGS) $(BN512_SLIB_LDFLAGS)
+
+$(ASM_OBJ): $(ASM_SRC)
+ $(PRE)$(CXX) -c $< -o $@ $(CFLAGS)
+
+$(ASM_SRC): $(ASM_SRC_DEP)
+ $(LLVM_OPT) -O3 -o - $< -march=$(CPU) | $(LLVM_LLC) -O3 -o $@ $(LLVM_FLAGS)
+
+$(LLVM_SRC): $(GEN_EXE) $(FUNC_LIST)
+ $(GEN_EXE) $(GEN_EXE_OPT) -f $(FUNC_LIST) > $@
+
+$(ASM_SRC_PATH_NAME).bmi2.s: $(ASM_BMI2_SRC_DEP)
+ $(LLVM_OPT) -O3 -o - $< -march=$(CPU) | $(LLVM_LLC) -O3 -o $@ $(LLVM_FLAGS) -mattr=bmi2
+
+$(OBJ_DIR)/$(CPU).bmi2.o: $(ASM_SRC_PATH_NAME).bmi2.s
+ $(PRE)$(CXX) -c $< -o $@ $(CFLAGS)
+
+src/base$(BIT).bmi2.ll: $(GEN_EXE)
+ $(GEN_EXE) $(GEN_EXE_OPT) -f $(FUNC_LIST) -s bmi2 > $@
+
+src/base64m.ll: $(GEN_EXE)
+ $(GEN_EXE) $(GEN_EXE_OPT) -wasm > $@
+
+$(FUNC_LIST): $(LOW_ASM_SRC)
+ifeq ($(USE_LOW_ASM),1)
+ $(shell awk '/global/ { print $$2}' $(LOW_ASM_SRC) > $(FUNC_LIST))
+ $(shell awk '/proc/ { print $$2}' $(LOW_ASM_SRC) >> $(FUNC_LIST))
+else
+ $(shell touch $(FUNC_LIST))
+endif
+
+$(GEN_EXE): src/gen.cpp src/llvm_gen.hpp
+ $(CXX) -o $@ $< $(CFLAGS)
+
+asm: $(LLVM_SRC)
+ $(LLVM_OPT) -O3 -o - $(LLVM_SRC) | $(LLVM_LLC) -O3 $(LLVM_FLAGS) -x86-asm-syntax=intel
+
+$(LOW_ASM_OBJ): $(LOW_ASM_SRC)
+ $(ASM) $<
+
+# set PATH for mingw, set LD_LIBRARY_PATH is for other env
+COMMON_LIB_PATH="../../../lib"
+PATH_VAL=$$PATH:$(COMMON_LIB_PATH) LD_LIBRARY_PATH=$(COMMON_LIB_PATH) DYLD_LIBRARY_PATH=$(COMMON_LIB_PATH) CGO_CFLAGS="-I$(shell pwd)/include" CGO_LDFLAGS="-L../../../lib"
+test_go256: $(MCL_SLIB) $(BN256_SLIB)
+ cd ffi/go/mcl && env PATH=$(PATH_VAL) go test -tags bn256 .
+
+test_go384: $(MCL_SLIB) $(BN384_SLIB)
+ cd ffi/go/mcl && env PATH=$(PATH_VAL) go test -tags bn384 .
+
+test_go384_256: $(MCL_SLIB) $(BN384_256_SLIB)
+ cd ffi/go/mcl && env PATH=$(PATH_VAL) go test -tags bn384_256 .
+
+test_go:
+ $(MAKE) test_go256
+ $(MAKE) test_go384
+ $(MAKE) test_go384_256
+
+test_python_she: $(SHE256_SLIB)
+ cd ffi/python && env LD_LIBRARY_PATH="../../lib" DYLD_LIBRARY_PATH="../../lib" PATH=$$PATH:"../../lib" python3 she.py
+test_python:
+ $(MAKE) test_python_she
+
+test_java:
+ $(MAKE) -C ffi/java test
+
+##################################################################
+
+VPATH=test sample src
+
+.SUFFIXES: .cpp .d .exe .c .o
+
+$(OBJ_DIR)/%.o: %.cpp
+ $(PRE)$(CXX) $(CFLAGS) -c $< -o $@ -MMD -MP -MF $(@:.o=.d)
+
+$(OBJ_DIR)/%.o: %.c
+ $(PRE)$(CC) $(CFLAGS) -c $< -o $@ -MMD -MP -MF $(@:.o=.d)
+
+$(EXE_DIR)/%.exe: $(OBJ_DIR)/%.o $(MCL_LIB)
+ $(PRE)$(CXX) $< -o $@ $(MCL_LIB) $(LDFLAGS)
+
+$(EXE_DIR)/bn_c256_test.exe: $(OBJ_DIR)/bn_c256_test.o $(BN256_LIB) $(MCL_LIB)
+ $(PRE)$(CXX) $< -o $@ $(BN256_LIB) $(MCL_LIB) $(LDFLAGS)
+
+$(EXE_DIR)/bn_c384_test.exe: $(OBJ_DIR)/bn_c384_test.o $(BN384_LIB) $(MCL_LIB)
+ $(PRE)$(CXX) $< -o $@ $(BN384_LIB) $(MCL_LIB) $(LDFLAGS)
+
+$(EXE_DIR)/bn_c384_256_test.exe: $(OBJ_DIR)/bn_c384_256_test.o $(BN384_256_LIB) $(MCL_LIB)
+ $(PRE)$(CXX) $< -o $@ $(BN384_256_LIB) $(MCL_LIB) $(LDFLAGS)
+
+$(EXE_DIR)/bn_c512_test.exe: $(OBJ_DIR)/bn_c512_test.o $(BN512_LIB) $(MCL_LIB)
+ $(PRE)$(CXX) $< -o $@ $(BN512_LIB) $(MCL_LIB) $(LDFLAGS)
+
+$(EXE_DIR)/pairing_c.exe: $(OBJ_DIR)/pairing_c.o $(BN256_LIB) $(MCL_LIB)
+ $(PRE)$(CC) $< -o $@ $(BN256_LIB) $(MCL_LIB) $(LDFLAGS) -lstdc++
+
+$(EXE_DIR)/she_c256_test.exe: $(OBJ_DIR)/she_c256_test.o $(SHE256_LIB) $(MCL_LIB)
+ $(PRE)$(CXX) $< -o $@ $(SHE256_LIB) $(MCL_LIB) $(LDFLAGS)
+
+$(EXE_DIR)/she_c384_test.exe: $(OBJ_DIR)/she_c384_test.o $(SHE384_LIB) $(MCL_LIB)
+ $(PRE)$(CXX) $< -o $@ $(SHE384_LIB) $(MCL_LIB) $(LDFLAGS)
+
+$(EXE_DIR)/ecdsa_c_test.exe: $(OBJ_DIR)/ecdsa_c_test.o $(ECDSA_LIB) $(MCL_LIB) src/ecdsa_c.cpp include/mcl/ecdsa.hpp include/mcl/ecdsa.h
+ $(PRE)$(CXX) $< -o $@ $(ECDSA_LIB) $(MCL_LIB) $(LDFLAGS)
+
+$(OBJ_DIR)/modp_test.o: test/modp_test.cpp
+ $(PRE)$(CXX) -c $< -o $@ -MMD -MP -MF $(@:.o=.d) -DMCL_USE_VINT -DMCL_MAX_BIT_SIZE=384 -DMCL_VINT_64BIT_PORTABLE -DMCL_SIZEOF_UNIT=8 -DMCL_VINT_FIXED_BUFFER -I./include -O2 $(CFLAGS_WARN)
+
+$(EXE_DIR)/modp_test.exe: $(OBJ_DIR)/modp_test.o
+ $(PRE)$(CXX) $< -o $@
+
+SAMPLE_EXE=$(addprefix $(EXE_DIR)/,$(addsuffix .exe,$(basename $(SAMPLE_SRC))))
+sample: $(SAMPLE_EXE) $(MCL_LIB)
+
+TEST_EXE=$(addprefix $(EXE_DIR)/,$(TEST_SRC:.cpp=.exe))
+test_ci: $(TEST_EXE)
+ @sh -ec 'for i in $(TEST_EXE); do echo $$i; env LSAN_OPTIONS=verbosity=1:log_threads=1 $$i; done'
+test: $(TEST_EXE)
+ @echo test $(TEST_EXE)
+ @sh -ec 'for i in $(TEST_EXE); do $$i|grep "ctest:name"; done' > result.txt
+ @grep -v "ng=0, exception=0" result.txt; if [ $$? -eq 1 ]; then echo "all unit tests succeed"; else exit 1; fi
+
+EMCC_OPT=-I./include -I./src -Wall -Wextra
+EMCC_OPT+=-O3 -DNDEBUG -DMCLSHE_WIN_SIZE=8
+EMCC_OPT+=-s WASM=1 -s NO_EXIT_RUNTIME=1 -s MODULARIZE=1 #-s ASSERTIONS=1
+EMCC_OPT+=-DCYBOZU_MINIMUM_EXCEPTION
+EMCC_OPT+=-s ABORTING_MALLOC=0
+SHE_C_DEP=src/fp.cpp src/she_c_impl.hpp include/mcl/she.hpp include/mcl/fp.hpp include/mcl/op.hpp include/mcl/she.h Makefile
+MCL_C_DEP=src/fp.cpp include/mcl/impl/bn_c_impl.hpp include/mcl/bn.hpp include/mcl/fp.hpp include/mcl/op.hpp include/mcl/bn.h Makefile
+ifeq ($(MCL_USE_LLVM),2)
+ EMCC_OPT+=src/base64m.ll -DMCL_USE_LLVM
+ SHE_C_DEP+=src/base64m.ll
+endif
+../she-wasm/she_c.js: src/she_c256.cpp $(SHE_C_DEP)
+ emcc -o $@ src/fp.cpp src/she_c256.cpp $(EMCC_OPT) -DMCL_MAX_BIT_SIZE=256 -s TOTAL_MEMORY=67108864 -s DISABLE_EXCEPTION_CATCHING=0
+
+../she-wasm/she_c384.js: src/she_c384.cpp $(SHE_C_DEP)
+ emcc -o $@ src/fp.cpp src/she_c384.cpp $(EMCC_OPT) -DMCL_MAX_BIT_SIZE=384 -s TOTAL_MEMORY=67108864 -s DISABLE_EXCEPTION_CATCHING=0
+
+../mcl-wasm/mcl_c.js: src/bn_c256.cpp $(MCL_C_DEP)
+ emcc -o $@ src/fp.cpp src/bn_c256.cpp $(EMCC_OPT) -DMCL_MAX_BIT_SIZE=256 -DMCL_USE_WEB_CRYPTO_API -s DISABLE_EXCEPTION_CATCHING=1 -DCYBOZU_DONT_USE_EXCEPTION -DCYBOZU_DONT_USE_STRING -fno-exceptions -MD -MP -MF obj/mcl_c.d
+
+../mcl-wasm/mcl_c512.js: src/bn_c512.cpp $(MCL_C_DEP)
+ emcc -o $@ src/fp.cpp src/bn_c512.cpp $(EMCC_OPT) -DMCL_MAX_BIT_SIZE=512 -DMCL_USE_WEB_CRYPTO_API -s DISABLE_EXCEPTION_CATCHING=1 -DCYBOZU_DONT_USE_EXCEPTION -DCYBOZU_DONT_USE_STRING -fno-exceptions
+
+../ecdsa-wasm/ecdsa_c.js: src/ecdsa_c.cpp src/fp.cpp include/mcl/ecdsa.hpp include/mcl/ecdsa.h Makefile
+ emcc -o $@ src/fp.cpp src/ecdsa_c.cpp $(EMCC_OPT) -DMCL_MAX_BIT_SIZE=256 -DMCL_USE_WEB_CRYPTO_API -s DISABLE_EXCEPTION_CATCHING=1 -DCYBOZU_DONT_USE_EXCEPTION -DCYBOZU_DONT_USE_STRING -fno-exceptions
+
+mcl-wasm:
+ $(MAKE) ../mcl-wasm/mcl_c.js
+ $(MAKE) ../mcl-wasm/mcl_c512.js
+
+she-wasm:
+ $(MAKE) ../she-wasm/she_c.js
+ $(MAKE) ../she-wasm/she_c384.js
+
+ecdsa-wasm:
+ $(MAKE) ../ecdsa-wasm/ecdsa_c.js
+
+# test
+bin/emu:
+ $(CXX) -g -o $@ src/fp.cpp src/bn_c256.cpp test/bn_c256_test.cpp -DMCL_DONT_USE_XBYAK -DMCL_DONT_USE_OPENSSL -DMCL_USE_VINT -DMCL_SIZEOF_UNIT=8 -DMCL_VINT_64BIT_PORTABLE -DMCL_VINT_FIXED_BUFFER -DMCL_MAX_BIT_SIZE=256 -I./include
+bin/pairing_c_min.exe: sample/pairing_c.c include/mcl/vint.hpp src/fp.cpp include/mcl/bn.hpp
+# $(CXX) -o $@ sample/pairing_c.c src/fp.cpp src/bn_c256.cpp -O2 -g -I./include -fno-exceptions -fno-rtti -fno-threadsafe-statics -DMCL_DONT_USE_XBYAK -DMCL_DONT_USE_OPENSSL -DMCL_USE_VINT -DMCL_SIZEOF_UNIT=8 -DMCL_VINT_FIXED_BUFFER -DCYBOZU_DONT_USE_EXCEPTION -DCYBOZU_DONT_USE_STRING -DMCL_DONT_USE_CSPRNG -DMCL_MAX_BIT_SIZE=256 -DMCL_VINT_64BIT_PORTABLE -DNDEBUG -pg
+ $(CXX) -o $@ sample/pairing_c.c src/fp.cpp src/bn_c256.cpp -O2 -g -I./include -fno-threadsafe-statics -DMCL_DONT_USE_XBYAK -DMCL_DONT_USE_OPENSSL -DMCL_USE_VINT -DMCL_SIZEOF_UNIT=8 -DMCL_VINT_FIXED_BUFFER -DMCL_DONT_USE_CSPRNG -DMCL_MAX_BIT_SIZE=256 -DMCL_VINT_64BIT_PORTABLE -DNDEBUG
+
+make_tbl:
+ $(MAKE) ../bls/src/qcoeff-bn254.hpp
+
+../bls/src/qcoeff-bn254.hpp: $(MCL_LIB) misc/precompute.cpp
+ $(CXX) -o misc/precompute misc/precompute.cpp $(CFLAGS) $(MCL_LIB) $(LDFLAGS)
+ ./misc/precompute > ../bls/src/qcoeff-bn254.hpp
+
+update_xbyak:
+ cp -a ../xbyak/xbyak/xbyak.h ../xbyak/xbyak/xbyak_util.h ../xbyak/xbyak/xbyak_mnemonic.h src/xbyak/
+
+update_cybozulib:
+ cp -a $(addprefix ../cybozulib/,$(wildcard include/cybozu/*.hpp)) include/cybozu/
+
+clean:
+ $(RM) $(LIB_DIR)/*.a $(LIB_DIR)/*.$(LIB_SUF) $(OBJ_DIR)/*.o $(OBJ_DIR)/*.obj $(OBJ_DIR)/*.d $(EXE_DIR)/*.exe $(GEN_EXE) $(ASM_OBJ) $(LIB_OBJ) $(BN256_OBJ) $(BN384_OBJ) $(BN512_OBJ) $(LLVM_SRC) $(FUNC_LIST) src/*.ll lib/*.a
+
+ALL_SRC=$(SRC_SRC) $(TEST_SRC) $(SAMPLE_SRC)
+DEPEND_FILE=$(addprefix $(OBJ_DIR)/, $(addsuffix .d,$(basename $(ALL_SRC))))
+-include $(DEPEND_FILE)
+
+PREFIX?=/usr/local
+install: lib/libmcl.a lib/libmcl.$(LIB_SUF)
+ $(MKDIR) $(PREFIX)/include/mcl
+ cp -a include/mcl/ $(PREFIX)/include/
+ cp -a include/cybozu/ $(PREFIX)/include/
+ $(MKDIR) $(PREFIX)/lib
+ cp -a lib/libmcl.a lib/libmcl.$(LIB_SUF) $(PREFIX)/lib/
+
+.PHONY: test mcl-wasm she-wasm bin/emu
+
+# don't remove these files automatically
+.SECONDARY: $(addprefix $(OBJ_DIR)/, $(ALL_SRC:.cpp=.o))
+
diff --git a/vendor/github.com/tangerine-network/mcl/bench.txt b/vendor/github.com/tangerine-network/mcl/bench.txt
new file mode 100644
index 000000000..35e47dca5
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/bench.txt
@@ -0,0 +1,114 @@
+-----------------------------------------------------------------------------
+Core i7-7700@3.6GHz Firefox 58.0.1(64-bit)
+ BN254 Fp381_1 Fp462
+op msec
+Fr::setByCSPRNG 0.022 0.021 0.019
+pairing 2.446 7.353 14.596
+millerLoop 1.467 4.219 8.598
+finalExp 0.97 3.127 6.005
+precomputedMillerLoop 1.087 3.171 6.305
+G1::add 0.007 0.01 0.014
+G1::dbl 0.005 0.007 0.011
+G1::mul 0.479 1.529 3.346
+G2::add 0.013 0.022 0.033
+G2::dbl 0.01 0.016 0.025
+G2::mul 0.989 2.955 5.921
+hashAndMapToG1 0.135 0.309 0.76
+hashAndMapToG2 2.14 6.44 14.249
+Fr::add 0.004 0.003 0.003
+Fr::mul 0.004 0.004 0.005
+Fr::sqr 0.003 0.003 0.004
+Fr::inv 0.025 0.038 0.05
+GT::add 0.005 0.004 0.005
+GT::mul 0.016 0.027 0.041
+GT::sqr 0.012 0.018 0.028
+GT::inv 0.051 0.081 0.122
+
+-----------------------------------------------------------------------------
+iPhone7 iOS 11.2.1 Safari/604.1
+ BN254 Fp381_1 Fp462
+op msec
+Fr::setByCSPRNG 0.041 0.038 0.154
+pairing 3.9 11.752 22.578
+millerLoop 2.29 6.55 13.067
+finalExp 1.501 4.741 9.016
+precomputedMillerLoop 1.675 4.818 9.492
+G1::add 0.006 0.015 0.018
+G1::dbl 0.005 0.01 0.019
+G1::mul 0.843 2.615 5.339
+G2::add 0.015 0.03 0.048
+G2::dbl 0.011 0.022 0.034
+G2::mul 1.596 4.581 9.077
+hashAndMapToG1 0.212 0.507 1.201
+hashAndMapToG2 3.486 9.93 21.817
+Fr::add 0.002 0.002 0.002
+Fr::mul 0.002 0.003 0.003
+Fr::sqr 0.002 0.003 0.004
+Fr::inv 0.037 0.062 0.078
+GT::add 0.003 0.003 0.003
+GT::mul 0.021 0.037 0.058
+GT::sqr 0.014 0.026 0.04
+GT::inv 0.074 0.131 0.19
+-----------------------------------------------------------------------------
+Core i7-7700@3.6GHz Linux gcc.5.4.0
+
+ BN254 Fp381_1 Fp462
+G1::mulCT 202.807Kclk 597.410Kclk 1.658Mclk
+G1::mulCTsmall 200.968Kclk 596.074Kclk 1.650Mclk
+G1::mul 185.935Kclk 555.147Kclk 1.495Mclk
+G1::mulsmall 1.856Kclk 3.740Kclk 8.054Kclk
+G1::add 866.89 clk 1.710Kclk 3.663Kclk
+G1::dbl 798.60 clk 1.770Kclk 3.755Kclk
+G2::mulCT 391.655Kclk 1.351Mclk 3.102Mclk
+G2::mulCTsmall 369.134Kclk 1.358Mclk 3.105Mclk
+G2::mul 400.098Kclk 1.277Mclk 3.009Mclk
+G2::mulsmall 5.774Kclk 12.806Kclk 25.374Kclk
+G2::add 2.696Kclk 7.547Kclk 14.683Kclk
+G2::dbl 2.600Kclk 5.366Kclk 10.436Kclk
+GT::pow 727.157Kclk 1.991Mclk 4.364Mclk
+hashAndMapToG1 27.953Kclk 87.291Kclk 200.972Kclk
+hashAndMapToG2 775.186Kclk 2.629Mclk 6.937Mclk
+Fp::add 11.48 clk 69.54 clk 21.36 clk
+Fp::mul 63.11 clk 134.90 clk 303.75 clk
+Fp::sqr 64.39 clk 134.29 clk 305.38 clk
+Fp::inv 2.302Kclk 4.185Kclk 5.485Kclk
+GT::add 180.93 clk 247.70 clk 256.55 clk
+GT::mul 5.278Kclk 10.887Kclk 19.844Kclk
+GT::sqr 3.666Kclk 7.444Kclk 13.694Kclk
+GT::inv 11.322Kclk 22.480Kclk 41.796Kclk
+pairing 1.044Mclk 3.445Mclk 7.789Mclk
+millerLoop 634.214Kclk 1.913Mclk 4.466Mclk
+finalExp 423.413Kclk 1.535Mclk 3.328Mclk
+precomputedML 479.849Kclk 1.461Mclk 3.299Mclk
+-----------------------------------------------------------------------------
+
+1.2GHz ARM Cortex-A53 [HiKey] Linux gcc 4.9.2
+
+ BN254 Fp381_1 Fp462
+G1::mulCT 858.149usec 2.780msec 8.507msec
+G1::mulCTsmall 854.535usec 2.773msec 8.499msec
+G1::mul 743.100usec 2.484msec 7.536msec
+G1::mulsmall 7.680usec 16.528usec 41.818usec
+G1::add 3.347usec 7.363usec 18.544usec
+G1::dbl 3.294usec 7.351usec 18.472usec
+G2::mulCT 1.627msec 5.083msec 12.142msec
+G2::mulCTsmall 1.534msec 5.124msec 12.125msec
+G2::mul 1.677msec 4.806msec 11.757msec
+G2::mulsmall 23.581usec 48.504usec 96.780usec
+G2::add 10.751usec 27.759usec 54.392usec
+G2::dbl 10.076usec 20.625usec 42.032usec
+GT::pow 2.662msec 7.091msec 14.042msec
+hashAndMapToG1 111.256usec 372.665usec 1.031msec
+hashAndMapToG2 3.199msec 10.168msec 27.391msec
+Fp::add 27.19nsec 38.02nsec 45.68nsec
+Fp::mul 279.17nsec 628.44nsec 1.662usec
+Fp::sqr 276.56nsec 651.67nsec 1.675usec
+Fp::inv 9.743usec 14.364usec 18.116usec
+GT::add 373.18nsec 530.62nsec 625.26nsec
+GT::mul 19.557usec 38.623usec 63.111usec
+GT::sqr 13.345usec 26.218usec 43.008usec
+GT::inv 44.119usec 84.581usec 153.046usec
+pairing 3.913msec 12.606msec 26.818msec
+millerLoop 2.402msec 7.202msec 15.711msec
+finalExp 1.506msec 5.395msec 11.098msec
+precomputedML 1.815msec 5.447msec 11.094msec
diff --git a/vendor/github.com/tangerine-network/mcl/common.mk b/vendor/github.com/tangerine-network/mcl/common.mk
new file mode 100644
index 000000000..5c749e1a6
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/common.mk
@@ -0,0 +1,117 @@
+GCC_VER=$(shell $(PRE)$(CC) -dumpversion)
+UNAME_S=$(shell uname -s)
+ifeq ($(UNAME_S),Linux)
+ OS=Linux
+endif
+ifeq ($(findstring MINGW64,$(UNAME_S)),MINGW64)
+ OS=mingw64
+ CFLAGS+=-D__USE_MINGW_ANSI_STDIO=1
+endif
+ifeq ($(findstring CYGWIN,$(UNAME_S)),CYGWIN)
+ OS=cygwin
+endif
+ifeq ($(UNAME_S),Darwin)
+ OS=mac
+ ARCH=x86_64
+ LIB_SUF=dylib
+ OPENSSL_DIR?=/usr/local/opt/openssl
+ CFLAGS+=-I$(OPENSSL_DIR)/include
+ LDFLAGS+=-L$(OPENSSL_DIR)/lib
+ GMP_DIR?=/usr/local/opt/gmp
+ CFLAGS+=-I$(GMP_DIR)/include
+ LDFLAGS+=-L$(GMP_DIR)/lib
+else
+ LIB_SUF=so
+endif
+ARCH?=$(shell uname -m)
+ifneq ($(findstring $(ARCH),x86_64/amd64),)
+ CPU=x86-64
+ INTEL=1
+ ifeq ($(findstring $(OS),mingw64/cygwin),)
+ GCC_EXT=1
+ endif
+ BIT=64
+ BIT_OPT=-m64
+ #LOW_ASM_SRC=src/asm/low_x86-64.asm
+ #ASM=nasm -felf64
+endif
+ifeq ($(ARCH),x86)
+ CPU=x86
+ INTEL=1
+ BIT=32
+ BIT_OPT=-m32
+ #LOW_ASM_SRC=src/asm/low_x86.asm
+endif
+ifeq ($(ARCH),armv7l)
+ CPU=arm
+ BIT=32
+ #LOW_ASM_SRC=src/asm/low_arm.s
+endif
+ifeq ($(ARCH),aarch64)
+ CPU=aarch64
+ BIT=64
+endif
+ifeq ($(findstring $(OS),mac/mingw64),)
+ LDFLAGS+=-lrt
+endif
+
+CP=cp -f
+AR=ar r
+MKDIR=mkdir -p
+RM=rm -rf
+
+ifeq ($(DEBUG),1)
+ ifeq ($(GCC_EXT),1)
+ CFLAGS+=-fsanitize=address
+ LDFLAGS+=-fsanitize=address
+ endif
+else
+ CFLAGS_OPT+=-fomit-frame-pointer -DNDEBUG
+ ifeq ($(CXX),clang++)
+ CFLAGS_OPT+=-O3
+ else
+ ifeq ($(shell expr $(GCC_VER) \> 4.6.0),1)
+ CFLAGS_OPT+=-Ofast
+ else
+ CFLAGS_OPT+=-O3
+ endif
+ endif
+ ifeq ($(MARCH),)
+ ifeq ($(INTEL),1)
+# CFLAGS_OPT+=-march=native
+ endif
+ else
+ CFLAGS_OPT+=$(MARCH)
+ endif
+endif
+CFLAGS_WARN=-Wall -Wextra -Wformat=2 -Wcast-qual -Wcast-align -Wwrite-strings -Wfloat-equal -Wpointer-arith
+CFLAGS+=-g3
+INC_OPT=-I include -I test
+CFLAGS+=$(CFLAGS_WARN) $(BIT_OPT) $(INC_OPT)
+DEBUG=0
+CFLAGS_OPT_USER?=$(CFLAGS_OPT)
+ifeq ($(DEBUG),0)
+CFLAGS+=$(CFLAGS_OPT_USER)
+endif
+CFLAGS+=$(CFLAGS_USER)
+MCL_USE_GMP?=1
+MCL_USE_OPENSSL?=1
+ifeq ($(MCL_USE_GMP),0)
+ CFLAGS+=-DMCL_USE_VINT
+endif
+ifneq ($(MCL_SIZEOF_UNIT),)
+ CFLAGS+=-DMCL_SIZEOF_UNIT=$(MCL_SIZEOF_UNIT)
+endif
+ifeq ($(MCL_USE_OPENSSL),0)
+ CFLAGS+=-DMCL_DONT_USE_OPENSSL
+endif
+ifeq ($(MCL_USE_GMP),1)
+ GMP_LIB=-lgmp -lgmpxx
+endif
+ifeq ($(MCL_USE_OPENSSL),1)
+ OPENSSL_LIB=-lcrypto
+endif
+LDFLAGS+=$(GMP_LIB) $(OPENSSL_LIB) $(BIT_OPT) $(LDFLAGS_USER)
+
+CFLAGS+=-fPIC
+
diff --git a/vendor/github.com/tangerine-network/mcl/common.props b/vendor/github.com/tangerine-network/mcl/common.props
new file mode 100644
index 000000000..912f39e30
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/common.props
@@ -0,0 +1,26 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ImportGroup Label="PropertySheets" />
+ <PropertyGroup Label="UserMacros" />
+ <PropertyGroup>
+ <OutDir>$(SolutionDir)bin\</OutDir>
+ </PropertyGroup>
+ <ItemDefinitionGroup>
+ <ClCompile>
+ <AdditionalIncludeDirectories>$(SolutionDir)../cybozulib/include;$(SolutionDir)../cybozulib_ext/include;$(SolutionDir)include;$(SolutionDir)../xbyak</AdditionalIncludeDirectories>
+ </ClCompile>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup>
+ <ClCompile>
+ <WarningLevel>Level4</WarningLevel>
+ <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
+ <PrecompiledHeaderFile />
+ <PrecompiledHeaderOutputFile />
+ <PreprocessorDefinitions>_MBCS;%(PreprocessorDefinitions);NOMINMAX</PreprocessorDefinitions>
+ </ClCompile>
+ <Link>
+ <AdditionalLibraryDirectories>$(SolutionDir)../cybozulib_ext/lib;$(SolutionDir)lib</AdditionalLibraryDirectories>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemGroup />
+</Project>
diff --git a/vendor/github.com/tangerine-network/mcl/debug.props b/vendor/github.com/tangerine-network/mcl/debug.props
new file mode 100644
index 000000000..1553ae0dc
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/debug.props
@@ -0,0 +1,14 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ImportGroup Label="PropertySheets" />
+ <PropertyGroup Label="UserMacros" />
+ <PropertyGroup>
+ <TargetName>$(ProjectName)d</TargetName>
+ </PropertyGroup>
+ <ItemDefinitionGroup>
+ <ClCompile>
+ <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
+ </ClCompile>
+ </ItemDefinitionGroup>
+ <ItemGroup />
+</Project>
\ No newline at end of file
diff --git a/vendor/github.com/tangerine-network/mcl/ffi/cs/App.config b/vendor/github.com/tangerine-network/mcl/ffi/cs/App.config
new file mode 100644
index 000000000..88fa4027b
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/ffi/cs/App.config
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<configuration>
+ <startup>
+ <supportedRuntime version="v4.0" sku=".NETFramework,Version=v4.5.2" />
+ </startup>
+</configuration>
\ No newline at end of file
diff --git a/vendor/github.com/tangerine-network/mcl/ffi/cs/Properties/AssemblyInfo.cs b/vendor/github.com/tangerine-network/mcl/ffi/cs/Properties/AssemblyInfo.cs
new file mode 100644
index 000000000..c87e1d44b
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/ffi/cs/Properties/AssemblyInfo.cs
@@ -0,0 +1,36 @@
+using System.Reflection;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
+
+// アセンブリに関する一般情報は以下の属性セットをとおして制御されます。
+// アセンブリに関連付けられている情報を変更するには、
+// これらの属性値を変更してください。
+[assembly: AssemblyTitle("bn256")]
+[assembly: AssemblyDescription("")]
+[assembly: AssemblyConfiguration("")]
+[assembly: AssemblyCompany("")]
+[assembly: AssemblyProduct("bn256")]
+[assembly: AssemblyCopyright("Copyright © 2017")]
+[assembly: AssemblyTrademark("")]
+[assembly: AssemblyCulture("")]
+
+// ComVisible を false に設定すると、その型はこのアセンブリ内で COM コンポーネントから
+// 参照不可能になります。COM からこのアセンブリ内の型にアクセスする場合は、
+// その型の ComVisible 属性を true に設定してください。
+[assembly: ComVisible(false)]
+
+// このプロジェクトが COM に公開される場合、次の GUID が typelib の ID になります
+[assembly: Guid("e9d06b1b-ea22-4ef4-ba4b-422f7625966b")]
+
+// アセンブリのバージョン情報は次の 4 つの値で構成されています:
+//
+// メジャー バージョン
+// マイナー バージョン
+// ビルド番号
+// Revision
+//
+// すべての値を指定するか、下のように '*' を使ってビルドおよびリビジョン番号を
+// 既定値にすることができます:
+// [assembly: AssemblyVersion("1.0.*")]
+[assembly: AssemblyVersion("1.0.0.0")]
+[assembly: AssemblyFileVersion("1.0.0.0")]
diff --git a/vendor/github.com/tangerine-network/mcl/ffi/cs/bn256.cs b/vendor/github.com/tangerine-network/mcl/ffi/cs/bn256.cs
new file mode 100644
index 000000000..0e1ed032c
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/ffi/cs/bn256.cs
@@ -0,0 +1,475 @@
+using System;
+using System.Text;
+using System.Runtime.InteropServices;
+
+namespace mcl {
+ public class BN256 {
+ [DllImport("mclBn256.dll")]
+ public static extern int mclBn_init(int curve, int maxUnitSize);
+ [DllImport("mclBn256.dll")]
+ public static extern void mclBnFr_clear(ref Fr x);
+ [DllImport("mclBn256.dll")]
+ public static extern void mclBnFr_setInt(ref Fr y, int x);
+ [DllImport("mclBn256.dll")]
+ public static extern int mclBnFr_setStr(ref Fr x, [In][MarshalAs(UnmanagedType.LPStr)] string buf, long bufSize, int ioMode);
+ [DllImport("mclBn256.dll")]
+ public static extern int mclBnFr_isValid(ref Fr x);
+ [DllImport("mclBn256.dll")]
+ public static extern int mclBnFr_isEqual(ref Fr x, ref Fr y);
+ [DllImport("mclBn256.dll")]
+ public static extern int mclBnFr_isZero(ref Fr x);
+ [DllImport("mclBn256.dll")]
+ public static extern int mclBnFr_isOne(ref Fr x);
+ [DllImport("mclBn256.dll")]
+ public static extern void mclBnFr_setByCSPRNG(ref Fr x);
+
+ [DllImport("mclBn256.dll")]
+ public static extern int mclBnFr_setHashOf(ref Fr x, [In][MarshalAs(UnmanagedType.LPStr)] string buf, long bufSize);
+ [DllImport("mclBn256.dll")]
+ public static extern int mclBnFr_getStr([Out]StringBuilder buf, long maxBufSize, ref Fr x, int ioMode);
+
+ [DllImport("mclBn256.dll")]
+ public static extern void mclBnFr_neg(ref Fr y, ref Fr x);
+ [DllImport("mclBn256.dll")]
+ public static extern void mclBnFr_inv(ref Fr y, ref Fr x);
+ [DllImport("mclBn256.dll")]
+ public static extern void mclBnFr_add(ref Fr z, ref Fr x, ref Fr y);
+ [DllImport("mclBn256.dll")]
+ public static extern void mclBnFr_sub(ref Fr z, ref Fr x, ref Fr y);
+ [DllImport("mclBn256.dll")]
+ public static extern void mclBnFr_mul(ref Fr z, ref Fr x, ref Fr y);
+ [DllImport("mclBn256.dll")]
+ public static extern void mclBnFr_div(ref Fr z, ref Fr x, ref Fr y);
+
+ [DllImport("mclBn256.dll")]
+ public static extern void mclBnG1_clear(ref G1 x);
+ [DllImport("mclBn256.dll")]
+ public static extern int mclBnG1_setStr(ref G1 x, [In][MarshalAs(UnmanagedType.LPStr)] string buf, long bufSize, int ioMode);
+ [DllImport("mclBn256.dll")]
+ public static extern int mclBnG1_isValid(ref G1 x);
+ [DllImport("mclBn256.dll")]
+ public static extern int mclBnG1_isEqual(ref G1 x, ref G1 y);
+ [DllImport("mclBn256.dll")]
+ public static extern int mclBnG1_isZero(ref G1 x);
+ [DllImport("mclBn256.dll")]
+ public static extern int mclBnG1_hashAndMapTo(ref G1 x, [In][MarshalAs(UnmanagedType.LPStr)] string buf, long bufSize);
+ [DllImport("mclBn256.dll")]
+ public static extern long mclBnG1_getStr([Out]StringBuilder buf, long maxBufSize, ref G1 x, int ioMode);
+ [DllImport("mclBn256.dll")]
+ public static extern void mclBnG1_neg(ref G1 y, ref G1 x);
+ [DllImport("mclBn256.dll")]
+ public static extern void mclBnG1_dbl(ref G1 y, ref G1 x);
+ [DllImport("mclBn256.dll")]
+ public static extern void mclBnG1_add(ref G1 z, ref G1 x, ref G1 y);
+ [DllImport("mclBn256.dll")]
+ public static extern void mclBnG1_sub(ref G1 z, ref G1 x, ref G1 y);
+ [DllImport("mclBn256.dll")]
+ public static extern void mclBnG1_mul(ref G1 z, ref G1 x, ref Fr y);
+
+ [DllImport("mclBn256.dll")]
+ public static extern void mclBnG2_clear(ref G2 x);
+ [DllImport("mclBn256.dll")]
+ public static extern int mclBnG2_setStr(ref G2 x, [In][MarshalAs(UnmanagedType.LPStr)] string buf, long bufSize, int ioMode);
+ [DllImport("mclBn256.dll")]
+ public static extern int mclBnG2_isValid(ref G2 x);
+ [DllImport("mclBn256.dll")]
+ public static extern int mclBnG2_isEqual(ref G2 x, ref G2 y);
+ [DllImport("mclBn256.dll")]
+ public static extern int mclBnG2_isZero(ref G2 x);
+ [DllImport("mclBn256.dll")]
+ public static extern int mclBnG2_hashAndMapTo(ref G2 x, [In][MarshalAs(UnmanagedType.LPStr)] string buf, long bufSize);
+ [DllImport("mclBn256.dll")]
+ public static extern long mclBnG2_getStr([Out]StringBuilder buf, long maxBufSize, ref G2 x, int ioMode);
+ [DllImport("mclBn256.dll")]
+ public static extern void mclBnG2_neg(ref G2 y, ref G2 x);
+ [DllImport("mclBn256.dll")]
+ public static extern void mclBnG2_dbl(ref G2 y, ref G2 x);
+ [DllImport("mclBn256.dll")]
+ public static extern void mclBnG2_add(ref G2 z, ref G2 x, ref G2 y);
+ [DllImport("mclBn256.dll")]
+ public static extern void mclBnG2_sub(ref G2 z, ref G2 x, ref G2 y);
+ [DllImport("mclBn256.dll")]
+ public static extern void mclBnG2_mul(ref G2 z, ref G2 x, ref Fr y);
+
+ [DllImport("mclBn256.dll")]
+ public static extern void mclBnGT_clear(ref GT x);
+ [DllImport("mclBn256.dll")]
+ public static extern int mclBnGT_setStr(ref GT x, [In][MarshalAs(UnmanagedType.LPStr)] string buf, long bufSize, int ioMode);
+ [DllImport("mclBn256.dll")]
+ public static extern int mclBnGT_isEqual(ref GT x, ref GT y);
+ [DllImport("mclBn256.dll")]
+ public static extern int mclBnGT_isZero(ref GT x);
+ [DllImport("mclBn256.dll")]
+ public static extern int mclBnGT_isOne(ref GT x);
+ [DllImport("mclBn256.dll")]
+ public static extern long mclBnGT_getStr([Out]StringBuilder buf, long maxBufSize, ref GT x, int ioMode);
+ [DllImport("mclBn256.dll")]
+ public static extern void mclBnGT_neg(ref GT y, ref GT x);
+ [DllImport("mclBn256.dll")]
+ public static extern void mclBnGT_inv(ref GT y, ref GT x);
+ [DllImport("mclBn256.dll")]
+ public static extern void mclBnGT_add(ref GT z, ref GT x, ref GT y);
+ [DllImport("mclBn256.dll")]
+ public static extern void mclBnGT_sub(ref GT z, ref GT x, ref GT y);
+ [DllImport("mclBn256.dll")]
+ public static extern void mclBnGT_mul(ref GT z, ref GT x, ref GT y);
+ [DllImport("mclBn256.dll")]
+ public static extern void mclBnGT_div(ref GT z, ref GT x, ref GT y);
+
+ [DllImport("mclBn256.dll")]
+ public static extern void mclBnGT_pow(ref GT z, ref GT x, ref Fr y);
+ [DllImport("mclBn256.dll")]
+ public static extern void mclBn_pairing(ref GT z, ref G1 x, ref G2 y);
+ [DllImport("mclBn256.dll")]
+ public static extern void mclBn_finalExp(ref GT y, ref GT x);
+ [DllImport("mclBn256.dll")]
+ public static extern void mclBn_millerLoop(ref GT z, ref G1 x, ref G2 y);
+
+ public static void init()
+ {
+ const int curveFp254BNb = 0;
+ const int maxUnitSize = 4;
+ if (mclBn_init(curveFp254BNb, maxUnitSize) != 0) {
+ throw new InvalidOperationException("mclBn_init");
+ }
+ }
+ [StructLayout(LayoutKind.Sequential)]
+ public struct Fr {
+ private ulong v0, v1, v2, v3;
+ public void Clear()
+ {
+ mclBnFr_clear(ref this);
+ }
+ public void SetInt(int x)
+ {
+ mclBnFr_setInt(ref this, x);
+ }
+ public void SetStr(string s, int ioMode)
+ {
+ if (mclBnFr_setStr(ref this, s, s.Length, ioMode) != 0) {
+ throw new ArgumentException("mclBnFr_setStr" + s);
+ }
+ }
+ public bool IsValid()
+ {
+ return mclBnFr_isValid(ref this) == 1;
+ }
+ public bool Equals(Fr rhs)
+ {
+ return mclBnFr_isEqual(ref this, ref rhs) == 1;
+ }
+ public bool IsZero()
+ {
+ return mclBnFr_isZero(ref this) == 1;
+ }
+ public bool IsOne()
+ {
+ return mclBnFr_isOne(ref this) == 1;
+ }
+ public void SetByCSPRNG()
+ {
+ mclBnFr_setByCSPRNG(ref this);
+ }
+ public void SetHashOf(String s)
+ {
+ if (mclBnFr_setHashOf(ref this, s, s.Length) != 0) {
+ throw new InvalidOperationException("mclBnFr_setHashOf:" + s);
+ }
+ }
+ public string GetStr(int ioMode)
+ {
+ StringBuilder sb = new StringBuilder(1024);
+ long size = mclBnFr_getStr(sb, sb.Capacity, ref this, ioMode);
+ if (size == 0) {
+ throw new InvalidOperationException("mclBnFr_getStr:");
+ }
+ return sb.ToString();
+ }
+ public void Neg(Fr x)
+ {
+ mclBnFr_neg(ref this, ref x);
+ }
+ public void Inv(Fr x)
+ {
+ mclBnFr_inv(ref this, ref x);
+ }
+ public void Add(Fr x, Fr y)
+ {
+ mclBnFr_add(ref this, ref x, ref y);
+ }
+ public void Sub(Fr x, Fr y)
+ {
+ mclBnFr_sub(ref this, ref x, ref y);
+ }
+ public void Mul(Fr x, Fr y)
+ {
+ mclBnFr_mul(ref this, ref x, ref y);
+ }
+ public void Div(Fr x, Fr y)
+ {
+ mclBnFr_div(ref this, ref x, ref y);
+ }
+ public static Fr operator -(Fr x)
+ {
+ Fr y = new Fr();
+ y.Neg(x);
+ return y;
+ }
+ public static Fr operator +(Fr x, Fr y)
+ {
+ Fr z = new Fr();
+ z.Add(x, y);
+ return z;
+ }
+ public static Fr operator -(Fr x, Fr y)
+ {
+ Fr z = new Fr();
+ z.Sub(x, y);
+ return z;
+ }
+ public static Fr operator *(Fr x, Fr y)
+ {
+ Fr z = new Fr();
+ z.Mul(x, y);
+ return z;
+ }
+ public static Fr operator /(Fr x, Fr y)
+ {
+ Fr z = new Fr();
+ z.Div(x, y);
+ return z;
+ }
+ }
+ [StructLayout(LayoutKind.Sequential)]
+ public struct G1 {
+ private ulong v00, v01, v02, v03, v04, v05, v06, v07, v08, v09, v10, v11;
+ public void Clear()
+ {
+ mclBnG1_clear(ref this);
+ }
+ public void setStr(String s, int ioMode)
+ {
+ if (mclBnG1_setStr(ref this, s, s.Length, ioMode) != 0) {
+ throw new ArgumentException("mclBnG1_setStr:" + s);
+ }
+ }
+ public bool IsValid()
+ {
+ return mclBnG1_isValid(ref this) == 1;
+ }
+ public bool Equals(G1 rhs)
+ {
+ return mclBnG1_isEqual(ref this, ref rhs) == 1;
+ }
+ public bool IsZero()
+ {
+ return mclBnG1_isZero(ref this) == 1;
+ }
+ public void HashAndMapTo(String s)
+ {
+ if (mclBnG1_hashAndMapTo(ref this, s, s.Length) != 0) {
+ throw new ArgumentException("mclBnG1_hashAndMapTo:" + s);
+ }
+ }
+ public string GetStr(int ioMode)
+ {
+ StringBuilder sb = new StringBuilder(1024);
+ long size = mclBnG1_getStr(sb, sb.Capacity, ref this, ioMode);
+ if (size == 0) {
+ throw new InvalidOperationException("mclBnG1_getStr:");
+ }
+ return sb.ToString();
+ }
+ public void Neg(G1 x)
+ {
+ mclBnG1_neg(ref this, ref x);
+ }
+ public void Dbl(G1 x)
+ {
+ mclBnG1_dbl(ref this, ref x);
+ }
+ public void Add(G1 x, G1 y)
+ {
+ mclBnG1_add(ref this, ref x, ref y);
+ }
+ public void Sub(G1 x, G1 y)
+ {
+ mclBnG1_sub(ref this, ref x, ref y);
+ }
+ public void Mul(G1 x, Fr y)
+ {
+ mclBnG1_mul(ref this, ref x, ref y);
+ }
+ }
+ [StructLayout(LayoutKind.Sequential)]
+ public struct G2 {
+ private ulong v00, v01, v02, v03, v04, v05, v06, v07, v08, v09, v10, v11;
+ private ulong v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23;
+ public void Clear()
+ {
+ mclBnG2_clear(ref this);
+ }
+ public void setStr(String s, int ioMode)
+ {
+ if (mclBnG2_setStr(ref this, s, s.Length, ioMode) != 0) {
+ throw new ArgumentException("mclBnG2_setStr:" + s);
+ }
+ }
+ public bool IsValid()
+ {
+ return mclBnG2_isValid(ref this) == 1;
+ }
+ public bool Equals(G2 rhs)
+ {
+ return mclBnG2_isEqual(ref this, ref rhs) == 1;
+ }
+ public bool IsZero()
+ {
+ return mclBnG2_isZero(ref this) == 1;
+ }
+ public void HashAndMapTo(String s)
+ {
+ if (mclBnG2_hashAndMapTo(ref this, s, s.Length) != 0) {
+ throw new ArgumentException("mclBnG2_hashAndMapTo:" + s);
+ }
+ }
+ public string GetStr(int ioMode)
+ {
+ StringBuilder sb = new StringBuilder(1024);
+ long size = mclBnG2_getStr(sb, sb.Capacity, ref this, ioMode);
+ if (size == 0) {
+ throw new InvalidOperationException("mclBnG2_getStr:");
+ }
+ return sb.ToString();
+ }
+ public void Neg(G2 x)
+ {
+ mclBnG2_neg(ref this, ref x);
+ }
+ public void Dbl(G2 x)
+ {
+ mclBnG2_dbl(ref this, ref x);
+ }
+ public void Add(G2 x, G2 y)
+ {
+ mclBnG2_add(ref this, ref x, ref y);
+ }
+ public void Sub(G2 x, G2 y)
+ {
+ mclBnG2_sub(ref this, ref x, ref y);
+ }
+ public void Mul(G2 x, Fr y)
+ {
+ mclBnG2_mul(ref this, ref x, ref y);
+ }
+ }
+ [StructLayout(LayoutKind.Sequential)]
+ public struct GT {
+ private ulong v00, v01, v02, v03, v04, v05, v06, v07, v08, v09, v10, v11;
+ private ulong v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23;
+ private ulong v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35;
+ private ulong v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47;
+ public void Clear()
+ {
+ mclBnGT_clear(ref this);
+ }
+ public void setStr(String s, int ioMode)
+ {
+ if (mclBnGT_setStr(ref this, s, s.Length, ioMode) != 0) {
+ throw new ArgumentException("mclBnGT_setStr:" + s);
+ }
+ }
+ public bool Equals(GT rhs)
+ {
+ return mclBnGT_isEqual(ref this, ref rhs) == 1;
+ }
+ public bool IsZero()
+ {
+ return mclBnGT_isZero(ref this) == 1;
+ }
+ public bool IsOne()
+ {
+ return mclBnGT_isOne(ref this) == 1;
+ }
+ public string GetStr(int ioMode)
+ {
+ StringBuilder sb = new StringBuilder(1024);
+ long size = mclBnGT_getStr(sb, sb.Capacity, ref this, ioMode);
+ if (size == 0) {
+ throw new InvalidOperationException("mclBnGT_getStr:");
+ }
+ return sb.ToString();
+ }
+ public void Neg(GT x)
+ {
+ mclBnGT_neg(ref this, ref x);
+ }
+ public void Inv(GT x)
+ {
+ mclBnGT_inv(ref this, ref x);
+ }
+ public void Add(GT x, GT y)
+ {
+ mclBnGT_add(ref this, ref x, ref y);
+ }
+ public void Sub(GT x, GT y)
+ {
+ mclBnGT_sub(ref this, ref x, ref y);
+ }
+ public void Mul(GT x, GT y)
+ {
+ mclBnGT_mul(ref this, ref x, ref y);
+ }
+ public void Div(GT x, GT y)
+ {
+ mclBnGT_div(ref this, ref x, ref y);
+ }
+ public static GT operator -(GT x)
+ {
+ GT y = new GT();
+ y.Neg(x);
+ return y;
+ }
+ public static GT operator +(GT x, GT y)
+ {
+ GT z = new GT();
+ z.Add(x, y);
+ return z;
+ }
+ public static GT operator -(GT x, GT y)
+ {
+ GT z = new GT();
+ z.Sub(x, y);
+ return z;
+ }
+ public static GT operator *(GT x, GT y)
+ {
+ GT z = new GT();
+ z.Mul(x, y);
+ return z;
+ }
+ public static GT operator /(GT x, GT y)
+ {
+ GT z = new GT();
+ z.Div(x, y);
+ return z;
+ }
+ public void Pow(GT x, Fr y)
+ {
+ mclBnGT_pow(ref this, ref x, ref y);
+ }
+ public void Pairing(G1 x, G2 y)
+ {
+ mclBn_pairing(ref this, ref x, ref y);
+ }
+ public void FinalExp(GT x)
+ {
+ mclBn_finalExp(ref this, ref x);
+ }
+ public void MillerLoop(G1 x, G2 y)
+ {
+ mclBn_millerLoop(ref this, ref x, ref y);
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/tangerine-network/mcl/ffi/cs/bn256.csproj b/vendor/github.com/tangerine-network/mcl/ffi/cs/bn256.csproj
new file mode 100644
index 000000000..21a049f01
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/ffi/cs/bn256.csproj
@@ -0,0 +1,62 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="14.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <Import Project="$(MSBuildExtensionsPath)\$(MSBuildToolsVersion)\Microsoft.Common.props" Condition="Exists('$(MSBuildExtensionsPath)\$(MSBuildToolsVersion)\Microsoft.Common.props')" />
+ <PropertyGroup>
+ <Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
+ <Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>
+ <ProjectGuid>{E9D06B1B-EA22-4EF4-BA4B-422F7625966B}</ProjectGuid>
+ <OutputType>Exe</OutputType>
+ <AppDesignerFolder>Properties</AppDesignerFolder>
+ <RootNamespace>bn256</RootNamespace>
+ <AssemblyName>bn256</AssemblyName>
+ <TargetFrameworkVersion>v4.5.2</TargetFrameworkVersion>
+ <FileAlignment>512</FileAlignment>
+ <AutoGenerateBindingRedirects>true</AutoGenerateBindingRedirects>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)' == 'Debug|x64'">
+ <DebugSymbols>true</DebugSymbols>
+ <OutputPath>..\..\bin\</OutputPath>
+ <DefineConstants>DEBUG;TRACE</DefineConstants>
+ <AllowUnsafeBlocks>false</AllowUnsafeBlocks>
+ <DebugType>full</DebugType>
+ <PlatformTarget>x64</PlatformTarget>
+ <ErrorReport>prompt</ErrorReport>
+ <CodeAnalysisRuleSet>MinimumRecommendedRules.ruleset</CodeAnalysisRuleSet>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)' == 'Release|x64'">
+ <OutputPath>..\..\bin\</OutputPath>
+ <DefineConstants>TRACE</DefineConstants>
+ <Optimize>true</Optimize>
+ <DebugType>pdbonly</DebugType>
+ <PlatformTarget>x64</PlatformTarget>
+ <ErrorReport>prompt</ErrorReport>
+ <CodeAnalysisRuleSet>MinimumRecommendedRules.ruleset</CodeAnalysisRuleSet>
+ <Prefer32Bit>true</Prefer32Bit>
+ </PropertyGroup>
+ <ItemGroup>
+ <Reference Include="System" />
+ <Reference Include="System.Core" />
+ <Reference Include="System.Xml.Linq" />
+ <Reference Include="System.Data.DataSetExtensions" />
+ <Reference Include="Microsoft.CSharp" />
+ <Reference Include="System.Data" />
+ <Reference Include="System.Net.Http" />
+ <Reference Include="System.Xml" />
+ </ItemGroup>
+ <ItemGroup>
+ <Compile Include="bn256.cs" />
+ <Compile Include="bn256_test.cs" />
+ <Compile Include="Properties\AssemblyInfo.cs" />
+ </ItemGroup>
+ <ItemGroup>
+ <None Include="App.config" />
+ </ItemGroup>
+ <Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" />
+ <!-- To modify your build process, add your task inside one of the targets below and uncomment it.
+ Other similar extension points exist, see Microsoft.Common.targets.
+ <Target Name="BeforeBuild">
+ </Target>
+ <Target Name="AfterBuild">
+ </Target>
+ -->
+</Project> \ No newline at end of file
diff --git a/vendor/github.com/tangerine-network/mcl/ffi/cs/bn256.sln b/vendor/github.com/tangerine-network/mcl/ffi/cs/bn256.sln
new file mode 100644
index 000000000..6e6aa67ee
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/ffi/cs/bn256.sln
@@ -0,0 +1,22 @@
+
+Microsoft Visual Studio Solution File, Format Version 12.00
+# Visual Studio 14
+VisualStudioVersion = 14.0.25420.1
+MinimumVisualStudioVersion = 10.0.40219.1
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "bn256", "bn256.csproj", "{E9D06B1B-EA22-4EF4-BA4B-422F7625966B}"
+EndProject
+Global
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution
+ Debug|x64 = Debug|x64
+ Release|x64 = Release|x64
+ EndGlobalSection
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution
+ {E9D06B1B-EA22-4EF4-BA4B-422F7625966B}.Debug|x64.ActiveCfg = Debug|x64
+ {E9D06B1B-EA22-4EF4-BA4B-422F7625966B}.Debug|x64.Build.0 = Debug|x64
+ {E9D06B1B-EA22-4EF4-BA4B-422F7625966B}.Release|x64.ActiveCfg = Release|x64
+ {E9D06B1B-EA22-4EF4-BA4B-422F7625966B}.Release|x64.Build.0 = Release|x64
+ EndGlobalSection
+ GlobalSection(SolutionProperties) = preSolution
+ HideSolutionNode = FALSE
+ EndGlobalSection
+EndGlobal
diff --git a/vendor/github.com/tangerine-network/mcl/ffi/cs/bn256_test.cs b/vendor/github.com/tangerine-network/mcl/ffi/cs/bn256_test.cs
new file mode 100644
index 000000000..cad8c03d3
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/ffi/cs/bn256_test.cs
@@ -0,0 +1,149 @@
+using System;
+
+namespace mcl {
+ using static BN256;
+ class BN256Test {
+ static int err = 0;
+ static void assert(string msg, bool b)
+ {
+ if (b) return;
+ Console.WriteLine("ERR {0}", msg);
+ err++;
+ }
+ static void Main(string[] args)
+ {
+ try {
+ assert("64bit system", System.Environment.Is64BitProcess);
+ init();
+ TestFr();
+ TestG1();
+ TestG2();
+ TestPairing();
+ if (err == 0) {
+ Console.WriteLine("all tests succeed");
+ } else {
+ Console.WriteLine("err={0}", err);
+ }
+ } catch (Exception e) {
+ Console.WriteLine("ERR={0}", e);
+ }
+ }
+ static void TestFr()
+ {
+ Console.WriteLine("TestFr");
+ Fr x = new Fr();
+ x.Clear();
+ assert("0", x.GetStr(10) == "0");
+ assert("0.IzZero", x.IsZero());
+ assert("!0.IzOne", !x.IsOne());
+ x.SetInt(1);
+ assert("1", x.GetStr(10) == "1");
+ assert("!1.IzZero", !x.IsZero());
+ assert("1.IzOne", x.IsOne());
+ x.SetInt(3);
+ assert("3", x.GetStr(10) == "3");
+ assert("!3.IzZero", !x.IsZero());
+ assert("!3.IzOne", !x.IsOne());
+ x.SetInt(-5);
+ x = -x;
+ assert("5", x.GetStr(10) == "5");
+ x.SetInt(4);
+ x = x * x;
+ assert("16", x.GetStr(10) == "16");
+ assert("10", x.GetStr(16) == "10");
+ Fr y;
+ y = x;
+ assert("x == y", x.Equals(y));
+ x.SetInt(123);
+ assert("123", x.GetStr(10) == "123");
+ assert("7b", x.GetStr(16) == "7b");
+ assert("y != x", !x.Equals(y));
+ try {
+ x.SetStr("1234567891234x", 10);
+ Console.WriteLine("x = {0}", x);
+ } catch (Exception e) {
+ Console.WriteLine("exception test OK\n'{0}'", e);
+ }
+ x.SetStr("1234567891234", 10);
+ assert("1234567891234", x.GetStr(10) == "1234567891234");
+ }
+ static void TestG1()
+ {
+ Console.WriteLine("TestG1");
+ G1 P = new G1();
+ P.Clear();
+ assert("P.IsValid", P.IsValid());
+ assert("P.IsZero", P.IsZero());
+ P.HashAndMapTo("abc");
+ assert("P.IsValid", P.IsValid());
+ assert("!P.IsZero", !P.IsZero());
+ G1 Q = new G1();
+ Q = P;
+ assert("P == Q", Q.Equals(P));
+ Q.Neg(P);
+ Q.Add(Q, P);
+ assert("P = Q", Q.IsZero());
+ Q.Dbl(P);
+ G1 R = new G1();
+ R.Add(P, P);
+ assert("Q == R", Q.Equals(R));
+ Fr x = new Fr();
+ x.SetInt(3);
+ R.Add(R, P);
+ Q.Mul(P, x);
+ assert("Q == R", Q.Equals(R));
+ }
+ static void TestG2()
+ {
+ Console.WriteLine("TestG2");
+ G2 P = new G2();
+ P.Clear();
+ assert("P is valid", P.IsValid());
+ assert("P is zero", P.IsZero());
+ P.HashAndMapTo("abc");
+ assert("P is valid", P.IsValid());
+ assert("P is not zero", !P.IsZero());
+ G2 Q = new G2();
+ Q = P;
+ assert("P == Q", Q.Equals(P));
+ Q.Neg(P);
+ Q.Add(Q, P);
+ assert("Q is zero", Q.IsZero());
+ Q.Dbl(P);
+ G2 R = new G2();
+ R.Add(P, P);
+ assert("Q == R", Q.Equals(R));
+ Fr x = new Fr();
+ x.SetInt(3);
+ R.Add(R, P);
+ Q.Mul(P, x);
+ assert("Q == R", Q.Equals(R));
+ }
+ static void TestPairing()
+ {
+ Console.WriteLine("TestG2");
+ G1 P = new G1();
+ P.HashAndMapTo("123");
+ G2 Q = new G2();
+ Q.HashAndMapTo("1");
+ Fr a = new Fr();
+ Fr b = new Fr();
+ a.SetStr("12345678912345673453", 10);
+ b.SetStr("230498230982394243424", 10);
+ G1 aP = new G1();
+ G2 bQ = new G2();
+ aP.Mul(P, a);
+ bQ.Mul(Q, b);
+ GT e1 = new GT();
+ GT e2 = new GT();
+ GT e3 = new GT();
+ e1.Pairing(P, Q);
+ e2.Pairing(aP, Q);
+ e3.Pow(e1, a);
+ assert("e2.Equals(e3)", e2.Equals(e3));
+ e2.Pairing(P, bQ);
+ e3.Pow(e1, b);
+ assert("e2.Equals(e3)", e2.Equals(e3));
+ }
+ }
+}
diff --git a/vendor/github.com/tangerine-network/mcl/ffi/go/mcl/mcl.go b/vendor/github.com/tangerine-network/mcl/ffi/go/mcl/mcl.go
new file mode 100644
index 000000000..a0c8bb4d3
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/ffi/go/mcl/mcl.go
@@ -0,0 +1,659 @@
+package mcl
+
+/*
+#cgo bn256 CFLAGS:-DMCLBN_FP_UNIT_SIZE=4
+#cgo bn384 CFLAGS:-DMCLBN_FP_UNIT_SIZE=6
+#cgo bn384_256 CFLAGS:-DMCLBN_FP_UNIT_SIZE=6 -DMCLBN_FR_UNIT_SIZE=4
+#cgo bn256 LDFLAGS:-lmclbn256 -lmcl
+#cgo bn384 LDFLAGS:-lmclbn384 -lmcl
+#cgo bn384_256 LDFLAGS:-lmclbn384_256 -lmcl
+#include <mcl/bn.h>
+*/
+import "C"
+import "fmt"
+import "unsafe"
+
+// CurveFp254BNb -- 254 bit curve
+const CurveFp254BNb = C.mclBn_CurveFp254BNb
+
+// CurveFp382_1 -- 382 bit curve 1
+const CurveFp382_1 = C.mclBn_CurveFp382_1
+
+// CurveFp382_2 -- 382 bit curve 2
+const CurveFp382_2 = C.mclBn_CurveFp382_2
+
+// BLS12_381 -- BLS12-381 curve
+const BLS12_381 = C.MCL_BLS12_381
+
+// IoSerializeHexStr -- hex-string serialization mode for GetString/SetString
+const IoSerializeHexStr = C.MCLBN_IO_SERIALIZE_HEX_STR
+
+// Init --
+// call this function before calling all the other operations
+// this function is not thread safe
+func Init(curve int) error {
+ err := C.mclBn_init(C.int(curve), C.MCLBN_COMPILED_TIME_VAR)
+ if err != 0 {
+ return fmt.Errorf("ERR mclBn_init curve=%d", curve)
+ }
+ return nil
+}
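+
+// A minimal caller-side sketch (illustrative, assuming the bn256 build tag):
+//
+//   if err := mcl.Init(mcl.CurveFp254BNb); err != nil {
+//       panic(err)
+//   }
+//   var s mcl.Fr
+//   s.SetByCSPRNG()            // secret scalar
+//   var P mcl.G1
+//   _ = P.HashAndMapTo([]byte("msg"))
+//   var sP mcl.G1
+//   mcl.G1Mul(&sP, &P, &s)     // sP = s*P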
+
+// GetFrUnitSize() --
+func GetFrUnitSize() int {
+ return int(C.MCLBN_FR_UNIT_SIZE)
+}
+
+// GetFpUnitSize() --
+// same as GetMaxOpUnitSize()
+func GetFpUnitSize() int {
+ return int(C.MCLBN_FP_UNIT_SIZE)
+}
+
+// GetMaxOpUnitSize --
+func GetMaxOpUnitSize() int {
+ return int(C.MCLBN_FP_UNIT_SIZE)
+}
+
+// GetOpUnitSize --
+// the length of Fr is GetOpUnitSize() * 8 bytes
+func GetOpUnitSize() int {
+ return int(C.mclBn_getOpUnitSize())
+}
+
+// GetCurveOrder --
+// return the order of G1
+func GetCurveOrder() string {
+ buf := make([]byte, 1024)
+ // #nosec
+ n := C.mclBn_getCurveOrder((*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)))
+ if n == 0 {
+ panic("implementation err. size of buf is small")
+ }
+ return string(buf[:n])
+}
+
+// GetFieldOrder --
+// return the characteristic of the field where a curve is defined
+func GetFieldOrder() string {
+ buf := make([]byte, 1024)
+ // #nosec
+ n := C.mclBn_getFieldOrder((*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)))
+ if n == 0 {
+ panic("implementation err. size of buf is small")
+ }
+ return string(buf[:n])
+}
+
+// Fr --
+type Fr struct {
+ v C.mclBnFr
+}
+
+// getPointer --
+func (x *Fr) getPointer() (p *C.mclBnFr) {
+ // #nosec
+ return (*C.mclBnFr)(unsafe.Pointer(x))
+}
+
+// Clear --
+func (x *Fr) Clear() {
+ // #nosec
+ C.mclBnFr_clear(x.getPointer())
+}
+
+// SetInt64 --
+func (x *Fr) SetInt64(v int64) {
+ // #nosec
+ C.mclBnFr_setInt(x.getPointer(), C.int64_t(v))
+}
+
+// SetString --
+func (x *Fr) SetString(s string, base int) error {
+ buf := []byte(s)
+ // #nosec
+ err := C.mclBnFr_setStr(x.getPointer(), (*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)), C.int(base))
+ if err != 0 {
+ return fmt.Errorf("err mclBnFr_setStr %x", err)
+ }
+ return nil
+}
+
+// Deserialize --
+func (x *Fr) Deserialize(buf []byte) error {
+ // #nosec
+ err := C.mclBnFr_deserialize(x.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf)))
+ if err == 0 {
+ return fmt.Errorf("err mclBnFr_deserialize %x", buf)
+ }
+ return nil
+}
+
+// SetLittleEndian --
+func (x *Fr) SetLittleEndian(buf []byte) error {
+ // #nosec
+ err := C.mclBnFr_setLittleEndian(x.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf)))
+ if err != 0 {
+ return fmt.Errorf("err mclBnFr_setLittleEndian %x", err)
+ }
+ return nil
+}
+
+// IsEqual --
+func (x *Fr) IsEqual(rhs *Fr) bool {
+ return C.mclBnFr_isEqual(x.getPointer(), rhs.getPointer()) == 1
+}
+
+// IsZero --
+func (x *Fr) IsZero() bool {
+ return C.mclBnFr_isZero(x.getPointer()) == 1
+}
+
+// IsOne --
+func (x *Fr) IsOne() bool {
+ return C.mclBnFr_isOne(x.getPointer()) == 1
+}
+
+// SetByCSPRNG --
+func (x *Fr) SetByCSPRNG() {
+ err := C.mclBnFr_setByCSPRNG(x.getPointer())
+ if err != 0 {
+ panic("err mclBnFr_setByCSPRNG")
+ }
+}
+
+// SetHashOf --
+func (x *Fr) SetHashOf(buf []byte) bool {
+ // #nosec
+ return C.mclBnFr_setHashOf(x.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf))) == 0
+}
+
+// GetString --
+func (x *Fr) GetString(base int) string {
+ buf := make([]byte, 2048)
+ // #nosec
+ n := C.mclBnFr_getStr((*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)), x.getPointer(), C.int(base))
+ if n == 0 {
+ panic("err mclBnFr_getStr")
+ }
+ return string(buf[:n])
+}
+
+// Serialize --
+func (x *Fr) Serialize() []byte {
+ buf := make([]byte, 2048)
+ // #nosec
+ n := C.mclBnFr_serialize(unsafe.Pointer(&buf[0]), C.size_t(len(buf)), x.getPointer())
+ if n == 0 {
+ panic("err mclBnFr_serialize")
+ }
+ return buf[:n]
+}
+
+// FrNeg --
+func FrNeg(out *Fr, x *Fr) {
+ C.mclBnFr_neg(out.getPointer(), x.getPointer())
+}
+
+// FrInv --
+func FrInv(out *Fr, x *Fr) {
+ C.mclBnFr_inv(out.getPointer(), x.getPointer())
+}
+
+// FrAdd --
+func FrAdd(out *Fr, x *Fr, y *Fr) {
+ C.mclBnFr_add(out.getPointer(), x.getPointer(), y.getPointer())
+}
+
+// FrSub --
+func FrSub(out *Fr, x *Fr, y *Fr) {
+ C.mclBnFr_sub(out.getPointer(), x.getPointer(), y.getPointer())
+}
+
+// FrMul --
+func FrMul(out *Fr, x *Fr, y *Fr) {
+ C.mclBnFr_mul(out.getPointer(), x.getPointer(), y.getPointer())
+}
+
+// FrDiv --
+func FrDiv(out *Fr, x *Fr, y *Fr) {
+ C.mclBnFr_div(out.getPointer(), x.getPointer(), y.getPointer())
+}
+
+// G1 --
+type G1 struct {
+ v C.mclBnG1
+}
+
+// getPointer --
+func (x *G1) getPointer() (p *C.mclBnG1) {
+ // #nosec
+ return (*C.mclBnG1)(unsafe.Pointer(x))
+}
+
+// Clear --
+func (x *G1) Clear() {
+ // #nosec
+ C.mclBnG1_clear(x.getPointer())
+}
+
+// SetString --
+func (x *G1) SetString(s string, base int) error {
+ buf := []byte(s)
+ // #nosec
+ err := C.mclBnG1_setStr(x.getPointer(), (*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)), C.int(base))
+ if err != 0 {
+ return fmt.Errorf("err mclBnG1_setStr %x", err)
+ }
+ return nil
+}
+
+// Deserialize --
+func (x *G1) Deserialize(buf []byte) error {
+ // #nosec
+ err := C.mclBnG1_deserialize(x.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf)))
+ if err == 0 {
+ return fmt.Errorf("err mclBnG1_deserialize %x", buf)
+ }
+ return nil
+}
+
+// IsEqual --
+func (x *G1) IsEqual(rhs *G1) bool {
+ return C.mclBnG1_isEqual(x.getPointer(), rhs.getPointer()) == 1
+}
+
+// IsZero --
+func (x *G1) IsZero() bool {
+ return C.mclBnG1_isZero(x.getPointer()) == 1
+}
+
+// HashAndMapTo --
+func (x *G1) HashAndMapTo(buf []byte) error {
+ // #nosec
+ err := C.mclBnG1_hashAndMapTo(x.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf)))
+ if err != 0 {
+ return fmt.Errorf("err mclBnG1_hashAndMapTo %x", err)
+ }
+ return nil
+}
+
+// GetString --
+func (x *G1) GetString(base int) string {
+ buf := make([]byte, 2048)
+ // #nosec
+ n := C.mclBnG1_getStr((*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)), x.getPointer(), C.int(base))
+ if n == 0 {
+ panic("err mclBnG1_getStr")
+ }
+ return string(buf[:n])
+}
+
+// Serialize --
+func (x *G1) Serialize() []byte {
+ buf := make([]byte, 2048)
+ // #nosec
+ n := C.mclBnG1_serialize(unsafe.Pointer(&buf[0]), C.size_t(len(buf)), x.getPointer())
+ if n == 0 {
+ panic("err mclBnG1_serialize")
+ }
+ return buf[:n]
+}
+
+// G1Neg --
+func G1Neg(out *G1, x *G1) {
+ C.mclBnG1_neg(out.getPointer(), x.getPointer())
+}
+
+// G1Dbl --
+func G1Dbl(out *G1, x *G1) {
+ C.mclBnG1_dbl(out.getPointer(), x.getPointer())
+}
+
+// G1Add --
+func G1Add(out *G1, x *G1, y *G1) {
+ C.mclBnG1_add(out.getPointer(), x.getPointer(), y.getPointer())
+}
+
+// G1Sub --
+func G1Sub(out *G1, x *G1, y *G1) {
+ C.mclBnG1_sub(out.getPointer(), x.getPointer(), y.getPointer())
+}
+
+// G1Mul --
+func G1Mul(out *G1, x *G1, y *Fr) {
+ C.mclBnG1_mul(out.getPointer(), x.getPointer(), y.getPointer())
+}
+
+// G1MulCT -- constant time (depending on the bit length of y)
+func G1MulCT(out *G1, x *G1, y *Fr) {
+ C.mclBnG1_mulCT(out.getPointer(), x.getPointer(), y.getPointer())
+}
+
+// G2 --
+type G2 struct {
+ v C.mclBnG2
+}
+
+// getPointer --
+func (x *G2) getPointer() (p *C.mclBnG2) {
+ // #nosec
+ return (*C.mclBnG2)(unsafe.Pointer(x))
+}
+
+// Clear --
+func (x *G2) Clear() {
+ // #nosec
+ C.mclBnG2_clear(x.getPointer())
+}
+
+// SetString --
+func (x *G2) SetString(s string, base int) error {
+ buf := []byte(s)
+ // #nosec
+ err := C.mclBnG2_setStr(x.getPointer(), (*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)), C.int(base))
+ if err != 0 {
+ return fmt.Errorf("err mclBnG2_setStr %x", err)
+ }
+ return nil
+}
+
+// Deserialize --
+func (x *G2) Deserialize(buf []byte) error {
+ // #nosec
+ err := C.mclBnG2_deserialize(x.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf)))
+ if err == 0 {
+ return fmt.Errorf("err mclBnG2_deserialize %x", buf)
+ }
+ return nil
+}
+
+// IsEqual --
+func (x *G2) IsEqual(rhs *G2) bool {
+ return C.mclBnG2_isEqual(x.getPointer(), rhs.getPointer()) == 1
+}
+
+// IsZero --
+func (x *G2) IsZero() bool {
+ return C.mclBnG2_isZero(x.getPointer()) == 1
+}
+
+// HashAndMapTo --
+func (x *G2) HashAndMapTo(buf []byte) error {
+ // #nosec
+ err := C.mclBnG2_hashAndMapTo(x.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf)))
+ if err != 0 {
+ return fmt.Errorf("err mclBnG2_hashAndMapTo %x", err)
+ }
+ return nil
+}
+
+// GetString --
+func (x *G2) GetString(base int) string {
+ buf := make([]byte, 2048)
+ // #nosec
+ n := C.mclBnG2_getStr((*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)), x.getPointer(), C.int(base))
+ if n == 0 {
+ panic("err mclBnG2_getStr")
+ }
+ return string(buf[:n])
+}
+
+// Serialize --
+func (x *G2) Serialize() []byte {
+ buf := make([]byte, 2048)
+ // #nosec
+ n := C.mclBnG2_serialize(unsafe.Pointer(&buf[0]), C.size_t(len(buf)), x.getPointer())
+ if n == 0 {
+ panic("err mclBnG2_serialize")
+ }
+ return buf[:n]
+}
+
+// G2Neg --
+func G2Neg(out *G2, x *G2) {
+ C.mclBnG2_neg(out.getPointer(), x.getPointer())
+}
+
+// G2Dbl --
+func G2Dbl(out *G2, x *G2) {
+ C.mclBnG2_dbl(out.getPointer(), x.getPointer())
+}
+
+// G2Add --
+func G2Add(out *G2, x *G2, y *G2) {
+ C.mclBnG2_add(out.getPointer(), x.getPointer(), y.getPointer())
+}
+
+// G2Sub --
+func G2Sub(out *G2, x *G2, y *G2) {
+ C.mclBnG2_sub(out.getPointer(), x.getPointer(), y.getPointer())
+}
+
+// G2Mul --
+func G2Mul(out *G2, x *G2, y *Fr) {
+ C.mclBnG2_mul(out.getPointer(), x.getPointer(), y.getPointer())
+}
+
+// GT --
+type GT struct {
+ v C.mclBnGT
+}
+
+// getPointer --
+func (x *GT) getPointer() (p *C.mclBnGT) {
+ // #nosec
+ return (*C.mclBnGT)(unsafe.Pointer(x))
+}
+
+// Clear --
+func (x *GT) Clear() {
+ // #nosec
+ C.mclBnGT_clear(x.getPointer())
+}
+
+// SetInt64 --
+func (x *GT) SetInt64(v int64) {
+ // #nosec
+ C.mclBnGT_setInt(x.getPointer(), C.int64_t(v))
+}
+
+// SetString --
+func (x *GT) SetString(s string, base int) error {
+ buf := []byte(s)
+ // #nosec
+ err := C.mclBnGT_setStr(x.getPointer(), (*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)), C.int(base))
+ if err != 0 {
+ return fmt.Errorf("err mclBnGT_setStr %x", err)
+ }
+ return nil
+}
+
+// Deserialize --
+func (x *GT) Deserialize(buf []byte) error {
+ // #nosec
+ err := C.mclBnGT_deserialize(x.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf)))
+ if err == 0 {
+ return fmt.Errorf("err mclBnGT_deserialize %x", buf)
+ }
+ return nil
+}
+
+// IsEqual --
+func (x *GT) IsEqual(rhs *GT) bool {
+ return C.mclBnGT_isEqual(x.getPointer(), rhs.getPointer()) == 1
+}
+
+// IsZero --
+func (x *GT) IsZero() bool {
+ return C.mclBnGT_isZero(x.getPointer()) == 1
+}
+
+// IsOne --
+func (x *GT) IsOne() bool {
+ return C.mclBnGT_isOne(x.getPointer()) == 1
+}
+
+// GetString --
+func (x *GT) GetString(base int) string {
+ buf := make([]byte, 2048)
+ // #nosec
+ n := C.mclBnGT_getStr((*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)), x.getPointer(), C.int(base))
+ if n == 0 {
+ panic("err mclBnGT_getStr")
+ }
+ return string(buf[:n])
+}
+
+// Serialize --
+func (x *GT) Serialize() []byte {
+ buf := make([]byte, 2048)
+ // #nosec
+ n := C.mclBnGT_serialize(unsafe.Pointer(&buf[0]), C.size_t(len(buf)), x.getPointer())
+ if n == 0 {
+ panic("err mclBnGT_serialize")
+ }
+ return buf[:n]
+}
+
+// GTNeg --
+func GTNeg(out *GT, x *GT) {
+ C.mclBnGT_neg(out.getPointer(), x.getPointer())
+}
+
+// GTInv --
+func GTInv(out *GT, x *GT) {
+ C.mclBnGT_inv(out.getPointer(), x.getPointer())
+}
+
+// GTAdd --
+func GTAdd(out *GT, x *GT, y *GT) {
+ C.mclBnGT_add(out.getPointer(), x.getPointer(), y.getPointer())
+}
+
+// GTSub --
+func GTSub(out *GT, x *GT, y *GT) {
+ C.mclBnGT_sub(out.getPointer(), x.getPointer(), y.getPointer())
+}
+
+// GTMul --
+func GTMul(out *GT, x *GT, y *GT) {
+ C.mclBnGT_mul(out.getPointer(), x.getPointer(), y.getPointer())
+}
+
+// GTDiv --
+func GTDiv(out *GT, x *GT, y *GT) {
+ C.mclBnGT_div(out.getPointer(), x.getPointer(), y.getPointer())
+}
+
+// GTPow --
+func GTPow(out *GT, x *GT, y *Fr) {
+ C.mclBnGT_pow(out.getPointer(), x.getPointer(), y.getPointer())
+}
+
+// Pairing --
+func Pairing(out *GT, x *G1, y *G2) {
+ C.mclBn_pairing(out.getPointer(), x.getPointer(), y.getPointer())
+}
+
+// FinalExp --
+func FinalExp(out *GT, x *GT) {
+ C.mclBn_finalExp(out.getPointer(), x.getPointer())
+}
+
+// MillerLoop --
+func MillerLoop(out *GT, x *G1, y *G2) {
+ C.mclBn_millerLoop(out.getPointer(), x.getPointer(), y.getPointer())
+}
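+
+// Note (illustrative identity, assuming Init has been called): the full
+// pairing equals the Miller loop followed by the final exponentiation, i.e.
+//
+//   var e1, e2 GT
+//   Pairing(&e1, &P, &Q)
+//   MillerLoop(&e2, &P, &Q)
+//   FinalExp(&e2, &e2)
+//   // e1.IsEqual(&e2) == true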
+
+// GetUint64NumToPrecompute --
+func GetUint64NumToPrecompute() int {
+ return int(C.mclBn_getUint64NumToPrecompute())
+}
+
+// PrecomputeG2 --
+func PrecomputeG2(Qbuf []uint64, Q *G2) {
+ // #nosec
+ C.mclBn_precomputeG2((*C.uint64_t)(unsafe.Pointer(&Qbuf[0])), Q.getPointer())
+}
+
+// PrecomputedMillerLoop --
+func PrecomputedMillerLoop(out *GT, P *G1, Qbuf []uint64) {
+ // #nosec
+ C.mclBn_precomputedMillerLoop(out.getPointer(), P.getPointer(), (*C.uint64_t)(unsafe.Pointer(&Qbuf[0])))
+}
+
+// PrecomputedMillerLoop2 --
+func PrecomputedMillerLoop2(out *GT, P1 *G1, Q1buf []uint64, P2 *G1, Q2buf []uint64) {
+ // #nosec
+ C.mclBn_precomputedMillerLoop2(out.getPointer(), P1.getPointer(), (*C.uint64_t)(unsafe.Pointer(&Q1buf[0])), P2.getPointer(), (*C.uint64_t)(unsafe.Pointer(&Q2buf[0])))
+}
+
+// FrEvaluatePolynomial -- y = c[0] + c[1] * x + c[2] * x^2 + ...
+func FrEvaluatePolynomial(y *Fr, c []Fr, x *Fr) error {
+ // #nosec
+ err := C.mclBn_FrEvaluatePolynomial(y.getPointer(), (*C.mclBnFr)(unsafe.Pointer(&c[0])), (C.size_t)(len(c)), x.getPointer())
+ if err != 0 {
+ return fmt.Errorf("err mclBn_FrEvaluatePolynomial")
+ }
+ return nil
+}
+
+// G1EvaluatePolynomial -- y = c[0] + c[1] * x + c[2] * x^2 + ...
+func G1EvaluatePolynomial(y *G1, c []G1, x *Fr) error {
+ // #nosec
+ err := C.mclBn_G1EvaluatePolynomial(y.getPointer(), (*C.mclBnG1)(unsafe.Pointer(&c[0])), (C.size_t)(len(c)), x.getPointer())
+ if err != 0 {
+ return fmt.Errorf("err mclBn_G1EvaluatePolynomial")
+ }
+ return nil
+}
+
+// G2EvaluatePolynomial -- y = c[0] + c[1] * x + c[2] * x^2 + ...
+func G2EvaluatePolynomial(y *G2, c []G2, x *Fr) error {
+ // #nosec
+ err := C.mclBn_G2EvaluatePolynomial(y.getPointer(), (*C.mclBnG2)(unsafe.Pointer(&c[0])), (C.size_t)(len(c)), x.getPointer())
+ if err != 0 {
+ return fmt.Errorf("err mclBn_G2EvaluatePolynomial")
+ }
+ return nil
+}
+
+// FrLagrangeInterpolation --
+func FrLagrangeInterpolation(out *Fr, xVec []Fr, yVec []Fr) error {
+ if len(xVec) != len(yVec) {
+ return fmt.Errorf("err FrLagrangeInterpolation:bad size")
+ }
+ // #nosec
+ err := C.mclBn_FrLagrangeInterpolation(out.getPointer(), (*C.mclBnFr)(unsafe.Pointer(&xVec[0])), (*C.mclBnFr)(unsafe.Pointer(&yVec[0])), (C.size_t)(len(xVec)))
+ if err != 0 {
+ return fmt.Errorf("err FrLagrangeInterpolation")
+ }
+ return nil
+}
+
+// G1LagrangeInterpolation --
+func G1LagrangeInterpolation(out *G1, xVec []Fr, yVec []G1) error {
+ if len(xVec) != len(yVec) {
+ return fmt.Errorf("err G1LagrangeInterpolation:bad size")
+ }
+ // #nosec
+ err := C.mclBn_G1LagrangeInterpolation(out.getPointer(), (*C.mclBnFr)(unsafe.Pointer(&xVec[0])), (*C.mclBnG1)(unsafe.Pointer(&yVec[0])), (C.size_t)(len(xVec)))
+ if err != 0 {
+ return fmt.Errorf("err G1LagrangeInterpolation")
+ }
+ return nil
+}
+
+// G2LagrangeInterpolation --
+func G2LagrangeInterpolation(out *G2, xVec []Fr, yVec []G2) error {
+ if len(xVec) != len(yVec) {
+ return fmt.Errorf("err G2LagrangeInterpolation:bad size")
+ }
+ // #nosec
+ err := C.mclBn_G2LagrangeInterpolation(out.getPointer(), (*C.mclBnFr)(unsafe.Pointer(&xVec[0])), (*C.mclBnG2)(unsafe.Pointer(&yVec[0])), (C.size_t)(len(xVec)))
+ if err != 0 {
+ return fmt.Errorf("err G2LagrangeInterpolation")
+ }
+ return nil
+}
diff --git a/vendor/github.com/tangerine-network/mcl/ffi/go/mcl/mcl_test.go b/vendor/github.com/tangerine-network/mcl/ffi/go/mcl/mcl_test.go
new file mode 100644
index 000000000..16bb6910f
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/ffi/go/mcl/mcl_test.go
@@ -0,0 +1,157 @@
+package mcl
+
+import "testing"
+import "fmt"
+
+func testBadPointOfG2(t *testing.T) {
+ var Q G2
+ // this value is not in G2 so should return an error
+ err := Q.SetString("1 18d3d8c085a5a5e7553c3a4eb628e88b8465bf4de2612e35a0a4eb018fb0c82e9698896031e62fd7633ffd824a859474 1dc6edfcf33e29575d4791faed8e7203832217423bf7f7fbf1f6b36625b12e7132c15fbc15562ce93362a322fb83dd0d 65836963b1f7b6959030ddfa15ab38ce056097e91dedffd996c1808624fa7e2644a77be606290aa555cda8481cfb3cb 1b77b708d3d4f65aeedf54b58393463a42f0dc5856baadb5ce608036baeca398c5d9e6b169473a8838098fd72fd28b50", 16)
+ if err == nil {
+ t.Error("SetString should fail for a point not in G2")
+ }
+}
+
+func testGT(t *testing.T) {
+ var x GT
+ x.Clear()
+ if !x.IsZero() {
+ t.Errorf("not zero")
+ }
+ x.SetInt64(1)
+ if !x.IsOne() {
+ t.Errorf("not one")
+ }
+}
+
+func testHash(t *testing.T) {
+ var x Fr
+ if !x.SetHashOf([]byte("abc")) {
+ t.Error("SetHashOf")
+ }
+ fmt.Printf("x=%s\n", x.GetString(16))
+}
+
+func testNegAdd(t *testing.T) {
+ var x Fr
+ var P1, P2, P3 G1
+ var Q1, Q2, Q3 G2
+ err := P1.HashAndMapTo([]byte("this"))
+ if err != nil {
+ t.Error(err)
+ }
+ err = Q1.HashAndMapTo([]byte("this"))
+ if err != nil {
+ t.Error(err)
+ }
+ fmt.Printf("P1=%s\n", P1.GetString(16))
+ fmt.Printf("Q1=%s\n", Q1.GetString(16))
+ G1Neg(&P2, &P1)
+ G2Neg(&Q2, &Q1)
+ fmt.Printf("P2=%s\n", P2.GetString(16))
+ fmt.Printf("Q2=%s\n", Q2.GetString(16))
+
+ x.SetInt64(-1)
+ G1Mul(&P3, &P1, &x)
+ G2Mul(&Q3, &Q1, &x)
+ if !P2.IsEqual(&P3) {
+ t.Errorf("P2 != P3 %s\n", P3.GetString(16))
+ }
+ if !Q2.IsEqual(&Q3) {
+ t.Errorf("Q2 != Q3 %s\n", Q3.GetString(16))
+ }
+
+ G1Add(&P2, &P2, &P1)
+ G2Add(&Q2, &Q2, &Q1)
+ if !P2.IsZero() {
+ t.Errorf("P2 is not zero %s\n", P2.GetString(16))
+ }
+ if !Q2.IsZero() {
+ t.Errorf("Q2 is not zero %s\n", Q2.GetString(16))
+ }
+}
+
+func testPairing(t *testing.T) {
+ var a, b, ab Fr
+ err := a.SetString("123", 10)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ err = b.SetString("456", 10)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ FrMul(&ab, &a, &b)
+ var P, aP G1
+ var Q, bQ G2
+ err = P.HashAndMapTo([]byte("this"))
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ fmt.Printf("P=%s\n", P.GetString(16))
+ G1Mul(&aP, &P, &a)
+ fmt.Printf("aP=%s\n", aP.GetString(16))
+ err = Q.HashAndMapTo([]byte("that"))
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ fmt.Printf("Q=%s\n", Q.GetString(16))
+ G2Mul(&bQ, &Q, &b)
+ fmt.Printf("bQ=%s\n", bQ.GetString(16))
+ var e1, e2 GT
+ Pairing(&e1, &P, &Q)
+ fmt.Printf("e1=%s\n", e1.GetString(16))
+ Pairing(&e2, &aP, &bQ)
+ fmt.Printf("e2=%s\n", e1.GetString(16))
+ GTPow(&e1, &e1, &ab)
+ fmt.Printf("e1=%s\n", e1.GetString(16))
+ if !e1.IsEqual(&e2) {
+ t.Errorf("not equal pairing\n%s\n%s", e1.GetString(16), e2.GetString(16))
+ }
+ {
+ s := P.GetString(IoSerializeHexStr)
+ var P1 G1
+ P1.SetString(s, IoSerializeHexStr)
+ if !P1.IsEqual(&P) {
+ t.Error("not equal to P")
+ return
+ }
+ s = Q.GetString(IoSerializeHexStr)
+ var Q1 G2
+ Q1.SetString(s, IoSerializeHexStr)
+ if !Q1.IsEqual(&Q) {
+ t.Error("not equal to Q")
+ return
+ }
+ }
+}
+
+func testMcl(t *testing.T, c int) {
+ err := Init(c)
+ if err != nil {
+ t.Fatal(err)
+ }
+ testHash(t)
+ testNegAdd(t)
+ testPairing(t)
+ testGT(t)
+ testBadPointOfG2(t)
+}
+
+func TestMclMain(t *testing.T) {
+ t.Logf("GetMaxOpUnitSize() = %d\n", GetMaxOpUnitSize())
+ t.Log("CurveFp254BNb")
+ testMcl(t, CurveFp254BNb)
+ if GetMaxOpUnitSize() == 6 {
+ if GetFrUnitSize() == 6 {
+ t.Log("CurveFp382_1")
+ testMcl(t, CurveFp382_1)
+ }
+ t.Log("BLS12_381")
+ testMcl(t, BLS12_381)
+ }
+}
diff --git a/vendor/github.com/tangerine-network/mcl/ffi/java/Bn256Test.java b/vendor/github.com/tangerine-network/mcl/ffi/java/Bn256Test.java
new file mode 100644
index 000000000..b1f9f6f34
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/ffi/java/Bn256Test.java
@@ -0,0 +1,104 @@
+import java.io.*;
+import com.herumi.mcl.*;
+
+/*
+ Bn256Test
+*/
+public class Bn256Test {
+ static {
+ String lib = "mcl_bn256";
+ String libName = System.mapLibraryName(lib);
+ System.out.println("libName : " + libName);
+ System.loadLibrary(lib);
+ }
+ public static void assertEquals(String msg, String x, String y) {
+ if (x.equals(y)) {
+ System.out.println("OK : " + msg);
+ } else {
+ System.out.println("NG : " + msg + ", x = " + x + ", y = " + y);
+ }
+ }
+ public static void assertBool(String msg, boolean b) {
+ if (b) {
+ System.out.println("OK : " + msg);
+ } else {
+ System.out.println("NG : " + msg);
+ }
+ }
+ public static void main(String argv[]) {
+ try {
+ Bn256.SystemInit();
+ Fr x = new Fr(5);
+ Fr y = new Fr(-2);
+ Fr z = new Fr(5);
+ assertBool("x != y", !x.equals(y));
+ assertBool("x == z", x.equals(z));
+ assertEquals("x == 5", x.toString(), "5");
+ Bn256.add(x, x, y);
+ assertEquals("x == 3", x.toString(), "3");
+ Bn256.mul(x, x, x);
+ assertEquals("x == 9", x.toString(), "9");
+ G1 P = new G1();
+ System.out.println("P=" + P);
+ P.set("-1", "1");
+ System.out.println("P=" + P);
+ Bn256.neg(P, P);
+ System.out.println("P=" + P);
+
+ String xa = "12723517038133731887338407189719511622662176727675373276651903807414909099441";
+ String xb = "4168783608814932154536427934509895782246573715297911553964171371032945126671";
+ String ya = "13891744915211034074451795021214165905772212241412891944830863846330766296736";
+ String yb = "7937318970632701341203597196594272556916396164729705624521405069090520231616";
+
+ G2 Q = new G2(xa, xb, ya, yb);
+
+ P.hashAndMapToG1("This is a pen");
+ {
+ String s = P.toString();
+ G1 P1 = new G1();
+ P1.setStr(s);
+ assertBool("P == P1", P1.equals(P));
+ }
+
+ GT e = new GT();
+ Bn256.pairing(e, P, Q);
+ GT e1 = new GT();
+ GT e2 = new GT();
+ Fr c = new Fr("1234567890123234928348230428394234");
+ G2 cQ = new G2(Q);
+ Bn256.mul(cQ, Q, c); // cQ = Q * c
+ Bn256.pairing(e1, P, cQ);
+ Bn256.pow(e2, e, c); // e2 = e^c
+ assertBool("e1 == e2", e1.equals(e2));
+
+ G1 cP = new G1(P);
+ Bn256.mul(cP, P, c); // cP = P * c
+ Bn256.pairing(e1, cP, Q);
+ assertBool("e1 == e2", e1.equals(e2));
+
+ BLSsignature(Q);
+ } catch (RuntimeException e) {
+ System.out.println("unknown exception :" + e);
+ }
+ }
+ public static void BLSsignature(G2 Q)
+ {
+ Fr s = new Fr();
+ s.setRand(); // secret key
+ System.out.println("secret key " + s);
+ G2 pub = new G2();
+ Bn256.mul(pub, Q, s); // public key = sQ
+
+ String m = "signature test";
+ G1 H = new G1();
+ H.hashAndMapToG1(m); // H = Hash(m)
+ G1 sign = new G1();
+ Bn256.mul(sign, H, s); // signature of m = s H
+
+ GT e1 = new GT();
+ GT e2 = new GT();
+ Bn256.pairing(e1, H, pub); // e1 = e(H, s Q)
+ Bn256.pairing(e2, sign, Q); // e2 = e(s H, Q);
+ assertBool("verify signature", e1.equals(e2));
+ }
+}
diff --git a/vendor/github.com/tangerine-network/mcl/ffi/java/ElgamalTest.java b/vendor/github.com/tangerine-network/mcl/ffi/java/ElgamalTest.java
new file mode 100644
index 000000000..0cf49e144
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/ffi/java/ElgamalTest.java
@@ -0,0 +1,144 @@
+import java.io.*;
+import com.herumi.mcl.*;
+
+/*
+ ElgamalTest [ecParam]
+ ecParam = secp192k1, NIST_P224, ...
+ hashParam = sha224, sha384, ...
+*/
+public class ElgamalTest {
+ static {
+ String lib = "mcl_elgamal";
+ String libName = System.mapLibraryName(lib);
+ System.out.println("libName : " + libName);
+ System.loadLibrary(lib);
+ }
+ public static void assertEquals(String msg, int x, int y) {
+ if (x == y) {
+ System.out.println("OK : " + msg);
+ } else {
+ System.out.println("NG : " + msg + ", x = " + x + ", y = " + y);
+ }
+ }
+ public static void assertBool(String msg, boolean b) {
+ if (b) {
+ System.out.println("OK : " + msg);
+ } else {
+ System.out.println("NG : " + msg);
+ }
+ }
+ public static void main(String argv[]) {
+ try {
+ String ecStr = "secp192k1";
+ String hashStr = "sha224";
+ for (int i = 0; i < argv.length; i++) {
+ if (argv[i].equals("-e") && i < argv.length - 1) {
+ ecStr = argv[i + 1];
+ i++;
+ } else
+ if (argv[i].equals("-h") && i < argv.length - 1) {
+ hashStr = argv[i + 1];
+ i++;
+ }
+ }
+ String param = ecStr + " " + hashStr;
+ System.out.println("param=" + param);
+ Elgamal.SystemInit(param);
+
+ String prvStr = "";
+ String pubStr = "";
+ {
+ PrivateKey prv = new PrivateKey();
+ prv.init();
+ prvStr = prv.toStr();
+ PublicKey pub = prv.getPublicKey();
+ pubStr = pub.toStr();
+ }
+ int m = 1234;
+ CipherText c = new CipherText();
+ PublicKey pub = new PublicKey();
+
+ pub.fromStr(pubStr);
+
+ pub.enc(c, m);
+
+ PrivateKey prv = new PrivateKey();
+ prv.fromStr(prvStr);
+ prv.setCache(0, 60000);
+
+ int dec = prv.dec(c);
+ // verify dec(enc(m)) == m
+ assertEquals("dec(enc(m)) == m", m, dec);
+
+ // verify toStr, fromStr
+ {
+ String cStr = c.toStr();
+ CipherText c2 = new CipherText();
+ c2.fromStr(cStr);
+ int dec2 = prv.dec(c2);
+ assertEquals("fromStr(toStr(CipherText) == CipherText", dec, dec2);
+ }
+
+ // verify dec(enc(str)) == str
+ pub.enc(c, "1234");
+ dec = prv.dec(c);
+ assertEquals("dec(enc(str)) == str", m, dec);
+
+ // verify dec(mul(enc(m), 3)) == m * 3
+ c.mul(3);
+ m *= 3;
+ dec = prv.dec(c);
+ assertEquals("mul(int)", m, dec);
+
+ // verify dec(mul(enc(m), "10")) == m * 10
+ c.mul("10");
+ m *= 10;
+ dec = prv.dec(c);
+ assertEquals("mul(str)", m, dec);
+
+ // convert str
+ {
+ String s = c.toStr();
+ CipherText c2 = new CipherText();
+ c2.fromStr(s);
+ dec = prv.dec(c2);
+ assertEquals("fromStr", m, dec);
+ }
+ // rerandomize
+ pub.rerandomize(c);
+ dec = prv.dec(c);
+ assertEquals("rerandomize", m, dec);
+ int m2 = 12345;
+ // verify dec(add(enc(m), m2)) == m + m2
+ pub.add(c, m2);
+ m += m2;
+ dec = prv.dec(c);
+ assertEquals("pub.add(int)", m, dec);
+
+ pub.add(c, "993");
+ m += 993;
+ dec = prv.dec(c);
+ assertEquals("pub.add(str)", m, dec);
+
+ // string test
+ String m3 = "-2000000";
+ String m4 = "2001234";
+ CipherText c2 = new CipherText();
+ SWIGTYPE_p_bool b = Elgamal.new_p_bool();
+ pub.enc(c, m3);
+ dec = prv.dec(c, b);
+ assertBool("expect dec fail", !Elgamal.p_bool_value(b));
+ pub.enc(c2, m4);
+ dec = prv.dec(c2, b);
+ assertBool("expect dec fail", !Elgamal.p_bool_value(b));
+ c.add(c2); // m3 + m4
+
+ dec = prv.dec(c, b);
+ assertEquals("int add", 1234, dec);
+ assertBool("expect dec success", Elgamal.p_bool_value(b));
+ Elgamal.delete_p_bool(b);
+ } catch (RuntimeException e) {
+ System.out.println("unknown exception :" + e);
+ }
+ }
+}
diff --git a/vendor/github.com/tangerine-network/mcl/ffi/java/Makefile b/vendor/github.com/tangerine-network/mcl/ffi/java/Makefile
new file mode 100644
index 000000000..d69c043fb
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/ffi/java/Makefile
@@ -0,0 +1,64 @@
+TOP_DIR=../..
+include $(TOP_DIR)/common.mk
+ifeq ($(UNAME_S),Darwin)
+ JAVA_INC=-I/System/Library/Frameworks/JavaVM.framework/Versions/Current/Headers/
+else
+ JAVA_INC=-I/usr/lib/jvm/default-java/include
+#JAVA_INC=-I/usr/lib/jvm/java-7-openjdk-amd64/include
+ CFLAGS+=-z noexecstack
+ LDFLAGS+=-lrt
+endif
+CFLAGS+=$(JAVA_INC) $(JAVA_INC)/linux -I $(TOP_DIR)/include -I $(TOP_DIR)/../xbyak -I $(TOP_DIR)/../cybozulib/include -Wno-strict-aliasing
+MCL_LIB=$(TOP_DIR)/lib/libmcl.a
+
+PACKAGE_NAME=com.herumi.mcl
+PACKAGE_DIR=$(subst .,/,$(PACKAGE_NAME))
+
+ELGAMAL_LIB=$(TOP_DIR)/bin/libmcl_elgamal.$(LIB_SUF)
+BN256_LIB=$(TOP_DIR)/bin/libmcl_bn256.$(LIB_SUF)
+JAVA_EXE=cd $(TOP_DIR)/bin && LD_LIBRARY_PATH=./:$(LD_LIBRARY_PATH) java -classpath ../ffi/java
+all: $(ELGAMAL_LIB)
+
+elgamal_wrap.cxx: elgamal.i elgamal_impl.hpp
+ $(MKDIR) $(PACKAGE_DIR)
+ swig -java -package $(PACKAGE_NAME) -outdir $(PACKAGE_DIR) -c++ -Wall elgamal.i
+
+bn256_wrap.cxx: bn256.i bn256_impl.hpp
+ $(MKDIR) $(PACKAGE_DIR)
+ swig -java -package $(PACKAGE_NAME) -outdir $(PACKAGE_DIR) -c++ -Wall bn256.i
+
+$(MCL_LIB):
+ make -C $(TOP_DIR)
+
+$(ELGAMAL_LIB): elgamal_wrap.cxx $(MCL_LIB)
+ $(PRE)$(CXX) $< -o $@ $(CFLAGS) $(LDFLAGS) $(MCL_LIB) -shared
+
+$(BN256_LIB): bn256_wrap.cxx $(MCL_LIB)
+ $(PRE)$(CXX) $< -o $@ $(CFLAGS) $(LDFLAGS) $(MCL_LIB) -shared
+
+%.class: %.java
+ javac $<
+
+ElgamalTest.class: ElgamalTest.java $(ELGAMAL_LIB)
+Bn256Test.class: Bn256Test.java $(BN256_LIB)
+
+jar:
+ jar cvf mcl.jar com
+
+test_elgamal: ElgamalTest.class $(ELGAMAL_LIB)
+ $(JAVA_EXE) ElgamalTest
+ $(JAVA_EXE) ElgamalTest -e NIST_P192
+ $(JAVA_EXE) ElgamalTest -e NIST_P256 -h sha256
+ $(JAVA_EXE) ElgamalTest -e NIST_P384 -h sha384
+ $(JAVA_EXE) ElgamalTest -e NIST_P521 -h sha512
+
+test_bn256: Bn256Test.class $(BN256_LIB)
+ $(JAVA_EXE) Bn256Test
+
+test:
+ $(MAKE) test_elgamal
+ $(MAKE) test_bn256
+
+clean:
+ rm -rf *.class $(ELGAMAL_LIB) $(BN256_LIB) $(PACKAGE_DIR)/*.class *_wrap.cxx
+
diff --git a/vendor/github.com/tangerine-network/mcl/ffi/java/bn256.i b/vendor/github.com/tangerine-network/mcl/ffi/java/bn256.i
new file mode 100644
index 000000000..94a8edb7a
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/ffi/java/bn256.i
@@ -0,0 +1,31 @@
+%module Bn256
+
+%include "std_string.i"
+%include "std_except.i"
+
+
+%{
+#include <cybozu/random_generator.hpp>
+#include <cybozu/crypto.hpp>
+#include <mcl/bn256.hpp>
+struct Param {
+ cybozu::RandomGenerator rg;
+ static inline Param& getParam()
+ {
+ static Param p;
+ return p;
+ }
+};
+
+static void HashAndMapToG1(mcl::bn256::G1& P, const std::string& m)
+{
+ std::string digest = cybozu::crypto::Hash::digest(cybozu::crypto::Hash::N_SHA256, m);
+ mcl::bn256::Fp t;
+ t.setArrayMask(digest.c_str(), digest.size());
+ mcl::bn256::BN::param.mapTo.calcG1(P, t);
+}
+
+#include "bn256_impl.hpp"
+%}
+
+%include "bn256_impl.hpp"
diff --git a/vendor/github.com/tangerine-network/mcl/ffi/java/bn256_impl.hpp b/vendor/github.com/tangerine-network/mcl/ffi/java/bn256_impl.hpp
new file mode 100644
index 000000000..c4caaf3ca
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/ffi/java/bn256_impl.hpp
@@ -0,0 +1,249 @@
+#include <mcl/bn256.hpp>
+#include <stdint.h>
+#include <sstream>
+
+void SystemInit() throw(std::exception)
+{
+ mcl::bn256::initPairing();
+}
+
+class G1;
+class G2;
+class GT;
+/*
+ Fr = Z / rZ
+*/
+class Fr {
+ mcl::bn256::Fr self_;
+ friend class G1;
+ friend class G2;
+ friend class GT;
+ friend void neg(Fr& y, const Fr& x);
+ friend void add(Fr& z, const Fr& x, const Fr& y);
+ friend void sub(Fr& z, const Fr& x, const Fr& y);
+ friend void mul(Fr& z, const Fr& x, const Fr& y);
+ friend void mul(G1& z, const G1& x, const Fr& y);
+ friend void mul(G2& z, const G2& x, const Fr& y);
+ friend void div(Fr& z, const Fr& x, const Fr& y);
+ friend void pow(GT& z, const GT& x, const Fr& y);
+public:
+ Fr() {}
+ Fr(const Fr& rhs) : self_(rhs.self_) {}
+ Fr(int x) : self_(x) {}
+ Fr(const std::string& str) throw(std::exception)
+ : self_(str) {}
+ bool equals(const Fr& rhs) const { return self_ == rhs.self_; }
+ void setStr(const std::string& str) throw(std::exception)
+ {
+ self_.setStr(str);
+ }
+ void setInt(int x)
+ {
+ self_ = x;
+ }
+ void clear()
+ {
+ self_.clear();
+ }
+ void setRand()
+ {
+ self_.setRand(Param::getParam().rg);
+ }
+ std::string toString() const throw(std::exception)
+ {
+ return self_.getStr();
+ }
+};
+
+void neg(Fr& y, const Fr& x)
+{
+ mcl::bn256::Fr::neg(y.self_, x.self_);
+}
+
+void add(Fr& z, const Fr& x, const Fr& y)
+{
+ mcl::bn256::Fr::add(z.self_, x.self_, y.self_);
+}
+
+void sub(Fr& z, const Fr& x, const Fr& y)
+{
+ mcl::bn256::Fr::sub(z.self_, x.self_, y.self_);
+}
+
+void mul(Fr& z, const Fr& x, const Fr& y)
+{
+ mcl::bn256::Fr::mul(z.self_, x.self_, y.self_);
+}
+
+void div(Fr& z, const Fr& x, const Fr& y)
+{
+ mcl::bn256::Fr::div(z.self_, x.self_, y.self_);
+}
+
+/*
+ #G1 = r
+*/
+class G1 {
+ mcl::bn256::G1 self_;
+ friend void neg(G1& y, const G1& x);
+ friend void dbl(G1& y, const G1& x);
+ friend void add(G1& z, const G1& x, const G1& y);
+ friend void sub(G1& z, const G1& x, const G1& y);
+ friend void mul(G1& z, const G1& x, const Fr& y);
+ friend void pairing(GT& e, const G1& P, const G2& Q);
+public:
+ G1() {}
+ G1(const G1& rhs) : self_(rhs.self_) {}
+ G1(const std::string& x, const std::string& y) throw(std::exception)
+ : self_(mcl::bn256::Fp(x), mcl::bn256::Fp(y))
+ {
+ }
+ bool equals(const G1& rhs) const { return self_ == rhs.self_; }
+ void set(const std::string& x, const std::string& y)
+ {
+ self_.set(mcl::bn256::Fp(x), mcl::bn256::Fp(y));
+ }
+ void hashAndMapToG1(const std::string& m) throw(std::exception)
+ {
+ HashAndMapToG1(self_, m);
+ }
+ void clear()
+ {
+ self_.clear();
+ }
+ /*
+ compressed format
+ */
+ void setStr(const std::string& str) throw(std::exception)
+ {
+ self_.setStr(str);
+ }
+ std::string toString() const throw(std::exception)
+ {
+ return self_.getStr();
+ }
+};
+
+void neg(G1& y, const G1& x)
+{
+ mcl::bn256::G1::neg(y.self_, x.self_);
+}
+void dbl(G1& y, const G1& x)
+{
+ mcl::bn256::G1::dbl(y.self_, x.self_);
+}
+void add(G1& z, const G1& x, const G1& y)
+{
+ mcl::bn256::G1::add(z.self_, x.self_, y.self_);
+}
+void sub(G1& z, const G1& x, const G1& y)
+{
+ mcl::bn256::G1::sub(z.self_, x.self_, y.self_);
+}
+void mul(G1& z, const G1& x, const Fr& y)
+{
+ mcl::bn256::G1::mul(z.self_, x.self_, y.self_);
+}
+
+/*
+ #G2 = r
+*/
+class G2 {
+ mcl::bn256::G2 self_;
+ friend void neg(G2& y, const G2& x);
+ friend void dbl(G2& y, const G2& x);
+ friend void add(G2& z, const G2& x, const G2& y);
+ friend void sub(G2& z, const G2& x, const G2& y);
+ friend void mul(G2& z, const G2& x, const Fr& y);
+ friend void pairing(GT& e, const G1& P, const G2& Q);
+public:
+ G2() {}
+ G2(const G2& rhs) : self_(rhs.self_) {}
+ G2(const std::string& xa, const std::string& xb, const std::string& ya, const std::string& yb) throw(std::exception)
+ : self_(mcl::bn256::Fp2(xa, xb), mcl::bn256::Fp2(ya, yb))
+ {
+ }
+ bool equals(const G2& rhs) const { return self_ == rhs.self_; }
+ void set(const std::string& xa, const std::string& xb, const std::string& ya, const std::string& yb)
+ {
+ self_.set(mcl::bn256::Fp2(xa, xb), mcl::bn256::Fp2(ya, yb));
+ }
+ void clear()
+ {
+ self_.clear();
+ }
+ /*
+ compressed format
+ */
+ void setStr(const std::string& str) throw(std::exception)
+ {
+ self_.setStr(str);
+ }
+ std::string toString() const throw(std::exception)
+ {
+ return self_.getStr();
+ }
+};
+
+void neg(G2& y, const G2& x)
+{
+ mcl::bn256::G2::neg(y.self_, x.self_);
+}
+void dbl(G2& y, const G2& x)
+{
+ mcl::bn256::G2::dbl(y.self_, x.self_);
+}
+void add(G2& z, const G2& x, const G2& y)
+{
+ mcl::bn256::G2::add(z.self_, x.self_, y.self_);
+}
+void sub(G2& z, const G2& x, const G2& y)
+{
+ mcl::bn256::G2::sub(z.self_, x.self_, y.self_);
+}
+void mul(G2& z, const G2& x, const Fr& y)
+{
+ mcl::bn256::G2::mul(z.self_, x.self_, y.self_);
+}
+
+/*
+ #GT = r
+*/
+class GT {
+ mcl::bn256::Fp12 self_;
+ friend void mul(GT& z, const GT& x, const GT& y);
+ friend void pow(GT& z, const GT& x, const Fr& y);
+ friend void pairing(GT& e, const G1& P, const G2& Q);
+public:
+ GT() {}
+ GT(const GT& rhs) : self_(rhs.self_) {}
+ bool equals(const GT& rhs) const { return self_ == rhs.self_; }
+ void clear()
+ {
+ self_.clear();
+ }
+ void setStr(const std::string& str) throw(std::exception)
+ {
+ std::istringstream iss(str);
+ iss >> self_;
+ }
+ std::string toString() const throw(std::exception)
+ {
+ std::ostringstream oss;
+ oss << self_;
+ return oss.str();
+ }
+};
+
+void mul(GT& z, const GT& x, const GT& y)
+{
+ mcl::bn256::Fp12::mul(z.self_, x.self_, y.self_);
+}
+void pow(GT& z, const GT& x, const Fr& y)
+{
+ mcl::bn256::Fp12::pow(z.self_, x.self_, y.self_);
+}
+void pairing(GT& e, const G1& P, const G2& Q)
+{
+ mcl::bn256::pairing(e.self_, P.self_, Q.self_);
+}
diff --git a/vendor/github.com/tangerine-network/mcl/ffi/java/bn256_wrap.cxx b/vendor/github.com/tangerine-network/mcl/ffi/java/bn256_wrap.cxx
new file mode 100644
index 000000000..0c8257af5
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/ffi/java/bn256_wrap.cxx
@@ -0,0 +1,1542 @@
+/* ----------------------------------------------------------------------------
+ * This file was automatically generated by SWIG (http://www.swig.org).
+ * Version 3.0.12
+ *
+ * This file is not intended to be easily readable and contains a number of
+ * coding conventions designed to improve portability and efficiency. Do not make
+ * changes to this file unless you know what you are doing--modify the SWIG
+ * interface file instead.
+ * ----------------------------------------------------------------------------- */
+
+
+#ifndef SWIGJAVA
+#define SWIGJAVA
+#endif
+
+
+
+#ifdef __cplusplus
+/* SwigValueWrapper is described in swig.swg */
+template<typename T> class SwigValueWrapper {
+ struct SwigMovePointer {
+ T *ptr;
+ SwigMovePointer(T *p) : ptr(p) { }
+ ~SwigMovePointer() { delete ptr; }
+ SwigMovePointer& operator=(SwigMovePointer& rhs) { T* oldptr = ptr; ptr = 0; delete oldptr; ptr = rhs.ptr; rhs.ptr = 0; return *this; }
+ } pointer;
+ SwigValueWrapper& operator=(const SwigValueWrapper<T>& rhs);
+ SwigValueWrapper(const SwigValueWrapper<T>& rhs);
+public:
+ SwigValueWrapper() : pointer(0) { }
+ SwigValueWrapper& operator=(const T& t) { SwigMovePointer tmp(new T(t)); pointer = tmp; return *this; }
+ operator T&() const { return *pointer.ptr; }
+ T *operator&() { return pointer.ptr; }
+};
+
+template <typename T> T SwigValueInit() {
+ return T();
+}
+#endif
+
+/* -----------------------------------------------------------------------------
+ * This section contains generic SWIG labels for method/variable
+ * declarations/attributes, and other compiler dependent labels.
+ * ----------------------------------------------------------------------------- */
+
+/* template workaround for compilers that cannot correctly implement the C++ standard */
+#ifndef SWIGTEMPLATEDISAMBIGUATOR
+# if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560)
+# define SWIGTEMPLATEDISAMBIGUATOR template
+# elif defined(__HP_aCC)
+/* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */
+/* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */
+# define SWIGTEMPLATEDISAMBIGUATOR template
+# else
+# define SWIGTEMPLATEDISAMBIGUATOR
+# endif
+#endif
+
+/* inline attribute */
+#ifndef SWIGINLINE
+# if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__))
+# define SWIGINLINE inline
+# else
+# define SWIGINLINE
+# endif
+#endif
+
+/* attribute recognised by some compilers to avoid 'unused' warnings */
+#ifndef SWIGUNUSED
+# if defined(__GNUC__)
+# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
+# define SWIGUNUSED __attribute__ ((__unused__))
+# else
+# define SWIGUNUSED
+# endif
+# elif defined(__ICC)
+# define SWIGUNUSED __attribute__ ((__unused__))
+# else
+# define SWIGUNUSED
+# endif
+#endif
+
+#ifndef SWIG_MSC_UNSUPPRESS_4505
+# if defined(_MSC_VER)
+# pragma warning(disable : 4505) /* unreferenced local function has been removed */
+# endif
+#endif
+
+#ifndef SWIGUNUSEDPARM
+# ifdef __cplusplus
+# define SWIGUNUSEDPARM(p)
+# else
+# define SWIGUNUSEDPARM(p) p SWIGUNUSED
+# endif
+#endif
+
+/* internal SWIG method */
+#ifndef SWIGINTERN
+# define SWIGINTERN static SWIGUNUSED
+#endif
+
+/* internal inline SWIG method */
+#ifndef SWIGINTERNINLINE
+# define SWIGINTERNINLINE SWIGINTERN SWIGINLINE
+#endif
+
+/* exporting methods */
+#if defined(__GNUC__)
+# if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
+# ifndef GCC_HASCLASSVISIBILITY
+# define GCC_HASCLASSVISIBILITY
+# endif
+# endif
+#endif
+
+#ifndef SWIGEXPORT
+# if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__)
+# if defined(STATIC_LINKED)
+# define SWIGEXPORT
+# else
+# define SWIGEXPORT __declspec(dllexport)
+# endif
+# else
+# if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY)
+# define SWIGEXPORT __attribute__ ((visibility("default")))
+# else
+# define SWIGEXPORT
+# endif
+# endif
+#endif
+
+/* calling conventions for Windows */
+#ifndef SWIGSTDCALL
+# if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__)
+# define SWIGSTDCALL __stdcall
+# else
+# define SWIGSTDCALL
+# endif
+#endif
+
+/* Deal with Microsoft's attempt at deprecating C standard runtime functions */
+#if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE)
+# define _CRT_SECURE_NO_DEPRECATE
+#endif
+
+/* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */
+#if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE)
+# define _SCL_SECURE_NO_DEPRECATE
+#endif
+
+/* Deal with Apple's deprecated 'AssertMacros.h' from Carbon-framework */
+#if defined(__APPLE__) && !defined(__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES)
+# define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0
+#endif
+
+/* Intel's compiler complains if a variable which was never initialised is
+ * cast to void, which is a common idiom which we use to indicate that we
+ * are aware a variable isn't used. So we just silence that warning.
+ * See: https://github.com/swig/swig/issues/192 for more discussion.
+ */
+#ifdef __INTEL_COMPILER
+# pragma warning disable 592
+#endif
+
+
+/* Fix for jlong on some versions of gcc on Windows */
+#if defined(__GNUC__) && !defined(__INTEL_COMPILER)
+ typedef long long __int64;
+#endif
+
+/* Fix for jlong on 64-bit x86 Solaris */
+#if defined(__x86_64)
+# ifdef _LP64
+# undef _LP64
+# endif
+#endif
+
+#include <jni.h>
+#include <stdlib.h>
+#include <string.h>
+
+
+/* Support for throwing Java exceptions */
+typedef enum {
+ SWIG_JavaOutOfMemoryError = 1,
+ SWIG_JavaIOException,
+ SWIG_JavaRuntimeException,
+ SWIG_JavaIndexOutOfBoundsException,
+ SWIG_JavaArithmeticException,
+ SWIG_JavaIllegalArgumentException,
+ SWIG_JavaNullPointerException,
+ SWIG_JavaDirectorPureVirtual,
+ SWIG_JavaUnknownError
+} SWIG_JavaExceptionCodes;
+
+typedef struct {
+ SWIG_JavaExceptionCodes code;
+ const char *java_exception;
+} SWIG_JavaExceptions_t;
+
+
+static void SWIGUNUSED SWIG_JavaThrowException(JNIEnv *jenv, SWIG_JavaExceptionCodes code, const char *msg) {
+ jclass excep;
+ static const SWIG_JavaExceptions_t java_exceptions[] = {
+ { SWIG_JavaOutOfMemoryError, "java/lang/OutOfMemoryError" },
+ { SWIG_JavaIOException, "java/io/IOException" },
+ { SWIG_JavaRuntimeException, "java/lang/RuntimeException" },
+ { SWIG_JavaIndexOutOfBoundsException, "java/lang/IndexOutOfBoundsException" },
+ { SWIG_JavaArithmeticException, "java/lang/ArithmeticException" },
+ { SWIG_JavaIllegalArgumentException, "java/lang/IllegalArgumentException" },
+ { SWIG_JavaNullPointerException, "java/lang/NullPointerException" },
+ { SWIG_JavaDirectorPureVirtual, "java/lang/RuntimeException" },
+ { SWIG_JavaUnknownError, "java/lang/UnknownError" },
+ { (SWIG_JavaExceptionCodes)0, "java/lang/UnknownError" }
+ };
+ const SWIG_JavaExceptions_t *except_ptr = java_exceptions;
+
+ while (except_ptr->code != code && except_ptr->code)
+ except_ptr++;
+
+ jenv->ExceptionClear();
+ excep = jenv->FindClass(except_ptr->java_exception);
+ if (excep)
+ jenv->ThrowNew(excep, msg);
+}
+
+
+/* Contract support */
+
+#define SWIG_contract_assert(nullreturn, expr, msg) if (!(expr)) {SWIG_JavaThrowException(jenv, SWIG_JavaIllegalArgumentException, msg); return nullreturn; } else
+
+
+#include <string>
+
+
+#include <typeinfo>
+#include <stdexcept>
+
+
+#include <cybozu/random_generator.hpp>
+#include <cybozu/crypto.hpp>
+#include <mcl/bn256.hpp>
+struct Param {
+ cybozu::RandomGenerator rg;
+ static inline Param& getParam()
+ {
+ static Param p;
+ return p;
+ }
+};
+
+static void HashAndMapToG1(mcl::bn256::G1& P, const std::string& m)
+{
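+  // SHA-256 the message, load the digest into an Fp element (setArrayMask trims the excess bits), then map that field element to a point on G1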
+ std::string digest = cybozu::crypto::Hash::digest(cybozu::crypto::Hash::N_SHA256, m);
+ mcl::bn256::Fp t;
+ t.setArrayMask(digest.c_str(), digest.size());
+ mcl::bn256::BN::param.mapTo.calcG1(P, t);
+}
+
+#include "bn256_impl.hpp"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_SystemInit(JNIEnv *jenv, jclass jcls) {
+ (void)jenv;
+ (void)jcls;
+ try {
+ SystemInit();
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return ;
+ }
+
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_neg_1_1SWIG_10(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) {
+ Fr *arg1 = 0 ;
+ Fr *arg2 = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ (void)jarg2_;
+ arg1 = *(Fr **)&jarg1;
+ if (!arg1) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr & reference is null");
+ return ;
+ }
+ arg2 = *(Fr **)&jarg2;
+ if (!arg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr const & reference is null");
+ return ;
+ }
+ neg(*arg1,(Fr const &)*arg2);
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_add_1_1SWIG_10(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_) {
+ Fr *arg1 = 0 ;
+ Fr *arg2 = 0 ;
+ Fr *arg3 = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ (void)jarg2_;
+ (void)jarg3_;
+ arg1 = *(Fr **)&jarg1;
+ if (!arg1) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr & reference is null");
+ return ;
+ }
+ arg2 = *(Fr **)&jarg2;
+ if (!arg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr const & reference is null");
+ return ;
+ }
+ arg3 = *(Fr **)&jarg3;
+ if (!arg3) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr const & reference is null");
+ return ;
+ }
+ add(*arg1,(Fr const &)*arg2,(Fr const &)*arg3);
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_sub_1_1SWIG_10(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_) {
+ Fr *arg1 = 0 ;
+ Fr *arg2 = 0 ;
+ Fr *arg3 = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ (void)jarg2_;
+ (void)jarg3_;
+ arg1 = *(Fr **)&jarg1;
+ if (!arg1) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr & reference is null");
+ return ;
+ }
+ arg2 = *(Fr **)&jarg2;
+ if (!arg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr const & reference is null");
+ return ;
+ }
+ arg3 = *(Fr **)&jarg3;
+ if (!arg3) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr const & reference is null");
+ return ;
+ }
+ sub(*arg1,(Fr const &)*arg2,(Fr const &)*arg3);
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_mul_1_1SWIG_10(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_) {
+ Fr *arg1 = 0 ;
+ Fr *arg2 = 0 ;
+ Fr *arg3 = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ (void)jarg2_;
+ (void)jarg3_;
+ arg1 = *(Fr **)&jarg1;
+ if (!arg1) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr & reference is null");
+ return ;
+ }
+ arg2 = *(Fr **)&jarg2;
+ if (!arg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr const & reference is null");
+ return ;
+ }
+ arg3 = *(Fr **)&jarg3;
+ if (!arg3) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr const & reference is null");
+ return ;
+ }
+ mul(*arg1,(Fr const &)*arg2,(Fr const &)*arg3);
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_mul_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_) {
+ G1 *arg1 = 0 ;
+ G1 *arg2 = 0 ;
+ Fr *arg3 = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ (void)jarg2_;
+ (void)jarg3_;
+ arg1 = *(G1 **)&jarg1;
+ if (!arg1) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 & reference is null");
+ return ;
+ }
+ arg2 = *(G1 **)&jarg2;
+ if (!arg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 const & reference is null");
+ return ;
+ }
+ arg3 = *(Fr **)&jarg3;
+ if (!arg3) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr const & reference is null");
+ return ;
+ }
+ mul(*arg1,(G1 const &)*arg2,(Fr const &)*arg3);
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_mul_1_1SWIG_12(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_) {
+ G2 *arg1 = 0 ;
+ G2 *arg2 = 0 ;
+ Fr *arg3 = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ (void)jarg2_;
+ (void)jarg3_;
+ arg1 = *(G2 **)&jarg1;
+ if (!arg1) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 & reference is null");
+ return ;
+ }
+ arg2 = *(G2 **)&jarg2;
+ if (!arg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 const & reference is null");
+ return ;
+ }
+ arg3 = *(Fr **)&jarg3;
+ if (!arg3) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr const & reference is null");
+ return ;
+ }
+ mul(*arg1,(G2 const &)*arg2,(Fr const &)*arg3);
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_div(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_) {
+ Fr *arg1 = 0 ;
+ Fr *arg2 = 0 ;
+ Fr *arg3 = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ (void)jarg2_;
+ (void)jarg3_;
+ arg1 = *(Fr **)&jarg1;
+ if (!arg1) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr & reference is null");
+ return ;
+ }
+ arg2 = *(Fr **)&jarg2;
+ if (!arg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr const & reference is null");
+ return ;
+ }
+ arg3 = *(Fr **)&jarg3;
+ if (!arg3) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr const & reference is null");
+ return ;
+ }
+ div(*arg1,(Fr const &)*arg2,(Fr const &)*arg3);
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_pow(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_) {
+ GT *arg1 = 0 ;
+ GT *arg2 = 0 ;
+ Fr *arg3 = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ (void)jarg2_;
+ (void)jarg3_;
+ arg1 = *(GT **)&jarg1;
+ if (!arg1) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "GT & reference is null");
+ return ;
+ }
+ arg2 = *(GT **)&jarg2;
+ if (!arg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "GT const & reference is null");
+ return ;
+ }
+ arg3 = *(Fr **)&jarg3;
+ if (!arg3) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr const & reference is null");
+ return ;
+ }
+ pow(*arg1,(GT const &)*arg2,(Fr const &)*arg3);
+}
+
+
+SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_Bn256JNI_new_1Fr_1_1SWIG_10(JNIEnv *jenv, jclass jcls) {
+ jlong jresult = 0 ;
+ Fr *result = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ result = (Fr *)new Fr();
+ *(Fr **)&jresult = result;
+ return jresult;
+}
+
+
+SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_Bn256JNI_new_1Fr_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
+ jlong jresult = 0 ;
+ Fr *arg1 = 0 ;
+ Fr *result = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ arg1 = *(Fr **)&jarg1;
+ if (!arg1) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr const & reference is null");
+ return 0;
+ }
+ result = (Fr *)new Fr((Fr const &)*arg1);
+ *(Fr **)&jresult = result;
+ return jresult;
+}
+
+
+SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_Bn256JNI_new_1Fr_1_1SWIG_12(JNIEnv *jenv, jclass jcls, jint jarg1) {
+ jlong jresult = 0 ;
+ int arg1 ;
+ Fr *result = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ arg1 = (int)jarg1;
+ result = (Fr *)new Fr(arg1);
+ *(Fr **)&jresult = result;
+ return jresult;
+}
+
+
+SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_Bn256JNI_new_1Fr_1_1SWIG_13(JNIEnv *jenv, jclass jcls, jstring jarg1) {
+ jlong jresult = 0 ;
+ std::string *arg1 = 0 ;
+ Fr *result = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ if(!jarg1) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string");
+ return 0;
+ }
+ const char *arg1_pstr = (const char *)jenv->GetStringUTFChars(jarg1, 0);
+ if (!arg1_pstr) return 0;
+ std::string arg1_str(arg1_pstr);
+ arg1 = &arg1_str;
+ jenv->ReleaseStringUTFChars(jarg1, arg1_pstr);
+ try {
+ result = (Fr *)new Fr((std::string const &)*arg1);
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return 0;
+ }
+
+ *(Fr **)&jresult = result;
+ return jresult;
+}
+
+
+SWIGEXPORT jboolean JNICALL Java_com_herumi_mcl_Bn256JNI_Fr_1equals(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) {
+ jboolean jresult = 0 ;
+ Fr *arg1 = (Fr *) 0 ;
+ Fr *arg2 = 0 ;
+ bool result;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ (void)jarg2_;
+ arg1 = *(Fr **)&jarg1;
+ arg2 = *(Fr **)&jarg2;
+ if (!arg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr const & reference is null");
+ return 0;
+ }
+ result = (bool)((Fr const *)arg1)->equals((Fr const &)*arg2);
+ jresult = (jboolean)result;
+ return jresult;
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_Fr_1setStr(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) {
+ Fr *arg1 = (Fr *) 0 ;
+ std::string *arg2 = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ arg1 = *(Fr **)&jarg1;
+ if(!jarg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string");
+ return ;
+ }
+ const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0);
+ if (!arg2_pstr) return ;
+ std::string arg2_str(arg2_pstr);
+ arg2 = &arg2_str;
+ jenv->ReleaseStringUTFChars(jarg2, arg2_pstr);
+ try {
+ (arg1)->setStr((std::string const &)*arg2);
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return ;
+ }
+
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_Fr_1setInt(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jint jarg2) {
+ Fr *arg1 = (Fr *) 0 ;
+ int arg2 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ arg1 = *(Fr **)&jarg1;
+ arg2 = (int)jarg2;
+ (arg1)->setInt(arg2);
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_Fr_1clear(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
+ Fr *arg1 = (Fr *) 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ arg1 = *(Fr **)&jarg1;
+ (arg1)->clear();
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_Fr_1setRand(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
+ Fr *arg1 = (Fr *) 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ arg1 = *(Fr **)&jarg1;
+ (arg1)->setRand();
+}
+
+
+SWIGEXPORT jstring JNICALL Java_com_herumi_mcl_Bn256JNI_Fr_1toString(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
+ jstring jresult = 0 ;
+ Fr *arg1 = (Fr *) 0 ;
+ std::string result;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ arg1 = *(Fr **)&jarg1;
+ try {
+ result = ((Fr const *)arg1)->toString();
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return 0;
+ }
+
+ jresult = jenv->NewStringUTF((&result)->c_str());
+ return jresult;
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_delete_1Fr(JNIEnv *jenv, jclass jcls, jlong jarg1) {
+ Fr *arg1 = (Fr *) 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ arg1 = *(Fr **)&jarg1;
+ delete arg1;
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_neg_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) {
+ G1 *arg1 = 0 ;
+ G1 *arg2 = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ (void)jarg2_;
+ arg1 = *(G1 **)&jarg1;
+ if (!arg1) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 & reference is null");
+ return ;
+ }
+ arg2 = *(G1 **)&jarg2;
+ if (!arg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 const & reference is null");
+ return ;
+ }
+ neg(*arg1,(G1 const &)*arg2);
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_dbl_1_1SWIG_10(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) {
+ G1 *arg1 = 0 ;
+ G1 *arg2 = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ (void)jarg2_;
+ arg1 = *(G1 **)&jarg1;
+ if (!arg1) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 & reference is null");
+ return ;
+ }
+ arg2 = *(G1 **)&jarg2;
+ if (!arg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 const & reference is null");
+ return ;
+ }
+ dbl(*arg1,(G1 const &)*arg2);
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_add_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_) {
+ G1 *arg1 = 0 ;
+ G1 *arg2 = 0 ;
+ G1 *arg3 = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ (void)jarg2_;
+ (void)jarg3_;
+ arg1 = *(G1 **)&jarg1;
+ if (!arg1) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 & reference is null");
+ return ;
+ }
+ arg2 = *(G1 **)&jarg2;
+ if (!arg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 const & reference is null");
+ return ;
+ }
+ arg3 = *(G1 **)&jarg3;
+ if (!arg3) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 const & reference is null");
+ return ;
+ }
+ add(*arg1,(G1 const &)*arg2,(G1 const &)*arg3);
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_sub_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_) {
+ G1 *arg1 = 0 ;
+ G1 *arg2 = 0 ;
+ G1 *arg3 = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ (void)jarg2_;
+ (void)jarg3_;
+ arg1 = *(G1 **)&jarg1;
+ if (!arg1) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 & reference is null");
+ return ;
+ }
+ arg2 = *(G1 **)&jarg2;
+ if (!arg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 const & reference is null");
+ return ;
+ }
+ arg3 = *(G1 **)&jarg3;
+ if (!arg3) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 const & reference is null");
+ return ;
+ }
+ sub(*arg1,(G1 const &)*arg2,(G1 const &)*arg3);
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_pairing(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_) {
+ GT *arg1 = 0 ;
+ G1 *arg2 = 0 ;
+ G2 *arg3 = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ (void)jarg2_;
+ (void)jarg3_;
+ arg1 = *(GT **)&jarg1;
+ if (!arg1) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "GT & reference is null");
+ return ;
+ }
+ arg2 = *(G1 **)&jarg2;
+ if (!arg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 const & reference is null");
+ return ;
+ }
+ arg3 = *(G2 **)&jarg3;
+ if (!arg3) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 const & reference is null");
+ return ;
+ }
+ pairing(*arg1,(G1 const &)*arg2,(G2 const &)*arg3);
+}
+
+
+SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_Bn256JNI_new_1G1_1_1SWIG_10(JNIEnv *jenv, jclass jcls) {
+ jlong jresult = 0 ;
+ G1 *result = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ result = (G1 *)new G1();
+ *(G1 **)&jresult = result;
+ return jresult;
+}
+
+
+SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_Bn256JNI_new_1G1_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
+ jlong jresult = 0 ;
+ G1 *arg1 = 0 ;
+ G1 *result = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ arg1 = *(G1 **)&jarg1;
+ if (!arg1) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 const & reference is null");
+ return 0;
+ }
+ result = (G1 *)new G1((G1 const &)*arg1);
+ *(G1 **)&jresult = result;
+ return jresult;
+}
+
+
+SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_Bn256JNI_new_1G1_1_1SWIG_12(JNIEnv *jenv, jclass jcls, jstring jarg1, jstring jarg2) {
+ jlong jresult = 0 ;
+ std::string *arg1 = 0 ;
+ std::string *arg2 = 0 ;
+ G1 *result = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ if(!jarg1) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string");
+ return 0;
+ }
+ const char *arg1_pstr = (const char *)jenv->GetStringUTFChars(jarg1, 0);
+ if (!arg1_pstr) return 0;
+ std::string arg1_str(arg1_pstr);
+ arg1 = &arg1_str;
+ jenv->ReleaseStringUTFChars(jarg1, arg1_pstr);
+ if(!jarg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string");
+ return 0;
+ }
+ const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0);
+ if (!arg2_pstr) return 0;
+ std::string arg2_str(arg2_pstr);
+ arg2 = &arg2_str;
+ jenv->ReleaseStringUTFChars(jarg2, arg2_pstr);
+ try {
+ result = (G1 *)new G1((std::string const &)*arg1,(std::string const &)*arg2);
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return 0;
+ }
+
+ *(G1 **)&jresult = result;
+ return jresult;
+}
+
+
+SWIGEXPORT jboolean JNICALL Java_com_herumi_mcl_Bn256JNI_G1_1equals(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) {
+ jboolean jresult = 0 ;
+ G1 *arg1 = (G1 *) 0 ;
+ G1 *arg2 = 0 ;
+ bool result;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ (void)jarg2_;
+ arg1 = *(G1 **)&jarg1;
+ arg2 = *(G1 **)&jarg2;
+ if (!arg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 const & reference is null");
+ return 0;
+ }
+ result = (bool)((G1 const *)arg1)->equals((G1 const &)*arg2);
+ jresult = (jboolean)result;
+ return jresult;
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_G1_1set(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2, jstring jarg3) {
+ G1 *arg1 = (G1 *) 0 ;
+ std::string *arg2 = 0 ;
+ std::string *arg3 = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ arg1 = *(G1 **)&jarg1;
+ if(!jarg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string");
+ return ;
+ }
+ const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0);
+ if (!arg2_pstr) return ;
+ std::string arg2_str(arg2_pstr);
+ arg2 = &arg2_str;
+ jenv->ReleaseStringUTFChars(jarg2, arg2_pstr);
+ if(!jarg3) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string");
+ return ;
+ }
+ const char *arg3_pstr = (const char *)jenv->GetStringUTFChars(jarg3, 0);
+ if (!arg3_pstr) return ;
+ std::string arg3_str(arg3_pstr);
+ arg3 = &arg3_str;
+ jenv->ReleaseStringUTFChars(jarg3, arg3_pstr);
+ (arg1)->set((std::string const &)*arg2,(std::string const &)*arg3);
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_G1_1hashAndMapToG1(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) {
+ G1 *arg1 = (G1 *) 0 ;
+ std::string *arg2 = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ arg1 = *(G1 **)&jarg1;
+ if(!jarg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string");
+ return ;
+ }
+ const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0);
+ if (!arg2_pstr) return ;
+ std::string arg2_str(arg2_pstr);
+ arg2 = &arg2_str;
+ jenv->ReleaseStringUTFChars(jarg2, arg2_pstr);
+ try {
+ (arg1)->hashAndMapToG1((std::string const &)*arg2);
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return ;
+ }
+
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_G1_1clear(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
+ G1 *arg1 = (G1 *) 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ arg1 = *(G1 **)&jarg1;
+ (arg1)->clear();
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_G1_1setStr(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) {
+ G1 *arg1 = (G1 *) 0 ;
+ std::string *arg2 = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ arg1 = *(G1 **)&jarg1;
+ if(!jarg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string");
+ return ;
+ }
+ const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0);
+ if (!arg2_pstr) return ;
+ std::string arg2_str(arg2_pstr);
+ arg2 = &arg2_str;
+ jenv->ReleaseStringUTFChars(jarg2, arg2_pstr);
+ try {
+ (arg1)->setStr((std::string const &)*arg2);
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return ;
+ }
+
+}
+
+
+SWIGEXPORT jstring JNICALL Java_com_herumi_mcl_Bn256JNI_G1_1toString(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
+ jstring jresult = 0 ;
+ G1 *arg1 = (G1 *) 0 ;
+ std::string result;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ arg1 = *(G1 **)&jarg1;
+ try {
+ result = ((G1 const *)arg1)->toString();
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return 0;
+ }
+
+ jresult = jenv->NewStringUTF((&result)->c_str());
+ return jresult;
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_delete_1G1(JNIEnv *jenv, jclass jcls, jlong jarg1) {
+ G1 *arg1 = (G1 *) 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ arg1 = *(G1 **)&jarg1;
+ delete arg1;
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_neg_1_1SWIG_12(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) {
+ G2 *arg1 = 0 ;
+ G2 *arg2 = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ (void)jarg2_;
+ arg1 = *(G2 **)&jarg1;
+ if (!arg1) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 & reference is null");
+ return ;
+ }
+ arg2 = *(G2 **)&jarg2;
+ if (!arg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 const & reference is null");
+ return ;
+ }
+ neg(*arg1,(G2 const &)*arg2);
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_dbl_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) {
+ G2 *arg1 = 0 ;
+ G2 *arg2 = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ (void)jarg2_;
+ arg1 = *(G2 **)&jarg1;
+ if (!arg1) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 & reference is null");
+ return ;
+ }
+ arg2 = *(G2 **)&jarg2;
+ if (!arg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 const & reference is null");
+ return ;
+ }
+ dbl(*arg1,(G2 const &)*arg2);
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_add_1_1SWIG_12(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_) {
+ G2 *arg1 = 0 ;
+ G2 *arg2 = 0 ;
+ G2 *arg3 = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ (void)jarg2_;
+ (void)jarg3_;
+ arg1 = *(G2 **)&jarg1;
+ if (!arg1) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 & reference is null");
+ return ;
+ }
+ arg2 = *(G2 **)&jarg2;
+ if (!arg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 const & reference is null");
+ return ;
+ }
+ arg3 = *(G2 **)&jarg3;
+ if (!arg3) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 const & reference is null");
+ return ;
+ }
+ add(*arg1,(G2 const &)*arg2,(G2 const &)*arg3);
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_sub_1_1SWIG_12(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_) {
+ G2 *arg1 = 0 ;
+ G2 *arg2 = 0 ;
+ G2 *arg3 = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ (void)jarg2_;
+ (void)jarg3_;
+ arg1 = *(G2 **)&jarg1;
+ if (!arg1) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 & reference is null");
+ return ;
+ }
+ arg2 = *(G2 **)&jarg2;
+ if (!arg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 const & reference is null");
+ return ;
+ }
+ arg3 = *(G2 **)&jarg3;
+ if (!arg3) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 const & reference is null");
+ return ;
+ }
+ sub(*arg1,(G2 const &)*arg2,(G2 const &)*arg3);
+}
+
+
+SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_Bn256JNI_new_1G2_1_1SWIG_10(JNIEnv *jenv, jclass jcls) {
+ jlong jresult = 0 ;
+ G2 *result = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ result = (G2 *)new G2();
+ *(G2 **)&jresult = result;
+ return jresult;
+}
+
+
+SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_Bn256JNI_new_1G2_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
+ jlong jresult = 0 ;
+ G2 *arg1 = 0 ;
+ G2 *result = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ arg1 = *(G2 **)&jarg1;
+ if (!arg1) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 const & reference is null");
+ return 0;
+ }
+ result = (G2 *)new G2((G2 const &)*arg1);
+ *(G2 **)&jresult = result;
+ return jresult;
+}
+
+
+SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_Bn256JNI_new_1G2_1_1SWIG_12(JNIEnv *jenv, jclass jcls, jstring jarg1, jstring jarg2, jstring jarg3, jstring jarg4) {
+ jlong jresult = 0 ;
+ std::string *arg1 = 0 ;
+ std::string *arg2 = 0 ;
+ std::string *arg3 = 0 ;
+ std::string *arg4 = 0 ;
+ G2 *result = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ if(!jarg1) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string");
+ return 0;
+ }
+ const char *arg1_pstr = (const char *)jenv->GetStringUTFChars(jarg1, 0);
+ if (!arg1_pstr) return 0;
+ std::string arg1_str(arg1_pstr);
+ arg1 = &arg1_str;
+ jenv->ReleaseStringUTFChars(jarg1, arg1_pstr);
+ if(!jarg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string");
+ return 0;
+ }
+ const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0);
+ if (!arg2_pstr) return 0;
+ std::string arg2_str(arg2_pstr);
+ arg2 = &arg2_str;
+ jenv->ReleaseStringUTFChars(jarg2, arg2_pstr);
+ if(!jarg3) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string");
+ return 0;
+ }
+ const char *arg3_pstr = (const char *)jenv->GetStringUTFChars(jarg3, 0);
+ if (!arg3_pstr) return 0;
+ std::string arg3_str(arg3_pstr);
+ arg3 = &arg3_str;
+ jenv->ReleaseStringUTFChars(jarg3, arg3_pstr);
+ if(!jarg4) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string");
+ return 0;
+ }
+ const char *arg4_pstr = (const char *)jenv->GetStringUTFChars(jarg4, 0);
+ if (!arg4_pstr) return 0;
+ std::string arg4_str(arg4_pstr);
+ arg4 = &arg4_str;
+ jenv->ReleaseStringUTFChars(jarg4, arg4_pstr);
+ try {
+ result = (G2 *)new G2((std::string const &)*arg1,(std::string const &)*arg2,(std::string const &)*arg3,(std::string const &)*arg4);
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return 0;
+ }
+
+ *(G2 **)&jresult = result;
+ return jresult;
+}
+
+
+SWIGEXPORT jboolean JNICALL Java_com_herumi_mcl_Bn256JNI_G2_1equals(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) {
+ jboolean jresult = 0 ;
+ G2 *arg1 = (G2 *) 0 ;
+ G2 *arg2 = 0 ;
+ bool result;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ (void)jarg2_;
+ arg1 = *(G2 **)&jarg1;
+ arg2 = *(G2 **)&jarg2;
+ if (!arg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 const & reference is null");
+ return 0;
+ }
+ result = (bool)((G2 const *)arg1)->equals((G2 const &)*arg2);
+ jresult = (jboolean)result;
+ return jresult;
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_G2_1set(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2, jstring jarg3, jstring jarg4, jstring jarg5) {
+ G2 *arg1 = (G2 *) 0 ;
+ std::string *arg2 = 0 ;
+ std::string *arg3 = 0 ;
+ std::string *arg4 = 0 ;
+ std::string *arg5 = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ arg1 = *(G2 **)&jarg1;
+ if(!jarg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string");
+ return ;
+ }
+ const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0);
+ if (!arg2_pstr) return ;
+ std::string arg2_str(arg2_pstr);
+ arg2 = &arg2_str;
+ jenv->ReleaseStringUTFChars(jarg2, arg2_pstr);
+ if(!jarg3) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string");
+ return ;
+ }
+ const char *arg3_pstr = (const char *)jenv->GetStringUTFChars(jarg3, 0);
+ if (!arg3_pstr) return ;
+ std::string arg3_str(arg3_pstr);
+ arg3 = &arg3_str;
+ jenv->ReleaseStringUTFChars(jarg3, arg3_pstr);
+ if(!jarg4) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string");
+ return ;
+ }
+ const char *arg4_pstr = (const char *)jenv->GetStringUTFChars(jarg4, 0);
+ if (!arg4_pstr) return ;
+ std::string arg4_str(arg4_pstr);
+ arg4 = &arg4_str;
+ jenv->ReleaseStringUTFChars(jarg4, arg4_pstr);
+ if(!jarg5) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string");
+ return ;
+ }
+ const char *arg5_pstr = (const char *)jenv->GetStringUTFChars(jarg5, 0);
+ if (!arg5_pstr) return ;
+ std::string arg5_str(arg5_pstr);
+ arg5 = &arg5_str;
+ jenv->ReleaseStringUTFChars(jarg5, arg5_pstr);
+ (arg1)->set((std::string const &)*arg2,(std::string const &)*arg3,(std::string const &)*arg4,(std::string const &)*arg5);
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_G2_1clear(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
+ G2 *arg1 = (G2 *) 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ arg1 = *(G2 **)&jarg1;
+ (arg1)->clear();
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_G2_1setStr(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) {
+ G2 *arg1 = (G2 *) 0 ;
+ std::string *arg2 = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ arg1 = *(G2 **)&jarg1;
+ if(!jarg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string");
+ return ;
+ }
+ const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0);
+ if (!arg2_pstr) return ;
+ std::string arg2_str(arg2_pstr);
+ arg2 = &arg2_str;
+ jenv->ReleaseStringUTFChars(jarg2, arg2_pstr);
+ try {
+ (arg1)->setStr((std::string const &)*arg2);
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return ;
+ }
+
+}
+
+
+SWIGEXPORT jstring JNICALL Java_com_herumi_mcl_Bn256JNI_G2_1toString(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
+ jstring jresult = 0 ;
+ G2 *arg1 = (G2 *) 0 ;
+ std::string result;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ arg1 = *(G2 **)&jarg1;
+ try {
+ result = ((G2 const *)arg1)->toString();
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return 0;
+ }
+
+ jresult = jenv->NewStringUTF((&result)->c_str());
+ return jresult;
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_delete_1G2(JNIEnv *jenv, jclass jcls, jlong jarg1) {
+ G2 *arg1 = (G2 *) 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ arg1 = *(G2 **)&jarg1;
+ delete arg1;
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_mul_1_1SWIG_13(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_) {
+ GT *arg1 = 0 ;
+ GT *arg2 = 0 ;
+ GT *arg3 = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ (void)jarg2_;
+ (void)jarg3_;
+ arg1 = *(GT **)&jarg1;
+ if (!arg1) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "GT & reference is null");
+ return ;
+ }
+ arg2 = *(GT **)&jarg2;
+ if (!arg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "GT const & reference is null");
+ return ;
+ }
+ arg3 = *(GT **)&jarg3;
+ if (!arg3) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "GT const & reference is null");
+ return ;
+ }
+ mul(*arg1,(GT const &)*arg2,(GT const &)*arg3);
+}
+
+
+SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_Bn256JNI_new_1GT_1_1SWIG_10(JNIEnv *jenv, jclass jcls) {
+ jlong jresult = 0 ;
+ GT *result = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ result = (GT *)new GT();
+ *(GT **)&jresult = result;
+ return jresult;
+}
+
+
+SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_Bn256JNI_new_1GT_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
+ jlong jresult = 0 ;
+ GT *arg1 = 0 ;
+ GT *result = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ arg1 = *(GT **)&jarg1;
+ if (!arg1) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "GT const & reference is null");
+ return 0;
+ }
+ result = (GT *)new GT((GT const &)*arg1);
+ *(GT **)&jresult = result;
+ return jresult;
+}
+
+
+SWIGEXPORT jboolean JNICALL Java_com_herumi_mcl_Bn256JNI_GT_1equals(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) {
+ jboolean jresult = 0 ;
+ GT *arg1 = (GT *) 0 ;
+ GT *arg2 = 0 ;
+ bool result;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ (void)jarg2_;
+ arg1 = *(GT **)&jarg1;
+ arg2 = *(GT **)&jarg2;
+ if (!arg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "GT const & reference is null");
+ return 0;
+ }
+ result = (bool)((GT const *)arg1)->equals((GT const &)*arg2);
+ jresult = (jboolean)result;
+ return jresult;
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_GT_1clear(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
+ GT *arg1 = (GT *) 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ arg1 = *(GT **)&jarg1;
+ (arg1)->clear();
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_GT_1setStr(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) {
+ GT *arg1 = (GT *) 0 ;
+ std::string *arg2 = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ arg1 = *(GT **)&jarg1;
+ if(!jarg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string");
+ return ;
+ }
+ const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0);
+ if (!arg2_pstr) return ;
+ std::string arg2_str(arg2_pstr);
+ arg2 = &arg2_str;
+ jenv->ReleaseStringUTFChars(jarg2, arg2_pstr);
+ try {
+ (arg1)->setStr((std::string const &)*arg2);
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return ;
+ }
+
+}
+
+
+SWIGEXPORT jstring JNICALL Java_com_herumi_mcl_Bn256JNI_GT_1toString(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
+ jstring jresult = 0 ;
+ GT *arg1 = (GT *) 0 ;
+ std::string result;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ arg1 = *(GT **)&jarg1;
+ try {
+ result = ((GT const *)arg1)->toString();
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return 0;
+ }
+
+ jresult = jenv->NewStringUTF((&result)->c_str());
+ return jresult;
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_delete_1GT(JNIEnv *jenv, jclass jcls, jlong jarg1) {
+ GT *arg1 = (GT *) 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ arg1 = *(GT **)&jarg1;
+ delete arg1;
+}
+
+
+#ifdef __cplusplus
+}
+#endif
+
diff --git a/vendor/github.com/tangerine-network/mcl/ffi/java/elgamal.i b/vendor/github.com/tangerine-network/mcl/ffi/java/elgamal.i
new file mode 100644
index 000000000..410723174
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/ffi/java/elgamal.i
@@ -0,0 +1,28 @@
+%module Elgamal
+
+%include "std_string.i"
+%include "std_except.i"
+
+
+%{
+#include <cybozu/random_generator.hpp>
+#include <cybozu/crypto.hpp>
+#include <mcl/fp.hpp>
+#include <mcl/ecparam.hpp>
+struct Param {
+const mcl::EcParam *ecParam;
+cybozu::RandomGenerator rg;
+cybozu::crypto::Hash::Name hashName;
+static inline Param& getParam()
+{
+ static Param p;
+ return p;
+}
+};
+
+#include "elgamal_impl.hpp"
+%}
+%include cpointer.i
+%pointer_functions(bool, p_bool);
+
+%include "elgamal_impl.hpp"
diff --git a/vendor/github.com/tangerine-network/mcl/ffi/java/elgamal_impl.hpp b/vendor/github.com/tangerine-network/mcl/ffi/java/elgamal_impl.hpp
new file mode 100644
index 000000000..dbf2ba64e
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/ffi/java/elgamal_impl.hpp
@@ -0,0 +1,147 @@
+#pragma once
+//#define MCL_MAX_BIT_SIZE 521
+#include <iostream>
+#include <fstream>
+#include <cybozu/random_generator.hpp>
+#include <cybozu/crypto.hpp>
+#include <mcl/fp.hpp>
+#include <mcl/ecparam.hpp>
+#include <mcl/elgamal.hpp>
+
+typedef mcl::FpT<mcl::FpTag, 521> Fp;
+typedef mcl::FpT<mcl::ZnTag, 521> Zn;
+typedef mcl::EcT<Fp> Ec;
+typedef mcl::ElgamalT<Ec, Zn> Elgamal;
+
+/*
+ init system
+ @param param [in] string such as "ecParamName hashName"
+	@note NOT thread safe because it sets global parameters of the elliptic curve
+ ex1) "secp192k1 sha256" // 192bit security + sha256
+ ex2) "secp160k1 sha1" // 160bit security + sha1
+ hashName : sha1 sha224 sha256 sha384 sha512
+*/
+void SystemInit(const std::string& param) throw(std::exception)
+{
+ std::istringstream iss(param);
+ std::string ecParamStr;
+ std::string hashNameStr;
+ if (iss >> ecParamStr >> hashNameStr) {
+ Param& p = Param::getParam();
+ p.ecParam = mcl::getEcParam(ecParamStr);
+ Zn::init(p.ecParam->n);
+ Fp::init(p.ecParam->p);
+ Ec::init(p.ecParam->a, p.ecParam->b);
+ p.hashName = cybozu::crypto::Hash::getName(hashNameStr);
+ return;
+ }
+ throw cybozu::Exception("SystemInit:bad param") << param;
+}
+
+class CipherText {
+ Elgamal::CipherText self_;
+ friend class PublicKey;
+ friend class PrivateKey;
+public:
+ std::string toStr() const throw(std::exception) { return self_.toStr(); }
+ std::string toString() const throw(std::exception) { return toStr(); }
+ void fromStr(const std::string& str) throw(std::exception) { self_.fromStr(str); }
+
+ void add(const CipherText& c) throw(std::exception) { self_.add(c.self_); }
+ void mul(int m) throw(std::exception)
+ {
+ self_.mul(m);
+ }
+ void mul(const std::string& str) throw(std::exception)
+ {
+ Zn zn(str);
+ self_.mul(zn);
+ }
+};
+
+class PublicKey {
+ Elgamal::PublicKey self_;
+ friend class PrivateKey;
+public:
+ std::string toStr() const throw(std::exception) { return self_.toStr(); }
+ std::string toString() const throw(std::exception) { return toStr(); }
+ void fromStr(const std::string& str) throw(std::exception) { self_.fromStr(str); }
+
+ void save(const std::string& fileName) const throw(std::exception)
+ {
+ std::ofstream ofs(fileName.c_str(), std::ios::binary);
+ if (!(ofs << self_)) throw cybozu::Exception("PublicKey:save") << fileName;
+ }
+ void load(const std::string& fileName) throw(std::exception)
+ {
+ std::ifstream ifs(fileName.c_str(), std::ios::binary);
+ if (!(ifs >> self_)) throw cybozu::Exception("PublicKey:load") << fileName;
+ }
+ void enc(CipherText& c, int m) const throw(std::exception)
+ {
+ self_.enc(c.self_, m, Param::getParam().rg);
+ }
+ void enc(CipherText& c, const std::string& str) const throw(std::exception)
+ {
+ Zn zn(str);
+ self_.enc(c.self_, zn, Param::getParam().rg);
+ }
+ void rerandomize(CipherText& c) const throw(std::exception)
+ {
+ self_.rerandomize(c.self_, Param::getParam().rg);
+ }
+ void add(CipherText& c, int m) const throw(std::exception)
+ {
+ self_.add(c.self_, m);
+ }
+ void add(CipherText& c, const std::string& str) const throw(std::exception)
+ {
+ Zn zn(str);
+ self_.add(c.self_, zn);
+ }
+};
+
+class PrivateKey {
+ Elgamal::PrivateKey self_;
+public:
+ std::string toStr() const throw(std::exception) { return self_.toStr(); }
+ std::string toString() const throw(std::exception) { return toStr(); }
+ void fromStr(const std::string& str) throw(std::exception) { self_.fromStr(str); }
+
+ void save(const std::string& fileName) const throw(std::exception)
+ {
+ std::ofstream ofs(fileName.c_str(), std::ios::binary);
+ if (!(ofs << self_)) throw cybozu::Exception("PrivateKey:save") << fileName;
+ }
+ void load(const std::string& fileName) throw(std::exception)
+ {
+ std::ifstream ifs(fileName.c_str(), std::ios::binary);
+ if (!(ifs >> self_)) throw cybozu::Exception("PrivateKey:load") << fileName;
+ }
+ void init() throw(std::exception)
+ {
+ Param& p = Param::getParam();
+ const Fp x0(p.ecParam->gx);
+ const Fp y0(p.ecParam->gy);
+ Ec P(x0, y0);
+ self_.init(P, Zn::getBitSize(), p.rg);
+ }
+ PublicKey getPublicKey() const throw(std::exception)
+ {
+ PublicKey ret;
+ ret.self_ = self_.getPublicKey();
+ return ret;
+ }
+ int dec(const CipherText& c, bool *b = 0) const throw(std::exception)
+ {
+ return self_.dec(c.self_, b);
+ }
+ void setCache(int rangeMin, int rangeMax) throw(std::exception)
+ {
+ self_.setCache(rangeMin, rangeMax);
+ }
+ void clearCache() throw(std::exception)
+ {
+ self_.clearCache();
+ }
+};
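
For orientation only (this note and sketch are editorial, not part of the vendored diff): the classes above are meant to be driven roughly as follows from C++. The Param struct is copied verbatim from elgamal.i so that elgamal_impl.hpp compiles on its own; the curve/hash string follows the SystemInit comment; the setCache range and the plaintext values are arbitrary assumptions, and the header's dynamic exception specifications imply a pre-C++17 compiler mode.

```cpp
#include <iostream>
#include <cybozu/random_generator.hpp>
#include <cybozu/crypto.hpp>
#include <mcl/fp.hpp>
#include <mcl/ecparam.hpp>
// Same Param definition that elgamal.i injects before including the header.
struct Param {
	const mcl::EcParam *ecParam;
	cybozu::RandomGenerator rg;
	cybozu::crypto::Hash::Name hashName;
	static inline Param& getParam() { static Param p; return p; }
};
#include "elgamal_impl.hpp"

int main()
{
	SystemInit("secp192k1 sha256");   // "ecParamName hashName", as documented above

	PrivateKey prv;
	prv.init();                       // key pair for the curve chosen in SystemInit
	prv.setCache(0, 1000);            // plaintext lookup range used by dec(); range is an assumption
	PublicKey pub = prv.getPublicKey();

	CipherText c1, c2;
	pub.enc(c1, 3);
	pub.enc(c2, 4);
	c1.add(c2);                       // adding ciphertexts adds the underlying plaintexts

	bool ok = false;
	int m = prv.dec(c1, &ok);         // expect m == 7 with ok == true
	std::cout << "dec=" << m << " ok=" << ok << std::endl;
	return 0;
}
```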
diff --git a/vendor/github.com/tangerine-network/mcl/ffi/java/elgamal_wrap.cxx b/vendor/github.com/tangerine-network/mcl/ffi/java/elgamal_wrap.cxx
new file mode 100644
index 000000000..38d05f489
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/ffi/java/elgamal_wrap.cxx
@@ -0,0 +1,1129 @@
+/* ----------------------------------------------------------------------------
+ * This file was automatically generated by SWIG (http://www.swig.org).
+ * Version 3.0.12
+ *
+ * This file is not intended to be easily readable and contains a number of
+ * coding conventions designed to improve portability and efficiency. Do not make
+ * changes to this file unless you know what you are doing--modify the SWIG
+ * interface file instead.
+ * ----------------------------------------------------------------------------- */
+
+
+#ifndef SWIGJAVA
+#define SWIGJAVA
+#endif
+
+
+
+#ifdef __cplusplus
+/* SwigValueWrapper is described in swig.swg */
+template<typename T> class SwigValueWrapper {
+ struct SwigMovePointer {
+ T *ptr;
+ SwigMovePointer(T *p) : ptr(p) { }
+ ~SwigMovePointer() { delete ptr; }
+ SwigMovePointer& operator=(SwigMovePointer& rhs) { T* oldptr = ptr; ptr = 0; delete oldptr; ptr = rhs.ptr; rhs.ptr = 0; return *this; }
+ } pointer;
+ SwigValueWrapper& operator=(const SwigValueWrapper<T>& rhs);
+ SwigValueWrapper(const SwigValueWrapper<T>& rhs);
+public:
+ SwigValueWrapper() : pointer(0) { }
+ SwigValueWrapper& operator=(const T& t) { SwigMovePointer tmp(new T(t)); pointer = tmp; return *this; }
+ operator T&() const { return *pointer.ptr; }
+ T *operator&() { return pointer.ptr; }
+};
+
+template <typename T> T SwigValueInit() {
+ return T();
+}
+#endif
+
+/* -----------------------------------------------------------------------------
+ * This section contains generic SWIG labels for method/variable
+ * declarations/attributes, and other compiler dependent labels.
+ * ----------------------------------------------------------------------------- */
+
+/* template workaround for compilers that cannot correctly implement the C++ standard */
+#ifndef SWIGTEMPLATEDISAMBIGUATOR
+# if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560)
+# define SWIGTEMPLATEDISAMBIGUATOR template
+# elif defined(__HP_aCC)
+/* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */
+/* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */
+# define SWIGTEMPLATEDISAMBIGUATOR template
+# else
+# define SWIGTEMPLATEDISAMBIGUATOR
+# endif
+#endif
+
+/* inline attribute */
+#ifndef SWIGINLINE
+# if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__))
+# define SWIGINLINE inline
+# else
+# define SWIGINLINE
+# endif
+#endif
+
+/* attribute recognised by some compilers to avoid 'unused' warnings */
+#ifndef SWIGUNUSED
+# if defined(__GNUC__)
+# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
+# define SWIGUNUSED __attribute__ ((__unused__))
+# else
+# define SWIGUNUSED
+# endif
+# elif defined(__ICC)
+# define SWIGUNUSED __attribute__ ((__unused__))
+# else
+# define SWIGUNUSED
+# endif
+#endif
+
+#ifndef SWIG_MSC_UNSUPPRESS_4505
+# if defined(_MSC_VER)
+# pragma warning(disable : 4505) /* unreferenced local function has been removed */
+# endif
+#endif
+
+#ifndef SWIGUNUSEDPARM
+# ifdef __cplusplus
+# define SWIGUNUSEDPARM(p)
+# else
+# define SWIGUNUSEDPARM(p) p SWIGUNUSED
+# endif
+#endif
+
+/* internal SWIG method */
+#ifndef SWIGINTERN
+# define SWIGINTERN static SWIGUNUSED
+#endif
+
+/* internal inline SWIG method */
+#ifndef SWIGINTERNINLINE
+# define SWIGINTERNINLINE SWIGINTERN SWIGINLINE
+#endif
+
+/* exporting methods */
+#if defined(__GNUC__)
+# if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
+# ifndef GCC_HASCLASSVISIBILITY
+# define GCC_HASCLASSVISIBILITY
+# endif
+# endif
+#endif
+
+#ifndef SWIGEXPORT
+# if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__)
+# if defined(STATIC_LINKED)
+# define SWIGEXPORT
+# else
+# define SWIGEXPORT __declspec(dllexport)
+# endif
+# else
+# if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY)
+# define SWIGEXPORT __attribute__ ((visibility("default")))
+# else
+# define SWIGEXPORT
+# endif
+# endif
+#endif
+
+/* calling conventions for Windows */
+#ifndef SWIGSTDCALL
+# if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__)
+# define SWIGSTDCALL __stdcall
+# else
+# define SWIGSTDCALL
+# endif
+#endif
+
+/* Deal with Microsoft's attempt at deprecating C standard runtime functions */
+#if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE)
+# define _CRT_SECURE_NO_DEPRECATE
+#endif
+
+/* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */
+#if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE)
+# define _SCL_SECURE_NO_DEPRECATE
+#endif
+
+/* Deal with Apple's deprecated 'AssertMacros.h' from Carbon-framework */
+#if defined(__APPLE__) && !defined(__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES)
+# define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0
+#endif
+
+/* Intel's compiler complains if a variable which was never initialised is
+ * cast to void, which is a common idiom which we use to indicate that we
+ * are aware a variable isn't used. So we just silence that warning.
+ * See: https://github.com/swig/swig/issues/192 for more discussion.
+ */
+#ifdef __INTEL_COMPILER
+# pragma warning disable 592
+#endif
+
+
+/* Fix for jlong on some versions of gcc on Windows */
+#if defined(__GNUC__) && !defined(__INTEL_COMPILER)
+ typedef long long __int64;
+#endif
+
+/* Fix for jlong on 64-bit x86 Solaris */
+#if defined(__x86_64)
+# ifdef _LP64
+# undef _LP64
+# endif
+#endif
+
+#include <jni.h>
+#include <stdlib.h>
+#include <string.h>
+
+
+/* Support for throwing Java exceptions */
+typedef enum {
+ SWIG_JavaOutOfMemoryError = 1,
+ SWIG_JavaIOException,
+ SWIG_JavaRuntimeException,
+ SWIG_JavaIndexOutOfBoundsException,
+ SWIG_JavaArithmeticException,
+ SWIG_JavaIllegalArgumentException,
+ SWIG_JavaNullPointerException,
+ SWIG_JavaDirectorPureVirtual,
+ SWIG_JavaUnknownError
+} SWIG_JavaExceptionCodes;
+
+typedef struct {
+ SWIG_JavaExceptionCodes code;
+ const char *java_exception;
+} SWIG_JavaExceptions_t;
+
+
+static void SWIGUNUSED SWIG_JavaThrowException(JNIEnv *jenv, SWIG_JavaExceptionCodes code, const char *msg) {
+ jclass excep;
+ static const SWIG_JavaExceptions_t java_exceptions[] = {
+ { SWIG_JavaOutOfMemoryError, "java/lang/OutOfMemoryError" },
+ { SWIG_JavaIOException, "java/io/IOException" },
+ { SWIG_JavaRuntimeException, "java/lang/RuntimeException" },
+ { SWIG_JavaIndexOutOfBoundsException, "java/lang/IndexOutOfBoundsException" },
+ { SWIG_JavaArithmeticException, "java/lang/ArithmeticException" },
+ { SWIG_JavaIllegalArgumentException, "java/lang/IllegalArgumentException" },
+ { SWIG_JavaNullPointerException, "java/lang/NullPointerException" },
+ { SWIG_JavaDirectorPureVirtual, "java/lang/RuntimeException" },
+ { SWIG_JavaUnknownError, "java/lang/UnknownError" },
+ { (SWIG_JavaExceptionCodes)0, "java/lang/UnknownError" }
+ };
+ const SWIG_JavaExceptions_t *except_ptr = java_exceptions;
+
+ while (except_ptr->code != code && except_ptr->code)
+ except_ptr++;
+
+ jenv->ExceptionClear();
+ excep = jenv->FindClass(except_ptr->java_exception);
+ if (excep)
+ jenv->ThrowNew(excep, msg);
+}
+
+
+/* Contract support */
+
+#define SWIG_contract_assert(nullreturn, expr, msg) if (!(expr)) {SWIG_JavaThrowException(jenv, SWIG_JavaIllegalArgumentException, msg); return nullreturn; } else
+
+
+#include <string>
+
+
+#include <typeinfo>
+#include <stdexcept>
+
+
+#include <cybozu/random_generator.hpp>
+#include <cybozu/crypto.hpp>
+#include <mcl/fp.hpp>
+#include <mcl/ecparam.hpp>
+struct Param {
+const mcl::EcParam *ecParam;
+cybozu::RandomGenerator rg;
+cybozu::crypto::Hash::Name hashName;
+static inline Param& getParam()
+{
+ static Param p;
+ return p;
+}
+};
+
+#include "elgamal_impl.hpp"
+
+
+static bool *new_p_bool() {
+ return new bool();
+}
+
+static bool *copy_p_bool(bool value) {
+ return new bool(value);
+}
+
+static void delete_p_bool(bool *obj) {
+ if (obj) delete obj;
+}
+
+static void p_bool_assign(bool *obj, bool value) {
+ *obj = value;
+}
+
+static bool p_bool_value(bool *obj) {
+ return *obj;
+}
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_ElgamalJNI_new_1p_1bool(JNIEnv *jenv, jclass jcls) {
+ jlong jresult = 0 ;
+ bool *result = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ result = (bool *)new_p_bool();
+ *(bool **)&jresult = result;
+ return jresult;
+}
+
+
+SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_ElgamalJNI_copy_1p_1bool(JNIEnv *jenv, jclass jcls, jboolean jarg1) {
+ jlong jresult = 0 ;
+ bool arg1 ;
+ bool *result = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ arg1 = jarg1 ? true : false;
+ result = (bool *)copy_p_bool(arg1);
+ *(bool **)&jresult = result;
+ return jresult;
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_delete_1p_1bool(JNIEnv *jenv, jclass jcls, jlong jarg1) {
+ bool *arg1 = (bool *) 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ arg1 = *(bool **)&jarg1;
+ delete_p_bool(arg1);
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_p_1bool_1assign(JNIEnv *jenv, jclass jcls, jlong jarg1, jboolean jarg2) {
+ bool *arg1 = (bool *) 0 ;
+ bool arg2 ;
+
+ (void)jenv;
+ (void)jcls;
+ arg1 = *(bool **)&jarg1;
+ arg2 = jarg2 ? true : false;
+ p_bool_assign(arg1,arg2);
+}
+
+
+SWIGEXPORT jboolean JNICALL Java_com_herumi_mcl_ElgamalJNI_p_1bool_1value(JNIEnv *jenv, jclass jcls, jlong jarg1) {
+ jboolean jresult = 0 ;
+ bool *arg1 = (bool *) 0 ;
+ bool result;
+
+ (void)jenv;
+ (void)jcls;
+ arg1 = *(bool **)&jarg1;
+ result = (bool)p_bool_value(arg1);
+ jresult = (jboolean)result;
+ return jresult;
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_SystemInit(JNIEnv *jenv, jclass jcls, jstring jarg1) {
+ std::string *arg1 = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ if(!jarg1) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string");
+ return ;
+ }
+ const char *arg1_pstr = (const char *)jenv->GetStringUTFChars(jarg1, 0);
+ if (!arg1_pstr) return ;
+ std::string arg1_str(arg1_pstr);
+ arg1 = &arg1_str;
+ jenv->ReleaseStringUTFChars(jarg1, arg1_pstr);
+ try {
+ SystemInit((std::string const &)*arg1);
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return ;
+ }
+
+}
+
+
+SWIGEXPORT jstring JNICALL Java_com_herumi_mcl_ElgamalJNI_CipherText_1toStr(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
+ jstring jresult = 0 ;
+ CipherText *arg1 = (CipherText *) 0 ;
+ std::string result;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ arg1 = *(CipherText **)&jarg1;
+ try {
+ result = ((CipherText const *)arg1)->toStr();
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return 0;
+ }
+
+ jresult = jenv->NewStringUTF((&result)->c_str());
+ return jresult;
+}
+
+
+SWIGEXPORT jstring JNICALL Java_com_herumi_mcl_ElgamalJNI_CipherText_1toString(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
+ jstring jresult = 0 ;
+ CipherText *arg1 = (CipherText *) 0 ;
+ std::string result;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ arg1 = *(CipherText **)&jarg1;
+ try {
+ result = ((CipherText const *)arg1)->toString();
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return 0;
+ }
+
+ jresult = jenv->NewStringUTF((&result)->c_str());
+ return jresult;
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_CipherText_1fromStr(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) {
+ CipherText *arg1 = (CipherText *) 0 ;
+ std::string *arg2 = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ arg1 = *(CipherText **)&jarg1;
+ if(!jarg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string");
+ return ;
+ }
+ const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0);
+ if (!arg2_pstr) return ;
+ std::string arg2_str(arg2_pstr);
+ arg2 = &arg2_str;
+ jenv->ReleaseStringUTFChars(jarg2, arg2_pstr);
+ try {
+ (arg1)->fromStr((std::string const &)*arg2);
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return ;
+ }
+
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_CipherText_1add(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) {
+ CipherText *arg1 = (CipherText *) 0 ;
+ CipherText *arg2 = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ (void)jarg2_;
+ arg1 = *(CipherText **)&jarg1;
+ arg2 = *(CipherText **)&jarg2;
+ if (!arg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "CipherText const & reference is null");
+ return ;
+ }
+ try {
+ (arg1)->add((CipherText const &)*arg2);
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return ;
+ }
+
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_CipherText_1mul_1_1SWIG_10(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jint jarg2) {
+ CipherText *arg1 = (CipherText *) 0 ;
+ int arg2 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ arg1 = *(CipherText **)&jarg1;
+ arg2 = (int)jarg2;
+ try {
+ (arg1)->mul(arg2);
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return ;
+ }
+
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_CipherText_1mul_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) {
+ CipherText *arg1 = (CipherText *) 0 ;
+ std::string *arg2 = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ arg1 = *(CipherText **)&jarg1;
+ if(!jarg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string");
+ return ;
+ }
+ const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0);
+ if (!arg2_pstr) return ;
+ std::string arg2_str(arg2_pstr);
+ arg2 = &arg2_str;
+ jenv->ReleaseStringUTFChars(jarg2, arg2_pstr);
+ try {
+ (arg1)->mul((std::string const &)*arg2);
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return ;
+ }
+
+}
+
+
+SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_ElgamalJNI_new_1CipherText(JNIEnv *jenv, jclass jcls) {
+ jlong jresult = 0 ;
+ CipherText *result = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ result = (CipherText *)new CipherText();
+ *(CipherText **)&jresult = result;
+ return jresult;
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_delete_1CipherText(JNIEnv *jenv, jclass jcls, jlong jarg1) {
+ CipherText *arg1 = (CipherText *) 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ arg1 = *(CipherText **)&jarg1;
+ delete arg1;
+}
+
+
+SWIGEXPORT jstring JNICALL Java_com_herumi_mcl_ElgamalJNI_PublicKey_1toStr(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
+ jstring jresult = 0 ;
+ PublicKey *arg1 = (PublicKey *) 0 ;
+ std::string result;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ arg1 = *(PublicKey **)&jarg1;
+ try {
+ result = ((PublicKey const *)arg1)->toStr();
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return 0;
+ }
+
+ jresult = jenv->NewStringUTF((&result)->c_str());
+ return jresult;
+}
+
+
+SWIGEXPORT jstring JNICALL Java_com_herumi_mcl_ElgamalJNI_PublicKey_1toString(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
+ jstring jresult = 0 ;
+ PublicKey *arg1 = (PublicKey *) 0 ;
+ std::string result;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ arg1 = *(PublicKey **)&jarg1;
+ try {
+ result = ((PublicKey const *)arg1)->toString();
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return 0;
+ }
+
+ jresult = jenv->NewStringUTF((&result)->c_str());
+ return jresult;
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PublicKey_1fromStr(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) {
+ PublicKey *arg1 = (PublicKey *) 0 ;
+ std::string *arg2 = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ arg1 = *(PublicKey **)&jarg1;
+ if(!jarg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string");
+ return ;
+ }
+ const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0);
+ if (!arg2_pstr) return ;
+ std::string arg2_str(arg2_pstr);
+ arg2 = &arg2_str;
+ jenv->ReleaseStringUTFChars(jarg2, arg2_pstr);
+ try {
+ (arg1)->fromStr((std::string const &)*arg2);
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return ;
+ }
+
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PublicKey_1save(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) {
+ PublicKey *arg1 = (PublicKey *) 0 ;
+ std::string *arg2 = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ arg1 = *(PublicKey **)&jarg1;
+ if(!jarg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string");
+ return ;
+ }
+ const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0);
+ if (!arg2_pstr) return ;
+ std::string arg2_str(arg2_pstr);
+ arg2 = &arg2_str;
+ jenv->ReleaseStringUTFChars(jarg2, arg2_pstr);
+ try {
+ ((PublicKey const *)arg1)->save((std::string const &)*arg2);
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return ;
+ }
+
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PublicKey_1load(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) {
+ PublicKey *arg1 = (PublicKey *) 0 ;
+ std::string *arg2 = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ arg1 = *(PublicKey **)&jarg1;
+ if(!jarg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string");
+ return ;
+ }
+ const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0);
+ if (!arg2_pstr) return ;
+ std::string arg2_str(arg2_pstr);
+ arg2 = &arg2_str;
+ jenv->ReleaseStringUTFChars(jarg2, arg2_pstr);
+ try {
+ (arg1)->load((std::string const &)*arg2);
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return ;
+ }
+
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PublicKey_1enc_1_1SWIG_10(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jint jarg3) {
+ PublicKey *arg1 = (PublicKey *) 0 ;
+ CipherText *arg2 = 0 ;
+ int arg3 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ (void)jarg2_;
+ arg1 = *(PublicKey **)&jarg1;
+ arg2 = *(CipherText **)&jarg2;
+ if (!arg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "CipherText & reference is null");
+ return ;
+ }
+ arg3 = (int)jarg3;
+ try {
+ ((PublicKey const *)arg1)->enc(*arg2,arg3);
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return ;
+ }
+
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PublicKey_1enc_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jstring jarg3) {
+ PublicKey *arg1 = (PublicKey *) 0 ;
+ CipherText *arg2 = 0 ;
+ std::string *arg3 = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ (void)jarg2_;
+ arg1 = *(PublicKey **)&jarg1;
+ arg2 = *(CipherText **)&jarg2;
+ if (!arg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "CipherText & reference is null");
+ return ;
+ }
+ if(!jarg3) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string");
+ return ;
+ }
+ const char *arg3_pstr = (const char *)jenv->GetStringUTFChars(jarg3, 0);
+ if (!arg3_pstr) return ;
+ std::string arg3_str(arg3_pstr);
+ arg3 = &arg3_str;
+ jenv->ReleaseStringUTFChars(jarg3, arg3_pstr);
+ try {
+ ((PublicKey const *)arg1)->enc(*arg2,(std::string const &)*arg3);
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return ;
+ }
+
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PublicKey_1rerandomize(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) {
+ PublicKey *arg1 = (PublicKey *) 0 ;
+ CipherText *arg2 = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ (void)jarg2_;
+ arg1 = *(PublicKey **)&jarg1;
+ arg2 = *(CipherText **)&jarg2;
+ if (!arg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "CipherText & reference is null");
+ return ;
+ }
+ try {
+ ((PublicKey const *)arg1)->rerandomize(*arg2);
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return ;
+ }
+
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PublicKey_1add_1_1SWIG_10(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jint jarg3) {
+ PublicKey *arg1 = (PublicKey *) 0 ;
+ CipherText *arg2 = 0 ;
+ int arg3 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ (void)jarg2_;
+ arg1 = *(PublicKey **)&jarg1;
+ arg2 = *(CipherText **)&jarg2;
+ if (!arg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "CipherText & reference is null");
+ return ;
+ }
+ arg3 = (int)jarg3;
+ try {
+ ((PublicKey const *)arg1)->add(*arg2,arg3);
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return ;
+ }
+
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PublicKey_1add_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jstring jarg3) {
+ PublicKey *arg1 = (PublicKey *) 0 ;
+ CipherText *arg2 = 0 ;
+ std::string *arg3 = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ (void)jarg2_;
+ arg1 = *(PublicKey **)&jarg1;
+ arg2 = *(CipherText **)&jarg2;
+ if (!arg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "CipherText & reference is null");
+ return ;
+ }
+ if(!jarg3) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string");
+ return ;
+ }
+ const char *arg3_pstr = (const char *)jenv->GetStringUTFChars(jarg3, 0);
+ if (!arg3_pstr) return ;
+ std::string arg3_str(arg3_pstr);
+ arg3 = &arg3_str;
+ jenv->ReleaseStringUTFChars(jarg3, arg3_pstr);
+ try {
+ ((PublicKey const *)arg1)->add(*arg2,(std::string const &)*arg3);
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return ;
+ }
+
+}
+
+
+SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_ElgamalJNI_new_1PublicKey(JNIEnv *jenv, jclass jcls) {
+ jlong jresult = 0 ;
+ PublicKey *result = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ result = (PublicKey *)new PublicKey();
+ *(PublicKey **)&jresult = result;
+ return jresult;
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_delete_1PublicKey(JNIEnv *jenv, jclass jcls, jlong jarg1) {
+ PublicKey *arg1 = (PublicKey *) 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ arg1 = *(PublicKey **)&jarg1;
+ delete arg1;
+}
+
+
+SWIGEXPORT jstring JNICALL Java_com_herumi_mcl_ElgamalJNI_PrivateKey_1toStr(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
+ jstring jresult = 0 ;
+ PrivateKey *arg1 = (PrivateKey *) 0 ;
+ std::string result;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ arg1 = *(PrivateKey **)&jarg1;
+ try {
+ result = ((PrivateKey const *)arg1)->toStr();
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return 0;
+ }
+
+ jresult = jenv->NewStringUTF((&result)->c_str());
+ return jresult;
+}
+
+
+SWIGEXPORT jstring JNICALL Java_com_herumi_mcl_ElgamalJNI_PrivateKey_1toString(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
+ jstring jresult = 0 ;
+ PrivateKey *arg1 = (PrivateKey *) 0 ;
+ std::string result;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ arg1 = *(PrivateKey **)&jarg1;
+ try {
+ result = ((PrivateKey const *)arg1)->toString();
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return 0;
+ }
+
+ jresult = jenv->NewStringUTF((&result)->c_str());
+ return jresult;
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PrivateKey_1fromStr(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) {
+ PrivateKey *arg1 = (PrivateKey *) 0 ;
+ std::string *arg2 = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ arg1 = *(PrivateKey **)&jarg1;
+ if(!jarg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string");
+ return ;
+ }
+ const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0);
+ if (!arg2_pstr) return ;
+ std::string arg2_str(arg2_pstr);
+ arg2 = &arg2_str;
+ jenv->ReleaseStringUTFChars(jarg2, arg2_pstr);
+ try {
+ (arg1)->fromStr((std::string const &)*arg2);
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return ;
+ }
+
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PrivateKey_1save(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) {
+ PrivateKey *arg1 = (PrivateKey *) 0 ;
+ std::string *arg2 = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ arg1 = *(PrivateKey **)&jarg1;
+ if(!jarg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string");
+ return ;
+ }
+ const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0);
+ if (!arg2_pstr) return ;
+ std::string arg2_str(arg2_pstr);
+ arg2 = &arg2_str;
+ jenv->ReleaseStringUTFChars(jarg2, arg2_pstr);
+ try {
+ ((PrivateKey const *)arg1)->save((std::string const &)*arg2);
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return ;
+ }
+
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PrivateKey_1load(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) {
+ PrivateKey *arg1 = (PrivateKey *) 0 ;
+ std::string *arg2 = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ arg1 = *(PrivateKey **)&jarg1;
+ if(!jarg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string");
+ return ;
+ }
+ const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0);
+ if (!arg2_pstr) return ;
+ std::string arg2_str(arg2_pstr);
+ arg2 = &arg2_str;
+ jenv->ReleaseStringUTFChars(jarg2, arg2_pstr);
+ try {
+ (arg1)->load((std::string const &)*arg2);
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return ;
+ }
+
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PrivateKey_1init(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
+ PrivateKey *arg1 = (PrivateKey *) 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ arg1 = *(PrivateKey **)&jarg1;
+ try {
+ (arg1)->init();
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return ;
+ }
+
+}
+
+
+SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_ElgamalJNI_PrivateKey_1getPublicKey(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
+ jlong jresult = 0 ;
+ PrivateKey *arg1 = (PrivateKey *) 0 ;
+ PublicKey result;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ arg1 = *(PrivateKey **)&jarg1;
+ try {
+ result = ((PrivateKey const *)arg1)->getPublicKey();
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return 0;
+ }
+
+ *(PublicKey **)&jresult = new PublicKey((const PublicKey &)result);
+ return jresult;
+}
+
+
+SWIGEXPORT jint JNICALL Java_com_herumi_mcl_ElgamalJNI_PrivateKey_1dec_1_1SWIG_10(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3) {
+ jint jresult = 0 ;
+ PrivateKey *arg1 = (PrivateKey *) 0 ;
+ CipherText *arg2 = 0 ;
+ bool *arg3 = (bool *) 0 ;
+ int result;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ (void)jarg2_;
+ arg1 = *(PrivateKey **)&jarg1;
+ arg2 = *(CipherText **)&jarg2;
+ if (!arg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "CipherText const & reference is null");
+ return 0;
+ }
+ arg3 = *(bool **)&jarg3;
+ try {
+ result = (int)((PrivateKey const *)arg1)->dec((CipherText const &)*arg2,arg3);
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return 0;
+ }
+
+ jresult = (jint)result;
+ return jresult;
+}
+
+
+SWIGEXPORT jint JNICALL Java_com_herumi_mcl_ElgamalJNI_PrivateKey_1dec_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) {
+ jint jresult = 0 ;
+ PrivateKey *arg1 = (PrivateKey *) 0 ;
+ CipherText *arg2 = 0 ;
+ int result;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ (void)jarg2_;
+ arg1 = *(PrivateKey **)&jarg1;
+ arg2 = *(CipherText **)&jarg2;
+ if (!arg2) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "CipherText const & reference is null");
+ return 0;
+ }
+ try {
+ result = (int)((PrivateKey const *)arg1)->dec((CipherText const &)*arg2);
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return 0;
+ }
+
+ jresult = (jint)result;
+ return jresult;
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PrivateKey_1setCache(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jint jarg2, jint jarg3) {
+ PrivateKey *arg1 = (PrivateKey *) 0 ;
+ int arg2 ;
+ int arg3 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ arg1 = *(PrivateKey **)&jarg1;
+ arg2 = (int)jarg2;
+ arg3 = (int)jarg3;
+ try {
+ (arg1)->setCache(arg2,arg3);
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return ;
+ }
+
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PrivateKey_1clearCache(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
+ PrivateKey *arg1 = (PrivateKey *) 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ (void)jarg1_;
+ arg1 = *(PrivateKey **)&jarg1;
+ try {
+ (arg1)->clearCache();
+ }
+ catch(std::exception &_e) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+ return ;
+ }
+
+}
+
+
+SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_ElgamalJNI_new_1PrivateKey(JNIEnv *jenv, jclass jcls) {
+ jlong jresult = 0 ;
+ PrivateKey *result = 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ result = (PrivateKey *)new PrivateKey();
+ *(PrivateKey **)&jresult = result;
+ return jresult;
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_delete_1PrivateKey(JNIEnv *jenv, jclass jcls, jlong jarg1) {
+ PrivateKey *arg1 = (PrivateKey *) 0 ;
+
+ (void)jenv;
+ (void)jcls;
+ arg1 = *(PrivateKey **)&jarg1;
+ delete arg1;
+}
+
+
+#ifdef __cplusplus
+}
+#endif
+
diff --git a/vendor/github.com/tangerine-network/mcl/ffi/java/java.md b/vendor/github.com/tangerine-network/mcl/ffi/java/java.md
new file mode 100644
index 000000000..3fe861351
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/ffi/java/java.md
@@ -0,0 +1,95 @@
+# JNI for mcl (experimental)
+This library provides functionality to compute the optimal ate pairing
+over Barreto-Naehrig (BN) curves.
+
+# Initialization
+Load the library `mcl_bn256`.
+```
+import com.herumi.mcl.*;
+
+System.loadLibrary("mcl_bn256");
+```
+
+# Classes
+* `G1` ; The cyclic group instantiated as E(Fp)[r], where r = p + 1 - t.
+* `G2` ; The cyclic group instantiated as the inverse image of E'(Fp^2)[r].
+* `GT` ; The cyclic group in the image of the optimal ate pairing.
+ * `e : G1 x G2 -> GT`
+* `Fr` ; The finite field with characteristic r.
+
+# Methods and Functions
+## Fr
+* `Fr::setInt(int x)` ; set by x
+* `Fr::setStr(String str)` ; set by str such as "123", "0xfff", etc.
+* `Fr::setRand()` ; randomly set
+* `Bn256.neg(Fr y, Fr x)` ; `y = -x`
+* `Bn256.add(Fr z, Fr x, Fr y)` ; `z = x + y`
+* `Bn256.sub(Fr z, Fr x, Fr y)` ; `z = x - y`
+* `Bn256.mul(Fr z, Fr x, Fr y)` ; `z = x * y`
+* `Bn256.div(Fr z, Fr x, Fr y)` ; `z = x / y`
+
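+A minimal sketch combining the `Fr` operations above (it assumes the library has
+already been loaded as in the Initialization section; printing via `toString()` is
+an illustrative assumption):
+```
+Fr a = new Fr();
+Fr b = new Fr();
+a.setInt(3);
+b.setStr("0x10"); // b = 16
+Fr c = new Fr();
+Bn256.add(c, a, b); // c = a + b = 19
+Fr d = new Fr();
+Bn256.mul(d, c, a); // d = c * a = 57
+System.out.println(d.toString()); // expected "57"
+```
+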
+## G1
+
+* `G1::set(String x, String y)` ; set by (x, y)
+* `G1::hashAndMapToG1(String m)` ; take SHA-256 of m and map it to an element of G1
+* `G1::setStr(String str)` ; set by the result of `toString()` method
+* `Bn256.neg(G1 y, G1 x)` ; `y = -x`
+* `Bn256.dbl(G1 y, G1 x)` ; `y = 2x`
+* `Bn256.add(G1 z, G1 x, G1 y)` ; `z = x + y`
+* `Bn256.sub(G1 z, G1 x, G1 y)` ; `z = x - y`
+* `Bn256.mul(G1 z, G1 x, Fr y)` ; `z = x * y`
+
+## G2
+
+* `G2::set(String xa, String xb, String ya, String yb)` ; set by ((xa, xb), (ya, yb))
+* `G2::setStr(String str)` ; set by the result of `toString()` method
+* `Bn256.neg(G2 y, G2 x)` ; `y = -x`
+* `Bn256.dbl(G2 y, G2 x)` ; `y = 2x`
+* `Bn256.add(G2 z, G2 x, G2 y)` ; `z = x + y`
+* `Bn256.sub(G2 z, G2 x, G2 y)` ; `z = x - y`
+* `Bn256.mul(G2 z, G2 x, Fr y)` ; `z = x * y`
+
+## GT
+
+* `GT::setStr(String str)` ; set by the result of `toString()` method
+* `Bn256.mul(GT z, GT x, GT y)` ; `z = x * y`
+* `Bn256.pow(GT z, GT x, Fr y)` ; `z = x ^ y`
+
+## pairing
+* `Bn256.pairing(GT e, G1 P, G2 Q)` ; e = e(P, Q)
+
+# BLS signature sample
+```
+String xa = "12723517038133731887338407189719511622662176727675373276651903807414909099441";
+String xb = "4168783608814932154536427934509895782246573715297911553964171371032945126671";
+String ya = "13891744915211034074451795021214165905772212241412891944830863846330766296736";
+String yb = "7937318970632701341203597196594272556916396164729705624521405069090520231616";
+
+G2 Q = new G2(xa, xb, ya, yb); // fixed point of G2
+
+Fr s = new Fr();
+s.setRand(); // secret key
+G2 pub = new G2();
+Bn256.mul(pub, Q, s); // public key = sQ
+
+String m = "signature test";
+G1 H = new G1();
+H.hashAndMapToG1(m); // H = Hash(m)
+G1 sign = new G1();
+Bn256.mul(sign, H, s); // signature of m = s H
+
+GT e1 = new GT();
+GT e2 = new GT();
+Bn256.pairing(e1, H, pub); // e1 = e(H, s Q)
+Bn256.pairing(e2, sign, Q); // e2 = e(s H, Q);
+assertBool("verify signature", e1.equals(e2));
+```
+
+# Make test
+```
+cd java
+make test_bn256
+```
+
+# Sample code
+[Bn256Test.java](https://github.com/herumi/mcl/blob/master/java/Bn256Test.java)
diff --git a/vendor/github.com/tangerine-network/mcl/ffi/java/make_wrap.bat b/vendor/github.com/tangerine-network/mcl/ffi/java/make_wrap.bat
new file mode 100644
index 000000000..b7008bc02
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/ffi/java/make_wrap.bat
@@ -0,0 +1,23 @@
+@echo off
+call set-java-path.bat
+set JAVA_INCLUDE=%JAVA_DIR%\include
+set SWIG=..\..\..\..\p\swig\swig.exe
+set PACKAGE_NAME=com.herumi.mcl
+set PACKAGE_DIR=%PACKAGE_NAME:.=\%
+if /i "%1"=="" (
+ set NAME=elgamal
+) else (
+ set NAME=%1
+)
+
+echo [[run swig]]
+mkdir %PACKAGE_DIR%
+set TOP_DIR=../..
+%SWIG% -java -package %PACKAGE_NAME% -outdir %PACKAGE_DIR% -c++ -Wall %NAME%.i
+echo [[make dll]]
+cl /MT /DNOMINMAX /LD /Ox /DNDEBUG /EHsc %NAME%_wrap.cxx %TOP_DIR%/src/fp.cpp -DMCL_NO_AUTOLINK -I%JAVA_INCLUDE% -I%JAVA_INCLUDE%\win32 -I%TOP_DIR%/include -I%TOP_DIR%/../cybozulib/include -I%TOP_DIR%/../cybozulib_ext/include -I%TOP_DIR%/../xbyak /link /LIBPATH:%TOP_DIR%/../cybozulib_ext/lib /OUT:%TOP_DIR%/bin/mcl_%NAME%.dll
+
+call run-%NAME%.bat
+
+echo [[make jar]]
+%JAVA_DIR%\bin\jar cvf mcl.jar com
diff --git a/vendor/github.com/tangerine-network/mcl/ffi/java/run-bn256.bat b/vendor/github.com/tangerine-network/mcl/ffi/java/run-bn256.bat
new file mode 100644
index 000000000..903876ec6
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/ffi/java/run-bn256.bat
@@ -0,0 +1,9 @@
+@echo off
+echo [[compile Bn256Test.java]]
+%JAVA_DIR%\bin\javac Bn256Test.java
+
+echo [[run Bn256Test]]
+set TOP_DIR=..\..
+pushd %TOP_DIR%\bin
+%JAVA_DIR%\bin\java -classpath ../ffi/java Bn256Test %1 %2 %3 %4 %5 %6
+popd
diff --git a/vendor/github.com/tangerine-network/mcl/ffi/java/run-elgamal.bat b/vendor/github.com/tangerine-network/mcl/ffi/java/run-elgamal.bat
new file mode 100644
index 000000000..8b889a64c
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/ffi/java/run-elgamal.bat
@@ -0,0 +1,9 @@
+@echo off
+echo [[compile ElgamalTest.java]]
+%JAVA_DIR%\bin\javac ElgamalTest.java
+
+echo [[run ElgamalTest]]
+set TOP_DIR=..\..
+pushd %TOP_DIR%\bin
+%JAVA_DIR%\bin\java -classpath ../ffi/java ElgamalTest %1 %2 %3 %4 %5 %6
+popd
diff --git a/vendor/github.com/tangerine-network/mcl/ffi/java/set-java-path.bat b/vendor/github.com/tangerine-network/mcl/ffi/java/set-java-path.bat
new file mode 100644
index 000000000..c66f81830
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/ffi/java/set-java-path.bat
@@ -0,0 +1,8 @@
+@echo off
+if "%JAVA_HOME%"=="" (
+ set JAVA_DIR=c:/p/Java/jdk
+) else (
+ set JAVA_DIR=%JAVA_HOME%
+)
+echo JAVA_DIR=%JAVA_DIR%
+rem set PATH=%PATH%;%JAVA_DIR%\bin
diff --git a/vendor/github.com/tangerine-network/mcl/ffi/js/export-functions.py b/vendor/github.com/tangerine-network/mcl/ffi/js/export-functions.py
new file mode 100644
index 000000000..2a929564b
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/ffi/js/export-functions.py
@@ -0,0 +1,73 @@
+import sys, re, argparse
+
+#RE_PROTOTYPE = re.compile(r'MCLBN_DLL_API\s\w\s\w\([^)]*\);')
+RE_PROTOTYPE = re.compile(r'\w*\s(\w*)\s(\w*)\(([^)]*)\);')
+def export_functions(args, fileNames, reToAddUnderscore):
+ modName = args.js
+ json = args.json
+ if not reToAddUnderscore:
+ reToAddUnderscore = r'(mclBn_init|setStr|getStr|[sS]erialize|setLittleEndian|setHashOf|hashAndMapTo|DecStr|HexStr|HashTo|blsSign|blsVerify|GetCurveOrder|GetFieldOrder|KeyShare|KeyRecover|blsSignatureRecover|blsInit)'
+ reSpecialFunctionName = re.compile(reToAddUnderscore)
+ if json:
+ print '['
+ elif modName:
+ print 'function define_exported_' + modName + '(mod) {'
+ comma = ''
+ for fileName in fileNames:
+ with open(fileName, 'rb') as f:
+ for line in f.readlines():
+ p = RE_PROTOTYPE.search(line)
+ if p:
+ ret = p.group(1)
+ name = p.group(2)
+ arg = p.group(3)
+ if json or modName:
+ retType = 'null' if ret == 'void' else 'number'
+ if arg == '' or arg == 'void':
+ paramNum = 0
+ else:
+ paramNum = len(arg.split(','))
+ if reSpecialFunctionName.search(name):
+ exportName = '_' + name # to wrap function
+ else:
+ exportName = name
+ if json:
+ print comma + '{'
+ if comma == '':
+ comma = ','
+ print ' "name":"{0}",'.format(name)
+ print ' "exportName":"{0}",'.format(exportName)
+ print ' "ret":"{0}",'.format(retType)
+ print ' "args":[',
+ if paramNum > 0:
+ print '"number"' + (', "number"' * (paramNum - 1)),
+ print ']'
+ print '}'
+ else:
+ paramType = '[' + ("'number', " * paramNum) + ']'
+ print "{0} = mod.cwrap('{1}', '{2}', {3})".format(exportName, name, retType, paramType)
+ else:
+ print comma + "'_" + name + "'",
+ if comma == '':
+ comma = ','
+ if json:
+ print ']'
+ elif modName:
+ print '}'
+
+def main():
+ p = argparse.ArgumentParser('export_functions')
+ p.add_argument('header', type=str, nargs='+', help='headers')
+ p.add_argument('-js', type=str, nargs='?', help='module name')
+ p.add_argument('-re', type=str, nargs='?', help='regular expression file to add underscore to function name')
+ p.add_argument('-json', action='store_true', help='output json')
+ args = p.parse_args()
+
+ reToAddUnderscore = ''
+ if args.re:
+ reToAddUnderscore = open(args.re).read().strip()
+ export_functions(args, args.header, reToAddUnderscore)
+
+if __name__ == '__main__':
+ main()
+
diff --git a/vendor/github.com/tangerine-network/mcl/ffi/js/pre-mcl.js b/vendor/github.com/tangerine-network/mcl/ffi/js/pre-mcl.js
new file mode 100644
index 000000000..ebc93e581
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/ffi/js/pre-mcl.js
@@ -0,0 +1,5 @@
+if (typeof __dirname === 'string') {
+ var Module = {}
+ Module.wasmBinaryFile = __dirname + '/mcl_c.wasm'
+}
+
diff --git a/vendor/github.com/tangerine-network/mcl/ffi/python/pairing.py b/vendor/github.com/tangerine-network/mcl/ffi/python/pairing.py
new file mode 100644
index 000000000..88b729176
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/ffi/python/pairing.py
@@ -0,0 +1,80 @@
+from ctypes import *
+from ctypes.wintypes import LPWSTR, LPCSTR, LPVOID
+
+g_lib = None
+
+def BN256_init():
+ global g_lib
+ g_lib = cdll.LoadLibrary("../../bin/bn256.dll")
+ ret = g_lib.BN256_init()
+ if ret:
+ print "ERR BN256_init"
+
+class Fr(Structure):
+ _fields_ = [("v", c_ulonglong * 4)]
+ def setInt(self, v):
+ g_lib.BN256_Fr_setInt(self.v, v)
+ def setStr(self, s):
+ ret = g_lib.BN256_Fr_setStr(self.v, c_char_p(s))
+ if ret:
+ print("ERR Fr:setStr")
+ def __str__(self):
+ svLen = 1024
+ sv = create_string_buffer('\0' * svLen)
+ ret = g_lib.BN256_Fr_getStr(sv, svLen, self.v)
+ if ret:
+ print("ERR Fr:getStr")
+ return sv.value
+	def isZero(self):
+		return g_lib.BN256_Fr_isZero(self.v) != 0
+	def isOne(self):
+		return g_lib.BN256_Fr_isOne(self.v) != 0
+	def __eq__(self, rhs):
+		return g_lib.BN256_Fr_isEqual(self.v, rhs.v) != 0
+	def __ne__(self, rhs):
+		return not (self == rhs)
+ def __add__(self, rhs):
+ ret = Fr()
+ g_lib.BN256_Fr_add(ret.v, self.v, rhs.v)
+ return ret
+ def __sub__(self, rhs):
+ ret = Fr()
+ g_lib.BN256_Fr_sub(ret.v, self.v, rhs.v)
+ return ret
+ def __mul__(self, rhs):
+ ret = Fr()
+ g_lib.BN256_Fr_mul(ret.v, self.v, rhs.v)
+ return ret
+ def __div__(self, rhs):
+ ret = Fr()
+ g_lib.BN256_Fr_div(ret.v, self.v, rhs.v)
+ return ret
+ def __neg__(self):
+ ret = Fr()
+ g_lib.BN256_Fr_neg(ret.v, self.v)
+ return ret
+
+def Fr_add(z, x, y):
+ g_lib.BN256_Fr_add(z.v, x.v, y.v)
+
+def Fr_sub(z, x, y):
+ g_lib.BN256_Fr_sub(z.v, x.v, y.v)
+
+def Fr_mul(z, x, y):
+ g_lib.BN256_Fr_mul(z.v, x.v, y.v)
+
+def Fr_div(z, x, y):
+ g_lib.BN256_Fr_div(z.v, x.v, y.v)
+
+BN256_init()
+
+P = Fr()
+Q = Fr()
+print P == Q
+print P != Q
+P.setInt(5)
+Q.setStr("34982034824")
+print Q
+R = Fr()
+Fr_add(R, P, Q)
+print R
diff --git a/vendor/github.com/tangerine-network/mcl/ffi/python/she.py b/vendor/github.com/tangerine-network/mcl/ffi/python/she.py
new file mode 100644
index 000000000..ab8975274
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/ffi/python/she.py
@@ -0,0 +1,298 @@
+import os
+import platform
+from ctypes import *
+
+MCL_BN254 = 0
+MCLBN_FR_UNIT_SIZE = 4
+MCLBN_FP_UNIT_SIZE = 4
+
+FR_SIZE = MCLBN_FR_UNIT_SIZE
+G1_SIZE = MCLBN_FP_UNIT_SIZE * 3
+G2_SIZE = MCLBN_FP_UNIT_SIZE * 6
+GT_SIZE = MCLBN_FP_UNIT_SIZE * 12
+
+SEC_SIZE = FR_SIZE * 2
+PUB_SIZE = G1_SIZE + G2_SIZE
+G1_CIPHER_SIZE = G1_SIZE * 2
+G2_CIPHER_SIZE = G2_SIZE * 2
+GT_CIPHER_SIZE = GT_SIZE * 4
+
+MCLBN_COMPILED_TIME_VAR = (MCLBN_FR_UNIT_SIZE * 10) + MCLBN_FP_UNIT_SIZE
+
+Buffer = c_ubyte * 1536
+lib = None
+
+def init(curveType=MCL_BN254):
+ global lib
+ name = platform.system()
+ if name == 'Linux':
+ libName = 'libmclshe256.so'
+ elif name == 'Darwin':
+ libName = 'libmclshe256.dylib'
+ elif name == 'Windows':
+ libName = 'mclshe256.dll'
+ else:
+		raise RuntimeError("not supported yet", name)
+ lib = cdll.LoadLibrary(libName)
+ ret = lib.sheInit(MCL_BN254, MCLBN_COMPILED_TIME_VAR)
+ if ret != 0:
+ raise RuntimeError("sheInit", ret)
+ # custom setup for a function which returns pointer
+ lib.shePrecomputedPublicKeyCreate.restype = c_void_p
+
+def setRangeForDLP(hashSize):
+ ret = lib.sheSetRangeForDLP(hashSize)
+ if ret != 0:
+ raise RuntimeError("setRangeForDLP", ret)
+
+def setTryNum(tryNum):
+ ret = lib.sheSetTryNum(tryNum)
+ if ret != 0:
+ raise RuntimeError("setTryNum", ret)
+
+def hexStr(v):
+ s = ""
+ for x in v:
+ s += format(x, '02x')
+ return s
+
+class CipherTextG1(Structure):
+ _fields_ = [("v", c_ulonglong * G1_CIPHER_SIZE)]
+ def serialize(self):
+ buf = Buffer()
+ ret = lib.sheCipherTextG1Serialize(byref(buf), len(buf), byref(self.v))
+ if ret == 0:
+ raise RuntimeError("serialize")
+ return buf[0:ret]
+ def serializeToHexStr(self):
+ return hexStr(self.serialize())
+
+class CipherTextG2(Structure):
+ _fields_ = [("v", c_ulonglong * G2_CIPHER_SIZE)]
+ def serialize(self):
+ buf = Buffer()
+ ret = lib.sheCipherTextG2Serialize(byref(buf), len(buf), byref(self.v))
+ if ret == 0:
+ raise RuntimeError("serialize")
+ return buf[0:ret]
+ def serializeToHexStr(self):
+ return hexStr(self.serialize())
+
+class CipherTextGT(Structure):
+ _fields_ = [("v", c_ulonglong * GT_CIPHER_SIZE)]
+ def serialize(self):
+ buf = Buffer()
+ ret = lib.sheCipherTextGTSerialize(byref(buf), len(buf), byref(self.v))
+ if ret == 0:
+ raise RuntimeError("serialize")
+ return buf[0:ret]
+ def serializeToHexStr(self):
+ return hexStr(self.serialize())
+
+class PrecomputedPublicKey(Structure):
+ def __init__(self):
+ self.p = 0
+ def create(self):
+ if not self.p:
+ self.p = c_void_p(lib.shePrecomputedPublicKeyCreate())
+			if self.p.value is None:
+ raise RuntimeError("PrecomputedPublicKey::create")
+ def destroy(self):
+ lib.shePrecomputedPublicKeyDestroy(self.p)
+ def encG1(self, m):
+ c = CipherTextG1()
+ ret = lib.shePrecomputedPublicKeyEncG1(byref(c.v), self.p, m)
+ if ret != 0:
+ raise RuntimeError("encG1", m)
+ return c
+ def encG2(self, m):
+ c = CipherTextG2()
+ ret = lib.shePrecomputedPublicKeyEncG2(byref(c.v), self.p, m)
+ if ret != 0:
+ raise RuntimeError("encG2", m)
+ return c
+ def encGT(self, m):
+ c = CipherTextGT()
+ ret = lib.shePrecomputedPublicKeyEncGT(byref(c.v), self.p, m)
+ if ret != 0:
+ raise RuntimeError("encGT", m)
+ return c
+
+class PublicKey(Structure):
+ _fields_ = [("v", c_ulonglong * PUB_SIZE)]
+ def serialize(self):
+ buf = Buffer()
+ ret = lib.shePublicKeySerialize(byref(buf), len(buf), byref(self.v))
+ if ret == 0:
+ raise RuntimeError("serialize")
+ return buf[0:ret]
+ def serializeToHexStr(self):
+ return hexStr(self.serialize())
+ def encG1(self, m):
+ c = CipherTextG1()
+ ret = lib.sheEncG1(byref(c.v), byref(self.v), m)
+ if ret != 0:
+ raise RuntimeError("encG1", m)
+ return c
+ def encG2(self, m):
+ c = CipherTextG2()
+ ret = lib.sheEncG2(byref(c.v), byref(self.v), m)
+ if ret != 0:
+ raise RuntimeError("encG2", m)
+ return c
+ def encGT(self, m):
+ c = CipherTextGT()
+ ret = lib.sheEncGT(byref(c.v), byref(self.v), m)
+ if ret != 0:
+ raise RuntimeError("encGT", m)
+ return c
+ def createPrecomputedPublicKey(self):
+ ppub = PrecomputedPublicKey()
+ ppub.create()
+ ret = lib.shePrecomputedPublicKeyInit(ppub.p, byref(self.v))
+ if ret != 0:
+ raise RuntimeError("createPrecomputedPublicKey")
+ return ppub
+
+class SecretKey(Structure):
+ _fields_ = [("v", c_ulonglong * SEC_SIZE)]
+ def setByCSPRNG(self):
+ ret = lib.sheSecretKeySetByCSPRNG(byref(self.v))
+ if ret != 0:
+ raise RuntimeError("setByCSPRNG", ret)
+ def serialize(self):
+ buf = Buffer()
+ ret = lib.sheSecretKeySerialize(byref(buf), len(buf), byref(self.v))
+ if ret == 0:
+ raise RuntimeError("serialize")
+ return buf[0:ret]
+ def serializeToHexStr(self):
+ return hexStr(self.serialize())
+ def getPulicKey(self):
+ pub = PublicKey()
+ lib.sheGetPublicKey(byref(pub.v), byref(self.v))
+ return pub
+ def dec(self, c):
+		m = c_longlong()
+		ret = -1
+ if isinstance(c, CipherTextG1):
+ ret = lib.sheDecG1(byref(m), byref(self.v), byref(c.v))
+ elif isinstance(c, CipherTextG2):
+ ret = lib.sheDecG2(byref(m), byref(self.v), byref(c.v))
+ elif isinstance(c, CipherTextGT):
+ ret = lib.sheDecGT(byref(m), byref(self.v), byref(c.v))
+ if ret != 0:
+ raise RuntimeError("dec")
+ return m.value
+
+def neg(c):
+ ret = -1
+ if isinstance(c, CipherTextG1):
+ out = CipherTextG1()
+ ret = lib.sheNegG1(byref(out.v), byref(c.v))
+ elif isinstance(c, CipherTextG2):
+ out = CipherTextG2()
+ ret = lib.sheNegG2(byref(out.v), byref(c.v))
+ elif isinstance(c, CipherTextGT):
+ out = CipherTextGT()
+ ret = lib.sheNegGT(byref(out.v), byref(c.v))
+ if ret != 0:
+ raise RuntimeError("neg")
+ return out
+
+def add(cx, cy):
+ ret = -1
+ if isinstance(cx, CipherTextG1) and isinstance(cy, CipherTextG1):
+ out = CipherTextG1()
+ ret = lib.sheAddG1(byref(out.v), byref(cx.v), byref(cy.v))
+ elif isinstance(cx, CipherTextG2) and isinstance(cy, CipherTextG2):
+ out = CipherTextG2()
+ ret = lib.sheAddG2(byref(out.v), byref(cx.v), byref(cy.v))
+ elif isinstance(cx, CipherTextGT) and isinstance(cy, CipherTextGT):
+ out = CipherTextGT()
+ ret = lib.sheAddGT(byref(out.v), byref(cx.v), byref(cy.v))
+ if ret != 0:
+ raise RuntimeError("add")
+ return out
+
+def sub(cx, cy):
+ ret = -1
+ if isinstance(cx, CipherTextG1) and isinstance(cy, CipherTextG1):
+ out = CipherTextG1()
+ ret = lib.sheSubG1(byref(out.v), byref(cx.v), byref(cy.v))
+ elif isinstance(cx, CipherTextG2) and isinstance(cy, CipherTextG2):
+ out = CipherTextG2()
+ ret = lib.sheSubG2(byref(out.v), byref(cx.v), byref(cy.v))
+ elif isinstance(cx, CipherTextGT) and isinstance(cy, CipherTextGT):
+ out = CipherTextGT()
+ ret = lib.sheSubGT(byref(out.v), byref(cx.v), byref(cy.v))
+ if ret != 0:
+ raise RuntimeError("sub")
+ return out
+
+def mul(cx, cy):
+ ret = -1
+ if isinstance(cx, CipherTextG1) and isinstance(cy, CipherTextG2):
+ out = CipherTextGT()
+ ret = lib.sheMul(byref(out.v), byref(cx.v), byref(cy.v))
+ elif isinstance(cx, CipherTextG1) and isinstance(cy, int):
+ out = CipherTextG1()
+ ret = lib.sheMulG1(byref(out.v), byref(cx.v), cy)
+ elif isinstance(cx, CipherTextG2) and isinstance(cy, int):
+ out = CipherTextG2()
+ ret = lib.sheMulG2(byref(out.v), byref(cx.v), cy)
+ elif isinstance(cx, CipherTextGT) and isinstance(cy, int):
+ out = CipherTextGT()
+ ret = lib.sheMulGT(byref(out.v), byref(cx.v), cy)
+ if ret != 0:
+ raise RuntimeError("mul")
+ return out
+
+if __name__ == '__main__':
+ init()
+ sec = SecretKey()
+ sec.setByCSPRNG()
+ print("sec=", sec.serializeToHexStr())
+ pub = sec.getPulicKey()
+ print("pub=", pub.serializeToHexStr())
+
+ m11 = 1
+ m12 = 5
+ m21 = 3
+ m22 = -4
+ c11 = pub.encG1(m11)
+ c12 = pub.encG1(m12)
+ # dec(enc) for G1
+ if sec.dec(c11) != m11: print("err1")
+
+ # add/sub for G1
+ if sec.dec(add(c11, c12)) != m11 + m12: print("err2")
+ if sec.dec(sub(c11, c12)) != m11 - m12: print("err3")
+
+ # add/sub for G2
+ c21 = pub.encG2(m21)
+ c22 = pub.encG2(m22)
+ if sec.dec(c21) != m21: print("err4")
+ if sec.dec(add(c21, c22)) != m21 + m22: print("err5")
+ if sec.dec(sub(c21, c22)) != m21 - m22: print("err6")
+
+ mt = -56
+ ct = pub.encGT(mt)
+ if sec.dec(ct) != mt: print("err7")
+
+ # mul G1 and G2
+ if sec.dec(mul(c11, c21)) != m11 * m21: print("err8")
+
+ # use precomputedPublicKey for performance
+ ppub = pub.createPrecomputedPublicKey()
+ c1 = ppub.encG1(m11)
+ if sec.dec(c1) != m11: print("err9")
+
+ import sys
+ if sys.version_info.major >= 3:
+ import timeit
+ N = 100000
+ print(str(timeit.timeit("pub.encG1(12)", number=N, globals=globals()) / float(N) * 1e3) + "msec")
+ print(str(timeit.timeit("ppub.encG1(12)", number=N, globals=globals()) / float(N) * 1e3) + "msec")
+
+ ppub.destroy() # necessary to avoid memory leak
+
diff --git a/vendor/github.com/tangerine-network/mcl/include/cybozu/array.hpp b/vendor/github.com/tangerine-network/mcl/include/cybozu/array.hpp
new file mode 100644
index 000000000..30df3667d
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/cybozu/array.hpp
@@ -0,0 +1,197 @@
+#pragma once
+
+/**
+ @file
+ @brief scoped array and aligned array
+
+ @author MITSUNARI Shigeo(@herumi)
+*/
+#include <new>
+#include <utility>
+#ifdef _WIN32
+ #include <malloc.h>
+#else
+ #include <stdlib.h>
+#endif
+#include <cybozu/inttype.hpp>
+
+namespace cybozu {
+
+inline void *AlignedMalloc(size_t size, size_t alignment)
+{
+#ifdef _WIN32
+ return _aligned_malloc(size, alignment);
+#else
+ void *p;
+ int ret = posix_memalign(&p, alignment, size);
+ return (ret == 0) ? p : 0;
+#endif
+}
+
+inline void AlignedFree(void *p)
+{
+#ifdef _WIN32
+ if (p == 0) return;
+ _aligned_free(p);
+#else
+ free(p);
+#endif
+}
+
+template<class T>
+class ScopedArray {
+ T *p_;
+ size_t size_;
+ ScopedArray(const ScopedArray&);
+ void operator=(const ScopedArray&);
+public:
+ explicit ScopedArray(size_t size)
+ : p_(new T[size])
+ , size_(size)
+ {
+ }
+ ~ScopedArray()
+ {
+ delete[] p_;
+ }
+ T& operator[](size_t idx) CYBOZU_NOEXCEPT { return p_[idx]; }
+ const T& operator[](size_t idx) const CYBOZU_NOEXCEPT { return p_[idx]; }
+ size_t size() const CYBOZU_NOEXCEPT { return size_; }
+ bool empty() const CYBOZU_NOEXCEPT { return size_ == 0; }
+ T* begin() CYBOZU_NOEXCEPT { return p_; }
+ T* end() CYBOZU_NOEXCEPT { return p_ + size_; }
+ const T* begin() const CYBOZU_NOEXCEPT { return p_; }
+ const T* end() const CYBOZU_NOEXCEPT { return p_ + size_; }
+ T* data() CYBOZU_NOEXCEPT { return p_; }
+ const T* data() const CYBOZU_NOEXCEPT { return p_; }
+};
+
+/**
+	T must be a POD type
+	N-byte aligned array (default 16 bytes)
+*/
+template<class T, size_t N = 16, bool defaultDoClear = true>
+class AlignedArray {
+ T *p_;
+ size_t size_;
+ size_t allocSize_;
+ T *alloc(size_t size) const
+ {
+ T *p = static_cast<T*>(AlignedMalloc(size * sizeof(T), N));
+ if (p == 0) throw std::bad_alloc();
+ return p;
+ }
+ void copy(T *dst, const T *src, size_t n) const
+ {
+ for (size_t i = 0; i < n; i++) dst[i] = src[i];
+ }
+ void setZero(T *p, size_t n) const
+ {
+ for (size_t i = 0; i < n; i++) p[i] = 0;
+ }
+ /*
+ alloc allocN and copy [p, p + copyN) to new p_
+ don't modify size_
+ */
+ void allocCopy(size_t allocN, const T *p, size_t copyN)
+ {
+ T *q = alloc(allocN);
+ copy(q, p, copyN);
+ AlignedFree(p_);
+ p_ = q;
+ allocSize_ = allocN;
+ }
+public:
+ /*
+ don't clear buffer with zero if doClear is false
+ */
+ explicit AlignedArray(size_t size = 0, bool doClear = defaultDoClear)
+ : p_(0)
+ , size_(0)
+ , allocSize_(0)
+ {
+ resize(size, doClear);
+ }
+ AlignedArray(const AlignedArray& rhs)
+ : p_(0)
+ , size_(0)
+ , allocSize_(0)
+ {
+ *this = rhs;
+ }
+ AlignedArray& operator=(const AlignedArray& rhs)
+ {
+ if (allocSize_ < rhs.size_) {
+ allocCopy(rhs.size_, rhs.p_, rhs.size_);
+ } else {
+ copy(p_, rhs.p_, rhs.size_);
+ }
+ size_ = rhs.size_;
+ return *this;
+ }
+#if (CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11)
+ AlignedArray(AlignedArray&& rhs) CYBOZU_NOEXCEPT
+ : p_(rhs.p_)
+ , size_(rhs.size_)
+ , allocSize_(rhs.allocSize_)
+ {
+ rhs.p_ = 0;
+ rhs.size_ = 0;
+ rhs.allocSize_ = 0;
+ }
+ AlignedArray& operator=(AlignedArray&& rhs) CYBOZU_NOEXCEPT
+ {
+ swap(rhs);
+ rhs.clear();
+ return *this;
+ }
+#endif
+ /*
+ don't clear buffer with zero if doClear is false
+		@note memory is not freed when the array is shrunk
+ */
+ void resize(size_t size, bool doClear = defaultDoClear)
+ {
+ // shrink
+ if (size <= size_) {
+ size_ = size;
+ return;
+ }
+ // realloc if necessary
+ if (size > allocSize_) {
+ allocCopy(size, p_, size_);
+ }
+ if (doClear) setZero(p_ + size_, size - size_);
+ size_ = size;
+ }
+ void clear() // not free
+ {
+ size_ = 0;
+ }
+ ~AlignedArray()
+ {
+ AlignedFree(p_);
+ }
+ void swap(AlignedArray& rhs) CYBOZU_NOEXCEPT
+ {
+ std::swap(p_, rhs.p_);
+ std::swap(size_, rhs.size_);
+ std::swap(allocSize_, rhs.allocSize_);
+ }
+ T& operator[](size_t idx) CYBOZU_NOEXCEPT { return p_[idx]; }
+ const T& operator[](size_t idx) const CYBOZU_NOEXCEPT { return p_[idx]; }
+ size_t size() const CYBOZU_NOEXCEPT { return size_; }
+ bool empty() const CYBOZU_NOEXCEPT { return size_ == 0; }
+ T* begin() CYBOZU_NOEXCEPT { return p_; }
+ T* end() CYBOZU_NOEXCEPT { return p_ + size_; }
+ const T* begin() const CYBOZU_NOEXCEPT { return p_; }
+ const T* end() const CYBOZU_NOEXCEPT { return p_ + size_; }
+ T* data() CYBOZU_NOEXCEPT { return p_; }
+ const T* data() const CYBOZU_NOEXCEPT { return p_; }
+#if (CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11)
+ const T* cbegin() const CYBOZU_NOEXCEPT { return p_; }
+ const T* cend() const CYBOZU_NOEXCEPT { return p_ + size_; }
+#endif
+};
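+
+/*
+	usage sketch (illustrative only, not part of the original header):
+	cybozu::AlignedArray<float> a(100);          // 100 floats, zero-cleared, 16-byte aligned
+	cybozu::AlignedArray<char, 32> b(64, false); // 32-byte aligned, contents left uninitialized
+	a.resize(200);                               // grows; only the newly added part is zero-cleared
+*/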
+
+} // cybozu
diff --git a/vendor/github.com/tangerine-network/mcl/include/cybozu/atoi.hpp b/vendor/github.com/tangerine-network/mcl/include/cybozu/atoi.hpp
new file mode 100644
index 000000000..a22853a17
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/cybozu/atoi.hpp
@@ -0,0 +1,239 @@
+#pragma once
+/**
+ @file
+ @brief converter between integer and string
+
+ @author MITSUNARI Shigeo(@herumi)
+*/
+
+#include <memory.h>
+#include <limits.h>
+#include <limits>
+#include <cybozu/exception.hpp>
+
+namespace cybozu {
+
+namespace atoi_local {
+
+template<typename T, size_t n>
+T convertToInt(bool *b, const char *p, size_t size, const char (&max)[n], T min, T overflow1, char overflow2)
+{
+ if (size > 0 && *p) {
+ bool isMinus = false;
+ size_t i = 0;
+ if (*p == '-') {
+ isMinus = true;
+ i++;
+ }
+ if (i < size && p[i]) {
+ // skip leading zero
+ while (i < size && p[i] == '0') i++;
+ // check minimum
+ if (isMinus && size - i >= n - 1 && memcmp(max, &p[i], n - 1) == 0) {
+ if (b) *b = true;
+ return min;
+ }
+ T x = 0;
+ for (;;) {
+ unsigned char c;
+ if (i == size || (c = static_cast<unsigned char>(p[i])) == '\0') {
+ if (b) *b = true;
+ return isMinus ? -x : x;
+ }
+ unsigned int y = c - '0';
+ if (y > 9 || x > overflow1 || (x == overflow1 && c >= overflow2)) {
+ break;
+ }
+ x = x * 10 + T(y);
+ i++;
+ }
+ }
+ }
+ if (b) {
+ *b = false;
+ return 0;
+ } else {
+ throw cybozu::Exception("atoi::convertToInt") << cybozu::exception::makeString(p, size);
+ }
+}
+
+template<typename T>
+T convertToUint(bool *b, const char *p, size_t size, T overflow1, char overflow2)
+{
+ if (size > 0 && *p) {
+ size_t i = 0;
+ // skip leading zero
+ while (i < size && p[i] == '0') i++;
+ T x = 0;
+ for (;;) {
+ unsigned char c;
+ if (i == size || (c = static_cast<unsigned char>(p[i])) == '\0') {
+ if (b) *b = true;
+ return x;
+ }
+ unsigned int y = c - '0';
+ if (y > 9 || x > overflow1 || (x == overflow1 && c >= overflow2)) {
+ break;
+ }
+ x = x * 10 + T(y);
+ i++;
+ }
+ }
+ if (b) {
+ *b = false;
+ return 0;
+ } else {
+ throw cybozu::Exception("atoi::convertToUint") << cybozu::exception::makeString(p, size);
+ }
+}
+
+template<typename T>
+T convertHexToInt(bool *b, const char *p, size_t size)
+{
+ if (size > 0 && *p) {
+ size_t i = 0;
+ T x = 0;
+ for (;;) {
+ unsigned int c;
+ if (i == size || (c = static_cast<unsigned char>(p[i])) == '\0') {
+ if (b) *b = true;
+ return x;
+ }
+ if (c - 'A' <= 'F' - 'A') {
+ c = (c - 'A') + 10;
+ } else if (c - 'a' <= 'f' - 'a') {
+ c = (c - 'a') + 10;
+ } else if (c - '0' <= '9' - '0') {
+ c = c - '0';
+ } else {
+ break;
+ }
+ // avoid overflow
+ if (x > (std::numeric_limits<T>::max)() / 16) break;
+ x = x * 16 + T(c);
+ i++;
+ }
+ }
+ if (b) {
+ *b = false;
+ return 0;
+ } else {
+ throw cybozu::Exception("atoi::convertHexToInt") << cybozu::exception::makeString(p, size);
+ }
+}
+
+} // atoi_local
+
+/**
+	converter whose integer type is chosen by the variable it is assigned to
+	@note if a bool pointer b is given, nothing is thrown and *b is set to false for a bad string
+*/
+class atoi {
+ const char *p_;
+ size_t size_;
+ bool *b_;
+ void set(bool *b, const char *p, size_t size)
+ {
+ b_ = b;
+ p_ = p;
+ size_ = size;
+ }
+public:
+ atoi(const char *p, size_t size = -1)
+ {
+ set(0, p, size);
+ }
+ atoi(bool *b, const char *p, size_t size = -1)
+ {
+ set(b, p, size);
+ }
+ atoi(const std::string& str)
+ {
+ set(0, str.c_str(), str.size());
+ }
+ atoi(bool *b, const std::string& str)
+ {
+ set(b, str.c_str(), str.size());
+ }
+ inline operator signed char() const
+ {
+ return atoi_local::convertToInt<signed char>(b_, p_, size_, "128", -128, 12, '8');
+ }
+ inline operator unsigned char() const
+ {
+ return atoi_local::convertToUint<unsigned char>(b_, p_, size_, 25, '6');
+ }
+ inline operator short() const
+ {
+ return atoi_local::convertToInt<short>(b_, p_, size_, "32768", -32768, 3276, '8');
+ }
+ inline operator unsigned short() const
+ {
+ return atoi_local::convertToUint<unsigned short>(b_, p_, size_, 6553, '6');
+ }
+ inline operator int() const
+ {
+ return atoi_local::convertToInt<int>(b_, p_, size_, "2147483648", INT_MIN, 214748364, '8');
+ }
+ inline operator unsigned int() const
+ {
+ return atoi_local::convertToUint<unsigned int>(b_, p_, size_, 429496729, '6');
+ }
+ inline operator long long() const
+ {
+ return atoi_local::convertToInt<long long>(b_, p_, size_, "9223372036854775808", LLONG_MIN, 922337203685477580LL, '8');
+ }
+ inline operator unsigned long long() const
+ {
+ return atoi_local::convertToUint<unsigned long long>(b_, p_, size_, 1844674407370955161ULL, '6');
+ }
+#if defined(__SIZEOF_LONG__) && (__SIZEOF_LONG__ == 8)
+ inline operator long() const { return static_cast<long>(static_cast<long long>(*this)); }
+ inline operator unsigned long() const { return static_cast<unsigned long>(static_cast<unsigned long long>(*this)); }
+#else
+ inline operator long() const { return static_cast<long>(static_cast<int>(*this)); }
+ inline operator unsigned long() const { return static_cast<unsigned long>(static_cast<unsigned int>(*this)); }
+#endif
+};
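+
+/*
+	usage sketch (illustrative only, not part of the original header):
+	int x = cybozu::atoi("123");       // x = 123; throws cybozu::Exception on a bad string
+	bool ok;
+	int y = cybozu::atoi(&ok, "12ab"); // ok = false, y = 0; nothing is thrown
+*/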
+
+class hextoi {
+ const char *p_;
+ size_t size_;
+ bool *b_;
+ void set(bool *b, const char *p, size_t size)
+ {
+ b_ = b;
+ p_ = p;
+ size_ = size;
+ }
+public:
+ hextoi(const char *p, size_t size = -1)
+ {
+ set(0, p, size);
+ }
+ hextoi(bool *b, const char *p, size_t size = -1)
+ {
+ set(b, p, size);
+ }
+ hextoi(const std::string& str)
+ {
+ set(0, str.c_str(), str.size());
+ }
+ hextoi(bool *b, const std::string& str)
+ {
+ set(b, str.c_str(), str.size());
+ }
+ operator unsigned char() const { return atoi_local::convertHexToInt<unsigned char>(b_, p_, size_); }
+ operator unsigned short() const { return atoi_local::convertHexToInt<unsigned short>(b_, p_, size_); }
+ operator unsigned int() const { return atoi_local::convertHexToInt<unsigned int>(b_, p_, size_); }
+ operator unsigned long() const { return atoi_local::convertHexToInt<unsigned long>(b_, p_, size_); }
+ operator unsigned long long() const { return atoi_local::convertHexToInt<unsigned long long>(b_, p_, size_); }
+ operator char() const { return atoi_local::convertHexToInt<char>(b_, p_, size_); }
+ operator signed char() const { return atoi_local::convertHexToInt<signed char>(b_, p_, size_); }
+ operator short() const { return atoi_local::convertHexToInt<short>(b_, p_, size_); }
+ operator int() const { return atoi_local::convertHexToInt<int>(b_, p_, size_); }
+ operator long() const { return atoi_local::convertHexToInt<long>(b_, p_, size_); }
+ operator long long() const { return atoi_local::convertHexToInt<long long>(b_, p_, size_); }
+};
+
+} // cybozu
diff --git a/vendor/github.com/tangerine-network/mcl/include/cybozu/benchmark.hpp b/vendor/github.com/tangerine-network/mcl/include/cybozu/benchmark.hpp
new file mode 100644
index 000000000..4c02f1869
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/cybozu/benchmark.hpp
@@ -0,0 +1,212 @@
+#pragma once
+/**
+ @file
+ @brief measure exec time of function
+ @author MITSUNARI Shigeo
+*/
+#if defined(_MSC_VER) && (_MSC_VER <= 1500)
+ #include <cybozu/inttype.hpp>
+#else
+ #include <stdint.h>
+#endif
+#include <stdio.h>
+
+#ifdef __EMSCRIPTEN__
+ #define CYBOZU_BENCH_USE_GETTIMEOFDAY
+#endif
+
+#ifdef CYBOZU_BENCH_USE_GETTIMEOFDAY
+ #include <sys/time.h>
+#elif !defined(CYBOZU_BENCH_DONT_USE_RDTSC)
+ #if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || defined(__x86_64__)
+ #define CYBOZU_BENCH_USE_RDTSC
+ #define CYBOZU_BENCH_USE_CPU_TIMER
+ #endif
+ #if defined(__GNUC__) && defined(__ARM_ARCH_7A__)
+// #define CYBOZU_BENCH_USE_MRC
+// #define CYBOZU_BENCH_USE_CPU_TIMER
+ #endif
+#endif
+
+
+#include <assert.h>
+#include <time.h>
+#ifdef _MSC_VER
+ #include <intrin.h>
+ #include <sys/timeb.h>
+#else
+#endif
+
+#ifndef CYBOZU_UNUSED
+ #ifdef __GNUC__
+ #define CYBOZU_UNUSED __attribute__((unused))
+ #else
+ #define CYBOZU_UNUSED
+ #endif
+#endif
+
+namespace cybozu {
+
+namespace bench {
+
+static void (*g_putCallback)(double);
+
+static inline void setPutCallback(void (*f)(double))
+{
+ g_putCallback = f;
+}
+
+} // cybozu::bench
+
+class CpuClock {
+public:
+ static inline uint64_t getCpuClk()
+ {
+#ifdef CYBOZU_BENCH_USE_RDTSC
+#ifdef _MSC_VER
+ return __rdtsc();
+#else
+ unsigned int eax, edx;
+ __asm__ volatile("rdtsc" : "=a"(eax), "=d"(edx));
+ return ((uint64_t)edx << 32) | eax;
+#endif
+#elif defined(CYBOZU_BENCH_USE_MRC)
+ uint32_t clk;
+ __asm__ volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(clk));
+ return clk;
+#else
+#ifdef _MSC_VER
+ struct _timeb timeb;
+ _ftime_s(&timeb);
+ return uint64_t(timeb.time) * 1000000000 + timeb.millitm * 1000000;
+#elif defined(CYBOZU_BENCH_USE_GETTIMEOFDAY)
+ struct timeval tv;
+ int ret CYBOZU_UNUSED = gettimeofday(&tv, 0);
+ assert(ret == 0);
+ return uint64_t(tv.tv_sec) * 1000000000 + tv.tv_usec * 1000;
+#else
+ struct timespec tp;
+ int ret CYBOZU_UNUSED = clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &tp);
+ assert(ret == 0);
+ return uint64_t(tp.tv_sec) * 1000000000 + tp.tv_nsec;
+#endif
+#endif
+ }
+ CpuClock()
+ : clock_(0)
+ , count_(0)
+ {
+ }
+ void begin()
+ {
+ clock_ -= getCpuClk();
+ }
+ void end()
+ {
+ clock_ += getCpuClk();
+ count_++;
+ }
+ int getCount() const { return count_; }
+ uint64_t getClock() const { return clock_; }
+ void clear() { count_ = 0; clock_ = 0; }
+ void put(const char *msg = 0, int N = 1) const
+ {
+ double t = getClock() / double(getCount()) / N;
+ if (msg && *msg) printf("%s ", msg);
+ if (bench::g_putCallback) {
+ bench::g_putCallback(t);
+ return;
+ }
+#ifdef CYBOZU_BENCH_USE_CPU_TIMER
+ if (t > 1e6) {
+ printf("%7.3fMclk", t * 1e-6);
+ } else if (t > 1e3) {
+ printf("%7.3fKclk", t * 1e-3);
+ } else {
+ printf("%6.2f clk", t);
+ }
+#else
+ if (t > 1e6) {
+ printf("%7.3fmsec", t * 1e-6);
+ } else if (t > 1e3) {
+ printf("%7.3fusec", t * 1e-3);
+ } else {
+ printf("%6.2fnsec", t);
+ }
+#endif
+ if (msg && *msg) printf("\n");
+ }
+	// ad hoc constants for CYBOZU_BENCH
+#ifdef CYBOZU_BENCH_USE_CPU_TIMER
+ static const int loopN1 = 1000;
+ static const int loopN2 = 100;
+ static const uint64_t maxClk = (uint64_t)1e8;
+#else
+ static const int loopN1 = 100;
+ static const int loopN2 = 100;
+ static const uint64_t maxClk = (uint64_t)1e8;
+#endif
+private:
+ uint64_t clock_;
+ int count_;
+};
+
+namespace bench {
+
+static CpuClock g_clk;
+static int CYBOZU_UNUSED g_loopNum;
+
+} // cybozu::bench
+/*
+ loop counter is automatically determined
+ CYBOZU_BENCH(<msg>, <func>, <param1>, <param2>, ...);
+ if msg == "" then only set g_clk, g_loopNum
+*/
+#define CYBOZU_BENCH(msg, func, ...) \
+{ \
+ const uint64_t _cybozu_maxClk = cybozu::CpuClock::maxClk; \
+ cybozu::CpuClock _cybozu_clk; \
+ for (int _cybozu_i = 0; _cybozu_i < cybozu::CpuClock::loopN2; _cybozu_i++) { \
+ _cybozu_clk.begin(); \
+ for (int _cybozu_j = 0; _cybozu_j < cybozu::CpuClock::loopN1; _cybozu_j++) { func(__VA_ARGS__); } \
+ _cybozu_clk.end(); \
+ if (_cybozu_clk.getClock() > _cybozu_maxClk) break; \
+ } \
+ if (msg && *msg) _cybozu_clk.put(msg, cybozu::CpuClock::loopN1); \
+ cybozu::bench::g_clk = _cybozu_clk; cybozu::bench::g_loopNum = cybozu::CpuClock::loopN1; \
+}
+
+/*
+ double clk;
+ CYBOZU_BENCH_T(clk, <func>, <param1>, <param2>, ...);
+ clk is set by CYBOZU_BENCH_T
+*/
+#define CYBOZU_BENCH_T(clk, func, ...) \
+{ \
+ const uint64_t _cybozu_maxClk = cybozu::CpuClock::maxClk; \
+ cybozu::CpuClock _cybozu_clk; \
+ for (int _cybozu_i = 0; _cybozu_i < cybozu::CpuClock::loopN2; _cybozu_i++) { \
+ _cybozu_clk.begin(); \
+ for (int _cybozu_j = 0; _cybozu_j < cybozu::CpuClock::loopN1; _cybozu_j++) { func(__VA_ARGS__); } \
+ _cybozu_clk.end(); \
+ if (_cybozu_clk.getClock() > _cybozu_maxClk) break; \
+ } \
+ clk = _cybozu_clk.getClock() / (double)_cybozu_clk.getCount() / cybozu::CpuClock::loopN1; \
+}
+
+/*
+ loop counter N is given
+ CYBOZU_BENCH_C(<msg>, <counter>, <func>, <param1>, <param2>, ...);
+ if msg == "" then only set g_clk, g_loopNum
+*/
+#define CYBOZU_BENCH_C(msg, _N, func, ...) \
+{ \
+ cybozu::CpuClock _cybozu_clk; \
+ _cybozu_clk.begin(); \
+ for (int _cybozu_j = 0; _cybozu_j < _N; _cybozu_j++) { func(__VA_ARGS__); } \
+ _cybozu_clk.end(); \
+ if (msg && *msg) _cybozu_clk.put(msg, _N); \
+ cybozu::bench::g_clk = _cybozu_clk; cybozu::bench::g_loopNum = _N; \
+}
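+
+/*
+	usage sketch (illustrative only, not part of the original header):
+	void f(int x);                         // function to measure (assumed)
+	CYBOZU_BENCH("f", f, 123);             // loop count chosen automatically, result printed
+	CYBOZU_BENCH_C("f x10", 10, f, 123);   // exactly 10 iterations
+	double clk;
+	CYBOZU_BENCH_T(clk, f, 123);           // average clock/time stored in clk, nothing printed
+*/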
+
+} // cybozu
diff --git a/vendor/github.com/tangerine-network/mcl/include/cybozu/bit_operation.hpp b/vendor/github.com/tangerine-network/mcl/include/cybozu/bit_operation.hpp
new file mode 100644
index 000000000..865c1e47d
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/cybozu/bit_operation.hpp
@@ -0,0 +1,139 @@
+#pragma once
+/**
+ @file
+ @brief bit operation
+*/
+#include <assert.h>
+#include <cybozu/inttype.hpp>
+
+#if (CYBOZU_HOST == CYBOZU_HOST_INTEL)
+ #if defined(_WIN32)
+ #include <intrin.h>
+ #elif defined(__linux__) || defined(__CYGWIN__) || defined(__clang__)
+ #include <x86intrin.h>
+ #elif defined(__GNUC__)
+ #include <emmintrin.h>
+ #endif
+#endif
+
+namespace cybozu {
+
+namespace bit_op_local {
+
+template<bool equalTo8>
+struct Tag {};
+
+// sizeof(T) < 8
+template<>
+struct Tag<false> {
+ template<class T>
+ static inline int bsf(T x)
+ {
+#if defined(_MSC_VER)
+ unsigned long out;
+ _BitScanForward(&out, x);
+#pragma warning(suppress: 6102)
+ return out;
+#else
+ return __builtin_ctz(x);
+#endif
+ }
+ template<class T>
+ static inline int bsr(T x)
+ {
+#if defined(_MSC_VER)
+ unsigned long out;
+ _BitScanReverse(&out, x);
+#pragma warning(suppress: 6102)
+ return out;
+#else
+ return __builtin_clz(x) ^ 0x1f;
+#endif
+ }
+};
+
+// sizeof(T) == 8
+template<>
+struct Tag<true> {
+ template<class T>
+ static inline int bsf(T x)
+ {
+#if defined(_MSC_VER) && defined(_WIN64)
+ unsigned long out;
+ _BitScanForward64(&out, x);
+#pragma warning(suppress: 6102)
+ return out;
+#elif defined(__x86_64__)
+ return __builtin_ctzll(x);
+#else
+ const uint32_t L = uint32_t(x);
+ if (L) return Tag<false>::bsf(L);
+ const uint32_t H = uint32_t(x >> 32);
+ return Tag<false>::bsf(H) + 32;
+#endif
+ }
+ template<class T>
+ static inline int bsr(T x)
+ {
+#if defined(_MSC_VER) && defined(_WIN64)
+ unsigned long out;
+ _BitScanReverse64(&out, x);
+#pragma warning(suppress: 6102)
+ return out;
+#elif defined(__x86_64__)
+ return __builtin_clzll(x) ^ 0x3f;
+#else
+ const uint32_t H = uint32_t(x >> 32);
+ if (H) return Tag<false>::bsr(H) + 32;
+ const uint32_t L = uint32_t(x);
+ return Tag<false>::bsr(L);
+#endif
+ }
+};
+
+} // bit_op_local
+
+template<class T>
+int bsf(T x)
+{
+ return bit_op_local::Tag<sizeof(T) == 8>::bsf(x);
+}
+template<class T>
+int bsr(T x)
+{
+ return bit_op_local::Tag<sizeof(T) == 8>::bsr(x);
+}
+
+template<class T>
+uint64_t makeBitMask64(T x)
+{
+ assert(x < 64);
+ return (uint64_t(1) << x) - 1;
+}
+
+template<class T>
+uint32_t popcnt(T x);
+
+template<>
+inline uint32_t popcnt<uint32_t>(uint32_t x)
+{
+#if defined(_MSC_VER)
+ return static_cast<uint32_t>(_mm_popcnt_u32(x));
+#else
+ return static_cast<uint32_t>(__builtin_popcount(x));
+#endif
+}
+
+template<>
+inline uint32_t popcnt<uint64_t>(uint64_t x)
+{
+#if defined(__x86_64__)
+ return static_cast<uint32_t>(__builtin_popcountll(x));
+#elif defined(_WIN64)
+ return static_cast<uint32_t>(_mm_popcnt_u64(x));
+#else
+ return popcnt<uint32_t>(static_cast<uint32_t>(x)) + popcnt<uint32_t>(static_cast<uint32_t>(x >> 32));
+#endif
+}
+
+} // cybozu
diff --git a/vendor/github.com/tangerine-network/mcl/include/cybozu/critical_section.hpp b/vendor/github.com/tangerine-network/mcl/include/cybozu/critical_section.hpp
new file mode 100644
index 000000000..13d7f3a0e
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/cybozu/critical_section.hpp
@@ -0,0 +1,60 @@
+#pragma once
+/**
+ @file
+ @brief critical section
+
+ @author MITSUNARI Shigeo(@herumi)
+ @author MITSUNARI Shigeo
+*/
+#include <cybozu/mutex.hpp>
+
+namespace cybozu {
+
+class ConditionVariableCs;
+
+namespace thread {
+
+#ifdef _WIN32
+typedef CRITICAL_SECTION CsHandle;
+inline void CsInit(CsHandle& cs) { InitializeCriticalSection(&cs); }
+inline void CsLock(CsHandle& cs) { EnterCriticalSection(&cs); }
+inline void CsUnlock(CsHandle& cs) { LeaveCriticalSection(&cs); }
+inline void CsTerm(CsHandle& cs) { DeleteCriticalSection(&cs); }
+#else
+typedef pthread_mutex_t CsHandle;
+inline void CsInit(CsHandle& cs) { pthread_mutex_init(&cs, NULL); }
+inline void CsLock(CsHandle& cs) { pthread_mutex_lock(&cs); }
+inline void CsUnlock(CsHandle& cs) { pthread_mutex_unlock(&cs); }
+inline void CsTerm(CsHandle& cs) { pthread_mutex_destroy(&cs); }
+#endif
+
+} // cybozu::thread
+
+class CriticalSection {
+ friend class cybozu::ConditionVariableCs;
+public:
+ CriticalSection()
+ {
+ thread::CsInit(hdl_);
+ }
+ ~CriticalSection()
+ {
+ thread::CsTerm(hdl_);
+ }
+ inline void lock()
+ {
+ thread::CsLock(hdl_);
+ }
+ inline void unlock()
+ {
+ thread::CsUnlock(hdl_);
+ }
+private:
+ CriticalSection(const CriticalSection&);
+ CriticalSection& operator=(const CriticalSection&);
+ thread::CsHandle hdl_;
+};
+
+typedef cybozu::thread::AutoLockT<cybozu::CriticalSection> AutoLockCs; //!< auto lock critical section
+
+} // cybozu
diff --git a/vendor/github.com/tangerine-network/mcl/include/cybozu/crypto.hpp b/vendor/github.com/tangerine-network/mcl/include/cybozu/crypto.hpp
new file mode 100644
index 000000000..d427179d9
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/cybozu/crypto.hpp
@@ -0,0 +1,321 @@
+#pragma once
+/**
+ @file
+ @brief wrap openssl
+ @author MITSUNARI Shigeo(@herumi)
+*/
+
+#include <cybozu/exception.hpp>
+#ifdef __APPLE__
+ #pragma GCC diagnostic push
+ #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+#endif
+#if 0 //#ifdef __APPLE__
+ #define COMMON_DIGEST_FOR_OPENSSL
+ #include <CommonCrypto/CommonDigest.h>
+ #include <CommonCrypto/CommonHMAC.h>
+ #define SHA1 CC_SHA1
+ #define SHA224 CC_SHA224
+ #define SHA256 CC_SHA256
+ #define SHA384 CC_SHA384
+ #define SHA512 CC_SHA512
+#else
+#include <openssl/hmac.h>
+#include <openssl/evp.h>
+#include <openssl/sha.h>
+#endif
+#ifdef _MSC_VER
+ #include <cybozu/link_libeay32.hpp>
+#endif
+
+namespace cybozu {
+
+namespace crypto {
+
+class Hash {
+public:
+ enum Name {
+ N_SHA1,
+ N_SHA224,
+ N_SHA256,
+ N_SHA384,
+ N_SHA512
+ };
+private:
+ Name name_;
+ size_t hashSize_;
+ union {
+ SHA_CTX sha1;
+ SHA256_CTX sha256;
+ SHA512_CTX sha512;
+ } ctx_;
+public:
+ static inline size_t getSize(Name name)
+ {
+ switch (name) {
+ case N_SHA1: return SHA_DIGEST_LENGTH;
+ case N_SHA224: return SHA224_DIGEST_LENGTH;
+ case N_SHA256: return SHA256_DIGEST_LENGTH;
+ case N_SHA384: return SHA384_DIGEST_LENGTH;
+ case N_SHA512: return SHA512_DIGEST_LENGTH;
+ default:
+ throw cybozu::Exception("crypto:Hash:getSize") << name;
+ }
+ }
+ static inline const char *getName(Name name)
+ {
+ switch (name) {
+ case N_SHA1: return "sha1";
+ case N_SHA224: return "sha224";
+ case N_SHA256: return "sha256";
+ case N_SHA384: return "sha384";
+ case N_SHA512: return "sha512";
+ default:
+ throw cybozu::Exception("crypto:Hash:getName") << name;
+ }
+ }
+ static inline Name getName(const std::string& nameStr)
+ {
+ static const struct {
+ const char *nameStr;
+ Name name;
+ } tbl[] = {
+ { "sha1", N_SHA1 },
+ { "sha224", N_SHA224 },
+ { "sha256", N_SHA256 },
+ { "sha384", N_SHA384 },
+ { "sha512", N_SHA512 },
+ };
+ for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) {
+ if (nameStr == tbl[i].nameStr) return tbl[i].name;
+ }
+ throw cybozu::Exception("crypto:Hash:getName") << nameStr;
+ }
+ explicit Hash(Name name = N_SHA1)
+ : name_(name)
+ , hashSize_(getSize(name))
+ {
+ reset();
+ }
+ void update(const void *buf, size_t bufSize)
+ {
+ switch (name_) {
+ case N_SHA1: SHA1_Update(&ctx_.sha1, buf, bufSize); break;
+ case N_SHA224: SHA224_Update(&ctx_.sha256, buf, bufSize); break;
+ case N_SHA256: SHA256_Update(&ctx_.sha256, buf, bufSize); break;
+ case N_SHA384: SHA384_Update(&ctx_.sha512, buf, bufSize); break;
+ case N_SHA512: SHA512_Update(&ctx_.sha512, buf, bufSize); break;
+ }
+ }
+ void update(const std::string& buf)
+ {
+ update(buf.c_str(), buf.size());
+ }
+ void reset()
+ {
+ switch (name_) {
+ case N_SHA1: SHA1_Init(&ctx_.sha1); break;
+ case N_SHA224: SHA224_Init(&ctx_.sha256); break;
+ case N_SHA256: SHA256_Init(&ctx_.sha256); break;
+ case N_SHA384: SHA384_Init(&ctx_.sha512); break;
+ case N_SHA512: SHA512_Init(&ctx_.sha512); break;
+ default:
+ throw cybozu::Exception("crypto:Hash:reset") << name_;
+ }
+ }
+ /*
+ out must have hashSize bytes
+ @note the inner buffer is cleared after calling digest
+ */
+ void digest(void *out, const void *buf, size_t bufSize)
+ {
+ update(buf, bufSize);
+ unsigned char *md = reinterpret_cast<unsigned char*>(out);
+ switch (name_) {
+ case N_SHA1: SHA1_Final(md, &ctx_.sha1); break;
+ case N_SHA224: SHA224_Final(md, &ctx_.sha256); break;
+ case N_SHA256: SHA256_Final(md, &ctx_.sha256); break;
+ case N_SHA384: SHA384_Final(md, &ctx_.sha512); break;
+ case N_SHA512: SHA512_Final(md, &ctx_.sha512); break;
+ default:
+ throw cybozu::Exception("crypto:Hash:digest") << name_;
+ }
+ reset();
+ }
+ std::string digest(const void *buf, size_t bufSize)
+ {
+ std::string ret;
+ ret.resize(hashSize_);
+ digest(&ret[0], buf, bufSize);
+ return ret;
+ }
+ std::string digest(const std::string& buf = "")
+ {
+ return digest(buf.c_str(), buf.size());
+ }
+ /*
+ out must have the necessary size
+ @note returns the written size
+ */
+ static inline size_t digest(void *out, Name name, const void *buf, size_t bufSize)
+ {
+ unsigned char *md = (unsigned char*)out;
+ const unsigned char *src = cybozu::cast<const unsigned char *>(buf);
+ switch (name) {
+ case N_SHA1: SHA1(src, bufSize, md); return 160 / 8;
+ case N_SHA224: SHA224(src, bufSize, md); return 224 / 8;
+ case N_SHA256: SHA256(src, bufSize, md); return 256 / 8;
+ case N_SHA384: SHA384(src, bufSize, md); return 384 / 8;
+ case N_SHA512: SHA512(src, bufSize, md); return 512 / 8;
+ default:
+ return 0;
+ }
+ }
+ static inline std::string digest(Name name, const void *buf, size_t bufSize)
+ {
+ char md[128];
+ size_t size = digest(md, name, buf, bufSize);
+ if (size == 0) throw cybozu::Exception("crypto:Hash:digest") << name;
+ return std::string(md, size);
+ }
+ static inline std::string digest(Name name, const std::string& buf)
+ {
+ return digest(name, buf.c_str(), buf.size());
+ }
+};
+
+class Hmac {
+ const EVP_MD *evp_;
+public:
+ explicit Hmac(Hash::Name name = Hash::N_SHA1)
+ {
+ switch (name) {
+ case Hash::N_SHA1: evp_ = EVP_sha1(); break;
+ case Hash::N_SHA224: evp_ = EVP_sha224(); break;
+ case Hash::N_SHA256: evp_ = EVP_sha256(); break;
+ case Hash::N_SHA384: evp_ = EVP_sha384(); break;
+ case Hash::N_SHA512: evp_ = EVP_sha512(); break;
+ default:
+ throw cybozu::Exception("crypto:Hmac:") << name;
+ }
+ }
+ std::string eval(const std::string& key, const std::string& data)
+ {
+ std::string out(EVP_MD_size(evp_) + 1, 0);
+ unsigned int outLen = 0;
+ if (HMAC(evp_, key.c_str(), static_cast<int>(key.size()),
+ cybozu::cast<const uint8_t *>(data.c_str()), data.size(), cybozu::cast<uint8_t *>(&out[0]), &outLen)) {
+ out.resize(outLen);
+ return out;
+ }
+ throw cybozu::Exception("crypto::Hmac::eval");
+ }
+};
+
+class Cipher {
+ const EVP_CIPHER *cipher_;
+ EVP_CIPHER_CTX *ctx_;
+public:
+ enum Name {
+ N_AES128_CBC,
+ N_AES192_CBC,
+ N_AES256_CBC,
+ N_AES128_ECB, // be careful to use
+ N_AES192_ECB, // be careful to use
+ N_AES256_ECB, // be careful to use
+ };
+ static inline size_t getSize(Name name)
+ {
+ switch (name) {
+ case N_AES128_CBC: return 128;
+ case N_AES192_CBC: return 192;
+ case N_AES256_CBC: return 256;
+ case N_AES128_ECB: return 128;
+ case N_AES192_ECB: return 192;
+ case N_AES256_ECB: return 256;
+ default:
+ throw cybozu::Exception("crypto:Cipher:getSize") << name;
+ }
+ }
+ enum Mode {
+ Decoding,
+ Encoding
+ };
+ explicit Cipher(Name name = N_AES128_CBC)
+ : cipher_(0)
+ , ctx_(0)
+ {
+ ctx_ = EVP_CIPHER_CTX_new();
+ if (ctx_ == 0) throw cybozu::Exception("crypto:Cipher:EVP_CIPHER_CTX_new");
+ switch (name) {
+ case N_AES128_CBC: cipher_ = EVP_aes_128_cbc(); break;
+ case N_AES192_CBC: cipher_ = EVP_aes_192_cbc(); break;
+ case N_AES256_CBC: cipher_ = EVP_aes_256_cbc(); break;
+ case N_AES128_ECB: cipher_ = EVP_aes_128_ecb(); break;
+ case N_AES192_ECB: cipher_ = EVP_aes_192_ecb(); break;
+ case N_AES256_ECB: cipher_ = EVP_aes_256_ecb(); break;
+ default:
+ throw cybozu::Exception("crypto:Cipher:Cipher:name") << (int)name;
+ }
+ }
+ ~Cipher()
+ {
+ if (ctx_) EVP_CIPHER_CTX_free(ctx_);
+ }
+ /*
+ @note don't use padding = true
+ */
+ void setup(Mode mode, const std::string& key, const std::string& iv, bool padding = false)
+ {
+ const int keyLen = static_cast<int>(key.size());
+ const int expectedKeyLen = EVP_CIPHER_key_length(cipher_);
+ if (keyLen != expectedKeyLen) {
+ throw cybozu::Exception("crypto:Cipher:setup:keyLen") << keyLen << expectedKeyLen;
+ }
+
+ int ret = EVP_CipherInit_ex(ctx_, cipher_, NULL, cybozu::cast<const uint8_t*>(key.c_str()), cybozu::cast<const uint8_t*>(iv.c_str()), mode == Encoding ? 1 : 0);
+ if (ret != 1) {
+ throw cybozu::Exception("crypto:Cipher:setup:EVP_CipherInit_ex") << ret;
+ }
+ ret = EVP_CIPHER_CTX_set_padding(ctx_, padding ? 1 : 0);
+ if (ret != 1) {
+ throw cybozu::Exception("crypto:Cipher:setup:EVP_CIPHER_CTX_set_padding") << ret;
+ }
+/*
+ const int ivLen = static_cast<int>(iv.size());
+ const int expectedIvLen = EVP_CIPHER_CTX_iv_length(&ctx_);
+ if (ivLen != expectedIvLen) {
+ throw cybozu::Exception("crypto:Cipher:setup:ivLen") << ivLen << expectedIvLen;
+ }
+*/
+ }
+ /*
+ the size of outBuf must be larger than inBufSize + blockSize
+ @retval positive or 0 : writeSize(+blockSize)
+ @retval -1 : error
+ */
+ int update(char *outBuf, const char *inBuf, int inBufSize)
+ {
+ int outLen = 0;
+ int ret = EVP_CipherUpdate(ctx_, cybozu::cast<uint8_t*>(outBuf), &outLen, cybozu::cast<const uint8_t*>(inBuf), inBufSize);
+ if (ret != 1) return -1;
+ return outLen;
+ }
+ /*
+ return -1 if padding
+ @note don't use
+ */
+ int finalize(char *outBuf)
+ {
+ int outLen = 0;
+ int ret = EVP_CipherFinal_ex(ctx_, cybozu::cast<uint8_t*>(outBuf), &outLen);
+ if (ret != 1) return -1;
+ return outLen;
+ }
+};
+
+} } // cybozu::crypto
+
+#ifdef __APPLE__
+ #pragma GCC diagnostic pop
+#endif
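A minimal sketch of the Hash wrapper above, in both one-shot and incremental form (assumes OpenSSL headers and libcrypto are available at build and link time, as the header requires):

    #include <cstdio>
    #include <cybozu/crypto.hpp>

    int main()
    {
        // one-shot SHA-256 digest of "abc"
        std::string md = cybozu::crypto::Hash::digest(cybozu::crypto::Hash::N_SHA256, "abc");
        // incremental digest of the same data
        cybozu::crypto::Hash h(cybozu::crypto::Hash::N_SHA256);
        h.update("a");
        h.update("bc");
        printf("same=%d size=%zu\n", md == h.digest(), md.size()); // same=1 size=32
        return 0;
    }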
diff --git a/vendor/github.com/tangerine-network/mcl/include/cybozu/endian.hpp b/vendor/github.com/tangerine-network/mcl/include/cybozu/endian.hpp
new file mode 100644
index 000000000..3f1575c46
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/cybozu/endian.hpp
@@ -0,0 +1,224 @@
+#pragma once
+
+/**
+ @file
+ @brief deal with big and little endian
+
+ @author MITSUNARI Shigeo(@herumi)
+*/
+#include <cybozu/inttype.hpp>
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+namespace cybozu {
+
+#ifdef _MSC_VER
+inline uint16_t byteSwap(uint16_t x) { return _byteswap_ushort(x); }
+inline uint32_t byteSwap(uint32_t x) { return _byteswap_ulong(x); }
+inline uint64_t byteSwap(uint64_t x) { return _byteswap_uint64(x); }
+#else
+#if (((__GNUC__) << 16) + (__GNUC_MINOR__)) >= ((4 << 16) + 8)
+inline uint16_t byteSwap(uint16_t x) { return __builtin_bswap16(x); }
+#else
+inline uint16_t byteSwap(uint16_t x) { return (x >> 8) | (x << 8); }
+#endif
+inline uint32_t byteSwap(uint32_t x) { return __builtin_bswap32(x); }
+inline uint64_t byteSwap(uint64_t x) { return __builtin_bswap64(x); }
+#endif
+
+/**
+ get 16bit integer as little endian
+ @param src [in] pointer
+*/
+inline uint16_t Get16bitAsLE(const void *src)
+{
+#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE
+ uint16_t x;
+ memcpy(&x, src, sizeof(x));
+ return x;
+#else
+ const uint8_t *p = static_cast<const uint8_t *>(src);
+ return p[0] | (p[1] << 8);
+#endif
+}
+
+/**
+ get 32bit integer as little endian
+ @param src [in] pointer
+*/
+inline uint32_t Get32bitAsLE(const void *src)
+{
+#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE
+ uint32_t x;
+ memcpy(&x, src, sizeof(x));
+ return x;
+#else
+ const uint8_t *p = static_cast<const uint8_t *>(src);
+ return Get16bitAsLE(p) | (static_cast<uint32_t>(Get16bitAsLE(p + 2)) << 16);
+#endif
+}
+
+/**
+ get 64bit integer as little endian
+ @param src [in] pointer
+*/
+inline uint64_t Get64bitAsLE(const void *src)
+{
+#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE
+ uint64_t x;
+ memcpy(&x, src, sizeof(x));
+ return x;
+#else
+ const uint8_t *p = static_cast<const uint8_t *>(src);
+ return Get32bitAsLE(p) | (static_cast<uint64_t>(Get32bitAsLE(p + 4)) << 32);
+#endif
+}
+
+/**
+ get 16bit integer as big endian
+ @param src [in] pointer
+*/
+inline uint16_t Get16bitAsBE(const void *src)
+{
+#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE
+ uint16_t x;
+ memcpy(&x, src, sizeof(x));
+ return byteSwap(x);
+#else
+ const uint8_t *p = static_cast<const uint8_t *>(src);
+ return p[1] | (p[0] << 8);
+#endif
+}
+
+/**
+ get 32bit integer as big endian
+ @param src [in] pointer
+*/
+inline uint32_t Get32bitAsBE(const void *src)
+{
+#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE
+ uint32_t x;
+ memcpy(&x, src, sizeof(x));
+ return byteSwap(x);
+#else
+ const uint8_t *p = static_cast<const uint8_t *>(src);
+ return Get16bitAsBE(p + 2) | (static_cast<uint32_t>(Get16bitAsBE(p)) << 16);
+#endif
+}
+
+/**
+ get 64bit integer as big endian
+ @param src [in] pointer
+*/
+inline uint64_t Get64bitAsBE(const void *src)
+{
+#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE
+ uint64_t x;
+ memcpy(&x, src, sizeof(x));
+ return byteSwap(x);
+#else
+ const uint8_t *p = static_cast<const uint8_t *>(src);
+ return Get32bitAsBE(p + 4) | (static_cast<uint64_t>(Get32bitAsBE(p)) << 32);
+#endif
+}
+
+/**
+ set 16bit integer as little endian
+ @param src [out] pointer
+ @param x [in] integer
+*/
+inline void Set16bitAsLE(void *src, uint16_t x)
+{
+#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE
+ memcpy(src, &x, sizeof(x));
+#else
+ uint8_t *p = static_cast<uint8_t *>(src);
+ p[0] = static_cast<uint8_t>(x);
+ p[1] = static_cast<uint8_t>(x >> 8);
+#endif
+}
+/**
+ set 32bit integer as little endian
+ @param src [out] pointer
+ @param x [in] integer
+*/
+inline void Set32bitAsLE(void *src, uint32_t x)
+{
+#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE
+ memcpy(src, &x, sizeof(x));
+#else
+ uint8_t *p = static_cast<uint8_t *>(src);
+ p[0] = static_cast<uint8_t>(x);
+ p[1] = static_cast<uint8_t>(x >> 8);
+ p[2] = static_cast<uint8_t>(x >> 16);
+ p[3] = static_cast<uint8_t>(x >> 24);
+#endif
+}
+/**
+ set 64bit integer as little endian
+ @param src [out] pointer
+ @param x [in] integer
+*/
+inline void Set64bitAsLE(void *src, uint64_t x)
+{
+#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE
+ memcpy(src, &x, sizeof(x));
+#else
+ uint8_t *p = static_cast<uint8_t *>(src);
+ Set32bitAsLE(p, static_cast<uint32_t>(x));
+ Set32bitAsLE(p + 4, static_cast<uint32_t>(x >> 32));
+#endif
+}
+/**
+ set 16bit integer as big endian
+ @param src [out] pointer
+ @param x [in] integer
+*/
+inline void Set16bitAsBE(void *src, uint16_t x)
+{
+#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE
+ x = byteSwap(x);
+ memcpy(src, &x, sizeof(x));
+#else
+ uint8_t *p = static_cast<uint8_t *>(src);
+ p[0] = static_cast<uint8_t>(x >> 8);
+ p[1] = static_cast<uint8_t>(x);
+#endif
+}
+/**
+ set 32bit integer as big endian
+ @param src [out] pointer
+ @param x [in] integer
+*/
+inline void Set32bitAsBE(void *src, uint32_t x)
+{
+#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE
+ x = byteSwap(x);
+ memcpy(src, &x, sizeof(x));
+#else
+ uint8_t *p = static_cast<uint8_t *>(src);
+ p[0] = static_cast<uint8_t>(x >> 24);
+ p[1] = static_cast<uint8_t>(x >> 16);
+ p[2] = static_cast<uint8_t>(x >> 8);
+ p[3] = static_cast<uint8_t>(x);
+#endif
+}
+/**
+ set 64bit integer as big endian
+ @param src [out] pointer
+ @param x [in] integer
+*/
+inline void Set64bitAsBE(void *src, uint64_t x)
+{
+#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE
+ x = byteSwap(x);
+ memcpy(src, &x, sizeof(x));
+#else
+ uint8_t *p = static_cast<uint8_t *>(src);
+ Set32bitAsBE(p, static_cast<uint32_t>(x >> 32));
+ Set32bitAsBE(p + 4, static_cast<uint32_t>(x));
+#endif
+}
+
+} // cybozu
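A minimal sketch of the endian helpers above; the big-endian setters produce the same byte layout on little- and big-endian hosts:

    #include <cstdio>
    #include <cybozu/endian.hpp>

    int main()
    {
        unsigned char buf[4];
        cybozu::Set32bitAsBE(buf, 0x12345678);
        // buf is { 0x12, 0x34, 0x56, 0x78 } regardless of host endianness
        printf("%08x\n", cybozu::Get32bitAsBE(buf)); // 12345678
        cybozu::Set32bitAsLE(buf, 0x12345678);
        // buf is { 0x78, 0x56, 0x34, 0x12 }
        printf("%08x\n", cybozu::Get32bitAsLE(buf)); // 12345678
        return 0;
    }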
diff --git a/vendor/github.com/tangerine-network/mcl/include/cybozu/exception.hpp b/vendor/github.com/tangerine-network/mcl/include/cybozu/exception.hpp
new file mode 100644
index 000000000..247ba4de0
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/cybozu/exception.hpp
@@ -0,0 +1,252 @@
+#pragma once
+/**
+ @file
+ @brief definition of abstract exception class
+ @author MITSUNARI Shigeo(@herumi)
+*/
+#ifdef CYBOZU_MINIMUM_EXCEPTION
+
+#include <cybozu/inttype.hpp>
+
+namespace cybozu {
+
+namespace exception {
+inline const char *makeString(const char *, size_t)
+{
+ return "";
+}
+
+} // cybozu::exception
+
+class Exception {
+public:
+ explicit Exception(const char* = 0, bool = true)
+ {
+ }
+ ~Exception() CYBOZU_NOEXCEPT {}
+ const char *what() const CYBOZU_NOEXCEPT { return "cybozu:Exception"; }
+ template<class T>
+ Exception& operator<<(const T&)
+ {
+ return *this;
+ }
+};
+
+} // cybozu
+
+#else
+
+#include <string>
+#include <algorithm>
+#include <sstream>
+#include <errno.h>
+#include <stdio.h>
+#ifdef _WIN32
+ #include <winsock2.h>
+ #include <windows.h>
+#else
+ #include <string.h> // for strerror_r
+#endif
+#include <cybozu/inttype.hpp>
+#ifdef CYBOZU_EXCEPTION_WITH_STACKTRACE
+ #include <cybozu/stacktrace.hpp>
+#endif
+
+namespace cybozu {
+
+const bool DontThrow = true;
+
+namespace exception {
+
+/* get max 16 characters to avoid buffer overrun */
+inline std::string makeString(const char *str, size_t size)
+{
+ return std::string(str, std::min<size_t>(size, 16));
+}
+
+#ifdef _WIN32
+inline std::string wstr2str(const std::wstring& wstr)
+{
+ std::string str;
+ for (size_t i = 0; i < wstr.size(); i++) {
+ uint16_t c = wstr[i];
+ if (c < 0x80) {
+ str += char(c);
+ } else {
+ char buf[16];
+ CYBOZU_SNPRINTF(buf, sizeof(buf), "\\u%04x", c);
+ str += buf;
+ }
+ }
+ return str;
+}
+#endif
+
+} // cybozu::exception
+
+/**
+ convert errno to string
+ @param err [in] errno
+ @note for both windows and linux
+*/
+inline std::string ConvertErrorNoToString(int err)
+{
+ char errBuf[256];
+ std::string ret;
+#ifdef _WIN32
+ if (strerror_s(errBuf, sizeof(errBuf), err) == 0) {
+ ret = errBuf;
+ } else {
+ ret = "err";
+ }
+#elif defined(_GNU_SOURCE)
+ ret = ::strerror_r(err, errBuf, sizeof(errBuf));
+#else
+ if (strerror_r(err, errBuf, sizeof(errBuf)) == 0) {
+ ret = errBuf;
+ } else {
+ ret = "err";
+ }
+#endif
+ char buf2[64];
+ CYBOZU_SNPRINTF(buf2, sizeof(buf2), "(%d)", err);
+ ret += buf2;
+ return ret;
+}
+
+class Exception : public std::exception {
+ mutable std::string str_;
+#ifdef CYBOZU_EXCEPTION_WITH_STACKTRACE
+ mutable std::string stackTrace_;
+#endif
+public:
+ explicit Exception(const std::string& name = "", bool enableStackTrace = true)
+ : str_(name)
+ {
+#ifdef CYBOZU_EXCEPTION_WITH_STACKTRACE
+ if (enableStackTrace) stackTrace_ = cybozu::StackTrace().toString();
+#else
+ cybozu::disable_warning_unused_variable(enableStackTrace);
+#endif
+ }
+ ~Exception() CYBOZU_NOEXCEPT {}
+ const char *what() const CYBOZU_NOEXCEPT { return toString().c_str(); }
+ const std::string& toString() const CYBOZU_NOEXCEPT
+ {
+#ifdef CYBOZU_EXCEPTION_WITH_STACKTRACE
+ try {
+ if (!stackTrace_.empty()) {
+#ifdef CYBOZU_STACKTRACE_ONELINE
+ str_ += "\n<<<STACKTRACE>>> ";
+ str_ += stackTrace_;
+#else
+ str_ += "\n<<<STACKTRACE\n";
+ str_ += stackTrace_;
+ str_ += "\n>>>STACKTRACE";
+#endif
+ }
+ } catch (...) {
+ }
+ stackTrace_.clear();
+#endif
+ return str_;
+ }
+ Exception& operator<<(const char *s)
+ {
+ str_ += ':';
+ str_ += s;
+ return *this;
+ }
+ Exception& operator<<(const std::string& s)
+ {
+ return operator<<(s.c_str());
+ }
+#ifdef _WIN32
+ Exception& operator<<(const std::wstring& s)
+ {
+ return operator<<(cybozu::exception::wstr2str(s));
+ }
+#endif
+ template<class T>
+ Exception& operator<<(const T& x)
+ {
+ std::ostringstream os;
+ os << x;
+ return operator<<(os.str());
+ }
+};
+
+class ErrorNo {
+public:
+#ifdef _WIN32
+ typedef unsigned int NativeErrorNo;
+#else
+ typedef int NativeErrorNo;
+#endif
+ explicit ErrorNo(NativeErrorNo err)
+ : err_(err)
+ {
+ }
+ ErrorNo()
+ : err_(getLatestNativeErrorNo())
+ {
+ }
+ NativeErrorNo getLatestNativeErrorNo() const
+ {
+#ifdef _WIN32
+ return ::GetLastError();
+#else
+ return errno;
+#endif
+ }
+ /**
+ convert NativeErrNo to string(maybe UTF8)
+ @param err [in] errno
+ @note Linux : same as ConvertErrorNoToString
+ Windows : for Win32 API(use en-us)
+ */
+ std::string toString() const
+ {
+#ifdef _WIN32
+ const int msgSize = 256;
+ wchar_t msg[msgSize];
+ int size = FormatMessageW(
+ FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
+ 0,
+ err_,
+ MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US),
+ msg,
+ msgSize,
+ NULL
+ );
+ if (size <= 0) return "";
+ // remove last "\r\n"
+ if (size > 2 && msg[size - 2] == '\r') {
+ msg[size - 2] = 0;
+ size -= 2;
+ }
+ std::string ret;
+ ret.resize(size);
+ // assume ascii only
+ for (int i = 0; i < size; i++) {
+ ret[i] = (char)msg[i];
+ }
+ char buf2[64];
+ CYBOZU_SNPRINTF(buf2, sizeof(buf2), "(%u)", err_);
+ ret += buf2;
+ return ret;
+#else
+ return ConvertErrorNoToString(err_);
+#endif
+ }
+private:
+ NativeErrorNo err_;
+};
+
+inline std::ostream& operator<<(std::ostream& os, const cybozu::ErrorNo& self)
+{
+ return os << self.toString();
+}
+
+} // cybozu
+#endif
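A minimal sketch of the exception class above in its default (non-minimal) build: operator<< appends ':' plus the streamed value to the message, so callers can add context incrementally (function and file names are illustrative):

    #include <cstdio>
    #include <cybozu/exception.hpp>

    void load(const std::string& name, int mode)
    {
        if (mode < 0) {
            throw cybozu::Exception("sample:load:bad mode") << name << mode;
        }
    }

    int main()
    {
        try {
            load("data.txt", -1);
        } catch (const std::exception& e) {
            printf("%s\n", e.what()); // sample:load:bad mode:data.txt:-1
        }
        return 0;
    }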
diff --git a/vendor/github.com/tangerine-network/mcl/include/cybozu/hash.hpp b/vendor/github.com/tangerine-network/mcl/include/cybozu/hash.hpp
new file mode 100644
index 000000000..3fd246fa1
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/cybozu/hash.hpp
@@ -0,0 +1,67 @@
+#pragma once
+#include <cybozu/inttype.hpp>
+
+namespace cybozu {
+
+template<class Iter>
+uint32_t hash32(Iter begin, Iter end, uint32_t v = 0)
+{
+ if (v == 0) v = 2166136261U;
+ while (begin != end) {
+ v ^= *begin++;
+ v *= 16777619;
+ }
+ return v;
+}
+template<class Iter>
+uint64_t hash64(Iter begin, Iter end, uint64_t v = 0)
+{
+ if (v == 0) v = 14695981039346656037ULL;
+ while (begin != end) {
+ v ^= *begin++;
+ v *= 1099511628211ULL;
+ }
+ v ^= v >> 32;
+ return v;
+}
+template<class T>
+uint32_t hash32(const T *x, size_t n, uint32_t v = 0)
+{
+ return hash32(x, x + n, v);
+}
+template<class T>
+uint64_t hash64(const T *x, size_t n, uint64_t v = 0)
+{
+ return hash64(x, x + n, v);
+}
+
+} // cybozu
+
+namespace boost {
+
+template<class T>
+struct hash;
+
+} // boost
+
+#if CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11
+#include <functional>
+#else
+
+namespace std { CYBOZU_NAMESPACE_TR1_BEGIN
+
+#ifdef _MSC_VER
+ #pragma warning(push)
+ #pragma warning(disable : 4099) // mismatch of class and struct
+#endif
+#ifndef __APPLE__
+template<class T>
+struct hash;
+#endif
+#ifdef _MSC_VER
+ #pragma warning(pop)
+#endif
+
+CYBOZU_NAMESPACE_TR1_END } // std
+
+#endif
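A minimal sketch of the FNV-style hash helpers above, applied to a byte string (the pointer/length overloads forward to the iterator versions):

    #include <cstdio>
    #include <string>
    #include <cybozu/hash.hpp>

    int main()
    {
        const std::string s = "hello";
        const uint32_t h32 = cybozu::hash32(s.c_str(), s.size());
        const uint64_t h64 = cybozu::hash64(s.c_str(), s.size());
        printf("h32=%08x h64=%016llx\n", h32, (unsigned long long)h64);
        return 0;
    }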
diff --git a/vendor/github.com/tangerine-network/mcl/include/cybozu/inttype.hpp b/vendor/github.com/tangerine-network/mcl/include/cybozu/inttype.hpp
new file mode 100644
index 000000000..62856bdb3
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/cybozu/inttype.hpp
@@ -0,0 +1,163 @@
+#pragma once
+/**
+ @file
+ @brief int type definition and macros
+ @author MITSUNARI Shigeo(@herumi)
+*/
+
+#if defined(_MSC_VER) && (_MSC_VER <= 1500) && !defined(CYBOZU_DEFINED_INTXX)
+ #define CYBOZU_DEFINED_INTXX
+ typedef __int64 int64_t;
+ typedef unsigned __int64 uint64_t;
+ typedef unsigned int uint32_t;
+ typedef int int32_t;
+ typedef unsigned short uint16_t;
+ typedef short int16_t;
+ typedef unsigned char uint8_t;
+ typedef signed char int8_t;
+#else
+ #include <stdint.h>
+#endif
+
+#ifdef _MSC_VER
+ #ifndef CYBOZU_DEFINED_SSIZE_T
+ #define CYBOZU_DEFINED_SSIZE_T
+ #ifdef _WIN64
+ typedef int64_t ssize_t;
+ #else
+ typedef int32_t ssize_t;
+ #endif
+ #endif
+#else
+ #include <unistd.h> // for ssize_t
+#endif
+
+#ifndef CYBOZU_ALIGN
+ #ifdef _MSC_VER
+ #define CYBOZU_ALIGN(x) __declspec(align(x))
+ #else
+ #define CYBOZU_ALIGN(x) __attribute__((aligned(x)))
+ #endif
+#endif
+#ifndef CYBOZU_FORCE_INLINE
+ #ifdef _MSC_VER
+ #define CYBOZU_FORCE_INLINE __forceinline
+ #else
+ #define CYBOZU_FORCE_INLINE __attribute__((always_inline))
+ #endif
+#endif
+#ifndef CYBOZU_UNUSED
+ #ifdef __GNUC__
+ #define CYBOZU_UNUSED __attribute__((unused))
+ #else
+ #define CYBOZU_UNUSED
+ #endif
+#endif
+#ifndef CYBOZU_ALLOCA
+ #ifdef _MSC_VER
+ #include <malloc.h>
+ #define CYBOZU_ALLOCA(x) _malloca(x)
+ #else
+ #define CYBOZU_ALLOCA(x) __builtin_alloca(x)
+ #endif
+#endif
+#ifndef CYBOZU_NUM_OF_ARRAY
+ #define CYBOZU_NUM_OF_ARRAY(x) (sizeof(x) / sizeof(*x))
+#endif
+#ifndef CYBOZU_SNPRINTF
+ #if defined(_MSC_VER) && (_MSC_VER < 1900)
+ #define CYBOZU_SNPRINTF(x, len, ...) (void)_snprintf_s(x, len, len - 1, __VA_ARGS__)
+ #else
+ #define CYBOZU_SNPRINTF(x, len, ...) (void)snprintf(x, len, __VA_ARGS__)
+ #endif
+#endif
+
+#define CYBOZU_CPP_VERSION_CPP03 0
+#define CYBOZU_CPP_VERSION_TR1 1
+#define CYBOZU_CPP_VERSION_CPP11 2
+#define CYBOZU_CPP_VERSION_CPP14 3
+#define CYBOZU_CPP_VERSION_CPP17 4
+
+#ifdef __GNUC__
+ #define CYBOZU_GNUC_PREREQ(major, minor) ((__GNUC__) * 100 + (__GNUC_MINOR__) >= (major) * 100 + (minor))
+#else
+ #define CYBOZU_GNUC_PREREQ(major, minor) 0
+#endif
+
+#if (__cplusplus >= 201703)
+ #define CYBOZU_CPP_VERSION CYBOZU_CPP_VERSION_CPP17
+#elif (__cplusplus >= 201402)
+ #define CYBOZU_CPP_VERSION CYBOZU_CPP_VERSION_CPP14
+#elif (__cplusplus >= 201103) || (_MSC_VER >= 1500) || defined(__GXX_EXPERIMENTAL_CXX0X__)
+ #if defined(_MSC_VER) && (_MSC_VER <= 1600)
+ #define CYBOZU_CPP_VERSION CYBOZU_CPP_VERSION_TR1
+ #else
+ #define CYBOZU_CPP_VERSION CYBOZU_CPP_VERSION_CPP11
+ #endif
+#elif CYBOZU_GNUC_PREREQ(4, 5) || (CYBOZU_GNUC_PREREQ(4, 2) && __GLIBCXX__ >= 20070719) || defined(__INTEL_COMPILER) || (__clang_major__ >= 3)
+ #define CYBOZU_CPP_VERSION CYBOZU_CPP_VERSION_TR1
+#else
+ #define CYBOZU_CPP_VERSION CYBOZU_CPP_VERSION_CPP03
+#endif
+
+#ifdef CYBOZU_USE_BOOST
+ #define CYBOZU_NAMESPACE_STD boost
+ #define CYBOZU_NAMESPACE_TR1_BEGIN
+ #define CYBOZU_NAMESPACE_TR1_END
+#elif (CYBOZU_CPP_VERSION == CYBOZU_CPP_VERSION_TR1) && !defined(__APPLE__)
+ #define CYBOZU_NAMESPACE_STD std::tr1
+ #define CYBOZU_NAMESPACE_TR1_BEGIN namespace tr1 {
+ #define CYBOZU_NAMESPACE_TR1_END }
+#else
+ #define CYBOZU_NAMESPACE_STD std
+ #define CYBOZU_NAMESPACE_TR1_BEGIN
+ #define CYBOZU_NAMESPACE_TR1_END
+#endif
+
+#ifndef CYBOZU_OS_BIT
+ #if defined(_WIN64) || defined(__x86_64__) || defined(__AARCH64EL__) || defined(__EMSCRIPTEN__)
+ #define CYBOZU_OS_BIT 64
+ #else
+ #define CYBOZU_OS_BIT 32
+ #endif
+#endif
+
+#ifndef CYBOZU_HOST
+ #define CYBOZU_HOST_UNKNOWN 0
+ #define CYBOZU_HOST_INTEL 1
+ #define CYBOZU_HOST_ARM 2
+ #if defined(_M_IX86) || defined(_M_AMD64) || defined(__x86_64__) || defined(__i386__)
+ #define CYBOZU_HOST CYBOZU_HOST_INTEL
+ #elif defined(__arm__) || defined(__AARCH64EL__)
+ #define CYBOZU_HOST CYBOZU_HOST_ARM
+ #else
+ #define CYBOZU_HOST CYBOZU_HOST_UNKNOWN
+ #endif
+#endif
+
+#ifndef CYBOZU_ENDIAN
+ #define CYBOZU_ENDIAN_UNKNOWN 0
+ #define CYBOZU_ENDIAN_LITTLE 1
+ #define CYBOZU_ENDIAN_BIG 2
+ #if (CYBOZU_HOST == CYBOZU_HOST_INTEL)
+ #define CYBOZU_ENDIAN CYBOZU_ENDIAN_LITTLE
+ #elif (CYBOZU_HOST == CYBOZU_HOST_ARM) && (defined(__ARM_EABI__) || defined(__AARCH64EL__))
+ #define CYBOZU_ENDIAN CYBOZU_ENDIAN_LITTLE
+ #else
+ #define CYBOZU_ENDIAN CYBOZU_ENDIAN_UNKNOWN
+ #endif
+#endif
+
+#if CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11
+ #define CYBOZU_NOEXCEPT noexcept
+#else
+ #define CYBOZU_NOEXCEPT throw()
+#endif
+namespace cybozu {
+template<class T>
+void disable_warning_unused_variable(const T&) { }
+template<class T, class S>
+T cast(const S* ptr) { return static_cast<T>(static_cast<const void*>(ptr)); }
+template<class T, class S>
+T cast(S* ptr) { return static_cast<T>(static_cast<void*>(ptr)); }
+} // cybozu
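The header above is mostly macros and typedefs; a small sketch showing two of the helpers it defines, CYBOZU_NUM_OF_ARRAY and the pointer-reinterpreting cybozu::cast used throughout the other headers:

    #include <cstdio>
    #include <cybozu/inttype.hpp>

    int main()
    {
        static const int tbl[] = { 3, 1, 4, 1, 5 };
        // element count of a fixed-size array
        for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) {
            printf("%d ", tbl[i]);
        }
        printf("\n");
        // reinterpret a pointer through void* (as the crypto wrapper does)
        const char *p = "abc";
        const uint8_t *q = cybozu::cast<const uint8_t *>(p);
        printf("%u %d-bit\n", q[0], CYBOZU_OS_BIT); // 97, then 32 or 64 depending on target
        return 0;
    }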
diff --git a/vendor/github.com/tangerine-network/mcl/include/cybozu/itoa.hpp b/vendor/github.com/tangerine-network/mcl/include/cybozu/itoa.hpp
new file mode 100644
index 000000000..072e5b8b4
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/cybozu/itoa.hpp
@@ -0,0 +1,337 @@
+#pragma once
+/**
+ @file
+ @brief convert integer to string(ascii)
+
+ @author MITSUNARI Shigeo(@herumi)
+*/
+#include <limits.h>
+#ifndef CYBOZU_DONT_USE_STRING
+#include <string>
+#endif
+#include <memory.h>
+#include <cybozu/inttype.hpp>
+#include <cybozu/bit_operation.hpp>
+
+namespace cybozu {
+
+template<class T>
+size_t getHexLength(T x)
+{
+ return x == 0 ? 1 : cybozu::bsr(x) / 4 + 1;
+}
+
+template<class T>
+size_t getBinLength(T x)
+{
+ return x == 0 ? 1 : cybozu::bsr(x) + 1;
+}
+/*
+ convert x to hex string with len
+ @note out should have getHexLength(x) size
+ out is not NUL terminated
+*/
+template<class T>
+void itohex(char *out, size_t len, T x, bool upCase = true)
+{
+ static const char *hexTbl[] = {
+ "0123456789abcdef",
+ "0123456789ABCDEF"
+ };
+ const char *tbl = hexTbl[upCase];
+ for (size_t i = 0; i < len; i++) {
+ out[len - i - 1] = tbl[x % 16];
+ x /= 16;
+ }
+}
+/*
+ convert x to bin string with len
+ @note out should have getBinLength(x) size
+ out is not NUL terminated
+*/
+template<class T>
+void itobin(char *out, size_t len, T x)
+{
+ for (size_t i = 0; i < len; i++) {
+ out[len - i - 1] = '0' + (x & 1);
+ x >>= 1;
+ }
+}
+
+namespace itoa_local {
+
+/*
+ convert x to dec
+ use buf[0, bufSize)
+ return 0 on failure
+ return the written size; the output is not NUL terminated
+ @REMARK the top of string is buf + bufSize - writtenSize
+*/
+template<class UT>
+size_t uintToDec(char *buf, size_t bufSize, UT x)
+{
+ for (size_t i = 0; i < bufSize; i++) {
+ buf[bufSize - 1 - i] = '0' + static_cast<int>(x % 10);
+ x /= 10;
+ if (x == 0) return i + 1;
+ }
+ return 0;
+}
+
+/*
+ convert x to hex
+ use buf[0, bufSize)
+ return 0 on failure
+ return the written size; the output is not NUL terminated
+ @REMARK the top of string is buf + bufSize - writtenSize
+*/
+template<class UT>
+size_t uintToHex(char *buf, size_t bufSize, UT x, bool upCase = true)
+{
+ static const char *hexTbl[] = {
+ "0123456789abcdef",
+ "0123456789ABCDEF"
+ };
+ const char *tbl = hexTbl[upCase];
+ for (size_t i = 0; i < bufSize; i++) {
+ buf[bufSize - 1 - i] = tbl[x % 16];
+ x /= 16;
+ if (x == 0) return i + 1;
+ }
+ return 0;
+}
+
+/*
+ convert x to bin
+ use buf[0, bufSize)
+ return 0 on failure
+ return the written size; the output is not NUL terminated
+ @REMARK the top of string is buf + bufSize - writtenSize
+*/
+template<class UT>
+size_t uintToBin(char *buf, size_t bufSize, UT x)
+{
+ for (size_t i = 0; i < bufSize; i++) {
+ buf[bufSize - 1 - i] = '0' + (x & 1);
+ x >>= 1;
+ if (x == 0) return i + 1;
+ }
+ return 0;
+}
+
+template<class T>
+size_t intToDec(char *buf, size_t bufSize, T x)
+{
+ if (x == LLONG_MIN) {
+ const char minStr[] = "-9223372036854775808";
+ const size_t minStrLen = sizeof(minStr) - 1;
+ if (bufSize < minStrLen) {
+ return 0;
+ } else {
+ memcpy(buf + bufSize - minStrLen, minStr, minStrLen);
+ return minStrLen;
+ }
+ }
+ bool negative = x < 0;
+ uint64_t absX = negative ? -x : x;
+ size_t n = uintToDec(buf, bufSize, absX);
+ if (n == 0) return 0;
+ if (negative) {
+ if (bufSize == n) return 0;
+ n++;
+ buf[bufSize - n] = '-';
+ }
+ return n;
+}
+
+#ifndef CYBOZU_DONT_USE_STRING
+template<typename T>
+void convertFromUint(std::string& out, T x)
+{
+ char buf[40];
+ size_t n = uintToDec(buf, sizeof(buf), x);
+ assert(n > 0);
+ out.assign(buf + sizeof(buf) - n, n);
+}
+
+inline void convertFromInt(std::string& out, long long x)
+{
+ char buf[40];
+ size_t n = intToDec(buf, sizeof(buf), x);
+ assert(n > 0);
+ out.assign(buf + sizeof(buf) - n, n);
+}
+
+template<typename T>
+void itohexLocal(std::string& out, T x, bool upCase, bool withZero)
+{
+ const size_t size = withZero ? sizeof(T) * 2 : getHexLength(x);
+ out.resize(size);
+ itohex(&out[0], size, x, upCase);
+}
+
+template<class T>
+void itobinLocal(std::string& out, T x, bool withZero)
+{
+ const size_t size = withZero ? sizeof(T) * 8 : getBinLength(x);
+ out.resize(size);
+ itobin(&out[0], size, x);
+}
+#endif
+
+} // itoa_local
+
+#ifndef CYBOZU_DONT_USE_STRING
+/**
+ convert int to string
+ @param out [out] string
+ @param x [in] int
+*/
+inline void itoa(std::string& out, int x)
+{
+ itoa_local::convertFromInt(out, x);
+}
+
+/**
+ convert long long to string
+ @param out [out] string
+ @param x [in] long long
+*/
+inline void itoa(std::string& out, long long x)
+{
+ itoa_local::convertFromInt(out, x);
+}
+
+/**
+ convert unsigned int to string
+ @param out [out] string
+ @param x [in] unsigned int
+*/
+inline void itoa(std::string& out, unsigned int x)
+{
+ itoa_local::convertFromUint(out, x);
+}
+
+/**
+ convert unsigned long long to string
+ @param out [out] string
+ @param x [in] unsigned long long
+*/
+inline void itoa(std::string& out, unsigned long long x)
+{
+ itoa_local::convertFromUint(out, x);
+}
+
+#if defined(__SIZEOF_LONG__) && (__SIZEOF_LONG__ == 8)
+inline void itoa(std::string& out, long x) { itoa(out, static_cast<long long>(x)); }
+inline void itoa(std::string& out, unsigned long x) { itoa(out, static_cast<unsigned long long>(x)); }
+#else
+inline void itoa(std::string& out, long x) { itoa(out, static_cast<int>(x)); }
+inline void itoa(std::string& out, unsigned long x) { itoa(out, static_cast<unsigned int>(x)); }
+#endif
+/**
+ convert integer to string
+ @param x [in] int
+*/
+template<typename T>
+inline std::string itoa(T x)
+{
+ std::string ret;
+ itoa(ret, x);
+ return ret;
+}
+
+inline void itohex(std::string& out, unsigned char x, bool upCase = true, bool withZero = true)
+{
+ itoa_local::itohexLocal(out, x, upCase, withZero);
+}
+
+inline void itohex(std::string& out, unsigned short x, bool upCase = true, bool withZero = true)
+{
+ itoa_local::itohexLocal(out, x, upCase, withZero);
+}
+
+inline void itohex(std::string& out, unsigned int x, bool upCase = true, bool withZero = true)
+{
+ itoa_local::itohexLocal(out, x, upCase, withZero);
+}
+
+inline void itohex(std::string& out, unsigned long x, bool upCase = true, bool withZero = true)
+{
+ itoa_local::itohexLocal(out, x, upCase, withZero);
+}
+
+inline void itohex(std::string& out, unsigned long long x, bool upCase = true, bool withZero = true)
+{
+ itoa_local::itohexLocal(out, x, upCase, withZero);
+}
+
+template<typename T>
+inline std::string itobin(T x, bool withZero = true)
+{
+ std::string out;
+ itoa_local::itobinLocal(out, x, withZero);
+ return out;
+}
+
+inline void itobin(std::string& out, unsigned char x, bool withZero = true)
+{
+ itoa_local::itobinLocal(out, x, withZero);
+}
+
+inline void itobin(std::string& out, unsigned short x, bool withZero = true)
+{
+ itoa_local::itobinLocal(out, x, withZero);
+}
+
+inline void itobin(std::string& out, unsigned int x, bool withZero = true)
+{
+ itoa_local::itobinLocal(out, x, withZero);
+}
+
+inline void itobin(std::string& out, unsigned long x, bool withZero = true)
+{
+ itoa_local::itobinLocal(out, x, withZero);
+}
+
+inline void itobin(std::string& out, unsigned long long x, bool withZero = true)
+{
+ itoa_local::itobinLocal(out, x, withZero);
+}
+
+template<typename T>
+inline std::string itohex(T x, bool upCase = true, bool withZero = true)
+{
+ std::string out;
+ itohex(out, x, upCase, withZero);
+ return out;
+}
+/**
+ convert integer to string with zero padding
+ @param x [in] int
+ @param len [in] minimum length of string
+ @param c [in] padding character
+ @note
+ itoaWithZero(12, 4) == "0012"
+ itoaWithZero(1234, 4) == "1234"
+ itoaWithZero(12345, 4) == "12345"
+ itoaWithZero(-12, 4) == "-012"
+*/
+template<typename T>
+inline std::string itoaWithZero(T x, size_t len, char c = '0')
+{
+ std::string ret;
+ itoa(ret, x);
+ if (ret.size() < len) {
+ std::string zero(len - ret.size(), c);
+ if (x >= 0) {
+ ret = zero + ret;
+ } else {
+ ret = "-" + zero + ret.substr(1);
+ }
+ }
+ return ret;
+}
+#endif
+
+} // cybozu
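A minimal sketch of the conversion helpers above (the expected outputs in the comments follow from the padding and length logic in the header):

    #include <cstdio>
    #include <cybozu/itoa.hpp>

    int main()
    {
        printf("%s\n", cybozu::itoa(-12345).c_str());                // -12345
        printf("%s\n", cybozu::itoaWithZero(12, 4).c_str());         // 0012
        printf("%s\n", cybozu::itohex(0xabcU, true, false).c_str()); // ABC
        printf("%s\n", cybozu::itobin<unsigned char>(5).c_str());    // 00000101
        return 0;
    }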
diff --git a/vendor/github.com/tangerine-network/mcl/include/cybozu/link_libeay32.hpp b/vendor/github.com/tangerine-network/mcl/include/cybozu/link_libeay32.hpp
new file mode 100644
index 000000000..d83f1b6ea
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/cybozu/link_libeay32.hpp
@@ -0,0 +1,21 @@
+#pragma once
+/**
+ @file
+ @brief link libeay32.lib of openssl
+ @author MITSUNARI Shigeo(@herumi)
+*/
+#if defined(_WIN32) && defined(_MT)
+ #if _MSC_VER >= 1900 // VC2015
+ #ifdef _WIN64
+ #pragma comment(lib, "mt/14/libeay32.lib")
+ #else
+ #pragma comment(lib, "mt/14/32/libeay32.lib")
+ #endif
+// #elif _MSC_VER == 1800 // VC2013
+ #else
+ #pragma comment(lib, "mt/12/libeay32.lib")
+ #endif
+ #pragma comment(lib, "advapi32.lib")
+ #pragma comment(lib, "gdi32.lib")
+ #pragma comment(lib, "user32.lib")
+#endif
diff --git a/vendor/github.com/tangerine-network/mcl/include/cybozu/link_mpir.hpp b/vendor/github.com/tangerine-network/mcl/include/cybozu/link_mpir.hpp
new file mode 100644
index 000000000..d20d7b1a9
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/cybozu/link_mpir.hpp
@@ -0,0 +1,18 @@
+#pragma once
+/**
+ @file
+ @brief link mpir/mpirxx of mpir
+ @author MITSUNARI Shigeo(@herumi)
+*/
+#if defined(_WIN32) && defined(_MT)
+ #if _MSC_VER >= 1900 // VC2015, VC2017(1910)
+ #pragma comment(lib, "mt/14/mpir.lib")
+ #pragma comment(lib, "mt/14/mpirxx.lib")
+ #elif _MSC_VER == 1800 // VC2013
+ #pragma comment(lib, "mt/12/mpir.lib")
+ #pragma comment(lib, "mt/12/mpirxx.lib")
+ #elif _MSC_VER == 1700 // VC2012
+ #pragma comment(lib, "mt/11/mpir.lib")
+ #pragma comment(lib, "mt/11/mpirxx.lib")
+ #endif
+#endif
diff --git a/vendor/github.com/tangerine-network/mcl/include/cybozu/link_ssleay32.hpp b/vendor/github.com/tangerine-network/mcl/include/cybozu/link_ssleay32.hpp
new file mode 100644
index 000000000..60c2361ae
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/cybozu/link_ssleay32.hpp
@@ -0,0 +1,19 @@
+#pragma once
+/**
+ @file
+ @brief link ssleay32.lib of openssl
+ @author MITSUNARI Shigeo(@herumi)
+*/
+#if defined(_WIN32) && defined(_MT)
+ #if _MSC_VER >= 1900 // VC2015
+ #ifdef _WIN64
+ #pragma comment(lib, "mt/14/ssleay32.lib")
+ #else
+ #pragma comment(lib, "mt/14/32/ssleay32.lib")
+ #endif
+// #elif _MSC_VER == 1800 // VC2013
+ #else
+ #pragma comment(lib, "mt/12/ssleay32.lib")
+ #endif
+ #pragma comment(lib, "user32.lib")
+#endif
diff --git a/vendor/github.com/tangerine-network/mcl/include/cybozu/mutex.hpp b/vendor/github.com/tangerine-network/mcl/include/cybozu/mutex.hpp
new file mode 100644
index 000000000..acde6bcbf
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/cybozu/mutex.hpp
@@ -0,0 +1,141 @@
+#pragma once
+/**
+ @file
+ @brief mutex
+
+ @author MITSUNARI Shigeo(@herumi)
+ @author MITSUNARI Shigeo
+*/
+
+#ifdef _WIN32
+ #include <windows.h>
+#else
+ #include <pthread.h>
+ #include <time.h>
+#endif
+#include <assert.h>
+#include <stdlib.h>
+
+namespace cybozu {
+
+class ConditionVariable;
+
+namespace thread {
+
+#ifdef _WIN32
+ typedef HANDLE MutexHandle;
+ inline void MutexInit(MutexHandle& mutex)
+ {
+// mutex = CreateSemaphore(NULL /* no security */, 1 /* init */, 0x7FFFFFFF /* max */, NULL /* no name */);
+ mutex = CreateMutex(NULL /* no security */, FALSE /* no owner */, NULL /* no name */);
+ }
+ inline void MutexLock(MutexHandle& mutex) { WaitForSingleObject(mutex, INFINITE); }
+ /*
+ return false if timeout
+ @param msec [in] msec
+ */
+ inline bool MutexLockTimeout(MutexHandle& mutex, int msec)
+ {
+ DWORD ret = WaitForSingleObject(mutex, msec);
+ if (ret == WAIT_OBJECT_0) {
+ return true;
+ }
+ if (ret == WAIT_TIMEOUT) {
+ return false;
+ }
+ /* ret == WAIT_ABANDONED */
+ assert(0);
+ return false;
+ }
+ inline void MutexUnlock(MutexHandle& mutex)
+ {
+// ReleaseSemaphore(mutex, 1, NULL);
+ ReleaseMutex(mutex);
+ }
+ inline void MutexTerm(MutexHandle& mutex) { CloseHandle(mutex); }
+#else
+ typedef pthread_mutex_t MutexHandle;
+ inline void MutexInit(MutexHandle& mutex)
+ {
+#if 1
+ pthread_mutex_init(&mutex, NULL);
+#else
+ pthread_mutexattr_t attr;
+ pthread_mutexattr_init(&attr);
+ if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_TIMED_NP)) {
+ perror("pthread_mutexattr_settype");
+ exit(1);
+ }
+ pthread_mutex_init(&mutex, &attr);
+ pthread_mutexattr_destroy(&attr);
+#endif
+ }
+ inline void MutexLock(MutexHandle& mutex) { pthread_mutex_lock(&mutex); }
+#if 0
+ inline bool MutexLockTimeout(MutexHandle& mutex, int msec)
+ {
+ timespec absTime;
+ clock_gettime(CLOCK_REALTIME, &absTime);
+ absTime.tv_sec += msec / 1000;
+ absTime.tv_nsec += msec % 1000;
+ bool ret = pthread_mutex_timedlock(&mutex, &absTime) == 0;
+ return ret;
+ }
+#endif
+ inline void MutexUnlock(MutexHandle& mutex) { pthread_mutex_unlock(&mutex); }
+ inline void MutexTerm(MutexHandle& mutex) { pthread_mutex_destroy(&mutex); }
+#endif
+
+template<class T>
+class AutoLockT {
+public:
+ explicit AutoLockT(T &t)
+ : t_(t)
+ {
+ t_.lock();
+ }
+ ~AutoLockT()
+ {
+ t_.unlock();
+ }
+private:
+ T& t_;
+ AutoLockT& operator=(const AutoLockT&);
+};
+
+} // cybozu::thread
+
+class Mutex {
+ friend class cybozu::ConditionVariable;
+public:
+ Mutex()
+ {
+ thread::MutexInit(hdl_);
+ }
+ ~Mutex()
+ {
+ thread::MutexTerm(hdl_);
+ }
+ void lock()
+ {
+ thread::MutexLock(hdl_);
+ }
+#if 0
+ bool lockTimeout(int msec)
+ {
+ return thread::MutexLockTimeout(hdl_, msec);
+ }
+#endif
+ void unlock()
+ {
+ thread::MutexUnlock(hdl_);
+ }
+private:
+ Mutex(const Mutex&);
+ Mutex& operator=(const Mutex&);
+ thread::MutexHandle hdl_;
+};
+
+typedef cybozu::thread::AutoLockT<cybozu::Mutex> AutoLock;
+
+} // cybozu
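A minimal sketch of the Mutex above with the scoped AutoLock typedef defined at the end of the header (variable names are illustrative):

    #include <cybozu/mutex.hpp>

    static cybozu::Mutex g_mutex;
    static long g_total = 0;

    void add(long v)
    {
        cybozu::AutoLock lk(g_mutex); // lock() now, unlock() when lk leaves scope
        g_total += v;
    }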
diff --git a/vendor/github.com/tangerine-network/mcl/include/cybozu/option.hpp b/vendor/github.com/tangerine-network/mcl/include/cybozu/option.hpp
new file mode 100644
index 000000000..a5dfd137d
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/cybozu/option.hpp
@@ -0,0 +1,723 @@
+#pragma once
+/**
+ @file
+ @brief command line parser
+
+ @author MITSUNARI Shigeo(@herumi)
+*/
+#include <string>
+#include <vector>
+#include <map>
+#include <sstream>
+#include <iostream>
+#include <limits>
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <cybozu/exception.hpp>
+#include <cybozu/atoi.hpp>
+
+/*
+ Option parser
+
+ progName (opt1-name|opt2-name|...) param1 param2 ...
+ param1:param1-help
+ param2:param2-help
+ -op1-name:opt1-help
+ ...
+
+ How to setup
+ int num;
+ -n num ; optional option => appendOpt(&num, <defaultValue>, "num", "num-help");
+ -n num ; must option => appendMust(&num, "num", "num-help");
+
+ std::vector<int> v;
+ -v s1 s2 s3 ... => appendVec(&v, "v");
+
+ Remark1: parsing of v stops when an argv element begins with '-[^0-9]'
+ Remark2: the first character of an opt-name must not be a digit ('0'...'9')
+ to avoid conflict with negative numbers
+
+ std::string file1;
+ file1 is param => appendParam(&file1, "input-file");
+ file2 is optional param => appendParamOpt(&file2, "output-file");
+
+ How to use
+ opt.parse(argc, argv);
+
+ see sample/option_smpl.cpp
+*/
+
+namespace cybozu {
+
+struct OptionError : public cybozu::Exception {
+ enum Type {
+ NoError = 0,
+ BAD_OPT = 1,
+ BAD_VALUE,
+ NO_VALUE,
+ OPT_IS_NECESSARY,
+ PARAM_IS_NECESSARY,
+ REDUNDANT_VAL,
+ BAD_ARGC
+ };
+ Type type;
+ int argPos;
+ OptionError()
+ : cybozu::Exception("OptionError", false)
+ , type(NoError)
+ , argPos(0)
+ {
+ }
+ cybozu::Exception& set(Type _type, int _argPos = 0)
+ {
+ this->type = _type;
+ this->argPos = _argPos;
+ switch (_type) {
+ case BAD_OPT:
+ (*this) << "bad opt";
+ break;
+ case BAD_VALUE:
+ (*this) << "bad value";
+ break;
+ case NO_VALUE:
+ (*this) << "no value";
+ break;
+ case OPT_IS_NECESSARY:
+ (*this) << "opt is necessary";
+ break;
+ case PARAM_IS_NECESSARY:
+ (*this) << "param is necessary";
+ break;
+ case REDUNDANT_VAL:
+ (*this) << "redundant argVal";
+ break;
+ case BAD_ARGC:
+ (*this) << "bad argc";
+ default:
+ break;
+ }
+ return *this;
+ }
+};
+
+namespace option_local {
+
+template<class T>
+bool convert(T* x, const char *str)
+{
+ std::istringstream is(str);
+ is >> *x;
+ return !!is;
+}
+
+template<>
+inline bool convert(std::string* x, const char *str)
+{
+ *x = str;
+ return true;
+}
+
+template<class T>
+bool convertInt(T* x, const char *str)
+{
+ if (str[0] == '0' && str[1] == 'x') {
+ bool b;
+ *x = cybozu::hextoi(&b, str + 2);
+ return b;
+ }
+ size_t len = strlen(str);
+ int factor = 1;
+ if (len > 1) {
+ switch (str[len - 1]) {
+ case 'k': factor = 1000; len--; break;
+ case 'm': factor = 1000 * 1000; len--; break;
+ case 'g': factor = 1000 * 1000 * 1000; len--; break;
+ case 'K': factor = 1024; len--; break;
+ case 'M': factor = 1024 * 1024; len--; break;
+ case 'G': factor = 1024 * 1024 * 1024; len--; break;
+ default: break;
+ }
+ }
+ bool b;
+ T y = cybozu::atoi(&b, str, len);
+ if (!b) return false;
+ if (factor > 1) {
+ if ((std::numeric_limits<T>::min)() / factor <= y
+ && y <= (std::numeric_limits<T>::max)() / factor) {
+ *x = y * factor;
+ } else {
+ return false;
+ }
+ } else {
+ *x = y;
+ }
+ return true;
+}
+
+#define CYBOZU_OPTION_DEFINE_CONVERT_INT(type) \
+template<>inline bool convert(type* x, const char *str) { return convertInt(x, str); }
+
+CYBOZU_OPTION_DEFINE_CONVERT_INT(int)
+CYBOZU_OPTION_DEFINE_CONVERT_INT(long)
+CYBOZU_OPTION_DEFINE_CONVERT_INT(long long)
+
+CYBOZU_OPTION_DEFINE_CONVERT_INT(unsigned int)
+CYBOZU_OPTION_DEFINE_CONVERT_INT(unsigned long)
+CYBOZU_OPTION_DEFINE_CONVERT_INT(unsigned long long)
+
+#undef CYBOZU_OPTION_DEFINE_CONVERT_INT
+
+struct HolderBase {
+ virtual ~HolderBase(){}
+ virtual bool set(const char*) = 0;
+ virtual HolderBase *clone() const = 0;
+ virtual std::string toStr() const = 0;
+ virtual const void *get() const = 0;
+};
+
+template<class T>
+struct Holder : public HolderBase {
+ T *p_;
+ Holder(T *p) : p_(p) {}
+ HolderBase *clone() const { return new Holder(p_); }
+ bool set(const char *str) { return option_local::convert(p_, str); }
+ std::string toStr() const
+ {
+ std::ostringstream os;
+ os << *p_;
+ return os.str();
+ }
+ const void *get() const { return (void*)p_; }
+};
+
+/*
+ for gcc 7 with -fnew-ttp-matching
+ this specialization is not necessary under -fno-new-ttp-matching
+*/
+template struct Holder<std::string>;
+
+template<class T, class Alloc, template<class T_, class Alloc_>class Container>
+struct Holder<Container<T, Alloc> > : public HolderBase {
+ typedef Container<T, Alloc> Vec;
+ Vec *p_;
+ Holder(Vec *p) : p_(p) {}
+ HolderBase *clone() const { return new Holder<Vec>(p_); }
+ bool set(const char *str)
+ {
+ T t;
+ bool b = option_local::convert(&t, str);
+ if (b) p_->push_back(t);
+ return b;
+ }
+ std::string toStr() const
+ {
+ std::ostringstream os;
+ bool isFirst = true;
+ for (typename Vec::const_iterator i = p_->begin(), ie = p_->end(); i != ie; ++i) {
+ if (isFirst) {
+ isFirst = false;
+ } else {
+ os << ' ';
+ }
+ os << *i;
+ }
+ return os.str();
+ }
+ const void *get() const { return (void*)p_; }
+};
+
+class Var {
+ HolderBase *p_;
+ bool isSet_;
+public:
+ Var() : p_(0), isSet_(false) { }
+ Var(const Var& rhs) : p_(rhs.p_->clone()), isSet_(false) { }
+ template<class T>
+ explicit Var(T *x) : p_(new Holder<T>(x)), isSet_(false) { }
+
+ ~Var() { delete p_; }
+
+ void swap(Var& rhs) CYBOZU_NOEXCEPT
+ {
+ std::swap(p_, rhs.p_);
+ std::swap(isSet_, rhs.isSet_);
+ }
+ void operator=(const Var& rhs)
+ {
+ Var v(rhs);
+ swap(v);
+ }
+ bool set(const char *str)
+ {
+ isSet_ = true;
+ return p_->set(str);
+ }
+ std::string toStr() const { return p_ ? p_->toStr() : ""; }
+ bool isSet() const { return isSet_; }
+ const void *get() const { return p_ ? p_->get() : 0; }
+};
+
+} // option_local
+
+class Option {
+ enum Mode { // for opt
+ N_is0 = 0, // for bool by appendBoolOpt()
+ N_is1 = 1,
+ N_any = 2
+ };
+ enum ParamMode {
+ P_exact = 0, // one
+ P_optional = 1, // zero or one
+ P_variable = 2 // zero or greater
+ };
+ struct Info {
+ option_local::Var var;
+ Mode mode; // 0 or 1 or any ; for opt, not used for Param
+ bool isMust; // this option is must
+ std::string opt; // option param name without '-'
+ std::string help; // description of option
+
+ Info() : mode(N_is0), isMust(false) {}
+ template<class T>
+ Info(T* pvar, Mode mode, bool isMust, const char *opt, const std::string& help)
+ : var(pvar)
+ , mode(mode)
+ , isMust(isMust)
+ , opt(opt)
+ , help(help)
+ {
+ }
+ friend inline std::ostream& operator<<(std::ostream& os, const Info& self)
+ {
+ os << self.opt << '=' << self.var.toStr();
+ if (self.var.isSet()) {
+ os << " (set)";
+ } else {
+ os << " (default)";
+ }
+ return os;
+ }
+ void put() const
+ {
+ std::cout << *this;
+ }
+ void usage() const
+ {
+ printf(" -%s %s%s\n", opt.c_str(), help.c_str(), isMust ? " (must)" : "");
+ }
+ void shortUsage() const
+ {
+ printf(" -%s %s", opt.c_str(), mode == N_is0 ? "" : mode == N_is1 ? "para" : "para...");
+ }
+ bool isSet() const { return var.isSet(); }
+ const void *get() const { return var.get(); }
+ };
+ typedef std::vector<Info> InfoVec;
+ typedef std::vector<std::string> StrVec;
+ typedef std::map<std::string, size_t> OptMap;
+ InfoVec infoVec_;
+ InfoVec paramVec_;
+ Info remains_;
+ OptMap optMap_;
+ bool showOptUsage_;
+ ParamMode paramMode_;
+ std::string progName_;
+ std::string desc_;
+ std::string helpOpt_;
+ std::string help_;
+ std::string usage_;
+ StrVec delimiters_;
+ StrVec *remainsAfterDelimiter_;
+ int nextDelimiter_;
+ template<class T>
+ void appendSub(T *pvar, Mode mode, bool isMust, const char *opt, const std::string& help)
+ {
+ const char c = opt[0];
+ if ('0' <= c && c <= '9') throw cybozu::Exception("Option::appendSub:opt must begin with not number") << opt;
+ if (optMap_.find(opt) != optMap_.end()) {
+ throw cybozu::Exception("Option::append:duplicate option") << opt;
+ }
+ optMap_[opt] = infoVec_.size();
+ infoVec_.push_back(Info(pvar, mode, isMust, opt, help));
+ }
+
+ template<class T, class U>
+ void append(T *pvar, const U& defaultVal, bool isMust, const char *opt, const std::string& help = "")
+ {
+ *pvar = defaultVal;
+ appendSub(pvar, N_is1, isMust, opt, help);
+ }
+ /*
+ don't deal with negative number as option
+ */
+ bool isOpt(const char *str) const
+ {
+ if (str[0] != '-') return false;
+ const char c = str[1];
+ if ('0' <= c && c <= '9') return false;
+ return true;
+ }
+ void verifyParamMode()
+ {
+ if (paramMode_ != P_exact) throw cybozu::Exception("Option:appendParamVec:appendParam is forbidden after appendParamOpt/appendParamVec");
+ }
+ std::string getBaseName(const std::string& name) const
+ {
+ size_t pos = name.find_last_of("/\\");
+ if (pos == std::string::npos) return name;
+ return name.substr(pos + 1);
+ }
+ bool inDelimiters(const std::string& str) const
+ {
+ return std::find(delimiters_.begin(), delimiters_.end(), str) != delimiters_.end();
+ }
+public:
+ Option()
+ : showOptUsage_(true)
+ , paramMode_(P_exact)
+ , remainsAfterDelimiter_(0)
+ , nextDelimiter_(-1)
+ {
+ }
+ virtual ~Option() {}
+ /*
+ append optional option with default value
+ @param pvar [in] pointer to option variable
+ @param defaultVal [in] default value
+ @param opt [in] option name
+ @param help [in] option help
+ @note you can use 123k, 56M if T is int/long/long long
+ k : *1000
+ m : *1000000
+ g : *1000000000
+ K : *1024
+ M : *1024*1024
+ G : *1024*1024*1024
+ */
+ template<class T, class U>
+ void appendOpt(T *pvar, const U& defaultVal, const char *opt, const std::string& help = "")
+ {
+ append(pvar, defaultVal, false, opt, help);
+ }
+ /*
+ default value of *pvar is false
+ */
+ void appendBoolOpt(bool *pvar, const char *opt, const std::string& help = "")
+ {
+ *pvar = false;
+ appendSub(pvar, N_is0, false, opt, help);
+ }
+ /*
+ append necessary option
+ @param pvar [in] pointer to option variable
+ @param opt [in] option name
+ @param help [in] option help
+ */
+ template<class T>
+ void appendMust(T *pvar, const char *opt, const std::string& help = "")
+ {
+ append(pvar, T(), true, opt, help);
+ }
+ /*
+ append vector option
+ @param pvar [in] pointer to option variable
+ @param opt [in] option name
+ @param help [in] option help
+ */
+ template<class T, class Alloc, template<class T_, class Alloc_>class Container>
+ void appendVec(Container<T, Alloc> *pvar, const char *opt, const std::string& help = "")
+ {
+ appendSub(pvar, N_any, false, opt, help);
+ }
+ /*
+ append parameter
+ @param pvar [in] pointer to parameter
+ @param opt [in] option name
+ @param help [in] option help
+ */
+ template<class T>
+ void appendParam(T *pvar, const char *opt, const std::string& help = "")
+ {
+ verifyParamMode();
+ paramVec_.push_back(Info(pvar, N_is1, true, opt, help));
+ }
+ /*
+ append optional parameter
+ @param pvar [in] pointer to parameter
+ @param defaultVal [in] default value
+ @param opt [in] option name
+ @param help [in] option help
+ @note you can call appendParamOpt once after appendParam
+ */
+ template<class T, class U>
+ void appendParamOpt(T *pvar, const U& defaultVal, const char *opt, const std::string& help = "")
+ {
+ verifyParamMode();
+ *pvar = defaultVal;
+ paramMode_ = P_optional;
+ paramVec_.push_back(Info(pvar, N_is1, false, opt, help));
+ }
+ /*
+ append remain parameter
+ @param pvar [in] pointer to vector of parameter
+ @param opt [in] option name
+ @param help [in] option help
+ @note you can call appendParamVec once after appendParam
+ */
+ template<class T, class Alloc, template<class T_, class Alloc_>class Container>
+ void appendParamVec(Container<T, Alloc> *pvar, const char *name, const std::string& help = "")
+ {
+ verifyParamMode();
+ paramMode_ = P_variable;
+ remains_.var = option_local::Var(pvar);
+ remains_.mode = N_any;
+ remains_.isMust = false;
+ remains_.opt = name;
+ remains_.help = help;
+ }
+ void appendHelp(const char *opt, const std::string& help = ": show this message")
+ {
+ helpOpt_ = opt;
+ help_ = help;
+ }
+ /*
+ stop parsing after delimiter is found
+ @param delimiter [in] string to stop
+ @param remain [out] set remaining strings if remain
+ */
+ void setDelimiter(const std::string& delimiter, std::vector<std::string> *remain = 0)
+ {
+ delimiters_.push_back(delimiter);
+ remainsAfterDelimiter_ = remain;
+ }
+ /*
+ stop parsing after delimiter is found
+ @param delimiter [in] delimiter string to append to the list of delimiters
+ */
+ void appendDelimiter(const std::string& delimiter)
+ {
+ delimiters_.push_back(delimiter);
+ }
+ /*
+ clear list of delimiters
+ */
+ void clearDelimiterList() { delimiters_.clear(); }
+ /*
+ return the next position of delimiter between [0, argc]
+ @note returns argc if the delimiter is not set or not found
+ */
+ int getNextPositionOfDelimiter() const { return nextDelimiter_; }
+ /*
+ parse (argc, argv)
+ @param argc [in] argc of main
+ @param argv [in] argv of main
+ @param startPos [in] start position of argc
+ @param progName [in] used instead of argv[0]
+ */
+ bool parse(int argc, const char *const argv[], int startPos = 1, const char *progName = 0)
+ {
+ if (argc < 1 || startPos > argc) return false;
+ progName_ = getBaseName(progName ? progName : argv[startPos - 1]);
+ nextDelimiter_ = argc;
+ OptionError err;
+ for (int pos = startPos; pos < argc; pos++) {
+ if (inDelimiters(argv[pos])) {
+ nextDelimiter_ = pos + 1;
+ if (remainsAfterDelimiter_) {
+ for (int i = nextDelimiter_; i < argc; i++) {
+ remainsAfterDelimiter_->push_back(argv[i]);
+ }
+ }
+ break;
+ }
+ if (isOpt(argv[pos])) {
+ const std::string str = argv[pos] + 1;
+ if (helpOpt_ == str) {
+ usage();
+ exit(0);
+ }
+ OptMap::const_iterator i = optMap_.find(str);
+ if (i == optMap_.end()) {
+ err.set(OptionError::BAD_OPT, pos);
+ goto ERR;
+ }
+
+ Info& info = infoVec_[i->second];
+ switch (info.mode) {
+ case N_is0:
+ if (!info.var.set("1")) {
+ err.set(OptionError::BAD_VALUE, pos);
+ goto ERR;
+ }
+ break;
+ case N_is1:
+ pos++;
+ if (pos == argc) {
+ err.set(OptionError::BAD_VALUE, pos) << (std::string("no value for -") + info.opt);
+ goto ERR;
+ }
+ if (!info.var.set(argv[pos])) {
+ err.set(OptionError::BAD_VALUE, pos) << (std::string(argv[pos]) + " for -" + info.opt);
+ goto ERR;
+ }
+ break;
+ case N_any:
+ default:
+ {
+ pos++;
+ int j = 0;
+ while (pos < argc && !isOpt(argv[pos])) {
+ if (!info.var.set(argv[pos])) {
+ err.set(OptionError::BAD_VALUE, pos) << (std::string(argv[pos]) + " for -" + info.opt) << j;
+ goto ERR;
+ }
+ pos++;
+ j++;
+ }
+ if (j > 0) {
+ pos--;
+ } else {
+ err.set(OptionError::NO_VALUE, pos) << (std::string("for -") + info.opt);
+ goto ERR;
+ }
+ }
+ break;
+ }
+ } else {
+ bool used = false;
+ for (size_t i = 0; i < paramVec_.size(); i++) {
+ Info& param = paramVec_[i];
+ if (!param.var.isSet()) {
+ if (!param.var.set(argv[pos])) {
+ err.set(OptionError::BAD_VALUE, pos) << (std::string(argv[pos]) + " for " + param.opt);
+ goto ERR;
+ }
+ used = true;
+ break;
+ }
+ }
+ if (!used) {
+ if (paramMode_ == P_variable) {
+ remains_.var.set(argv[pos]);
+ } else {
+ err.set(OptionError::REDUNDANT_VAL, pos) << argv[pos];
+ goto ERR;
+ }
+ }
+ }
+ }
+ // check whether must-opt is set
+ for (size_t i = 0; i < infoVec_.size(); i++) {
+ const Info& info = infoVec_[i];
+ if (info.isMust && !info.var.isSet()) {
+ err.set(OptionError::OPT_IS_NECESSARY) << info.opt;
+ goto ERR;
+ }
+ }
+ // check whether param is set
+ for (size_t i = 0; i < paramVec_.size(); i++) {
+ const Info& param = paramVec_[i];
+ if (param.isMust && !param.var.isSet()) {
+ err.set(OptionError::PARAM_IS_NECESSARY) << param.opt;
+ goto ERR;
+ }
+ }
+ // check whether remains is set
+ if (paramMode_ == P_variable && remains_.isMust && !remains_.var.isSet()) {
+ err.set(OptionError::PARAM_IS_NECESSARY) << remains_.opt;
+ goto ERR;
+ }
+ return true;
+ ERR:
+ assert(err.type);
+ printf("%s\n", err.what());
+ return false;
+ }
+ /*
+ show desc at first in usage()
+ */
+ void setDescription(const std::string& desc)
+ {
+ desc_ = desc;
+ }
+ /*
+ show command line after desc
+ don't put option message if not showOptUsage
+ */
+ void setUsage(const std::string& usage, bool showOptUsage = false)
+ {
+ usage_ = usage;
+ showOptUsage_ = showOptUsage;
+ }
+ void usage() const
+ {
+ if (!desc_.empty()) printf("%s\n", desc_.c_str());
+ if (usage_.empty()) {
+ printf("usage:%s", progName_.c_str());
+ if (!infoVec_.empty()) printf(" [opt]");
+ for (size_t i = 0; i < infoVec_.size(); i++) {
+ if (infoVec_[i].isMust) infoVec_[i].shortUsage();
+ }
+ for (size_t i = 0; i < paramVec_.size(); i++) {
+ printf(" %s", paramVec_[i].opt.c_str());
+ }
+ if (paramMode_ == P_variable) {
+ printf(" %s", remains_.opt.c_str());
+ }
+ printf("\n");
+ } else {
+ printf("%s\n", usage_.c_str());
+ if (!showOptUsage_) return;
+ }
+ for (size_t i = 0; i < paramVec_.size(); i++) {
+ const Info& param = paramVec_[i];
+ if (!param.help.empty()) printf(" %s %s\n", paramVec_[i].opt.c_str(), paramVec_[i].help.c_str());
+ }
+ if (!remains_.help.empty()) printf(" %s %s\n", remains_.opt.c_str(), remains_.help.c_str());
+ if (!helpOpt_.empty()) {
+ printf(" -%s %s\n", helpOpt_.c_str(), help_.c_str());
+ }
+ for (size_t i = 0; i < infoVec_.size(); i++) {
+ infoVec_[i].usage();
+ }
+ }
+ friend inline std::ostream& operator<<(std::ostream& os, const Option& self)
+ {
+ for (size_t i = 0; i < self.paramVec_.size(); i++) {
+ const Info& param = self.paramVec_[i];
+ os << param.opt << '=' << param.var.toStr() << std::endl;
+ }
+ if (self.paramMode_ == P_variable) {
+ os << "remains=" << self.remains_.var.toStr() << std::endl;
+ }
+ for (size_t i = 0; i < self.infoVec_.size(); i++) {
+ os << self.infoVec_[i] << std::endl;
+ }
+ return os;
+ }
+ void put() const
+ {
+ std::cout << *this;
+ }
+ /*
+ whether pvar is set or not
+ */
+ template<class T>
+ bool isSet(const T* pvar) const
+ {
+ const void *p = static_cast<const void*>(pvar);
+ for (size_t i = 0; i < paramVec_.size(); i++) {
+ const Info& v = paramVec_[i];
+ if (v.get() == p) return v.isSet();
+ }
+ if (remains_.get() == p) return remains_.isSet();
+ for (size_t i = 0; i < infoVec_.size(); i++) {
+ const Info& v = infoVec_[i];
+ if (v.get() == p) return v.isSet();
+ }
+ throw cybozu::Exception("Option:isSet:no assigned var") << pvar;
+ }
+};
+
+} // cybozu
diff --git a/vendor/github.com/tangerine-network/mcl/include/cybozu/random_generator.hpp b/vendor/github.com/tangerine-network/mcl/include/cybozu/random_generator.hpp
new file mode 100644
index 000000000..ff4a78da5
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/cybozu/random_generator.hpp
@@ -0,0 +1,153 @@
+#pragma once
+/**
+ @file
+	@brief pseudorandom generator
+ @author MITSUNARI Shigeo(@herumi)
+ @license modified new BSD license
+ http://opensource.org/licenses/BSD-3-Clause
+*/
+
+#include <cybozu/exception.hpp>
+#ifdef _WIN32
+#include <winsock2.h>
+#include <windows.h>
+#include <wincrypt.h>
+#ifdef _MSC_VER
+#pragma comment (lib, "advapi32.lib")
+#endif
+#include <cybozu/critical_section.hpp>
+#else
+#include <sys/types.h>
+#include <fcntl.h>
+#endif
+
+namespace cybozu {
+
+class RandomGenerator {
+ RandomGenerator(const RandomGenerator&);
+ void operator=(const RandomGenerator&);
+public:
+ uint32_t operator()()
+ {
+ return get32();
+ }
+ uint32_t get32()
+ {
+ uint32_t ret;
+ read(&ret, 1);
+ return ret;
+ }
+ uint64_t get64()
+ {
+ uint64_t ret;
+ read(&ret, 1);
+ return ret;
+ }
+#ifdef _WIN32
+ RandomGenerator()
+ : prov_(0)
+ , pos_(bufSize)
+ {
+ DWORD flagTbl[] = { 0, CRYPT_NEWKEYSET };
+ for (int i = 0; i < 2; i++) {
+ if (CryptAcquireContext(&prov_, NULL, NULL, PROV_RSA_FULL, flagTbl[i]) != 0) return;
+ }
+ throw cybozu::Exception("randomgenerator");
+ }
+ bool read_inner(void *buf, size_t byteSize)
+ {
+ return CryptGenRandom(prov_, static_cast<DWORD>(byteSize), static_cast<BYTE*>(buf)) != 0;
+ }
+ ~RandomGenerator()
+ {
+ if (prov_) {
+ CryptReleaseContext(prov_, 0);
+ }
+ }
+ /*
+ fill buf[0..bufNum-1] with random data
+ @note bufNum is not byte size
+ */
+ template<class T>
+ void read(bool *pb, T *buf, size_t bufNum)
+ {
+ cybozu::AutoLockCs al(cs_);
+ const size_t byteSize = sizeof(T) * bufNum;
+ if (byteSize > bufSize) {
+ if (!read_inner(buf, byteSize)) {
+ *pb = false;
+ return;
+ }
+ } else {
+ if (pos_ + byteSize > bufSize) {
+ read_inner(buf_, bufSize);
+ pos_ = 0;
+ }
+ memcpy(buf, buf_ + pos_, byteSize);
+ pos_ += byteSize;
+ }
+ *pb = true;
+ }
+ template<class T>
+ void read(T *buf, size_t bufNum)
+ {
+ bool b;
+ read(&b, buf, bufNum);
+ if (!b) throw cybozu::Exception("RandomGenerator:read") << bufNum;
+ }
+private:
+ HCRYPTPROV prov_;
+ static const size_t bufSize = 1024;
+ char buf_[bufSize];
+ size_t pos_;
+ cybozu::CriticalSection cs_;
+#else
+ RandomGenerator()
+ : fp_(::fopen("/dev/urandom", "rb"))
+ {
+ if (!fp_) throw cybozu::Exception("randomgenerator");
+ }
+ ~RandomGenerator()
+ {
+ if (fp_) ::fclose(fp_);
+ }
+ /*
+ fill buf[0..bufNum-1] with random data
+ @note bufNum is not byte size
+ */
+ template<class T>
+ void read(bool *pb, T *buf, size_t bufNum)
+ {
+ const size_t byteSize = sizeof(T) * bufNum;
+ *pb = ::fread(buf, 1, (int)byteSize, fp_) == byteSize;
+ }
+ template<class T>
+ void read(T *buf, size_t bufNum)
+ {
+ bool b;
+ read(&b, buf, bufNum);
+ if (!b) throw cybozu::Exception("RandomGenerator:read") << bufNum;
+ }
+#endif
+private:
+ FILE *fp_;
+};
+
+template<class T, class RG>
+void shuffle(T* v, size_t n, RG& rg)
+{
+ if (n <= 1) return;
+ for (size_t i = 0; i < n - 1; i++) {
+ size_t r = i + size_t(rg.get64() % (n - i));
+ using namespace std;
+ swap(v[i], v[r]);
+ }
+}
+
+template<class V, class RG>
+void shuffle(V& v, RG& rg)
+{
+ shuffle(v.data(), v.size(), rg);
+}
+
+} // cybozu
diff --git a/vendor/github.com/tangerine-network/mcl/include/cybozu/serializer.hpp b/vendor/github.com/tangerine-network/mcl/include/cybozu/serializer.hpp
new file mode 100644
index 000000000..1e23c8f42
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/cybozu/serializer.hpp
@@ -0,0 +1,363 @@
+#pragma once
+/**
+ @file
+ @brief serializer for vector, list, map and so on
+
+ @author MITSUNARI Shigeo(@herumi)
+*/
+#include <assert.h>
+#include <cybozu/stream.hpp>
+
+#ifdef _MSC_VER
+ #pragma warning(push)
+ #pragma warning(disable : 4127)
+#endif
+
+//#define CYBOZU_SERIALIZER_FIXED_SIZE_INTEGER
+
+namespace cybozu {
+
+namespace serializer_local {
+
+template<class T>
+union ci {
+ T i;
+ uint8_t c[sizeof(T)];
+};
+
+template<class S, void (S::*)(size_t)>
+struct HasMemFunc { };
+
+template<class T>
+void dispatch_reserve(T& t, size_t size, int, HasMemFunc<T, &T::reserve>* = 0)
+{
+ t.reserve(size);
+}
+
+template<class T>
+void dispatch_reserve(T&, size_t, int*)
+{
+}
+
+template<class T>
+void reserve_if_exists(T& t, size_t size)
+{
+ dispatch_reserve(t, size, 0);
+}
+
+} // serializer_local
+
+template<class InputStream, class T>
+void loadRange(T *p, size_t num, InputStream& is)
+{
+ cybozu::read(p, num * sizeof(T), is);
+}
+
+template<class OutputStream, class T>
+void saveRange(OutputStream& os, const T *p, size_t num)
+{
+ cybozu::write(os, p, num * sizeof(T));
+}
+
+template<class InputStream, class T>
+void loadPod(T& x, InputStream& is)
+{
+ serializer_local::ci<T> ci;
+ loadRange(ci.c, sizeof(ci.c), is);
+ x = ci.i;
+}
+
+template<class OutputStream, class T>
+void savePod(OutputStream& os, const T& x)
+{
+ serializer_local::ci<T> ci;
+ ci.i = x;
+ saveRange(os, ci.c, sizeof(ci.c));
+}
+
+template<class InputStream, class T>
+void load(T& x, InputStream& is)
+{
+ x.load(is);
+}
+
+template<class OutputStream, class T>
+void save(OutputStream& os, const T& x)
+{
+ x.save(os);
+}
+
+#define CYBOZU_SERIALIZER_MAKE_SERIALIZER_F(type) \
+template<class InputStream>void load(type& x, InputStream& is) { loadPod(x, is); } \
+template<class OutputStream>void save(OutputStream& os, type x) { savePod(os, x); }
+
+CYBOZU_SERIALIZER_MAKE_SERIALIZER_F(bool)
+CYBOZU_SERIALIZER_MAKE_SERIALIZER_F(char)
+CYBOZU_SERIALIZER_MAKE_SERIALIZER_F(short)
+CYBOZU_SERIALIZER_MAKE_SERIALIZER_F(unsigned char)
+CYBOZU_SERIALIZER_MAKE_SERIALIZER_F(unsigned short)
+CYBOZU_SERIALIZER_MAKE_SERIALIZER_F(wchar_t)
+
+CYBOZU_SERIALIZER_MAKE_SERIALIZER_F(float)
+CYBOZU_SERIALIZER_MAKE_SERIALIZER_F(double)
+
+#ifdef CYBOZU_SERIALIZER_FIXED_SIZE_INTEGER
+
+#define CYBOZU_SERIALIZER_MAKE_INT_SERIALIZER(type) CYBOZU_SERIALIZER_MAKE_SERIALIZER_F(type)
+
+#else
+
+namespace serializer_local {
+
+template<class S, class T>
+bool isRecoverable(T x)
+{
+ return T(S(x)) == x;
+}
+/*
+ data structure H:D of integer x
+ H:header(1byte)
+ 0x80 ; D = 1 byte zero ext
+ 0x81 ; D = 2 byte zero ext
+ 0x82 ; D = 4 byte zero ext
+ 0x83 ; D = 8 byte zero ext
+ 0x84 ; D = 1 byte signed ext
+ 0x85 ; D = 2 byte signed ext
+ 0x86 ; D = 4 byte signed ext
+ 0x87 ; D = 8 byte signed ext
+ other; x = signed H, D = none
+*/
+template<class OutputStream, class T>
+void saveVariableInt(OutputStream& os, const T& x)
+{
+ if (isRecoverable<int8_t>(x)) {
+ uint8_t u8 = uint8_t(x);
+ if (unsigned(u8 - 0x80) <= 7) {
+ savePod(os, uint8_t(0x84));
+ }
+ savePod(os, u8);
+ } else if (isRecoverable<uint8_t>(x)) {
+ savePod(os, uint8_t(0x80));
+ savePod(os, uint8_t(x));
+ } else if (isRecoverable<uint16_t>(x) || isRecoverable<int16_t>(x)) {
+ savePod(os, uint8_t(isRecoverable<uint16_t>(x) ? 0x81 : 0x85));
+ savePod(os, uint16_t(x));
+ } else if (isRecoverable<uint32_t>(x) || isRecoverable<int32_t>(x)) {
+ savePod(os, uint8_t(isRecoverable<uint32_t>(x) ? 0x82 : 0x86));
+ savePod(os, uint32_t(x));
+ } else {
+ assert(sizeof(T) == 8);
+ savePod(os, uint8_t(0x83));
+ savePod(os, uint64_t(x));
+ }
+}
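+/*
+	worked examples of the encoding above (editor's illustration, not part of upstream mcl;
+	multi-byte bodies are written by savePod in host byte order, little endian on x86):
+	save(os, int(5))    -> 05         ; fits in int8_t and is not in [0x80, 0x87]
+	save(os, int(-2))   -> fe         ; signed H, no D
+	save(os, int(-124)) -> 84 84      ; int8_t value whose byte pattern collides with a header byte
+	save(os, int(132))  -> 80 84      ; 1 byte zero ext
+	save(os, int(300))  -> 81 2c 01   ; 2 byte zero ext
+	save(os, int(-300)) -> 85 d4 fe   ; 2 byte signed ext
+*/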
+
+template<class InputStream, class T>
+void loadVariableInt(T& x, InputStream& is)
+{
+ uint8_t h;
+ loadPod(h, is);
+ if (h == 0x80) {
+ uint8_t v;
+ loadPod(v, is);
+ x = v;
+ } else if (h == 0x81) {
+ uint16_t v;
+ loadPod(v, is);
+ x = v;
+ } else if (h == 0x82) {
+ uint32_t v;
+ loadPod(v, is);
+ x = v;
+ } else if (h == 0x83) {
+ if (sizeof(T) == 4) throw cybozu::Exception("loadVariableInt:bad header") << h;
+ uint64_t v;
+ loadPod(v, is);
+ x = static_cast<T>(v);
+ } else if (h == 0x84) {
+ int8_t v;
+ loadPod(v, is);
+ x = v;
+ } else if (h == 0x85) {
+ int16_t v;
+ loadPod(v, is);
+ x = v;
+ } else if (h == 0x86) {
+ int32_t v;
+ loadPod(v, is);
+ x = v;
+ } else if (h == 0x87) {
+ if (sizeof(T) == 4) throw cybozu::Exception("loadVariableInt:bad header") << h;
+ int64_t v;
+ loadPod(v, is);
+ x = static_cast<T>(v);
+ } else {
+ x = static_cast<int8_t>(h);
+ }
+}
+
+} // serializer_local
+
+#define CYBOZU_SERIALIZER_MAKE_INT_SERIALIZER(type) \
+template<class InputStream>void load(type& x, InputStream& is) { serializer_local::loadVariableInt(x, is); } \
+template<class OutputStream>void save(OutputStream& os, type x) { serializer_local::saveVariableInt(os, x); }
+
+#endif
+
+CYBOZU_SERIALIZER_MAKE_INT_SERIALIZER(int)
+CYBOZU_SERIALIZER_MAKE_INT_SERIALIZER(long)
+CYBOZU_SERIALIZER_MAKE_INT_SERIALIZER(long long)
+CYBOZU_SERIALIZER_MAKE_INT_SERIALIZER(unsigned int)
+CYBOZU_SERIALIZER_MAKE_INT_SERIALIZER(unsigned long)
+CYBOZU_SERIALIZER_MAKE_INT_SERIALIZER(unsigned long long)
+
+#undef CYBOZU_SERIALIZER_MAKE_INT_SERIALIZER
+#undef CYBOZU_SERIALIZER_MAKE_UNT_SERIALIZER
+#undef CYBOZU_SERIALIZER_MAKE_SERIALIZER_F
+#undef CYBOZU_SERIALIZER_MAKE_SERIALIZER_V
+
+// only for std::vector<POD>
+template<class V, class InputStream>
+void loadPodVec(V& v, InputStream& is)
+{
+ size_t size;
+ load(size, is);
+ v.resize(size);
+ if (size > 0) loadRange(&v[0], size, is);
+}
+
+// only for std::vector<POD>
+template<class V, class OutputStream>
+void savePodVec(OutputStream& os, const V& v)
+{
+ save(os, v.size());
+ if (!v.empty()) saveRange(os, &v[0], v.size());
+}
+
+template<class InputStream>
+void load(std::string& str, InputStream& is)
+{
+ loadPodVec(str, is);
+}
+
+template<class OutputStream>
+void save(OutputStream& os, const std::string& str)
+{
+ savePodVec(os, str);
+}
+
+template<class OutputStream>
+void save(OutputStream& os, const char *x)
+{
+ const size_t len = strlen(x);
+ save(os, len);
+ if (len > 0) saveRange(os, x, len);
+}
+
+
+// for vector, list
+template<class InputStream, class T, class Alloc, template<class T_, class Alloc_>class Container>
+void load(Container<T, Alloc>& x, InputStream& is)
+{
+ size_t size;
+ load(size, is);
+ serializer_local::reserve_if_exists(x, size);
+ for (size_t i = 0; i < size; i++) {
+ x.push_back(T());
+ T& t = x.back();
+ load(t, is);
+ }
+}
+
+template<class OutputStream, class T, class Alloc, template<class T_, class Alloc_>class Container>
+void save(OutputStream& os, const Container<T, Alloc>& x)
+{
+ typedef Container<T, Alloc> V;
+ save(os, x.size());
+ for (typename V::const_iterator i = x.begin(), end = x.end(); i != end; ++i) {
+ save(os, *i);
+ }
+}
+
+// for set
+template<class InputStream, class K, class Pred, class Alloc, template<class K_, class Pred_, class Alloc_>class Container>
+void load(Container<K, Pred, Alloc>& x, InputStream& is)
+{
+ size_t size;
+ load(size, is);
+ for (size_t i = 0; i < size; i++) {
+ K t;
+ load(t, is);
+ x.insert(t);
+ }
+}
+
+template<class OutputStream, class K, class Pred, class Alloc, template<class K_, class Pred_, class Alloc_>class Container>
+void save(OutputStream& os, const Container<K, Pred, Alloc>& x)
+{
+ typedef Container<K, Pred, Alloc> Set;
+ save(os, x.size());
+ for (typename Set::const_iterator i = x.begin(), end = x.end(); i != end; ++i) {
+ save(os, *i);
+ }
+}
+
+// for map
+template<class InputStream, class K, class V, class Pred, class Alloc, template<class K_, class V_, class Pred_, class Alloc_>class Container>
+void load(Container<K, V, Pred, Alloc>& x, InputStream& is)
+{
+ typedef Container<K, V, Pred, Alloc> Map;
+ size_t size;
+ load(size, is);
+ for (size_t i = 0; i < size; i++) {
+ std::pair<typename Map::key_type, typename Map::mapped_type> vt;
+ load(vt.first, is);
+ load(vt.second, is);
+ x.insert(vt);
+ }
+}
+
+template<class OutputStream, class K, class V, class Pred, class Alloc, template<class K_, class V_, class Pred_, class Alloc_>class Container>
+void save(OutputStream& os, const Container<K, V, Pred, Alloc>& x)
+{
+ typedef Container<K, V, Pred, Alloc> Map;
+ save(os, x.size());
+ for (typename Map::const_iterator i = x.begin(), end = x.end(); i != end; ++i) {
+ save(os, i->first);
+ save(os, i->second);
+ }
+}
+
+// unordered_map
+template<class InputStream, class K, class V, class Hash, class Pred, class Alloc, template<class K_, class V_, class Hash_, class Pred_, class Alloc_>class Container>
+void load(Container<K, V, Hash, Pred, Alloc>& x, InputStream& is)
+{
+ typedef Container<K, V, Hash, Pred, Alloc> Map;
+ size_t size;
+ load(size, is);
+// x.reserve(size); // tr1::unordered_map may not have reserve
+ cybozu::serializer_local::reserve_if_exists(x, size);
+ for (size_t i = 0; i < size; i++) {
+ std::pair<typename Map::key_type, typename Map::mapped_type> vt;
+ load(vt.first, is);
+ load(vt.second, is);
+ x.insert(vt);
+ }
+}
+
+template<class OutputStream, class K, class V, class Hash, class Pred, class Alloc, template<class K_, class V_, class Hash_, class Pred_, class Alloc_>class Container>
+void save(OutputStream& os, const Container<K, V, Hash, Pred, Alloc>& x)
+{
+ typedef Container<K, V, Hash, Pred, Alloc> Map;
+ save(os, x.size());
+ for (typename Map::const_iterator i = x.begin(), end = x.end(); i != end; ++i) {
+ save(os, i->first);
+ save(os, i->second);
+ }
+}
+
+} // cybozu
+
+#ifdef _MSC_VER
+ #pragma warning(pop)
+#endif
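+// Editor's usage sketch (not part of upstream mcl): the free save/load functions above
+// compose with the stream classes from <cybozu/stream.hpp>; assuming CYBOZU_DONT_USE_STRING
+// is not defined, a round trip looks like
+//   std::string data;
+//   cybozu::StringOutputStream os(data);
+//   cybozu::save(os, 12345);                 // variable-length int encoding
+//   cybozu::save(os, std::string("abc"));    // size prefix + raw bytes
+//   cybozu::StringInputStream is(data);
+//   int x; std::string s;
+//   cybozu::load(x, is);
+//   cybozu::load(s, is);                     // x == 12345, s == "abc"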
diff --git a/vendor/github.com/tangerine-network/mcl/include/cybozu/sha2.hpp b/vendor/github.com/tangerine-network/mcl/include/cybozu/sha2.hpp
new file mode 100644
index 000000000..1830936f0
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/cybozu/sha2.hpp
@@ -0,0 +1,467 @@
+#pragma once
+/**
+ @file
+ @brief SHA-256, SHA-512 class
+ @author MITSUNARI Shigeo(@herumi)
+ @license modified new BSD license
+ http://opensource.org/licenses/BSD-3-Clause
+*/
+#if !defined(CYBOZU_DONT_USE_OPENSSL) && !defined(MCL_DONT_USE_OPENSSL)
+ #define CYBOZU_USE_OPENSSL_SHA
+#endif
+
+#ifndef CYBOZU_DONT_USE_STRING
+#include <string>
+#endif
+
+#ifdef CYBOZU_USE_OPENSSL_SHA
+#ifdef __APPLE__
+ #pragma GCC diagnostic push
+ #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+#endif
+#include <openssl/sha.h>
+#ifdef _MSC_VER
+ #include <cybozu/link_libeay32.hpp>
+#endif
+
+#ifdef __APPLE__
+ #pragma GCC diagnostic pop
+#endif
+
+namespace cybozu {
+
+class Sha256 {
+ SHA256_CTX ctx_;
+public:
+ Sha256()
+ {
+ clear();
+ }
+ void clear()
+ {
+ SHA256_Init(&ctx_);
+ }
+ void update(const void *buf, size_t bufSize)
+ {
+ SHA256_Update(&ctx_, buf, bufSize);
+ }
+ size_t digest(void *md, size_t mdSize, const void *buf, size_t bufSize)
+ {
+ if (mdSize < SHA256_DIGEST_LENGTH) return 0;
+ update(buf, bufSize);
+ SHA256_Final(reinterpret_cast<uint8_t*>(md), &ctx_);
+ return SHA256_DIGEST_LENGTH;
+ }
+#ifndef CYBOZU_DONT_USE_STRING
+ void update(const std::string& buf)
+ {
+ update(buf.c_str(), buf.size());
+ }
+ std::string digest(const std::string& buf)
+ {
+ return digest(buf.c_str(), buf.size());
+ }
+ std::string digest(const void *buf, size_t bufSize)
+ {
+ std::string md(SHA256_DIGEST_LENGTH, 0);
+ digest(&md[0], md.size(), buf, bufSize);
+ return md;
+ }
+#endif
+};
+
+class Sha512 {
+ SHA512_CTX ctx_;
+public:
+ Sha512()
+ {
+ clear();
+ }
+ void clear()
+ {
+ SHA512_Init(&ctx_);
+ }
+ void update(const void *buf, size_t bufSize)
+ {
+ SHA512_Update(&ctx_, buf, bufSize);
+ }
+ size_t digest(void *md, size_t mdSize, const void *buf, size_t bufSize)
+ {
+ if (mdSize < SHA512_DIGEST_LENGTH) return 0;
+ update(buf, bufSize);
+ SHA512_Final(reinterpret_cast<uint8_t*>(md), &ctx_);
+ return SHA512_DIGEST_LENGTH;
+ }
+#ifndef CYBOZU_DONT_USE_STRING
+ void update(const std::string& buf)
+ {
+ update(buf.c_str(), buf.size());
+ }
+ std::string digest(const std::string& buf)
+ {
+ return digest(buf.c_str(), buf.size());
+ }
+ std::string digest(const void *buf, size_t bufSize)
+ {
+ std::string md(SHA512_DIGEST_LENGTH, 0);
+ digest(&md[0], md.size(), buf, bufSize);
+ return md;
+ }
+#endif
+};
+
+} // cybozu
+
+#else
+
+#include <cybozu/endian.hpp>
+#include <memory.h>
+#include <assert.h>
+
+namespace cybozu {
+
+namespace sha2_local {
+
+template<class T>
+T min_(T x, T y) { return x < y ? x : y; }
+
+inline uint32_t rot32(uint32_t x, int s)
+{
+#ifdef _MSC_VER
+ return _rotr(x, s);
+#else
+ return (x >> s) | (x << (32 - s));
+#endif
+}
+
+inline uint64_t rot64(uint64_t x, int s)
+{
+#ifdef _MSC_VER
+ return _rotr64(x, s);
+#else
+ return (x >> s) | (x << (64 - s));
+#endif
+}
+
+template<class T>
+struct Common {
+ void term(const char *buf, size_t bufSize)
+ {
+ assert(bufSize < T::blockSize_);
+ T& self = static_cast<T&>(*this);
+ const uint64_t totalSize = self.totalSize_ + bufSize;
+
+ uint8_t last[T::blockSize_];
+ memcpy(last, buf, bufSize);
+ last[bufSize] = uint8_t(0x80); /* top bit = 1 */
+ memset(&last[bufSize + 1], 0, T::blockSize_ - bufSize - 1);
+ if (bufSize >= T::blockSize_ - T::msgLenByte_) {
+ self.round(reinterpret_cast<const char*>(last));
+ memset(last, 0, sizeof(last)); // clear stack
+ }
+ cybozu::Set64bitAsBE(&last[T::blockSize_ - 8], totalSize * 8);
+ self.round(reinterpret_cast<const char*>(last));
+ }
+ void inner_update(const char *buf, size_t bufSize)
+ {
+ T& self = static_cast<T&>(*this);
+ if (bufSize == 0) return;
+ if (self.roundBufSize_ > 0) {
+ size_t size = sha2_local::min_(T::blockSize_ - self.roundBufSize_, bufSize);
+ memcpy(self.roundBuf_ + self.roundBufSize_, buf, size);
+ self.roundBufSize_ += size;
+ buf += size;
+ bufSize -= size;
+ }
+ if (self.roundBufSize_ == T::blockSize_) {
+ self.round(self.roundBuf_);
+ self.roundBufSize_ = 0;
+ }
+ while (bufSize >= T::blockSize_) {
+ assert(self.roundBufSize_ == 0);
+ self.round(buf);
+ buf += T::blockSize_;
+ bufSize -= T::blockSize_;
+ }
+ if (bufSize > 0) {
+ assert(bufSize < T::blockSize_);
+ assert(self.roundBufSize_ == 0);
+ memcpy(self.roundBuf_, buf, bufSize);
+ self.roundBufSize_ = bufSize;
+ }
+ assert(self.roundBufSize_ < T::blockSize_);
+ }
+};
+
+} // cybozu::sha2_local
+
+class Sha256 : public sha2_local::Common<Sha256> {
+ friend struct sha2_local::Common<Sha256>;
+private:
+ static const size_t blockSize_ = 64;
+ static const size_t hSize_ = 8;
+ static const size_t msgLenByte_ = 8;
+ uint64_t totalSize_;
+ size_t roundBufSize_;
+ char roundBuf_[blockSize_];
+ uint32_t h_[hSize_];
+ static const size_t outByteSize_ = hSize_ * sizeof(uint32_t);
+ const uint32_t *k_;
+
+ /**
+ @param buf [in] buffer(64byte)
+ */
+ void round(const char *buf)
+ {
+ using namespace sha2_local;
+ uint32_t w[64];
+ for (int i = 0; i < 16; i++) {
+ w[i] = cybozu::Get32bitAsBE(&buf[i * 4]);
+ }
+ for (int i = 16 ; i < 64; i++) {
+ uint32_t t = w[i - 15];
+ uint32_t s0 = rot32(t, 7) ^ rot32(t, 18) ^ (t >> 3);
+ t = w[i - 2];
+ uint32_t s1 = rot32(t, 17) ^ rot32(t, 19) ^ (t >> 10);
+ w[i] = w[i - 16] + s0 + w[i - 7] + s1;
+ }
+ uint32_t a = h_[0];
+ uint32_t b = h_[1];
+ uint32_t c = h_[2];
+ uint32_t d = h_[3];
+ uint32_t e = h_[4];
+ uint32_t f = h_[5];
+ uint32_t g = h_[6];
+ uint32_t h = h_[7];
+ for (int i = 0; i < 64; i++) {
+ uint32_t s1 = rot32(e, 6) ^ rot32(e, 11) ^ rot32(e, 25);
+ uint32_t ch = g ^ (e & (f ^ g));
+ uint32_t t1 = h + s1 + ch + k_[i] + w[i];
+ uint32_t s0 = rot32(a, 2) ^ rot32(a, 13) ^ rot32(a, 22);
+ uint32_t maj = ((a | b) & c) | (a & b);
+ uint32_t t2 = s0 + maj;
+ h = g;
+ g = f;
+ f = e;
+ e = d + t1;
+ d = c;
+ c = b;
+ b = a;
+ a = t1 + t2;
+ }
+ h_[0] += a;
+ h_[1] += b;
+ h_[2] += c;
+ h_[3] += d;
+ h_[4] += e;
+ h_[5] += f;
+ h_[6] += g;
+ h_[7] += h;
+ totalSize_ += blockSize_;
+ }
+public:
+ Sha256()
+ {
+ clear();
+ }
+ void clear()
+ {
+ static const uint32_t kTbl[] = {
+ 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
+ 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
+ 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
+ 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
+ 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
+ 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
+ 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
+ 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
+ };
+ k_ = kTbl;
+ totalSize_ = 0;
+ roundBufSize_ = 0;
+ h_[0] = 0x6a09e667;
+ h_[1] = 0xbb67ae85;
+ h_[2] = 0x3c6ef372;
+ h_[3] = 0xa54ff53a;
+ h_[4] = 0x510e527f;
+ h_[5] = 0x9b05688c;
+ h_[6] = 0x1f83d9ab;
+ h_[7] = 0x5be0cd19;
+ }
+ void update(const void *buf, size_t bufSize)
+ {
+ inner_update(reinterpret_cast<const char*>(buf), bufSize);
+ }
+ size_t digest(void *md, size_t mdSize, const void *buf, size_t bufSize)
+ {
+ if (mdSize < outByteSize_) return 0;
+ update(buf, bufSize);
+ term(roundBuf_, roundBufSize_);
+ char *p = reinterpret_cast<char*>(md);
+ for (size_t i = 0; i < hSize_; i++) {
+ cybozu::Set32bitAsBE(&p[i * sizeof(h_[0])], h_[i]);
+ }
+ return outByteSize_;
+ }
+#ifndef CYBOZU_DONT_USE_STRING
+ void update(const std::string& buf)
+ {
+ update(buf.c_str(), buf.size());
+ }
+ std::string digest(const std::string& buf)
+ {
+ return digest(buf.c_str(), buf.size());
+ }
+ std::string digest(const void *buf, size_t bufSize)
+ {
+ std::string md(outByteSize_, 0);
+ digest(&md[0], md.size(), buf, bufSize);
+ return md;
+ }
+#endif
+};
+
+class Sha512 : public sha2_local::Common<Sha512> {
+ friend struct sha2_local::Common<Sha512>;
+private:
+ static const size_t blockSize_ = 128;
+ static const size_t hSize_ = 8;
+ static const size_t msgLenByte_ = 16;
+ uint64_t totalSize_;
+ size_t roundBufSize_;
+ char roundBuf_[blockSize_];
+ uint64_t h_[hSize_];
+ static const size_t outByteSize_ = hSize_ * sizeof(uint64_t);
+ const uint64_t *k_;
+
+ template<size_t i0, size_t i1, size_t i2, size_t i3, size_t i4, size_t i5, size_t i6, size_t i7>
+ void round1(uint64_t *S, const uint64_t *w, size_t i)
+ {
+ using namespace sha2_local;
+ uint64_t& a = S[i0];
+ uint64_t& b = S[i1];
+ uint64_t& c = S[i2];
+ uint64_t& d = S[i3];
+ uint64_t& e = S[i4];
+ uint64_t& f = S[i5];
+ uint64_t& g = S[i6];
+ uint64_t& h = S[i7];
+
+ uint64_t s1 = rot64(e, 14) ^ rot64(e, 18) ^ rot64(e, 41);
+ uint64_t ch = g ^ (e & (f ^ g));
+ uint64_t t0 = h + s1 + ch + k_[i] + w[i];
+ uint64_t s0 = rot64(a, 28) ^ rot64(a, 34) ^ rot64(a, 39);
+ uint64_t maj = ((a | b) & c) | (a & b);
+ uint64_t t1 = s0 + maj;
+ d += t0;
+ h = t0 + t1;
+ }
+ /**
+	@param buf [in] buffer(128byte)
+ */
+ void round(const char *buf)
+ {
+ using namespace sha2_local;
+ uint64_t w[80];
+ for (int i = 0; i < 16; i++) {
+ w[i] = cybozu::Get64bitAsBE(&buf[i * 8]);
+ }
+ for (int i = 16 ; i < 80; i++) {
+ uint64_t t = w[i - 15];
+ uint64_t s0 = rot64(t, 1) ^ rot64(t, 8) ^ (t >> 7);
+ t = w[i - 2];
+ uint64_t s1 = rot64(t, 19) ^ rot64(t, 61) ^ (t >> 6);
+ w[i] = w[i - 16] + s0 + w[i - 7] + s1;
+ }
+ uint64_t s[8];
+ for (int i = 0; i < 8; i++) {
+ s[i] = h_[i];
+ }
+ for (int i = 0; i < 80; i += 8) {
+ round1<0, 1, 2, 3, 4, 5, 6, 7>(s, w, i + 0);
+ round1<7, 0, 1, 2, 3, 4, 5, 6>(s, w, i + 1);
+ round1<6, 7, 0, 1, 2, 3, 4, 5>(s, w, i + 2);
+ round1<5, 6, 7, 0, 1, 2, 3, 4>(s, w, i + 3);
+ round1<4, 5, 6, 7, 0, 1, 2, 3>(s, w, i + 4);
+ round1<3, 4, 5, 6, 7, 0, 1, 2>(s, w, i + 5);
+ round1<2, 3, 4, 5, 6, 7, 0, 1>(s, w, i + 6);
+ round1<1, 2, 3, 4, 5, 6, 7, 0>(s, w, i + 7);
+ }
+ for (int i = 0; i < 8; i++) {
+ h_[i] += s[i];
+ }
+ totalSize_ += blockSize_;
+ }
+public:
+ Sha512()
+ {
+ clear();
+ }
+ void clear()
+ {
+ static const uint64_t kTbl[] = {
+ 0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL, 0xb5c0fbcfec4d3b2fULL, 0xe9b5dba58189dbbcULL, 0x3956c25bf348b538ULL,
+ 0x59f111f1b605d019ULL, 0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL, 0xd807aa98a3030242ULL, 0x12835b0145706fbeULL,
+ 0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL, 0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL, 0x9bdc06a725c71235ULL,
+ 0xc19bf174cf692694ULL, 0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL, 0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL,
+ 0x2de92c6f592b0275ULL, 0x4a7484aa6ea6e483ULL, 0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL, 0x983e5152ee66dfabULL,
+ 0xa831c66d2db43210ULL, 0xb00327c898fb213fULL, 0xbf597fc7beef0ee4ULL, 0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL,
+ 0x06ca6351e003826fULL, 0x142929670a0e6e70ULL, 0x27b70a8546d22ffcULL, 0x2e1b21385c26c926ULL, 0x4d2c6dfc5ac42aedULL,
+ 0x53380d139d95b3dfULL, 0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL, 0x81c2c92e47edaee6ULL, 0x92722c851482353bULL,
+ 0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL, 0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL, 0xd192e819d6ef5218ULL,
+ 0xd69906245565a910ULL, 0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL, 0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL,
+ 0x2748774cdf8eeb99ULL, 0x34b0bcb5e19b48a8ULL, 0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL, 0x5b9cca4f7763e373ULL,
+ 0x682e6ff3d6b2b8a3ULL, 0x748f82ee5defb2fcULL, 0x78a5636f43172f60ULL, 0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL,
+ 0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL, 0xbef9a3f7b2c67915ULL, 0xc67178f2e372532bULL, 0xca273eceea26619cULL,
+ 0xd186b8c721c0c207ULL, 0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL, 0x06f067aa72176fbaULL, 0x0a637dc5a2c898a6ULL,
+ 0x113f9804bef90daeULL, 0x1b710b35131c471bULL, 0x28db77f523047d84ULL, 0x32caab7b40c72493ULL, 0x3c9ebe0a15c9bebcULL,
+ 0x431d67c49c100d4cULL, 0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL, 0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL
+ };
+ k_ = kTbl;
+ totalSize_ = 0;
+ roundBufSize_ = 0;
+ h_[0] = 0x6a09e667f3bcc908ull;
+ h_[1] = 0xbb67ae8584caa73bull;
+ h_[2] = 0x3c6ef372fe94f82bull;
+ h_[3] = 0xa54ff53a5f1d36f1ull;
+ h_[4] = 0x510e527fade682d1ull;
+ h_[5] = 0x9b05688c2b3e6c1full;
+ h_[6] = 0x1f83d9abfb41bd6bull;
+ h_[7] = 0x5be0cd19137e2179ull;
+ }
+ void update(const void *buf, size_t bufSize)
+ {
+ inner_update(reinterpret_cast<const char*>(buf), bufSize);
+ }
+ size_t digest(void *md, size_t mdSize, const void *buf, size_t bufSize)
+ {
+ if (mdSize < outByteSize_) return 0;
+ update(buf, bufSize);
+ term(roundBuf_, roundBufSize_);
+ char *p = reinterpret_cast<char*>(md);
+ for (size_t i = 0; i < hSize_; i++) {
+ cybozu::Set64bitAsBE(&p[i * sizeof(h_[0])], h_[i]);
+ }
+ return outByteSize_;
+ }
+#ifndef CYBOZU_DONT_USE_STRING
+ void update(const std::string& buf)
+ {
+ update(buf.c_str(), buf.size());
+ }
+ std::string digest(const std::string& buf)
+ {
+ return digest(buf.c_str(), buf.size());
+ }
+ std::string digest(const void *buf, size_t bufSize)
+ {
+ std::string md(outByteSize_, 0);
+ digest(&md[0], md.size(), buf, bufSize);
+ return md;
+ }
+#endif
+};
+
+} // cybozu
+
+#endif
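+// Editor's usage sketch (not part of upstream mcl): the OpenSSL-backed and the portable
+// implementations above expose the same interface (the std::string helpers exist only when
+// CYBOZU_DONT_USE_STRING is not defined):
+//   cybozu::Sha256 h;
+//   std::string md = h.digest("abc");      // 32-byte binary digest of "abc"
+//   h.clear();
+//   h.update("a"); h.update("bc");
+//   unsigned char buf[32];
+//   h.digest(buf, sizeof(buf), "", 0);     // streaming form: finalize, same 32 bytes as above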
diff --git a/vendor/github.com/tangerine-network/mcl/include/cybozu/stream.hpp b/vendor/github.com/tangerine-network/mcl/include/cybozu/stream.hpp
new file mode 100644
index 000000000..bc110bdb0
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/cybozu/stream.hpp
@@ -0,0 +1,267 @@
+#pragma once
+/**
+ @file
+ @brief stream and line stream class
+
+ @author MITSUNARI Shigeo(@herumi)
+*/
+#ifndef CYBOZU_DONT_USE_STRING
+#include <string>
+#include <iosfwd>
+#endif
+#include <cybozu/exception.hpp>
+#include <memory.h>
+
+namespace cybozu {
+
+namespace stream_local {
+
+template <typename From, typename To>
+struct is_convertible {
+ typedef char yes;
+ typedef int no;
+
+ static no test(...);
+ static yes test(const To*);
+ static const bool value = sizeof(test(static_cast<const From*>(0))) == sizeof(yes);
+};
+
+template <bool b, class T = void>
+struct enable_if { typedef T type; };
+
+template <class T>
+struct enable_if<false, T> {};
+
+#ifndef CYBOZU_DONT_USE_STRING
+/* specialization for istream */
+template<class InputStream>
+size_t readSome_inner(void *buf, size_t size, InputStream& is, typename enable_if<is_convertible<InputStream, std::istream>::value>::type* = 0)
+{
+ if (size > 0x7fffffff) size = 0x7fffffff;
+ is.read(static_cast<char *>(buf), size);
+ const int64_t readSize = is.gcount();
+ if (readSize < 0) return 0;
+ if (size == 1 && readSize == 0) is.clear();
+ return static_cast<size_t>(readSize);
+}
+
+/* generic version for size_t readSome(void *, size_t) */
+template<class InputStream>
+size_t readSome_inner(void *buf, size_t size, InputStream& is, typename enable_if<!is_convertible<InputStream, std::istream>::value>::type* = 0)
+{
+ return is.readSome(buf, size);
+}
+#else
+template<class InputStream>
+size_t readSome_inner(void *buf, size_t size, InputStream& is)
+{
+ return is.readSome(buf, size);
+}
+#endif
+
+#ifndef CYBOZU_DONT_USE_EXCEPTION
+/* specialization for ostream */
+template<class OutputStream>
+void writeSub(OutputStream& os, const void *buf, size_t size, typename enable_if<is_convertible<OutputStream, std::ostream>::value>::type* = 0)
+{
+ if (!os.write(static_cast<const char *>(buf), size)) throw cybozu::Exception("stream:writeSub") << size;
+}
+#endif
+
+#ifndef CYBOZU_DONT_USE_STRING
+/* generic version for void write(const void*, size_t), which writes all data */
+template<class OutputStream>
+void writeSub(OutputStream& os, const void *buf, size_t size, typename enable_if<!is_convertible<OutputStream, std::ostream>::value>::type* = 0)
+{
+ os.write(buf, size);
+}
+
+template<class OutputStream>
+void writeSub(bool *pb, OutputStream& os, const void *buf, size_t size, typename enable_if<is_convertible<OutputStream, std::ostream>::value>::type* = 0)
+{
+ *pb = !!os.write(static_cast<const char *>(buf), size);
+}
+
+/* generic version for void write(const void*, size_t), which writes all data */
+template<class OutputStream>
+void writeSub(bool *pb, OutputStream& os, const void *buf, size_t size, typename enable_if<!is_convertible<OutputStream, std::ostream>::value>::type* = 0)
+{
+ os.write(pb, buf, size);
+}
+#else
+template<class OutputStream>
+void writeSub(bool *pb, OutputStream& os, const void *buf, size_t size)
+{
+ os.write(pb, buf, size);
+}
+#endif
+
+} // stream_local
+
+/*
+	make a specialization of this class to use a new InputStream/OutputStream
+*/
+template<class InputStream>
+struct InputStreamTag {
+ static size_t readSome(void *buf, size_t size, InputStream& is)
+ {
+ return stream_local::readSome_inner<InputStream>(buf, size, is);
+ }
+ static bool readChar(char *c, InputStream& is)
+ {
+ return readSome(c, 1, is) == 1;
+ }
+};
+
+template<class OutputStream>
+struct OutputStreamTag {
+ static void write(OutputStream& os, const void *buf, size_t size)
+ {
+ stream_local::writeSub<OutputStream>(os, buf, size);
+ }
+};
+
+class MemoryInputStream {
+ const char *p_;
+ size_t size_;
+ size_t pos;
+public:
+ MemoryInputStream(const void *p, size_t size) : p_(static_cast<const char *>(p)), size_(size), pos(0) {}
+ size_t readSome(void *buf, size_t size)
+ {
+ if (size > size_ - pos) size = size_ - pos;
+ memcpy(buf, p_ + pos, size);
+ pos += size;
+ return size;
+ }
+ size_t getPos() const { return pos; }
+};
+
+class MemoryOutputStream {
+ char *p_;
+ size_t size_;
+ size_t pos;
+public:
+ MemoryOutputStream(void *p, size_t size) : p_(static_cast<char *>(p)), size_(size), pos(0) {}
+ void write(bool *pb, const void *buf, size_t size)
+ {
+ if (size > size_ - pos) {
+ *pb = false;
+ return;
+ }
+ memcpy(p_ + pos, buf, size);
+ pos += size;
+ *pb = true;
+ }
+#ifndef CYBOZU_DONT_USE_EXCEPTION
+ void write(const void *buf, size_t size)
+ {
+ bool b;
+ write(&b, buf, size);
+ if (!b) throw cybozu::Exception("MemoryOutputStream:write") << size << size_ << pos;
+ }
+#endif
+ size_t getPos() const { return pos; }
+};
+
+#ifndef CYBOZU_DONT_USE_STRING
+class StringInputStream {
+ const std::string& str_;
+ size_t pos;
+ StringInputStream(const StringInputStream&);
+ void operator=(const StringInputStream&);
+public:
+ explicit StringInputStream(const std::string& str) : str_(str), pos(0) {}
+ size_t readSome(void *buf, size_t size)
+ {
+ const size_t remainSize = str_.size() - pos;
+ if (size > remainSize) size = remainSize;
+ memcpy(buf, &str_[pos], size);
+ pos += size;
+ return size;
+ }
+ size_t getPos() const { return pos; }
+};
+
+class StringOutputStream {
+ std::string& str_;
+ StringOutputStream(const StringOutputStream&);
+ void operator=(const StringOutputStream&);
+public:
+ explicit StringOutputStream(std::string& str) : str_(str) {}
+ void write(bool *pb, const void *buf, size_t size)
+ {
+ str_.append(static_cast<const char *>(buf), size);
+ *pb = true;
+ }
+ void write(const void *buf, size_t size)
+ {
+ str_.append(static_cast<const char *>(buf), size);
+ }
+ size_t getPos() const { return str_.size(); }
+};
+#endif
+
+template<class InputStream>
+size_t readSome(void *buf, size_t size, InputStream& is)
+{
+ return stream_local::readSome_inner(buf, size, is);
+}
+
+template<class OutputStream>
+void write(OutputStream& os, const void *buf, size_t size)
+{
+ stream_local::writeSub(os, buf, size);
+}
+
+template<class OutputStream>
+void write(bool *pb, OutputStream& os, const void *buf, size_t size)
+{
+ stream_local::writeSub(pb, os, buf, size);
+}
+
+template<typename InputStream>
+void read(bool *pb, void *buf, size_t size, InputStream& is)
+{
+ char *p = static_cast<char*>(buf);
+ while (size > 0) {
+ size_t readSize = cybozu::readSome(p, size, is);
+ if (readSize == 0) {
+ *pb = false;
+ return;
+ }
+ p += readSize;
+ size -= readSize;
+ }
+ *pb = true;
+}
+
+#ifndef CYBOZU_DONT_USE_EXCEPTION
+template<typename InputStream>
+void read(void *buf, size_t size, InputStream& is)
+{
+ bool b;
+ read(&b, buf, size, is);
+ if (!b) throw cybozu::Exception("stream:read");
+}
+#endif
+
+template<class InputStream>
+bool readChar(char *c, InputStream& is)
+{
+ return readSome(c, 1, is) == 1;
+}
+
+template<class OutputStream>
+void writeChar(OutputStream& os, char c)
+{
+ cybozu::write(os, &c, 1);
+}
+
+template<class OutputStream>
+void writeChar(bool *pb, OutputStream& os, char c)
+{
+ cybozu::write(pb, os, &c, 1);
+}
+
+} // cybozu
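+// Editor's usage sketch (not part of upstream mcl): the Memory/String streams above
+// satisfy the readSome/write interface used by the free functions:
+//   std::string s;
+//   cybozu::StringOutputStream os(s);
+//   cybozu::write(os, "abc", 3);
+//   cybozu::StringInputStream is(s);
+//   char buf[3];
+//   cybozu::read(buf, 3, is);             // throws cybozu::Exception on short read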
diff --git a/vendor/github.com/tangerine-network/mcl/include/cybozu/test.hpp b/vendor/github.com/tangerine-network/mcl/include/cybozu/test.hpp
new file mode 100644
index 000000000..7dfffab96
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/cybozu/test.hpp
@@ -0,0 +1,373 @@
+#pragma once
+/**
+ @file
+ @brief unit test class
+
+ @author MITSUNARI Shigeo(@herumi)
+*/
+
+#include <stdio.h>
+#include <string.h>
+#include <string>
+#include <list>
+#include <iostream>
+#include <utility>
+#if defined(_MSC_VER) && (_MSC_VER <= 1500)
+ #include <cybozu/inttype.hpp>
+#else
+ #include <stdint.h>
+#endif
+
+namespace cybozu { namespace test {
+
+class AutoRun {
+ typedef void (*Func)();
+ typedef std::list<std::pair<const char*, Func> > UnitTestList;
+public:
+ AutoRun()
+ : init_(0)
+ , term_(0)
+ , okCount_(0)
+ , ngCount_(0)
+ , exceptionCount_(0)
+ {
+ }
+ void setup(Func init, Func term)
+ {
+ init_ = init;
+ term_ = term;
+ }
+ void append(const char *name, Func func)
+ {
+ list_.push_back(std::make_pair(name, func));
+ }
+ void set(bool isOK)
+ {
+ if (isOK) {
+ okCount_++;
+ } else {
+ ngCount_++;
+ }
+ }
+ std::string getBaseName(const std::string& name) const
+ {
+#ifdef _WIN32
+ const char sep = '\\';
+#else
+ const char sep = '/';
+#endif
+ size_t pos = name.find_last_of(sep);
+ std::string ret = name.substr(pos + 1);
+ pos = ret.find('.');
+ return ret.substr(0, pos);
+ }
+ int run(int, char *argv[])
+ {
+ std::string msg;
+ try {
+ if (init_) init_();
+ for (UnitTestList::const_iterator i = list_.begin(), ie = list_.end(); i != ie; ++i) {
+ std::cout << "ctest:module=" << i->first << std::endl;
+ try {
+ (i->second)();
+ } catch (std::exception& e) {
+ exceptionCount_++;
+ std::cout << "ctest: " << i->first << " is stopped by exception " << e.what() << std::endl;
+ } catch (...) {
+ exceptionCount_++;
+ std::cout << "ctest: " << i->first << " is stopped by unknown exception" << std::endl;
+ }
+ }
+ if (term_) term_();
+ } catch (std::exception& e) {
+ msg = std::string("ctest:err:") + e.what();
+ } catch (...) {
+ msg = "ctest:err: catch unknown exception";
+ }
+ fflush(stdout);
+ if (msg.empty()) {
+ int err = ngCount_ + exceptionCount_;
+ int total = okCount_ + err;
+ std::cout << "ctest:name=" << getBaseName(*argv)
+ << ", module=" << list_.size()
+ << ", total=" << total
+ << ", ok=" << okCount_
+ << ", ng=" << ngCount_
+ << ", exception=" << exceptionCount_ << std::endl;
+ return err > 0 ? 1 : 0;
+ } else {
+ std::cout << msg << std::endl;
+ return 1;
+ }
+ }
+ static inline AutoRun& getInstance()
+ {
+ static AutoRun instance;
+ return instance;
+ }
+private:
+ Func init_;
+ Func term_;
+ int okCount_;
+ int ngCount_;
+ int exceptionCount_;
+ UnitTestList list_;
+};
+
+static AutoRun& autoRun = AutoRun::getInstance();
+
+inline void test(bool ret, const std::string& msg, const std::string& param, const char *file, int line)
+{
+ autoRun.set(ret);
+ if (!ret) {
+ printf("%s(%d):ctest:%s(%s);\n", file, line, msg.c_str(), param.c_str());
+ }
+}
+
+template<typename T, typename U>
+bool isEqual(const T& lhs, const U& rhs)
+{
+ return lhs == rhs;
+}
+
+// avoid warning of comparison of integers of different signs
+inline bool isEqual(size_t lhs, int rhs)
+{
+ return lhs == size_t(rhs);
+}
+inline bool isEqual(int lhs, size_t rhs)
+{
+ return size_t(lhs) == rhs;
+}
+inline bool isEqual(const char *lhs, const char *rhs)
+{
+ return strcmp(lhs, rhs) == 0;
+}
+inline bool isEqual(char *lhs, const char *rhs)
+{
+ return strcmp(lhs, rhs) == 0;
+}
+inline bool isEqual(const char *lhs, char *rhs)
+{
+ return strcmp(lhs, rhs) == 0;
+}
+inline bool isEqual(char *lhs, char *rhs)
+{
+ return strcmp(lhs, rhs) == 0;
+}
+// avoid comparing float directly
+inline bool isEqual(float lhs, float rhs)
+{
+ union fi {
+ float f;
+ uint32_t i;
+ } lfi, rfi;
+ lfi.f = lhs;
+ rfi.f = rhs;
+ return lfi.i == rfi.i;
+}
+// avoid comparing double directly
+inline bool isEqual(double lhs, double rhs)
+{
+ union di {
+ double d;
+ uint64_t i;
+ } ldi, rdi;
+ ldi.d = lhs;
+ rdi.d = rhs;
+ return ldi.i == rdi.i;
+}
+
+} } // cybozu::test
+
+#ifndef CYBOZU_TEST_DISABLE_AUTO_RUN
+int main(int argc, char *argv[])
+{
+ return cybozu::test::autoRun.run(argc, argv);
+}
+#endif
+
+/**
+ alert if !x
+ @param x [in]
+*/
+#define CYBOZU_TEST_ASSERT(x) cybozu::test::test(!!(x), "CYBOZU_TEST_ASSERT", #x, __FILE__, __LINE__)
+
+/**
+ alert if x != y
+ @param x [in]
+ @param y [in]
+*/
+#define CYBOZU_TEST_EQUAL(x, y) { \
+ bool _cybozu_eq = cybozu::test::isEqual(x, y); \
+ cybozu::test::test(_cybozu_eq, "CYBOZU_TEST_EQUAL", #x ", " #y, __FILE__, __LINE__); \
+ if (!_cybozu_eq) { \
+ std::cout << "ctest: lhs=" << (x) << std::endl; \
+ std::cout << "ctest: rhs=" << (y) << std::endl; \
+ } \
+}
+/**
+	alert if fabs((x) - (y)) >= eps
+ @param x [in]
+ @param y [in]
+*/
+#define CYBOZU_TEST_NEAR(x, y, eps) { \
+ bool _cybozu_isNear = fabs((x) - (y)) < eps; \
+ cybozu::test::test(_cybozu_isNear, "CYBOZU_TEST_NEAR", #x ", " #y, __FILE__, __LINE__); \
+ if (!_cybozu_isNear) { \
+ std::cout << "ctest: lhs=" << (x) << std::endl; \
+ std::cout << "ctest: rhs=" << (y) << std::endl; \
+ } \
+}
+
+#define CYBOZU_TEST_EQUAL_POINTER(x, y) { \
+ bool _cybozu_eq = x == y; \
+ cybozu::test::test(_cybozu_eq, "CYBOZU_TEST_EQUAL_POINTER", #x ", " #y, __FILE__, __LINE__); \
+ if (!_cybozu_eq) { \
+ std::cout << "ctest: lhs=" << static_cast<const void*>(x) << std::endl; \
+ std::cout << "ctest: rhs=" << static_cast<const void*>(y) << std::endl; \
+ } \
+}
+/**
+ alert if x[] != y[]
+ @param x [in]
+ @param y [in]
+ @param n [in]
+*/
+#define CYBOZU_TEST_EQUAL_ARRAY(x, y, n) { \
+ for (size_t _cybozu_test_i = 0, _cybozu_ie = (size_t)(n); _cybozu_test_i < _cybozu_ie; _cybozu_test_i++) { \
+ bool _cybozu_eq = cybozu::test::isEqual((x)[_cybozu_test_i], (y)[_cybozu_test_i]); \
+ cybozu::test::test(_cybozu_eq, "CYBOZU_TEST_EQUAL_ARRAY", #x ", " #y ", " #n, __FILE__, __LINE__); \
+ if (!_cybozu_eq) { \
+ std::cout << "ctest: i=" << _cybozu_test_i << std::endl; \
+ std::cout << "ctest: lhs=" << (x)[_cybozu_test_i] << std::endl; \
+ std::cout << "ctest: rhs=" << (y)[_cybozu_test_i] << std::endl; \
+ } \
+ } \
+}
+
+/**
+ always alert
+ @param msg [in]
+*/
+#define CYBOZU_TEST_FAIL(msg) cybozu::test::test(false, "CYBOZU_TEST_FAIL", msg, __FILE__, __LINE__)
+
+/**
+ verify message in exception
+*/
+#define CYBOZU_TEST_EXCEPTION_MESSAGE(statement, Exception, msg) \
+{ \
+ int _cybozu_ret = 0; \
+ std::string _cybozu_errMsg; \
+ try { \
+ statement; \
+ _cybozu_ret = 1; \
+ } catch (const Exception& _cybozu_e) { \
+ _cybozu_errMsg = _cybozu_e.what(); \
+ if (_cybozu_errMsg.find(msg) == std::string::npos) { \
+ _cybozu_ret = 2; \
+ } \
+ } catch (...) { \
+ _cybozu_ret = 3; \
+ } \
+ if (_cybozu_ret) { \
+ cybozu::test::test(false, "CYBOZU_TEST_EXCEPTION_MESSAGE", #statement ", " #Exception ", " #msg, __FILE__, __LINE__); \
+ if (_cybozu_ret == 1) { \
+ std::cout << "ctest: no exception" << std::endl; \
+ } else if (_cybozu_ret == 2) { \
+ std::cout << "ctest: bad exception msg:" << _cybozu_errMsg << std::endl; \
+ } else { \
+ std::cout << "ctest: unexpected exception" << std::endl; \
+ } \
+ } else { \
+ cybozu::test::autoRun.set(true); \
+ } \
+}
+
+#define CYBOZU_TEST_EXCEPTION(statement, Exception) \
+{ \
+ int _cybozu_ret = 0; \
+ try { \
+ statement; \
+ _cybozu_ret = 1; \
+ } catch (const Exception&) { \
+ } catch (...) { \
+ _cybozu_ret = 2; \
+ } \
+ if (_cybozu_ret) { \
+ cybozu::test::test(false, "CYBOZU_TEST_EXCEPTION", #statement ", " #Exception, __FILE__, __LINE__); \
+ if (_cybozu_ret == 1) { \
+ std::cout << "ctest: no exception" << std::endl; \
+ } else { \
+ std::cout << "ctest: unexpected exception" << std::endl; \
+ } \
+ } else { \
+ cybozu::test::autoRun.set(true); \
+ } \
+}
+
+/**
+ verify statement does not throw
+*/
+#define CYBOZU_TEST_NO_EXCEPTION(statement) \
+try { \
+ statement; \
+ cybozu::test::autoRun.set(true); \
+} catch (...) { \
+ cybozu::test::test(false, "CYBOZU_TEST_NO_EXCEPTION", #statement, __FILE__, __LINE__); \
+}
+
+/**
+ append auto unit test
+ @param name [in] module name
+*/
+#define CYBOZU_TEST_AUTO(name) \
+void cybozu_test_ ## name(); \
+struct cybozu_test_local_ ## name { \
+ cybozu_test_local_ ## name() \
+ { \
+ cybozu::test::autoRun.append(#name, cybozu_test_ ## name); \
+ } \
+} cybozu_test_local_instance_ ## name; \
+void cybozu_test_ ## name()
+
+/**
+ append auto unit test with fixture
+ @param name [in] module name
+*/
+#define CYBOZU_TEST_AUTO_WITH_FIXTURE(name, Fixture) \
+void cybozu_test_ ## name(); \
+void cybozu_test_real_ ## name() \
+{ \
+ Fixture f; \
+ cybozu_test_ ## name(); \
+} \
+struct cybozu_test_local_ ## name { \
+ cybozu_test_local_ ## name() \
+ { \
+ cybozu::test::autoRun.append(#name, cybozu_test_real_ ## name); \
+ } \
+} cybozu_test_local_instance_ ## name; \
+void cybozu_test_ ## name()
+
+/**
+ setup fixture
+ @param Fixture [in] class name of fixture
+ @note cstr of Fixture is called before test and dstr of Fixture is called after test
+*/
+#define CYBOZU_TEST_SETUP_FIXTURE(Fixture) \
+Fixture *cybozu_test_local_fixture; \
+void cybozu_test_local_init() \
+{ \
+ cybozu_test_local_fixture = new Fixture(); \
+} \
+void cybozu_test_local_term() \
+{ \
+ delete cybozu_test_local_fixture; \
+} \
+struct cybozu_test_local_fixture_setup_ { \
+ cybozu_test_local_fixture_setup_() \
+ { \
+ cybozu::test::autoRun.setup(cybozu_test_local_init, cybozu_test_local_term); \
+ } \
+} cybozu_test_local_fixture_setup_instance_;
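+/*
+	Editor's usage sketch (not part of upstream mcl): a minimal test file built on this header.
+	#include <cybozu/test.hpp>
+	CYBOZU_TEST_AUTO(add)
+	{
+		CYBOZU_TEST_EQUAL(1 + 2, 3);
+		CYBOZU_TEST_ASSERT(1 < 2);
+	}
+	main() is provided above unless CYBOZU_TEST_DISABLE_AUTO_RUN is defined; it prints a
+	ctest-style summary and returns non-zero if any check failed.
+*/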
diff --git a/vendor/github.com/tangerine-network/mcl/include/cybozu/unordered_map.hpp b/vendor/github.com/tangerine-network/mcl/include/cybozu/unordered_map.hpp
new file mode 100644
index 000000000..89f8f8774
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/cybozu/unordered_map.hpp
@@ -0,0 +1,13 @@
+#pragma once
+
+#include <cybozu/inttype.hpp>
+
+#ifdef CYBOZU_USE_BOOST
+ #include <boost/unordered_map.hpp>
+#elif (CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11) || (defined __APPLE__)
+ #include <unordered_map>
+#elif (CYBOZU_CPP_VERSION == CYBOZU_CPP_VERSION_TR1)
+ #include <list>
+ #include <tr1/unordered_map>
+#endif
+
diff --git a/vendor/github.com/tangerine-network/mcl/include/cybozu/xorshift.hpp b/vendor/github.com/tangerine-network/mcl/include/cybozu/xorshift.hpp
new file mode 100644
index 000000000..08c6a04f9
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/cybozu/xorshift.hpp
@@ -0,0 +1,189 @@
+#pragma once
+/**
+ @file
+ @brief XorShift
+
+ @author MITSUNARI Shigeo(@herumi)
+*/
+#include <cybozu/inttype.hpp>
+#include <assert.h>
+
+namespace cybozu {
+
+namespace xorshift_local {
+
+/*
+ U is uint32_t or uint64_t
+*/
+template<class U, class Gen>
+void read_local(void *p, size_t n, Gen& gen, U (Gen::*f)())
+{
+ uint8_t *dst = static_cast<uint8_t*>(p);
+ const size_t uSize = sizeof(U);
+ assert(uSize == 4 || uSize == 8);
+ union ua {
+ U u;
+ uint8_t a[uSize];
+ };
+
+ while (n >= uSize) {
+ ua ua;
+ ua.u = (gen.*f)();
+ for (size_t i = 0; i < uSize; i++) {
+ dst[i] = ua.a[i];
+ }
+ dst += uSize;
+ n -= uSize;
+ }
+ assert(n < uSize);
+ if (n > 0) {
+ ua ua;
+ ua.u = (gen.*f)();
+ for (size_t i = 0; i < n; i++) {
+ dst[i] = ua.a[i];
+ }
+ }
+}
+
+} // xorshift_local
+
+class XorShift {
+ uint32_t x_, y_, z_, w_;
+public:
+ explicit XorShift(uint32_t x = 0, uint32_t y = 0, uint32_t z = 0, uint32_t w = 0)
+ {
+ init(x, y, z, w);
+ }
+ void init(uint32_t x = 0, uint32_t y = 0, uint32_t z = 0, uint32_t w = 0)
+ {
+ x_ = x ? x : 123456789;
+ y_ = y ? y : 362436069;
+ z_ = z ? z : 521288629;
+ w_ = w ? w : 88675123;
+ }
+ uint32_t get32()
+ {
+ unsigned int t = x_ ^ (x_ << 11);
+ x_ = y_; y_ = z_; z_ = w_;
+ return w_ = (w_ ^ (w_ >> 19)) ^ (t ^ (t >> 8));
+ }
+ uint32_t operator()() { return get32(); }
+ uint64_t get64()
+ {
+ uint32_t a = get32();
+ uint32_t b = get32();
+ return (uint64_t(a) << 32) | b;
+ }
+ template<class T>
+ void read(bool *pb, T *p, size_t n)
+ {
+ xorshift_local::read_local(p, n * sizeof(T), *this, &XorShift::get32);
+ *pb = true;
+ }
+ template<class T>
+ size_t read(T *p, size_t n)
+ {
+ bool b;
+ read(&b, p, n);
+ (void)b;
+ return n;
+ }
+};
+
+// see http://xorshift.di.unimi.it/xorshift128plus.c
+class XorShift128Plus {
+ uint64_t s_[2];
+ static const uint64_t seed0 = 123456789;
+ static const uint64_t seed1 = 987654321;
+public:
+ explicit XorShift128Plus(uint64_t s0 = seed0, uint64_t s1 = seed1)
+ {
+ init(s0, s1);
+ }
+ void init(uint64_t s0 = seed0, uint64_t s1 = seed1)
+ {
+ s_[0] = s0;
+ s_[1] = s1;
+ }
+ uint32_t get32()
+ {
+ return static_cast<uint32_t>(get64());
+ }
+ uint64_t operator()() { return get64(); }
+ uint64_t get64()
+ {
+ uint64_t s1 = s_[0];
+ const uint64_t s0 = s_[1];
+ s_[0] = s0;
+ s1 ^= s1 << 23;
+ s_[1] = s1 ^ s0 ^ (s1 >> 18) ^ (s0 >> 5);
+ return s_[1] + s0;
+ }
+ template<class T>
+ void read(bool *pb, T *p, size_t n)
+ {
+ xorshift_local::read_local(p, n * sizeof(T), *this, &XorShift128Plus::get64);
+ *pb = true;
+ }
+ template<class T>
+ size_t read(T *p, size_t n)
+ {
+ bool b;
+ read(&b, p, n);
+ (void)b;
+ return n;
+ }
+};
+
+// see http://xoroshiro.di.unimi.it/xoroshiro128plus.c
+class Xoroshiro128Plus {
+ uint64_t s_[2];
+ static const uint64_t seed0 = 123456789;
+ static const uint64_t seed1 = 987654321;
+ uint64_t rotl(uint64_t x, unsigned int k) const
+ {
+ return (x << k) | (x >> (64 - k));
+ }
+public:
+ explicit Xoroshiro128Plus(uint64_t s0 = seed0, uint64_t s1 = seed1)
+ {
+ init(s0, s1);
+ }
+ void init(uint64_t s0 = seed0, uint64_t s1 = seed1)
+ {
+ s_[0] = s0;
+ s_[1] = s1;
+ }
+ uint32_t get32()
+ {
+ return static_cast<uint32_t>(get64());
+ }
+ uint64_t operator()() { return get64(); }
+ uint64_t get64()
+ {
+ uint64_t s0 = s_[0];
+ uint64_t s1 = s_[1];
+ uint64_t result = s0 + s1;
+ s1 ^= s0;
+ s_[0] = rotl(s0, 55) ^ s1 ^ (s1 << 14);
+ s_[1] = rotl(s1, 36);
+ return result;
+ }
+ template<class T>
+ void read(bool *pb, T *p, size_t n)
+ {
+ xorshift_local::read_local(p, n * sizeof(T), *this, &Xoroshiro128Plus::get64);
+ *pb = true;
+ }
+ template<class T>
+ size_t read(T *p, size_t n)
+ {
+ bool b;
+ read(&b, p, n);
+ (void)b;
+ return n;
+ }
+};
+
+} // cybozu
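+// Editor's usage sketch (not part of upstream mcl): XorShift, XorShift128Plus and
+// Xoroshiro128Plus share the get32/get64/read interface; they are fast but NOT
+// cryptographically secure (use cybozu::RandomGenerator for that):
+//   cybozu::XorShift rg;                  // deterministic default seed
+//   uint64_t x = rg.get64();
+//   uint8_t buf[16];
+//   rg.read(buf, sizeof(buf));            // fill buf with pseudorandom bytes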
diff --git a/vendor/github.com/tangerine-network/mcl/include/mcl/aggregate_sig.hpp b/vendor/github.com/tangerine-network/mcl/include/mcl/aggregate_sig.hpp
new file mode 100644
index 000000000..f31405705
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/mcl/aggregate_sig.hpp
@@ -0,0 +1,265 @@
+#pragma once
+/**
+ @file
+ @brief aggregate signature
+ @author MITSUNARI Shigeo(@herumi)
+ see http://crypto.stanford.edu/~dabo/papers/aggreg.pdf
+ @license modified new BSD license
+ http://opensource.org/licenses/BSD-3-Clause
+*/
+#include <cmath>
+#include <vector>
+#include <iosfwd>
+#include <set>
+#ifndef MCLBN_FP_UNIT_SIZE
+ #define MCLBN_FP_UNIT_SIZE 4
+#endif
+#if MCLBN_FP_UNIT_SIZE == 4
+#include <mcl/bn256.hpp>
+namespace mcl {
+using namespace mcl::bn256;
+}
+#elif MCLBN_FP_UNIT_SIZE == 6
+#include <mcl/bn384.hpp>
+namespace mcl {
+using namespace mcl::bn384;
+}
+#elif MCLBN_FP_UNIT_SIZE == 8
+#include <mcl/bn512.hpp>
+namespace mcl {
+using namespace mcl::bn512;
+}
+#else
+ #error "MCLBN_FP_UNIT_SIZE must be 4, 6, or 8"
+#endif
+
+namespace mcl { namespace aggs {
+
+/*
+ AGGregate Signature Template class
+*/
+template<size_t dummyImpl = 0>
+struct AGGST {
+ typedef typename G1::BaseFp Fp;
+
+ class SecretKey;
+ class PublicKey;
+ class Signature;
+
+ static G1 P_;
+ static G2 Q_;
+ static std::vector<Fp6> Qcoeff_;
+public:
+ static void init(const mcl::CurveParam& cp = mcl::BN254)
+ {
+ initPairing(cp);
+ hashAndMapToG1(P_, "0");
+ hashAndMapToG2(Q_, "0");
+ precomputeG2(Qcoeff_, Q_);
+ }
+ class Signature : public fp::Serializable<Signature> {
+ G1 S_;
+ friend class SecretKey;
+ friend class PublicKey;
+ public:
+ template<class InputStream>
+ void load(InputStream& is, int ioMode = IoSerialize)
+ {
+ S_.load(is, ioMode);
+ }
+ template<class OutputStream>
+ void save(OutputStream& os, int ioMode = IoSerialize) const
+ {
+ S_.save(os, ioMode);
+ }
+ friend std::istream& operator>>(std::istream& is, Signature& self)
+ {
+ self.load(is, fp::detectIoMode(G1::getIoMode(), is));
+ return is;
+ }
+ friend std::ostream& operator<<(std::ostream& os, const Signature& self)
+ {
+ self.save(os, fp::detectIoMode(G1::getIoMode(), os));
+ return os;
+ }
+ bool operator==(const Signature& rhs) const
+ {
+ return S_ == rhs.S_;
+ }
+ bool operator!=(const Signature& rhs) const { return !operator==(rhs); }
+ /*
+ aggregate sig[0..n) and set *this
+ */
+ void aggregate(const Signature *sig, size_t n)
+ {
+ G1 S;
+ S.clear();
+ for (size_t i = 0; i < n; i++) {
+ S += sig[i].S_;
+ }
+ S_ = S;
+ }
+ void aggregate(const std::vector<Signature>& sig)
+ {
+ aggregate(sig.data(), sig.size());
+ }
+ /*
+ aggregate verification
+ */
+ bool verify(const void *const *msgVec, const size_t *sizeVec, const PublicKey *pubVec, size_t n) const
+ {
+ if (n == 0) return false;
+ typedef std::set<Fp> FpSet;
+ FpSet msgSet;
+ typedef std::vector<G1> G1Vec;
+ G1Vec hv(n);
+ for (size_t i = 0; i < n; i++) {
+ Fp h;
+ h.setHashOf(msgVec[i], sizeVec[i]);
+ std::pair<typename FpSet::iterator, bool> ret = msgSet.insert(h);
+ if (!ret.second) throw cybozu::Exception("aggs::verify:same msg");
+ mapToG1(hv[i], h);
+ }
+ /*
+			e(aggSig, Q) = prod_i e(hv[i], pub[i].xQ)
+			<=> finalExp(millerLoop(-aggSig, Q) * prod_i millerLoop(hv[i], pub[i].xQ)) == 1
+ */
+ GT e1, e2;
+ precomputedMillerLoop(e1, -S_, Qcoeff_);
+ millerLoop(e2, hv[0], pubVec[0].xQ_);
+ for (size_t i = 1; i < n; i++) {
+ GT e;
+ millerLoop(e, hv[i], pubVec[i].xQ_);
+ e2 *= e;
+ }
+ e1 *= e2;
+ finalExp(e1, e1);
+ return e1.isOne();
+ }
+ bool verify(const std::vector<std::string>& msgVec, const std::vector<PublicKey>& pubVec) const
+ {
+ const size_t n = msgVec.size();
+ if (n != pubVec.size()) throw cybozu::Exception("aggs:Signature:verify:bad size") << msgVec.size() << pubVec.size();
+ if (n == 0) return false;
+ std::vector<const void*> mv(n);
+ std::vector<size_t> sv(n);
+ for (size_t i = 0; i < n; i++) {
+ mv[i] = msgVec[i].c_str();
+ sv[i] = msgVec[i].size();
+ }
+ return verify(&mv[0], &sv[0], &pubVec[0], n);
+ }
+ };
+ class PublicKey : public fp::Serializable<PublicKey> {
+ G2 xQ_;
+ friend class SecretKey;
+ friend class Signature;
+ public:
+ template<class InputStream>
+ void load(InputStream& is, int ioMode = IoSerialize)
+ {
+ xQ_.load(is, ioMode);
+ }
+ template<class OutputStream>
+ void save(OutputStream& os, int ioMode = IoSerialize) const
+ {
+ xQ_.save(os, ioMode);
+ }
+ friend std::istream& operator>>(std::istream& is, PublicKey& self)
+ {
+ self.load(is, fp::detectIoMode(G2::getIoMode(), is));
+ return is;
+ }
+ friend std::ostream& operator<<(std::ostream& os, const PublicKey& self)
+ {
+ self.save(os, fp::detectIoMode(G2::getIoMode(), os));
+ return os;
+ }
+ bool operator==(const PublicKey& rhs) const
+ {
+ return xQ_ == rhs.xQ_;
+ }
+ bool operator!=(const PublicKey& rhs) const { return !operator==(rhs); }
+ bool verify(const Signature& sig, const void *m, size_t mSize) const
+ {
+ /*
+ H = hash(m)
+ e(S, Q) = e(H, xQ) where S = xH
+ <=> e(S, Q)e(-H, xQ) = 1
+			<=> finalExp(millerLoop(S, Q) * millerLoop(-H, xQ)) = 1
+ */
+ G1 H;
+ hashAndMapToG1(H, m, mSize);
+ G1::neg(H, H);
+ GT e1, e2;
+ precomputedMillerLoop(e1, sig.S_, Qcoeff_);
+ millerLoop(e2, H, xQ_);
+ e1 *= e2;
+ finalExp(e1, e1);
+ return e1.isOne();
+ }
+ bool verify(const Signature& sig, const std::string& m) const
+ {
+ return verify(sig, m.c_str(), m.size());
+ }
+ };
+ class SecretKey : public fp::Serializable<SecretKey> {
+ Fr x_;
+ friend class PublicKey;
+ friend class Signature;
+ public:
+ template<class InputStream>
+ void load(InputStream& is, int ioMode = IoSerialize)
+ {
+ x_.load(is, ioMode);
+ }
+ template<class OutputStream>
+ void save(OutputStream& os, int ioMode = IoSerialize) const
+ {
+ x_.save(os, ioMode);
+ }
+ friend std::istream& operator>>(std::istream& is, SecretKey& self)
+ {
+ self.load(is, fp::detectIoMode(Fr::getIoMode(), is));
+ return is;
+ }
+ friend std::ostream& operator<<(std::ostream& os, const SecretKey& self)
+ {
+ self.save(os, fp::detectIoMode(Fr::getIoMode(), os));
+ return os;
+ }
+ bool operator==(const SecretKey& rhs) const
+ {
+ return x_ == rhs.x_;
+ }
+ bool operator!=(const SecretKey& rhs) const { return !operator==(rhs); }
+ void init()
+ {
+ x_.setByCSPRNG();
+ }
+ void getPublicKey(PublicKey& pub) const
+ {
+ G2::mul(pub.xQ_, Q_, x_);
+ }
+ void sign(Signature& sig, const void *m, size_t mSize) const
+ {
+ hashAndMapToG1(sig.S_, m, mSize);
+ G1::mul(sig.S_, sig.S_, x_);
+ }
+ void sign(Signature& sig, const std::string& m) const
+ {
+ sign(sig, m.c_str(), m.size());
+ }
+ };
+};
+
+template<size_t dummyImpl> G1 AGGST<dummyImpl>::P_;
+template<size_t dummyImpl> G2 AGGST<dummyImpl>::Q_;
+template<size_t dummyImpl> std::vector<Fp6> AGGST<dummyImpl>::Qcoeff_;
+
+typedef AGGST<> AGGS;
+typedef AGGS::SecretKey SecretKey;
+typedef AGGS::PublicKey PublicKey;
+typedef AGGS::Signature Signature;
+
+} } // mcl::aggs
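+// Editor's usage sketch (not part of upstream mcl), using only the API defined above:
+//   mcl::aggs::AGGS::init();                       // default curve BN254
+//   mcl::aggs::SecretKey sec; sec.init();
+//   mcl::aggs::PublicKey pub; sec.getPublicKey(pub);
+//   mcl::aggs::Signature sig; sec.sign(sig, "msg");
+//   bool ok = pub.verify(sig, "msg");
+// Aggregation: collect one Signature per distinct message into sigVec, then
+//   mcl::aggs::Signature aggSig; aggSig.aggregate(sigVec);
+//   aggSig.verify(msgVec, pubVec);                 // throws if two messages are equal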
diff --git a/vendor/github.com/tangerine-network/mcl/include/mcl/ahe.hpp b/vendor/github.com/tangerine-network/mcl/include/mcl/ahe.hpp
new file mode 100644
index 000000000..239319d0d
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/mcl/ahe.hpp
@@ -0,0 +1,76 @@
+#pragma once
+/**
+ @file
+ @brief 192/256-bit additive homomorphic encryption by lifted-ElGamal
+ @author MITSUNARI Shigeo(@herumi)
+ @license modified new BSD license
+ http://opensource.org/licenses/BSD-3-Clause
+*/
+#include <mcl/elgamal.hpp>
+#include <mcl/ecparam.hpp>
+
+namespace mcl {
+
+#ifdef MCL_USE_AHE192
+namespace ahe192 {
+
+const mcl::EcParam& para = mcl::ecparam::NIST_P192;
+
+typedef mcl::FpT<mcl::FpTag, 192> Fp;
+typedef mcl::FpT<mcl::ZnTag, 192> Zn;
+typedef mcl::EcT<Fp> Ec;
+typedef mcl::ElgamalT<Ec, Zn> ElgamalEc;
+typedef ElgamalEc::PrivateKey SecretKey;
+typedef ElgamalEc::PublicKey PublicKey;
+typedef ElgamalEc::CipherText CipherText;
+
+static inline void initAhe()
+{
+ Fp::init(para.p);
+ Zn::init(para.n);
+ Ec::init(para.a, para.b);
+ Ec::setIoMode(16);
+ Zn::setIoMode(16);
+}
+
+static inline void initSecretKey(SecretKey& sec)
+{
+ const Ec P(Fp(para.gx), Fp(para.gy));
+ sec.init(P, Zn::getBitSize());
+}
+
+} //mcl::ahe192
+#endif
+
+#ifdef MCL_USE_AHE256
+namespace ahe256 {
+
+const mcl::EcParam& para = mcl::ecparam::NIST_P256;
+
+typedef mcl::FpT<mcl::FpTag, 256> Fp;
+typedef mcl::FpT<mcl::ZnTag, 256> Zn;
+typedef mcl::EcT<Fp> Ec;
+typedef mcl::ElgamalT<Ec, Zn> ElgamalEc;
+typedef ElgamalEc::PrivateKey SecretKey;
+typedef ElgamalEc::PublicKey PublicKey;
+typedef ElgamalEc::CipherText CipherText;
+
+static inline void initAhe()
+{
+ Fp::init(para.p);
+ Zn::init(para.n);
+ Ec::init(para.a, para.b);
+ Ec::setIoMode(16);
+ Zn::setIoMode(16);
+}
+
+static inline void initSecretKey(SecretKey& sec)
+{
+ const Ec P(Fp(para.gx), Fp(para.gy));
+ sec.init(P, Zn::getBitSize());
+}
+
+} //mcl::ahe256
+#endif
+
+} // mcl
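+/*
+	Editorial usage sketch (not part of the original header): enable one of
+	the namespaces with -DMCL_USE_AHE192 or -DMCL_USE_AHE256, then
+
+	mcl::ahe192::initAhe();            // set up Fp, Zn and the NIST P-192 curve
+	mcl::ahe192::SecretKey sec;
+	mcl::ahe192::initSecretKey(sec);   // base point P and a random private key
+
+	Encryption, decryption and the additive-homomorphic operations are
+	provided by the ElgamalT API in <mcl/elgamal.hpp>, which is outside
+	this hunk.
+*/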
diff --git a/vendor/github.com/tangerine-network/mcl/include/mcl/array.hpp b/vendor/github.com/tangerine-network/mcl/include/mcl/array.hpp
new file mode 100644
index 000000000..a6d2a8fa3
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/mcl/array.hpp
@@ -0,0 +1,167 @@
+#pragma once
+/**
+ @file
+ @brief tiny vector class
+ @author MITSUNARI Shigeo(@herumi)
+ @license modified new BSD license
+ http://opensource.org/licenses/BSD-3-Clause
+*/
+#include <stdlib.h>
+#include <stddef.h>
+#ifndef CYBOZU_DONT_USE_EXCEPTION
+#include <new>
+#endif
+
+namespace mcl {
+
+template<class T>
+class Array {
+ T *p_;
+ size_t n_;
+ template<class U>
+ void swap_(U& x, U& y) const
+ {
+ U t;
+ t = x;
+ x = y;
+ y = t;
+ }
+public:
+ Array() : p_(0), n_(0) {}
+ ~Array()
+ {
+ free(p_);
+ }
+#ifndef CYBOZU_DONT_USE_EXCEPTION
+ Array(const Array& rhs)
+ : p_(0)
+ , n_(0)
+ {
+ if (rhs.n_ == 0) return;
+ p_ = (T*)malloc(sizeof(T) * rhs.n_);
+ if (p_ == 0) throw std::bad_alloc();
+ n_ = rhs.n_;
+ for (size_t i = 0; i < n_; i++) {
+ p_[i] = rhs.p_[i];
+ }
+ }
+ Array& operator=(const Array& rhs)
+ {
+ Array tmp(rhs);
+ tmp.swap(*this);
+ return *this;
+ }
+#endif
+ bool resize(size_t n)
+ {
+ if (n <= n_) {
+ n_ = n;
+ if (n == 0) {
+ free(p_);
+ p_ = 0;
+ }
+ return true;
+ }
+ T *q = (T*)malloc(sizeof(T) * n);
+ if (q == 0) return false;
+ for (size_t i = 0; i < n_; i++) {
+ q[i] = p_[i];
+ }
+ free(p_);
+ p_ = q;
+ n_ = n;
+ return true;
+ }
+ bool copy(const Array<T>& rhs)
+ {
+ if (this == &rhs) return true;
+ if (n_ < rhs.n_) {
+ clear();
+ if (!resize(rhs.n_)) return false;
+ }
+ for (size_t i = 0; i < rhs.n_; i++) {
+ p_[i] = rhs.p_[i];
+ }
+ n_ = rhs.n_;
+ return true;
+ }
+ void clear()
+ {
+ free(p_);
+ p_ = 0;
+ n_ = 0;
+ }
+ size_t size() const { return n_; }
+ void swap(Array<T>& rhs)
+ {
+ swap_(p_, rhs.p_);
+ swap_(n_, rhs.n_);
+ }
+ T& operator[](size_t n) { return p_[n]; }
+ const T& operator[](size_t n) const { return p_[n]; }
+ T* data() { return p_; }
+ const T* data() const { return p_; }
+};
+
+template<class T, size_t maxSize>
+class FixedArray {
+ T p_[maxSize];
+ size_t n_;
+ FixedArray(const FixedArray&);
+ void operator=(const FixedArray&);
+ template<class U>
+ void swap_(U& x, U& y) const
+ {
+ U t;
+ t = x;
+ x = y;
+ y = t;
+ }
+public:
+ FixedArray() : n_(0) {}
+ bool resize(size_t n)
+ {
+ if (n > maxSize) return false;
+ n_ = n;
+ return true;
+ }
+ bool copy(const FixedArray<T, maxSize>& rhs)
+ {
+ if (this == &rhs) return true;
+ for (size_t i = 0; i < rhs.n_; i++) {
+ p_[i] = rhs.p_[i];
+ }
+ n_ = rhs.n_;
+ return true;
+ }
+ void clear()
+ {
+ n_ = 0;
+ }
+ size_t size() const { return n_; }
+ void swap(FixedArray<T, maxSize>& rhs)
+ {
+ T *minP = p_;
+ size_t minN = n_;
+ T *maxP = rhs.p_;
+ size_t maxN = rhs.n_;
+ if (minP > maxP) {
+ swap_(minP, maxP);
+ swap_(minN, maxN);
+ }
+ for (size_t i = 0; i < minN; i++) {
+ swap_(minP[i], maxP[i]);
+ }
+ for (size_t i = minN; i < maxN; i++) {
+ minP[i] = maxP[i];
+ }
+ swap_(n_, rhs.n_);
+ }
+ T& operator[](size_t n) { return p_[n]; }
+ const T& operator[](size_t n) const { return p_[n]; }
+ T* data() { return p_; }
+ const T* data() const { return p_; }
+};
+
+} // mcl
+
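+/*
+	Editorial usage sketch (not part of the original header): both containers
+	signal allocation failure through the return value of resize() instead of
+	throwing, so callers are expected to check it:
+
+	mcl::Array<int> a;
+	if (!a.resize(16)) { return false; }   // heap allocation failed
+	a[0] = 5;
+
+	mcl::FixedArray<int, 8> f;
+	if (!f.resize(16)) { return false; }   // rejected: 16 > maxSize (= 8)
+*/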
diff --git a/vendor/github.com/tangerine-network/mcl/include/mcl/bls12_381.hpp b/vendor/github.com/tangerine-network/mcl/include/mcl/bls12_381.hpp
new file mode 100644
index 000000000..316e142af
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/mcl/bls12_381.hpp
@@ -0,0 +1,15 @@
+#pragma once
+/**
+ @file
+ @brief preset class for BLS12-381 pairing
+ @author MITSUNARI Shigeo(@herumi)
+ @license modified new BSD license
+ http://opensource.org/licenses/BSD-3-Clause
+*/
+#define MCL_MAX_FP_BIT_SIZE 384
+#define MCL_MAX_FR_BIT_SIZE 256
+#include <mcl/bn.hpp>
+
+namespace mcl { namespace bls12 {
+using namespace mcl::bn;
+} }
diff --git a/vendor/github.com/tangerine-network/mcl/include/mcl/bn.h b/vendor/github.com/tangerine-network/mcl/include/mcl/bn.h
new file mode 100644
index 000000000..0a31d5501
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/mcl/bn.h
@@ -0,0 +1,428 @@
+#pragma once
+/**
+ @file
+ @brief C interface of 256/384-bit optimal ate pairing over BN curves
+ @author MITSUNARI Shigeo(@herumi)
+ @license modified new BSD license
+ http://opensource.org/licenses/BSD-3-Clause
+*/
+/*
+	the order r of the elliptic curve over Fp defines the field Fr
+*/
+#ifndef MCLBN_FP_UNIT_SIZE
+ #error "define MCLBN_FP_UNIT_SIZE 4(, 6 or 8)"
+#endif
+#ifndef MCLBN_FR_UNIT_SIZE
+ #define MCLBN_FR_UNIT_SIZE MCLBN_FP_UNIT_SIZE
+#endif
+#define MCLBN_COMPILED_TIME_VAR ((MCLBN_FR_UNIT_SIZE) * 10 + (MCLBN_FP_UNIT_SIZE))
+
+#include <stdint.h> // for uint64_t, uint8_t
+#include <stdlib.h> // for size_t
+
+
+#if defined(_MSC_VER)
+ #ifdef MCLBN_DONT_EXPORT
+ #define MCLBN_DLL_API
+ #else
+ #ifdef MCLBN_DLL_EXPORT
+ #define MCLBN_DLL_API __declspec(dllexport)
+ #else
+ #define MCLBN_DLL_API __declspec(dllimport)
+ #endif
+ #endif
+ #ifndef MCLBN_NO_AUTOLINK
+ #if MCLBN_FP_UNIT_SIZE == 4
+ #pragma comment(lib, "mclbn256.lib")
+ #elif MCLBN_FP_UNIT_SIZE == 6
+ #pragma comment(lib, "mclbn384.lib")
+ #else
+ #pragma comment(lib, "mclbn512.lib")
+ #endif
+ #endif
+#elif defined(__EMSCRIPTEN__) && !defined(MCLBN_DONT_EXPORT)
+ #define MCLBN_DLL_API __attribute__((used))
+#elif defined(__wasm__) && !defined(MCLBN_DONT_EXPORT)
+ #define MCLBN_DLL_API __attribute__((visibility("default")))
+#else
+ #define MCLBN_DLL_API
+#endif
+
+#ifdef __EMSCRIPTEN__
+ // avoid 64-bit integer
+ #define mclSize unsigned int
+ #define mclInt int
+#else
+ // use #define for cgo
+ #define mclSize size_t
+ #define mclInt int64_t
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef MCLBN_NOT_DEFINE_STRUCT
+
+typedef struct mclBnFr mclBnFr;
+typedef struct mclBnG1 mclBnG1;
+typedef struct mclBnG2 mclBnG2;
+typedef struct mclBnGT mclBnGT;
+typedef struct mclBnFp mclBnFp;
+typedef struct mclBnFp2 mclBnFp2;
+
+#else
+
+typedef struct {
+ uint64_t d[MCLBN_FR_UNIT_SIZE];
+} mclBnFr;
+
+typedef struct {
+ uint64_t d[MCLBN_FP_UNIT_SIZE * 3];
+} mclBnG1;
+
+typedef struct {
+ uint64_t d[MCLBN_FP_UNIT_SIZE * 2 * 3];
+} mclBnG2;
+
+typedef struct {
+ uint64_t d[MCLBN_FP_UNIT_SIZE * 12];
+} mclBnGT;
+
+typedef struct {
+ uint64_t d[MCLBN_FP_UNIT_SIZE];
+} mclBnFp;
+
+typedef struct {
+ mclBnFp d[2];
+} mclBnFp2;
+
+#endif
+
+#include <mcl/curve_type.h>
+
+#define MCLBN_IO_SERIALIZE_HEX_STR 2048
+// for backward compatibility
+enum {
+ mclBn_CurveFp254BNb = 0,
+ mclBn_CurveFp382_1 = 1,
+ mclBn_CurveFp382_2 = 2,
+ mclBn_CurveFp462 = 3,
+ mclBn_CurveSNARK1 = 4,
+ mclBls12_CurveFp381 = 5
+};
+
+// return 0xABC which means A.BC
+MCLBN_DLL_API int mclBn_getVersion();
+/*
+ init library
+ @param curve [in] type of bn curve
+	@param compiledTimeVar [in] pass MCLBN_COMPILED_TIME_VAR,
+	which is used to make sure that the unit sizes are the same
+	when the library is built and when it is used
+	@return 0 if success
+	curve = BN254/BN_SNARK1 is allowed if maxUnitSize = 4
+	curve = BN381_1/BN381_2/BLS12_381 are allowed if maxUnitSize = 6
+	This check detects a library compiled with a different MCLBN_FP_UNIT_SIZE for safety.
+ @note not threadsafe
+ @note BN_init is used in libeay32
+*/
+MCLBN_DLL_API int mclBn_init(int curve, int compiledTimeVar);
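+/*
+	Editorial example (not part of the original header): a typical call site
+	checks the curve type and the header/library layout in one step.
+
+	if (mclBn_init(MCL_BN254, MCLBN_COMPILED_TIME_VAR) != 0) {
+		// the curve is unsupported or the library was built with a
+		// different MCLBN_FP_UNIT_SIZE than this header
+	}
+*/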
+
+
+/*
+ pairing : G1 x G2 -> GT
+ #G1 = #G2 = r
+ G1 is a curve defined on Fp
+
+ serialized size of elements
+ |Fr| |Fp|
+ BN254 32 32
+ BN381 48 48
+ BLS12_381 32 48
+ BN462 58 58
+ |G1| = |Fp|
+ |G2| = |G1| * 2
+ |GT| = |G1| * 12
+*/
+/*
+	return the number of Units (= uint64_t) needed to store Fr
+*/
+MCLBN_DLL_API int mclBn_getOpUnitSize(void);
+
+/*
+ return bytes for serialized G1(=Fp)
+*/
+MCLBN_DLL_API int mclBn_getG1ByteSize(void);
+/*
+ return bytes for serialized Fr
+*/
+MCLBN_DLL_API int mclBn_getFrByteSize(void);
+/*
+ return bytes for serialized Fp
+*/
+MCLBN_DLL_API int mclBn_getFpByteSize(void);
+
+/*
+ return decimal string of the order of the curve(=the characteristic of Fr)
+	return strlen(buf) if success else 0
+*/
+MCLBN_DLL_API mclSize mclBn_getCurveOrder(char *buf, mclSize maxBufSize);
+
+/*
+ return decimal string of the characteristic of Fp
+ return str(buf) if success
+	return strlen(buf) if success else 0
+MCLBN_DLL_API mclSize mclBn_getFieldOrder(char *buf, mclSize maxBufSize);
+
+////////////////////////////////////////////////
+/*
+ deserialize
+ return read size if success else 0
+*/
+MCLBN_DLL_API mclSize mclBnFr_deserialize(mclBnFr *x, const void *buf, mclSize bufSize);
+MCLBN_DLL_API mclSize mclBnG1_deserialize(mclBnG1 *x, const void *buf, mclSize bufSize);
+MCLBN_DLL_API mclSize mclBnG2_deserialize(mclBnG2 *x, const void *buf, mclSize bufSize);
+MCLBN_DLL_API mclSize mclBnGT_deserialize(mclBnGT *x, const void *buf, mclSize bufSize);
+MCLBN_DLL_API mclSize mclBnFp_deserialize(mclBnFp *x, const void *buf, mclSize bufSize);
+MCLBN_DLL_API mclSize mclBnFp2_deserialize(mclBnFp2 *x, const void *buf, mclSize bufSize);
+
+/*
+ serialize
+	return the number of written bytes if success else 0
+*/
+MCLBN_DLL_API mclSize mclBnFr_serialize(void *buf, mclSize maxBufSize, const mclBnFr *x);
+MCLBN_DLL_API mclSize mclBnG1_serialize(void *buf, mclSize maxBufSize, const mclBnG1 *x);
+MCLBN_DLL_API mclSize mclBnG2_serialize(void *buf, mclSize maxBufSize, const mclBnG2 *x);
+MCLBN_DLL_API mclSize mclBnGT_serialize(void *buf, mclSize maxBufSize, const mclBnGT *x);
+MCLBN_DLL_API mclSize mclBnFp_serialize(void *buf, mclSize maxBufSize, const mclBnFp *x);
+MCLBN_DLL_API mclSize mclBnFp2_serialize(void *buf, mclSize maxBufSize, const mclBnFp2 *x);
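+/*
+	Editorial example (not part of the original header): a serialize/deserialize
+	round trip for an already initialized mclBnFr x; both calls return 0 on failure.
+
+	unsigned char buf[64];
+	mclSize n = mclBnFr_serialize(buf, sizeof(buf), &x);  // n == mclBn_getFrByteSize() on success
+	mclBnFr y;
+	if (n == 0 || mclBnFr_deserialize(&y, buf, n) != n) {
+		// serialization or deserialization failed
+	}
+*/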
+
+/*
+ set string
+ ioMode
+ 10 : decimal number
+ 16 : hexadecimal number
+ MCLBN_IO_SERIALIZE_HEX_STR : hex string of serialized data
+ return 0 if success else -1
+*/
+MCLBN_DLL_API int mclBnFr_setStr(mclBnFr *x, const char *buf, mclSize bufSize, int ioMode);
+MCLBN_DLL_API int mclBnG1_setStr(mclBnG1 *x, const char *buf, mclSize bufSize, int ioMode);
+MCLBN_DLL_API int mclBnG2_setStr(mclBnG2 *x, const char *buf, mclSize bufSize, int ioMode);
+MCLBN_DLL_API int mclBnGT_setStr(mclBnGT *x, const char *buf, mclSize bufSize, int ioMode);
+MCLBN_DLL_API int mclBnFp_setStr(mclBnFp *x, const char *buf, mclSize bufSize, int ioMode);
+
+/*
+ buf is terminated by '\0'
+	return strlen(buf) if success else 0
+*/
+MCLBN_DLL_API mclSize mclBnFr_getStr(char *buf, mclSize maxBufSize, const mclBnFr *x, int ioMode);
+MCLBN_DLL_API mclSize mclBnG1_getStr(char *buf, mclSize maxBufSize, const mclBnG1 *x, int ioMode);
+MCLBN_DLL_API mclSize mclBnG2_getStr(char *buf, mclSize maxBufSize, const mclBnG2 *x, int ioMode);
+MCLBN_DLL_API mclSize mclBnGT_getStr(char *buf, mclSize maxBufSize, const mclBnGT *x, int ioMode);
+MCLBN_DLL_API mclSize mclBnFp_getStr(char *buf, mclSize maxBufSize, const mclBnFp *x, int ioMode);
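+/*
+	Editorial example (not part of the original header): ioMode selects the
+	text format of the value.
+
+	mclBnFr a;
+	mclBnFr_setStr(&a, "123", 3, 10);                    // parse as decimal
+	char s[128];
+	mclSize len = mclBnFr_getStr(s, sizeof(s), &a, 16);  // write as hex; len == strlen(s), 0 on error
+*/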
+
+// set zero
+MCLBN_DLL_API void mclBnFr_clear(mclBnFr *x);
+MCLBN_DLL_API void mclBnFp_clear(mclBnFp *x);
+MCLBN_DLL_API void mclBnFp2_clear(mclBnFp2 *x);
+
+// set x to y
+MCLBN_DLL_API void mclBnFr_setInt(mclBnFr *y, mclInt x);
+MCLBN_DLL_API void mclBnFr_setInt32(mclBnFr *y, int x);
+
+// x = buf & ((1 << bitLen(r)) - 1)
+// if (x >= r) x &= (1 << (bitLen(r) - 1)) - 1
+// always return 0
+MCLBN_DLL_API int mclBnFr_setLittleEndian(mclBnFr *x, const void *buf, mclSize bufSize);
+MCLBN_DLL_API int mclBnFp_setLittleEndian(mclBnFp *x, const void *buf, mclSize bufSize);
+
+// set (buf mod r) to x
+// return 0 if bufSize <= (byte size of Fr * 2) else -1
+MCLBN_DLL_API int mclBnFr_setLittleEndianMod(mclBnFr *x, const void *buf, mclSize bufSize);
+// set (buf mod p) to x
+// return 0 if bufSize <= (byte size of Fp * 2) else -1
+MCLBN_DLL_API int mclBnFp_setLittleEndianMod(mclBnFp *x, const void *buf, mclSize bufSize);
+
+// return 1 if true and 0 otherwise
+MCLBN_DLL_API int mclBnFr_isValid(const mclBnFr *x);
+MCLBN_DLL_API int mclBnFr_isEqual(const mclBnFr *x, const mclBnFr *y);
+MCLBN_DLL_API int mclBnFr_isZero(const mclBnFr *x);
+MCLBN_DLL_API int mclBnFr_isOne(const mclBnFr *x);
+
+MCLBN_DLL_API int mclBnFp_isEqual(const mclBnFp *x, const mclBnFp *y);
+MCLBN_DLL_API int mclBnFp2_isEqual(const mclBnFp2 *x, const mclBnFp2 *y);
+
+#ifndef MCL_DONT_USE_CSRPNG
+// return 0 if success
+MCLBN_DLL_API int mclBnFr_setByCSPRNG(mclBnFr *x);
+
+/*
+ set user-defined random function for setByCSPRNG
+ @param self [in] user-defined pointer
+ @param readFunc [in] user-defined function,
+ which writes random bufSize bytes to buf and returns bufSize if success else returns 0
+ @note if self == 0 and readFunc == 0 then set default random function
+ @note not threadsafe
+*/
+MCLBN_DLL_API void mclBn_setRandFunc(void *self, unsigned int (*readFunc)(void *self, void *buf, unsigned int bufSize));
+#endif
+
+// hash(s) and set x
+// return 0 if success
+MCLBN_DLL_API int mclBnFr_setHashOf(mclBnFr *x, const void *buf, mclSize bufSize);
+MCLBN_DLL_API int mclBnFp_setHashOf(mclBnFp *x, const void *buf, mclSize bufSize);
+
+// map x to y
+// return 0 if success else -1
+MCLBN_DLL_API int mclBnFp_mapToG1(mclBnG1 *y, const mclBnFp *x);
+MCLBN_DLL_API int mclBnFp2_mapToG2(mclBnG2 *y, const mclBnFp2 *x);
+
+MCLBN_DLL_API void mclBnFr_neg(mclBnFr *y, const mclBnFr *x);
+MCLBN_DLL_API void mclBnFr_inv(mclBnFr *y, const mclBnFr *x);
+MCLBN_DLL_API void mclBnFr_sqr(mclBnFr *y, const mclBnFr *x);
+MCLBN_DLL_API void mclBnFr_add(mclBnFr *z, const mclBnFr *x, const mclBnFr *y);
+MCLBN_DLL_API void mclBnFr_sub(mclBnFr *z, const mclBnFr *x, const mclBnFr *y);
+MCLBN_DLL_API void mclBnFr_mul(mclBnFr *z, const mclBnFr *x, const mclBnFr *y);
+MCLBN_DLL_API void mclBnFr_div(mclBnFr *z, const mclBnFr *x, const mclBnFr *y);
+
+////////////////////////////////////////////////
+// set zero
+MCLBN_DLL_API void mclBnG1_clear(mclBnG1 *x);
+
+
+// return 1 if true and 0 otherwise
+MCLBN_DLL_API int mclBnG1_isValid(const mclBnG1 *x);
+MCLBN_DLL_API int mclBnG1_isEqual(const mclBnG1 *x, const mclBnG1 *y);
+MCLBN_DLL_API int mclBnG1_isZero(const mclBnG1 *x);
+/*
+	return 1 if x has a correct order
+	x is a valid point of G1 if and only if:
+	mclBnG1_isValid() is true, which already includes the mclBnG1_isValidOrder() check when mclBn_verifyOrderG1(true) is set
+	mclBnG1_isValid() && mclBnG1_isValidOrder() is true when mclBn_verifyOrderG1(false) is set
+*/
+MCLBN_DLL_API int mclBnG1_isValidOrder(const mclBnG1 *x);
+
+MCLBN_DLL_API int mclBnG1_hashAndMapTo(mclBnG1 *x, const void *buf, mclSize bufSize);
+
+
+MCLBN_DLL_API void mclBnG1_neg(mclBnG1 *y, const mclBnG1 *x);
+MCLBN_DLL_API void mclBnG1_dbl(mclBnG1 *y, const mclBnG1 *x);
+MCLBN_DLL_API void mclBnG1_normalize(mclBnG1 *y, const mclBnG1 *x);
+MCLBN_DLL_API void mclBnG1_add(mclBnG1 *z, const mclBnG1 *x, const mclBnG1 *y);
+MCLBN_DLL_API void mclBnG1_sub(mclBnG1 *z, const mclBnG1 *x, const mclBnG1 *y);
+MCLBN_DLL_API void mclBnG1_mul(mclBnG1 *z, const mclBnG1 *x, const mclBnFr *y);
+
+/*
+ constant time mul
+*/
+MCLBN_DLL_API void mclBnG1_mulCT(mclBnG1 *z, const mclBnG1 *x, const mclBnFr *y);
+
+////////////////////////////////////////////////
+// set zero
+MCLBN_DLL_API void mclBnG2_clear(mclBnG2 *x);
+
+// return 1 if true and 0 otherwise
+MCLBN_DLL_API int mclBnG2_isValid(const mclBnG2 *x);
+MCLBN_DLL_API int mclBnG2_isEqual(const mclBnG2 *x, const mclBnG2 *y);
+MCLBN_DLL_API int mclBnG2_isZero(const mclBnG2 *x);
+// return 1 if x has a correct order
+MCLBN_DLL_API int mclBnG2_isValidOrder(const mclBnG2 *x);
+
+MCLBN_DLL_API int mclBnG2_hashAndMapTo(mclBnG2 *x, const void *buf, mclSize bufSize);
+
+// return written size if success else 0
+
+MCLBN_DLL_API void mclBnG2_neg(mclBnG2 *y, const mclBnG2 *x);
+MCLBN_DLL_API void mclBnG2_dbl(mclBnG2 *y, const mclBnG2 *x);
+MCLBN_DLL_API void mclBnG2_normalize(mclBnG2 *y, const mclBnG2 *x);
+MCLBN_DLL_API void mclBnG2_add(mclBnG2 *z, const mclBnG2 *x, const mclBnG2 *y);
+MCLBN_DLL_API void mclBnG2_sub(mclBnG2 *z, const mclBnG2 *x, const mclBnG2 *y);
+MCLBN_DLL_API void mclBnG2_mul(mclBnG2 *z, const mclBnG2 *x, const mclBnFr *y);
+/*
+ constant time mul
+*/
+MCLBN_DLL_API void mclBnG2_mulCT(mclBnG2 *z, const mclBnG2 *x, const mclBnFr *y);
+
+////////////////////////////////////////////////
+// set zero
+MCLBN_DLL_API void mclBnGT_clear(mclBnGT *x);
+// set x to y
+MCLBN_DLL_API void mclBnGT_setInt(mclBnGT *y, mclInt x);
+MCLBN_DLL_API void mclBnGT_setInt32(mclBnGT *y, int x);
+
+// return 1 if true and 0 otherwise
+MCLBN_DLL_API int mclBnGT_isEqual(const mclBnGT *x, const mclBnGT *y);
+MCLBN_DLL_API int mclBnGT_isZero(const mclBnGT *x);
+MCLBN_DLL_API int mclBnGT_isOne(const mclBnGT *x);
+
+MCLBN_DLL_API void mclBnGT_neg(mclBnGT *y, const mclBnGT *x);
+MCLBN_DLL_API void mclBnGT_inv(mclBnGT *y, const mclBnGT *x);
+MCLBN_DLL_API void mclBnGT_sqr(mclBnGT *y, const mclBnGT *x);
+MCLBN_DLL_API void mclBnGT_add(mclBnGT *z, const mclBnGT *x, const mclBnGT *y);
+MCLBN_DLL_API void mclBnGT_sub(mclBnGT *z, const mclBnGT *x, const mclBnGT *y);
+MCLBN_DLL_API void mclBnGT_mul(mclBnGT *z, const mclBnGT *x, const mclBnGT *y);
+MCLBN_DLL_API void mclBnGT_div(mclBnGT *z, const mclBnGT *x, const mclBnGT *y);
+
+/*
+ pow for all elements of Fp12
+*/
+MCLBN_DLL_API void mclBnGT_powGeneric(mclBnGT *z, const mclBnGT *x, const mclBnFr *y);
+/*
+ pow for only {x|x^r = 1} in Fp12 by GLV method
+ the value generated by pairing satisfies the condition
+*/
+MCLBN_DLL_API void mclBnGT_pow(mclBnGT *z, const mclBnGT *x, const mclBnFr *y);
+
+MCLBN_DLL_API void mclBn_pairing(mclBnGT *z, const mclBnG1 *x, const mclBnG2 *y);
+MCLBN_DLL_API void mclBn_finalExp(mclBnGT *y, const mclBnGT *x);
+MCLBN_DLL_API void mclBn_millerLoop(mclBnGT *z, const mclBnG1 *x, const mclBnG2 *y);
+
+// return precomputedQcoeffSize * sizeof(Fp6) / sizeof(uint64_t)
+MCLBN_DLL_API int mclBn_getUint64NumToPrecompute(void);
+
+// allocate Qbuf[MCLBN_getUint64NumToPrecompute()] before calling this
+MCLBN_DLL_API void mclBn_precomputeG2(uint64_t *Qbuf, const mclBnG2 *Q);
+
+MCLBN_DLL_API void mclBn_precomputedMillerLoop(mclBnGT *f, const mclBnG1 *P, const uint64_t *Qbuf);
+MCLBN_DLL_API void mclBn_precomputedMillerLoop2(mclBnGT *f, const mclBnG1 *P1, const uint64_t *Q1buf, const mclBnG1 *P2, const uint64_t *Q2buf);
+MCLBN_DLL_API void mclBn_precomputedMillerLoop2mixed(mclBnGT *f, const mclBnG1 *P1, const mclBnG2 *Q1, const mclBnG1 *P2, const uint64_t *Q2buf);
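+/*
+	Editorial example (not part of the original header): precompute the G2
+	argument once and reuse it for many pairings with the same Q.
+
+	int n = mclBn_getUint64NumToPrecompute();
+	uint64_t *Qbuf = (uint64_t*)malloc(sizeof(uint64_t) * n);
+	mclBn_precomputeG2(Qbuf, &Q);
+	mclBnGT e;
+	mclBn_precomputedMillerLoop(&e, &P, Qbuf);
+	mclBn_finalExp(&e, &e);   // e now equals what mclBn_pairing(&e, &P, &Q) would produce
+	free(Qbuf);
+*/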
+
+/*
+ Lagrange interpolation
+ recover out = y(0) by { (xVec[i], yVec[i]) }
+ return 0 if success else -1
+ @note *out = yVec[0] if k = 1
+ @note k >= 2, xVec[i] != 0, xVec[i] != xVec[j] for i != j
+*/
+MCLBN_DLL_API int mclBn_FrLagrangeInterpolation(mclBnFr *out, const mclBnFr *xVec, const mclBnFr *yVec, mclSize k);
+MCLBN_DLL_API int mclBn_G1LagrangeInterpolation(mclBnG1 *out, const mclBnFr *xVec, const mclBnG1 *yVec, mclSize k);
+MCLBN_DLL_API int mclBn_G2LagrangeInterpolation(mclBnG2 *out, const mclBnFr *xVec, const mclBnG2 *yVec, mclSize k);
+
+/*
+ evaluate polynomial
+ out = f(x) = c[0] + c[1] * x + c[2] * x^2 + ... + c[cSize - 1] * x^(cSize - 1)
+ @note cSize >= 2
+*/
+MCLBN_DLL_API int mclBn_FrEvaluatePolynomial(mclBnFr *out, const mclBnFr *cVec, mclSize cSize, const mclBnFr *x);
+MCLBN_DLL_API int mclBn_G1EvaluatePolynomial(mclBnG1 *out, const mclBnG1 *cVec, mclSize cSize, const mclBnFr *x);
+MCLBN_DLL_API int mclBn_G2EvaluatePolynomial(mclBnG2 *out, const mclBnG2 *cVec, mclSize cSize, const mclBnFr *x);
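+/*
+	Editorial example (not part of the original header): the two groups above
+	combine into a k-out-of-n secret sharing over Fr.  With c[0] the secret and
+	c[1..k-1] random, share i is (x[i], f(x[i])):
+
+	mclBn_FrEvaluatePolynomial(&share[i], c, k, &x[i]);
+
+	and any k shares recover the secret f(0):
+
+	mclBn_FrLagrangeInterpolation(&secret, x, share, k);
+*/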
+
+/*
+ verify whether a point of an elliptic curve has order r
+	This API affects setStr() and deserialize() for G2 on BN, and for G1/G2 on BLS12
+	@param doVerify [in] skip the verification if zero (default 1)
+*/
+MCLBN_DLL_API void mclBn_verifyOrderG1(int doVerify);
+MCLBN_DLL_API void mclBn_verifyOrderG2(int doVerify);
+
+/*
+ EXPERIMENTAL
+ only for curve = MCL_SECP* or MCL_NIST*
+ return standard base point of the current elliptic curve
+*/
+MCLBN_DLL_API int mclBnG1_getBasePoint(mclBnG1 *x);
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/vendor/github.com/tangerine-network/mcl/include/mcl/bn.hpp b/vendor/github.com/tangerine-network/mcl/include/mcl/bn.hpp
new file mode 100644
index 000000000..5ebe5d956
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/mcl/bn.hpp
@@ -0,0 +1,2261 @@
+#pragma once
+/**
+ @file
+ @brief optimal ate pairing over BN-curve / BLS12-curve
+ @author MITSUNARI Shigeo(@herumi)
+ @license modified new BSD license
+ http://opensource.org/licenses/BSD-3-Clause
+*/
+#include <mcl/fp_tower.hpp>
+#include <mcl/ec.hpp>
+#include <mcl/curve_type.h>
+#include <assert.h>
+#ifndef CYBOZU_DONT_USE_EXCEPTION
+#include <vector>
+#endif
+
+/*
+ set bit size of Fp and Fr
+*/
+#ifndef MCL_MAX_FP_BIT_SIZE
+ #define MCL_MAX_FP_BIT_SIZE 256
+#endif
+
+#ifndef MCL_MAX_FR_BIT_SIZE
+ #define MCL_MAX_FR_BIT_SIZE MCL_MAX_FP_BIT_SIZE
+#endif
+namespace mcl {
+
+struct CurveParam {
+ /*
+ y^2 = x^3 + b
+ i^2 = -1
+ xi = xi_a + i
+ v^3 = xi
+ w^2 = v
+ */
+ const char *z;
+ int b; // y^2 = x^3 + b
+ int xi_a; // xi = xi_a + i
+ /*
+ BN254, BN381 : Dtype
+ BLS12-381 : Mtype
+ */
+ bool isMtype;
+ int curveType; // same in curve_type.h
+ bool operator==(const CurveParam& rhs) const
+ {
+ return strcmp(z, rhs.z) == 0 && b == rhs.b && xi_a == rhs.xi_a && isMtype == rhs.isMtype;
+ }
+ bool operator!=(const CurveParam& rhs) const { return !operator==(rhs); }
+};
+
+const CurveParam BN254 = { "-0x4080000000000001", 2, 1, false, MCL_BN254 }; // -(2^62 + 2^55 + 1)
+// provisional(experimental) param with maxBitSize = 384
+const CurveParam BN381_1 = { "-0x400011000000000000000001", 2, 1, false, MCL_BN381_1 }; // -(2^94 + 2^76 + 2^72 + 1) // A Family of Implementation-Friendly BN Elliptic Curves
+const CurveParam BN381_2 = { "-0x400040090001000000000001", 2, 1, false, MCL_BN381_2 }; // -(2^94 + 2^78 + 2^67 + 2^64 + 2^48 + 1) // used in relic-toolkit
+const CurveParam BN462 = { "0x4001fffffffffffffffffffffbfff", 5, 2, false, MCL_BN462 }; // 2^114 + 2^101 - 2^14 - 1 // https://eprint.iacr.org/2017/334
+const CurveParam BN_SNARK1 = { "4965661367192848881", 3, 9, false, MCL_BN_SNARK1 };
+const CurveParam BLS12_381 = { "-0xd201000000010000", 4, 1, true, MCL_BLS12_381 };
+const CurveParam BN160 = { "0x4000000031", 3, 4, false, MCL_BN160 };
+
+inline const CurveParam& getCurveParam(int type)
+{
+ switch (type) {
+ case MCL_BN254: return mcl::BN254;
+ case MCL_BN381_1: return mcl::BN381_1;
+ case MCL_BN381_2: return mcl::BN381_2;
+ case MCL_BN462: return mcl::BN462;
+ case MCL_BN_SNARK1: return mcl::BN_SNARK1;
+ case MCL_BLS12_381: return mcl::BLS12_381;
+ case MCL_BN160: return mcl::BN160;
+ default:
+ assert(0);
+ return mcl::BN254;
+ }
+}
+
+namespace bn {
+
+namespace local {
+struct FpTag;
+struct FrTag;
+}
+
+typedef mcl::FpT<local::FpTag, MCL_MAX_FP_BIT_SIZE> Fp;
+typedef mcl::FpT<local::FrTag, MCL_MAX_FR_BIT_SIZE> Fr;
+typedef mcl::Fp2T<Fp> Fp2;
+typedef mcl::Fp6T<Fp> Fp6;
+typedef mcl::Fp12T<Fp> Fp12;
+typedef mcl::EcT<Fp> G1;
+typedef mcl::EcT<Fp2> G2;
+typedef Fp12 GT;
+
+typedef mcl::FpDblT<Fp> FpDbl;
+typedef mcl::Fp2DblT<Fp> Fp2Dbl;
+
+inline void Frobenius(Fp2& y, const Fp2& x)
+{
+ Fp2::Frobenius(y, x);
+}
+inline void Frobenius(Fp12& y, const Fp12& x)
+{
+ Fp12::Frobenius(y, x);
+}
+/*
+ twisted Frobenius for G2
+*/
+void Frobenius(G2& D, const G2& S);
+void Frobenius2(G2& D, const G2& S);
+void Frobenius3(G2& D, const G2& S);
+
+namespace local {
+
+typedef mcl::FixedArray<int8_t, 128> SignVec;
+
+inline size_t getPrecomputeQcoeffSize(const SignVec& sv)
+{
+ size_t idx = 2 + 2;
+ for (size_t i = 2; i < sv.size(); i++) {
+ idx++;
+ if (sv[i]) idx++;
+ }
+ return idx;
+}
+
+template<class X, class C, size_t N>
+X evalPoly(const X& x, const C (&c)[N])
+{
+ X ret = c[N - 1];
+ for (size_t i = 1; i < N; i++) {
+ ret *= x;
+ ret += c[N - 1 - i];
+ }
+ return ret;
+}
+
+enum TwistBtype {
+ tb_generic,
+ tb_1m1i, // 1 - 1i
+ tb_1m2i // 1 - 2i
+};
+
+/*
+ l = (a, b, c) => (a, b * P.y, c * P.x)
+*/
+inline void updateLine(Fp6& l, const G1& P)
+{
+ l.b.a *= P.y;
+ l.b.b *= P.y;
+ l.c.a *= P.x;
+ l.c.b *= P.x;
+}
+
+struct Compress {
+ Fp12& z_;
+ Fp2& g1_;
+ Fp2& g2_;
+ Fp2& g3_;
+ Fp2& g4_;
+ Fp2& g5_;
+ // z is output area
+ Compress(Fp12& z, const Fp12& x)
+ : z_(z)
+ , g1_(z.getFp2()[4])
+ , g2_(z.getFp2()[3])
+ , g3_(z.getFp2()[2])
+ , g4_(z.getFp2()[1])
+ , g5_(z.getFp2()[5])
+ {
+ g2_ = x.getFp2()[3];
+ g3_ = x.getFp2()[2];
+ g4_ = x.getFp2()[1];
+ g5_ = x.getFp2()[5];
+ }
+ Compress(Fp12& z, const Compress& c)
+ : z_(z)
+ , g1_(z.getFp2()[4])
+ , g2_(z.getFp2()[3])
+ , g3_(z.getFp2()[2])
+ , g4_(z.getFp2()[1])
+ , g5_(z.getFp2()[5])
+ {
+ g2_ = c.g2_;
+ g3_ = c.g3_;
+ g4_ = c.g4_;
+ g5_ = c.g5_;
+ }
+ void decompressBeforeInv(Fp2& nume, Fp2& denomi) const
+ {
+ assert(&nume != &denomi);
+
+ if (g2_.isZero()) {
+ Fp2::add(nume, g4_, g4_);
+ nume *= g5_;
+ denomi = g3_;
+ } else {
+ Fp2 t;
+ Fp2::sqr(nume, g5_);
+ Fp2::mul_xi(denomi, nume);
+ Fp2::sqr(nume, g4_);
+ Fp2::sub(t, nume, g3_);
+ t += t;
+ t += nume;
+ Fp2::add(nume, denomi, t);
+ Fp2::divBy4(nume, nume);
+ denomi = g2_;
+ }
+ }
+
+ // output to z
+ void decompressAfterInv()
+ {
+ Fp2& g0 = z_.getFp2()[0];
+ Fp2 t0, t1;
+ // Compute g0.
+ Fp2::sqr(t0, g1_);
+ Fp2::mul(t1, g3_, g4_);
+ t0 -= t1;
+ t0 += t0;
+ t0 -= t1;
+ Fp2::mul(t1, g2_, g5_);
+ t0 += t1;
+ Fp2::mul_xi(g0, t0);
+ g0.a += Fp::one();
+ }
+
+public:
+ void decompress() // for test
+ {
+ Fp2 nume, denomi;
+ decompressBeforeInv(nume, denomi);
+ Fp2::inv(denomi, denomi);
+		g1_ = nume * denomi; // g1 is recovered.
+ decompressAfterInv();
+ }
+ /*
+ 2275clk * 186 = 423Kclk QQQ
+ */
+ static void squareC(Compress& z)
+ {
+ Fp2 t0, t1, t2;
+ Fp2Dbl T0, T1, T2, T3;
+ Fp2Dbl::sqrPre(T0, z.g4_);
+ Fp2Dbl::sqrPre(T1, z.g5_);
+ Fp2Dbl::mul_xi(T2, T1);
+ T2 += T0;
+ Fp2Dbl::mod(t2, T2);
+ Fp2::add(t0, z.g4_, z.g5_);
+ Fp2Dbl::sqrPre(T2, t0);
+ T0 += T1;
+ T2 -= T0;
+ Fp2Dbl::mod(t0, T2);
+ Fp2::add(t1, z.g2_, z.g3_);
+ Fp2Dbl::sqrPre(T3, t1);
+ Fp2Dbl::sqrPre(T2, z.g2_);
+ Fp2::mul_xi(t1, t0);
+ z.g2_ += t1;
+ z.g2_ += z.g2_;
+ z.g2_ += t1;
+ Fp2::sub(t1, t2, z.g3_);
+ t1 += t1;
+ Fp2Dbl::sqrPre(T1, z.g3_);
+ Fp2::add(z.g3_, t1, t2);
+ Fp2Dbl::mul_xi(T0, T1);
+ T0 += T2;
+ Fp2Dbl::mod(t0, T0);
+ Fp2::sub(z.g4_, t0, z.g4_);
+ z.g4_ += z.g4_;
+ z.g4_ += t0;
+ Fp2Dbl::addPre(T2, T2, T1);
+ T3 -= T2;
+ Fp2Dbl::mod(t0, T3);
+ z.g5_ += t0;
+ z.g5_ += z.g5_;
+ z.g5_ += t0;
+ }
+ static void square_n(Compress& z, int n)
+ {
+ for (int i = 0; i < n; i++) {
+ squareC(z);
+ }
+ }
+ /*
+ Exponentiation over compression for:
+ z = x^Param::z.abs()
+ */
+ static void fixed_power(Fp12& z, const Fp12& x)
+ {
+ if (x.isOne()) {
+ z = 1;
+ return;
+ }
+ Fp12 x_org = x;
+ Fp12 d62;
+ Fp2 c55nume, c55denomi, c62nume, c62denomi;
+ Compress c55(z, x);
+ square_n(c55, 55);
+ c55.decompressBeforeInv(c55nume, c55denomi);
+ Compress c62(d62, c55);
+ square_n(c62, 62 - 55);
+ c62.decompressBeforeInv(c62nume, c62denomi);
+ Fp2 acc;
+ Fp2::mul(acc, c55denomi, c62denomi);
+ Fp2::inv(acc, acc);
+ Fp2 t;
+ Fp2::mul(t, acc, c62denomi);
+ Fp2::mul(c55.g1_, c55nume, t);
+ c55.decompressAfterInv();
+ Fp2::mul(t, acc, c55denomi);
+ Fp2::mul(c62.g1_, c62nume, t);
+ c62.decompressAfterInv();
+ z *= x_org;
+ z *= d62;
+ }
+};
+
+struct MapTo {
+ enum {
+ BNtype,
+ BLS12type,
+ STD_ECtype
+ };
+ Fp c1_; // sqrt(-3)
+ Fp c2_; // (-1 + sqrt(-3)) / 2
+ mpz_class z_;
+ mpz_class cofactor_;
+ int type_;
+ bool useNaiveMapTo_;
+
+ int legendre(bool *pb, const Fp& x) const
+ {
+ mpz_class xx;
+ x.getMpz(pb, xx);
+ if (!*pb) return 0;
+ return gmp::legendre(xx, Fp::getOp().mp);
+ }
+ int legendre(bool *pb, const Fp2& x) const
+ {
+ Fp y;
+ Fp2::norm(y, x);
+ return legendre(pb, y);
+ }
+ void mulFp(Fp& x, const Fp& y) const
+ {
+ x *= y;
+ }
+ void mulFp(Fp2& x, const Fp& y) const
+ {
+ x.a *= y;
+ x.b *= y;
+ }
+ /*
+ P.-A. Fouque and M. Tibouchi,
+		"Indifferentiable hashing to Barreto-Naehrig curves,"
+ in Proc. Int. Conf. Cryptol. Inform. Security Latin Amer., 2012, vol. 7533, pp.1-17.
+
+ w = sqrt(-3) t / (1 + b + t^2)
+ Remark: throw exception if t = 0, c1, -c1 and b = 2
+ */
+ template<class G, class F>
+ bool calcBN(G& P, const F& t) const
+ {
+ F x, y, w;
+ bool b;
+ bool negative = legendre(&b, t) < 0;
+ if (!b) return false;
+ if (t.isZero()) return false;
+ F::sqr(w, t);
+ w += G::b_;
+ *w.getFp0() += Fp::one();
+ if (w.isZero()) return false;
+ F::inv(w, w);
+ mulFp(w, c1_);
+ w *= t;
+ for (int i = 0; i < 3; i++) {
+ switch (i) {
+ case 0: F::mul(x, t, w); F::neg(x, x); *x.getFp0() += c2_; break;
+ case 1: F::neg(x, x); *x.getFp0() -= Fp::one(); break;
+ case 2: F::sqr(x, w); F::inv(x, x); *x.getFp0() += Fp::one(); break;
+ }
+ G::getWeierstrass(y, x);
+ if (F::squareRoot(y, y)) {
+ if (negative) F::neg(y, y);
+ P.set(&b, x, y, false);
+ assert(b);
+ return true;
+ }
+ }
+ return false;
+ }
+ /*
+ Faster Hashing to G2
+ Laura Fuentes-Castaneda, Edward Knapp, Francisco Rodriguez-Henriquez
+ section 6.1
+ for BN
+ Q = zP + Frob(3zP) + Frob^2(zP) + Frob^3(P)
+ = -(18x^3 + 12x^2 + 3x + 1)cofactor_ P
+ */
+ void mulByCofactorBN(G2& Q, const G2& P) const
+ {
+#if 0
+ G2::mulGeneric(Q, P, cofactor_);
+#else
+#if 0
+ mpz_class t = -(1 + z_ * (3 + z_ * (12 + z_ * 18)));
+ G2::mulGeneric(Q, P, t * cofactor_);
+#else
+ G2 T0, T1, T2;
+ /*
+ G2::mul (GLV method) can't be used because P is not on G2
+ */
+ G2::mulGeneric(T0, P, z_);
+ G2::dbl(T1, T0);
+ T1 += T0; // 3zP
+ Frobenius(T1, T1);
+ Frobenius2(T2, T0);
+ T0 += T1;
+ T0 += T2;
+ Frobenius3(T2, P);
+ G2::add(Q, T0, T2);
+#endif
+#endif
+ }
+ /*
+		1.2~1.4 times faster than calcBN
+ */
+ template<class G, class F>
+ void naiveMapTo(G& P, const F& t) const
+ {
+ F x = t;
+ for (;;) {
+ F y;
+ G::getWeierstrass(y, x);
+ if (F::squareRoot(y, y)) {
+ bool b;
+ P.set(&b, x, y, false);
+ assert(b);
+ return;
+ }
+ *x.getFp0() += Fp::one();
+ }
+ }
+ /*
+		#E(Fp) / r = (p + 1 - t) / r = (z - 1)^2 / 3
+ */
+ void mulByCofactorBLS12(G1& Q, const G1& P) const
+ {
+ G1::mulGeneric(Q, P, cofactor_);
+ }
+ /*
+ Efficient hash maps to G2 on BLS curves
+ Alessandro Budroni, Federico Pintore
+ Q = (z(z-1)-1)P + Frob((z-1)P) + Frob^2(2P)
+ */
+ void mulByCofactorBLS12(G2& Q, const G2& P) const
+ {
+ G2 T0, T1;
+ G2::mulGeneric(T0, P, z_ - 1);
+ G2::mulGeneric(T1, T0, z_);
+ T1 -= P;
+ Frobenius(T0, T0);
+ T0 += T1;
+ G2::dbl(T1, P);
+ Frobenius2(T1, T1);
+ G2::add(Q, T0, T1);
+ }
+ /*
+ cofactor_ is for G2(not used now)
+ */
+ void initBN(const mpz_class& cofactor, const mpz_class &z, int curveType)
+ {
+ z_ = z;
+ cofactor_ = cofactor;
+ if (curveType == MCL_BN254) {
+ const char *c1 = "252364824000000126cd890000000003cf0f0000000000060c00000000000004";
+ const char *c2 = "25236482400000017080eb4000000006181800000000000cd98000000000000b";
+ bool b;
+ c1_.setStr(&b, c1, 16);
+ c2_.setStr(&b, c2, 16);
+ (void)b;
+ return;
+ }
+ bool b = Fp::squareRoot(c1_, -3);
+ assert(b);
+ (void)b;
+ c2_ = (c1_ - 1) / 2;
+ }
+ void initBLS12(const mpz_class& z)
+ {
+ z_ = z;
+ // cofactor for G1
+ cofactor_ = (z - 1) * (z - 1) / 3;
+ bool b = Fp::squareRoot(c1_, -3);
+ assert(b);
+ (void)b;
+ c2_ = (c1_ - 1) / 2;
+ }
+ /*
+ if type == STD_ECtype, then cofactor, z are not used.
+ */
+ void init(const mpz_class& cofactor, const mpz_class &z, int curveType)
+ {
+ if (0 <= curveType && curveType < MCL_EC_BEGIN) {
+ type_ = curveType == MCL_BLS12_381 ? BLS12type : BNtype;
+ } else {
+ type_ = STD_ECtype;
+ }
+ if (type_ == STD_ECtype) {
+ useNaiveMapTo_ = true;
+ } else {
+ useNaiveMapTo_ = false;
+ }
+#ifdef MCL_USE_OLD_MAPTO_FOR_BLS12
+		if (type_ == BLS12type) useNaiveMapTo_ = true;
+#endif
+ if (type_ == BNtype) {
+ initBN(cofactor, z, curveType);
+ } else if (type_ == BLS12type) {
+ initBLS12(z);
+ }
+ }
+ bool calcG1(G1& P, const Fp& t) const
+ {
+ if (useNaiveMapTo_) {
+ naiveMapTo<G1, Fp>(P, t);
+ } else {
+ if (!calcBN<G1, Fp>(P, t)) return false;
+ }
+ switch (type_) {
+ case BNtype:
+ // no subgroup
+ break;
+ case BLS12type:
+ mulByCofactorBLS12(P, P);
+ break;
+ }
+ assert(P.isValid());
+ return true;
+ }
+ /*
+ get the element in G2 by multiplying the cofactor
+ */
+ bool calcG2(G2& P, const Fp2& t) const
+ {
+ if (useNaiveMapTo_) {
+ naiveMapTo<G2, Fp2>(P, t);
+ } else {
+ if (!calcBN<G2, Fp2>(P, t)) return false;
+ }
+ switch(type_) {
+ case BNtype:
+ mulByCofactorBN(P, P);
+ break;
+ case BLS12type:
+ mulByCofactorBLS12(P, P);
+ break;
+ }
+ assert(P.isValid());
+ return true;
+ }
+};
+
+/*
+ Software implementation of Attribute-Based Encryption: Appendixes
+ GLV for G1 on BN/BLS12
+*/
+struct GLV1 {
+ Fp rw; // rw = 1 / w = (-1 - sqrt(-3)) / 2
+ size_t rBitSize;
+ mpz_class v0, v1;
+ mpz_class B[2][2];
+ mpz_class r;
+private:
+ bool usePrecomputedTable(int curveType)
+ {
+ if (curveType < 0) return false;
+ const struct Tbl {
+ int curveType;
+ const char *rw;
+ size_t rBitSize;
+ const char *v0, *v1;
+ const char *B[2][2];
+ const char *r;
+ } tbl[] = {
+ {
+ MCL_BN254,
+ "49b36240000000024909000000000006cd80000000000007",
+ 256,
+ "2a01fab7e04a017b9c0eb31ff36bf3357",
+ "37937ca688a6b4904",
+ {
+ {
+ "61818000000000028500000000000004",
+ "8100000000000001",
+ },
+ {
+ "8100000000000001",
+ "-61818000000000020400000000000003",
+ },
+ },
+ "2523648240000001ba344d8000000007ff9f800000000010a10000000000000d",
+ },
+ };
+ for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) {
+ if (tbl[i].curveType != curveType) continue;
+ bool b;
+ rw.setStr(&b, tbl[i].rw, 16); if (!b) continue;
+ rBitSize = tbl[i].rBitSize;
+ mcl::gmp::setStr(&b, v0, tbl[i].v0, 16); if (!b) continue;
+ mcl::gmp::setStr(&b, v1, tbl[i].v1, 16); if (!b) continue;
+ mcl::gmp::setStr(&b, B[0][0], tbl[i].B[0][0], 16); if (!b) continue;
+ mcl::gmp::setStr(&b, B[0][1], tbl[i].B[0][1], 16); if (!b) continue;
+ mcl::gmp::setStr(&b, B[1][0], tbl[i].B[1][0], 16); if (!b) continue;
+ mcl::gmp::setStr(&b, B[1][1], tbl[i].B[1][1], 16); if (!b) continue;
+ mcl::gmp::setStr(&b, r, tbl[i].r, 16); if (!b) continue;
+ return true;
+ }
+ return false;
+ }
+public:
+ bool operator==(const GLV1& rhs) const
+ {
+ return rw == rhs.rw && rBitSize == rhs.rBitSize && v0 == rhs.v0 && v1 == rhs.v1
+ && B[0][0] == rhs.B[0][0] && B[0][1] == rhs.B[0][1] && B[1][0] == rhs.B[1][0]
+ && B[1][1] == rhs.B[1][1] && r == rhs.r;
+ }
+ bool operator!=(const GLV1& rhs) const { return !operator==(rhs); }
+#ifndef CYBOZU_DONT_USE_STRING
+ void dump(const mpz_class& x) const
+ {
+ printf("\"%s\",\n", mcl::gmp::getStr(x, 16).c_str());
+ }
+ void dump() const
+ {
+ printf("\"%s\",\n", rw.getStr(16).c_str());
+ printf("%d,\n", (int)rBitSize);
+ dump(v0);
+ dump(v1);
+ dump(B[0][0]); dump(B[0][1]); dump(B[1][0]); dump(B[1][1]);
+ dump(r);
+ }
+#endif
+ void init(const mpz_class& r, const mpz_class& z, bool isBLS12 = false, int curveType = -1)
+ {
+ if (usePrecomputedTable(curveType)) return;
+ bool b = Fp::squareRoot(rw, -3);
+ assert(b);
+ (void)b;
+ rw = -(rw + 1) / 2;
+ this->r = r;
+ rBitSize = gmp::getBitSize(r);
+ rBitSize = (rBitSize + fp::UnitBitSize - 1) & ~(fp::UnitBitSize - 1);// a little better size
+ if (isBLS12) {
+ /*
+ BLS12
+ L = z^4
+ (-z^2+1) + L = 0
+ 1 + z^2 L = 0
+ */
+ B[0][0] = -z * z + 1;
+ B[0][1] = 1;
+ B[1][0] = 1;
+ B[1][1] = z * z;
+ } else {
+ /*
+ BN
+ L = 36z^4 - 1
+ (6z^2+2z) - (2z+1) L = 0
+ (-2z-1) - (6z^2+4z+1)L = 0
+ */
+ B[0][0] = 6 * z * z + 2 * z;
+ B[0][1] = -2 * z - 1;
+ B[1][0] = -2 * z - 1;
+ B[1][1] = -6 * z * z - 4 * z - 1;
+ }
+ // [v0 v1] = [r 0] * B^(-1)
+ v0 = ((-B[1][1]) << rBitSize) / r;
+ v1 = ((B[1][0]) << rBitSize) / r;
+ }
+ /*
+ L = lambda = p^4
+ L (x, y) = (rw x, y)
+ */
+ void mulLambda(G1& Q, const G1& P) const
+ {
+ Fp::mul(Q.x, P.x, rw);
+ Q.y = P.y;
+ Q.z = P.z;
+ }
+ /*
+ x = a + b * lambda mod r
+ */
+ void split(mpz_class& a, mpz_class& b, const mpz_class& x) const
+ {
+ mpz_class t;
+ t = (x * v0) >> rBitSize;
+ b = (x * v1) >> rBitSize;
+ a = x - (t * B[0][0] + b * B[1][0]);
+ b = - (t * B[0][1] + b * B[1][1]);
+ }
+ void mul(G1& Q, const G1& P, mpz_class x, bool constTime = false) const
+ {
+ typedef mcl::fp::Unit Unit;
+ const size_t maxUnit = 512 / 2 / mcl::fp::UnitBitSize;
+ const int splitN = 2;
+ mpz_class u[splitN];
+ G1 in[splitN];
+ G1 tbl[4];
+ int bitTbl[splitN]; // bit size of u[i]
+ Unit w[splitN][maxUnit]; // unit array of u[i]
+ int maxBit = 0; // max bit of u[i]
+ int maxN = 0;
+ int remainBit = 0;
+
+ x %= r;
+ if (x == 0) {
+ Q.clear();
+ if (constTime) goto DummyLoop;
+ return;
+ }
+ if (x < 0) {
+ x += r;
+ }
+ split(u[0], u[1], x);
+ in[0] = P;
+ mulLambda(in[1], in[0]);
+ for (int i = 0; i < splitN; i++) {
+ if (u[i] < 0) {
+ u[i] = -u[i];
+ G1::neg(in[i], in[i]);
+ }
+ in[i].normalize();
+ }
+#if 0
+ G1::mulGeneric(in[0], in[0], u[0]);
+ G1::mulGeneric(in[1], in[1], u[1]);
+ G1::add(Q, in[0], in[1]);
+ return;
+#else
+ tbl[0] = in[0]; // dummy
+ tbl[1] = in[0];
+ tbl[2] = in[1];
+ G1::add(tbl[3], in[0], in[1]);
+ tbl[3].normalize();
+ for (int i = 0; i < splitN; i++) {
+ bool b;
+ mcl::gmp::getArray(&b, w[i], maxUnit, u[i]);
+ assert(b);
+ bitTbl[i] = (int)mcl::gmp::getBitSize(u[i]);
+ maxBit = fp::max_(maxBit, bitTbl[i]);
+ }
+ assert(maxBit > 0);
+ maxBit--;
+ /*
+ maxBit = maxN * UnitBitSize + remainBit
+ 0 < remainBit <= UnitBitSize
+ */
+ maxN = maxBit / mcl::fp::UnitBitSize;
+ remainBit = maxBit % mcl::fp::UnitBitSize;
+ remainBit++;
+ Q.clear();
+ for (int i = maxN; i >= 0; i--) {
+ for (int j = remainBit - 1; j >= 0; j--) {
+ G1::dbl(Q, Q);
+ uint32_t b0 = (w[0][i] >> j) & 1;
+ uint32_t b1 = (w[1][i] >> j) & 1;
+ uint32_t c = b1 * 2 + b0;
+ if (c == 0) {
+ if (constTime) tbl[0] += tbl[1];
+ } else {
+ Q += tbl[c];
+ }
+ }
+ remainBit = (int)mcl::fp::UnitBitSize;
+ }
+#endif
+ DummyLoop:
+ if (!constTime) return;
+ const int limitBit = (int)rBitSize / splitN;
+ G1 D = tbl[0];
+ for (int i = maxBit + 1; i < limitBit; i++) {
+ G1::dbl(D, D);
+ D += tbl[0];
+ }
+ }
+};
+
+/*
+ GLV method for G2 and GT on BN/BLS12
+*/
+struct GLV2 {
+ size_t rBitSize;
+ mpz_class B[4][4];
+ mpz_class r;
+ mpz_class v[4];
+ mpz_class z;
+ mpz_class abs_z;
+ bool isBLS12;
+ GLV2() : rBitSize(0), isBLS12(false) {}
+ void init(const mpz_class& r, const mpz_class& z, bool isBLS12 = false)
+ {
+ this->r = r;
+ this->z = z;
+ this->abs_z = z < 0 ? -z : z;
+ this->isBLS12 = isBLS12;
+ rBitSize = mcl::gmp::getBitSize(r);
+ rBitSize = (rBitSize + mcl::fp::UnitBitSize - 1) & ~(mcl::fp::UnitBitSize - 1);// a little better size
+ mpz_class z2p1 = z * 2 + 1;
+ B[0][0] = z + 1;
+ B[0][1] = z;
+ B[0][2] = z;
+ B[0][3] = -2 * z;
+ B[1][0] = z2p1;
+ B[1][1] = -z;
+ B[1][2] = -(z + 1);
+ B[1][3] = -z;
+ B[2][0] = 2 * z;
+ B[2][1] = z2p1;
+ B[2][2] = z2p1;
+ B[2][3] = z2p1;
+ B[3][0] = z - 1;
+ B[3][1] = 2 * z2p1;
+ B[3][2] = -2 * z + 1;
+ B[3][3] = z - 1;
+ /*
+ v[] = [r 0 0 0] * B^(-1) = [2z^2+3z+1, 12z^3+8z^2+z, 6z^3+4z^2+z, -(2z+1)]
+ */
+ const char *zBN254 = "-4080000000000001";
+ mpz_class t;
+ bool b;
+ mcl::gmp::setStr(&b, t, zBN254, 16);
+ assert(b);
+ (void)b;
+ if (z == t) {
+ static const char *vTblBN254[] = {
+ "e00a8e7f56e007e5b09fe7fdf43ba998",
+ "-152aff56a8054abf9da75db2da3d6885101e5fd3997d41cb1",
+ "-a957fab5402a55fced3aed96d1eb44295f40f136ee84e09b",
+ "-e00a8e7f56e007e929d7b2667ea6f29c",
+ };
+ for (int i = 0; i < 4; i++) {
+ mcl::gmp::setStr(&b, v[i], vTblBN254[i], 16);
+ assert(b);
+ (void)b;
+ }
+ } else {
+ v[0] = ((1 + z * (3 + z * 2)) << rBitSize) / r;
+ v[1] = ((z * (1 + z * (8 + z * 12))) << rBitSize) / r;
+ v[2] = ((z * (1 + z * (4 + z * 6))) << rBitSize) / r;
+ v[3] = -((z * (1 + z * 2)) << rBitSize) / r;
+ }
+ }
+ /*
+ u[] = [x, 0, 0, 0] - v[] * x * B
+ */
+ void split(mpz_class u[4], const mpz_class& x) const
+ {
+ if (isBLS12) {
+ /*
+ Frob(P) = zP
+ x = u[0] + u[1] z + u[2] z^2 + u[3] z^3
+ */
+ bool isNeg = false;
+ mpz_class t = x;
+ if (t < 0) {
+ t = -t;
+ isNeg = true;
+ }
+ for (int i = 0; i < 4; i++) {
+ // t = t / abs_z, u[i] = t % abs_z
+ mcl::gmp::divmod(t, u[i], t, abs_z);
+ if (((z < 0) && (i & 1)) ^ isNeg) {
+ u[i] = -u[i];
+ }
+ }
+ return;
+ }
+ // BN
+ mpz_class t[4];
+ for (int i = 0; i < 4; i++) {
+ t[i] = (x * v[i]) >> rBitSize;
+ }
+ for (int i = 0; i < 4; i++) {
+ u[i] = (i == 0) ? x : 0;
+ for (int j = 0; j < 4; j++) {
+ u[i] -= t[j] * B[j][i];
+ }
+ }
+ }
+ template<class T>
+ void mul(T& Q, const T& P, mpz_class x, bool constTime = false) const
+ {
+#if 0 // #ifndef NDEBUG
+ {
+ T R;
+ T::mulGeneric(R, P, r);
+ assert(R.isZero());
+ }
+#endif
+ typedef mcl::fp::Unit Unit;
+ const size_t maxUnit = 512 / 2 / mcl::fp::UnitBitSize;
+ const int splitN = 4;
+ mpz_class u[splitN];
+ T in[splitN];
+ T tbl[16];
+ int bitTbl[splitN]; // bit size of u[i]
+ Unit w[splitN][maxUnit]; // unit array of u[i]
+ int maxBit = 0; // max bit of u[i]
+ int maxN = 0;
+ int remainBit = 0;
+
+ x %= r;
+ if (x == 0) {
+ Q.clear();
+ if (constTime) goto DummyLoop;
+ return;
+ }
+ if (x < 0) {
+ x += r;
+ }
+ split(u, x);
+ in[0] = P;
+ Frobenius(in[1], in[0]);
+ Frobenius(in[2], in[1]);
+ Frobenius(in[3], in[2]);
+ for (int i = 0; i < splitN; i++) {
+ if (u[i] < 0) {
+ u[i] = -u[i];
+ T::neg(in[i], in[i]);
+ }
+// in[i].normalize(); // slow
+ }
+#if 0
+ for (int i = 0; i < splitN; i++) {
+ T::mulGeneric(in[i], in[i], u[i]);
+ }
+ T::add(Q, in[0], in[1]);
+ Q += in[2];
+ Q += in[3];
+ return;
+#else
+ tbl[0] = in[0];
+ for (size_t i = 1; i < 16; i++) {
+ tbl[i].clear();
+ if (i & 1) {
+ tbl[i] += in[0];
+ }
+ if (i & 2) {
+ tbl[i] += in[1];
+ }
+ if (i & 4) {
+ tbl[i] += in[2];
+ }
+ if (i & 8) {
+ tbl[i] += in[3];
+ }
+// tbl[i].normalize();
+ }
+ for (int i = 0; i < splitN; i++) {
+ bool b;
+ mcl::gmp::getArray(&b, w[i], maxUnit, u[i]);
+ assert(b);
+ bitTbl[i] = (int)mcl::gmp::getBitSize(u[i]);
+ maxBit = fp::max_(maxBit, bitTbl[i]);
+ }
+ maxBit--;
+ /*
+ maxBit = maxN * UnitBitSize + remainBit
+ 0 < remainBit <= UnitBitSize
+ */
+ maxN = maxBit / mcl::fp::UnitBitSize;
+ remainBit = maxBit % mcl::fp::UnitBitSize;
+ remainBit++;
+ Q.clear();
+ for (int i = maxN; i >= 0; i--) {
+ for (int j = remainBit - 1; j >= 0; j--) {
+ T::dbl(Q, Q);
+ uint32_t b0 = (w[0][i] >> j) & 1;
+ uint32_t b1 = (w[1][i] >> j) & 1;
+ uint32_t b2 = (w[2][i] >> j) & 1;
+ uint32_t b3 = (w[3][i] >> j) & 1;
+ uint32_t c = b3 * 8 + b2 * 4 + b1 * 2 + b0;
+ if (c == 0) {
+ if (constTime) tbl[0] += tbl[1];
+ } else {
+ Q += tbl[c];
+ }
+ }
+ remainBit = (int)mcl::fp::UnitBitSize;
+ }
+#endif
+ DummyLoop:
+ if (!constTime) return;
+ const int limitBit = (int)rBitSize / splitN;
+ T D = tbl[0];
+ for (int i = maxBit + 1; i < limitBit; i++) {
+ T::dbl(D, D);
+ D += tbl[0];
+ }
+ }
+ void pow(Fp12& z, const Fp12& x, mpz_class y, bool constTime = false) const
+ {
+ typedef GroupMtoA<Fp12> AG; // as additive group
+ AG& _z = static_cast<AG&>(z);
+ const AG& _x = static_cast<const AG&>(x);
+ mul(_z, _x, y, constTime);
+ }
+};
+
+struct Param {
+ CurveParam cp;
+ mpz_class z;
+ mpz_class abs_z;
+ bool isNegative;
+ bool isBLS12;
+ mpz_class p;
+ mpz_class r;
+ local::MapTo mapTo;
+ local::GLV1 glv1;
+ local::GLV2 glv2;
+ // for G2 Frobenius
+ Fp2 g2;
+ Fp2 g3;
+ /*
+ Dtype twist
+ (x', y') = phi(x, y) = (x/w^2, y/w^3)
+ y^2 = x^3 + b
+ => (y'w^3)^2 = (x'w^2)^3 + b
+ => y'^2 = x'^3 + b / w^6 ; w^6 = xi
+ => y'^2 = x'^3 + twist_b;
+ */
+ Fp2 twist_b;
+ local::TwistBtype twist_b_type;
+/*
+ mpz_class exp_c0;
+ mpz_class exp_c1;
+ mpz_class exp_c2;
+ mpz_class exp_c3;
+*/
+
+ // Loop parameter for the Miller loop part of opt. ate pairing.
+ local::SignVec siTbl;
+ size_t precomputedQcoeffSize;
+ bool useNAF;
+ local::SignVec zReplTbl;
+
+ // for initG1only
+ G1 basePoint;
+
+ void init(bool *pb, const mcl::CurveParam& cp, fp::Mode mode)
+ {
+ this->cp = cp;
+ isBLS12 = cp.curveType == MCL_BLS12_381;
+ gmp::setStr(pb, z, cp.z);
+ if (!*pb) return;
+ isNegative = z < 0;
+ if (isNegative) {
+ abs_z = -z;
+ } else {
+ abs_z = z;
+ }
+ if (isBLS12) {
+ mpz_class z2 = z * z;
+ mpz_class z4 = z2 * z2;
+ r = z4 - z2 + 1;
+ p = z - 1;
+ p = p * p * r / 3 + z;
+ } else {
+ const int pCoff[] = { 1, 6, 24, 36, 36 };
+ const int rCoff[] = { 1, 6, 18, 36, 36 };
+ p = local::evalPoly(z, pCoff);
+ assert((p % 6) == 1);
+ r = local::evalPoly(z, rCoff);
+ }
+ Fr::init(pb, r, mode);
+ if (!*pb) return;
+ Fp::init(pb, cp.xi_a, p, mode);
+ if (!*pb) return;
+ Fp2::init();
+ const Fp2 xi(cp.xi_a, 1);
+ g2 = Fp2::get_gTbl()[0];
+ g3 = Fp2::get_gTbl()[3];
+ if (cp.isMtype) {
+ Fp2::inv(g2, g2);
+ Fp2::inv(g3, g3);
+ }
+ if (cp.isMtype) {
+ twist_b = Fp2(cp.b) * xi;
+ } else {
+ if (cp.b == 2 && cp.xi_a == 1) {
+ twist_b = Fp2(1, -1); // shortcut
+ } else {
+ twist_b = Fp2(cp.b) / xi;
+ }
+ }
+ if (twist_b == Fp2(1, -1)) {
+ twist_b_type = tb_1m1i;
+ } else if (twist_b == Fp2(1, -2)) {
+ twist_b_type = tb_1m2i;
+ } else {
+ twist_b_type = tb_generic;
+ }
+ G1::init(0, cp.b, mcl::ec::Proj);
+ if (isBLS12) {
+ G1::setOrder(r);
+ }
+ G2::init(0, twist_b, mcl::ec::Proj);
+ G2::setOrder(r);
+
+ const mpz_class largest_c = isBLS12 ? abs_z : gmp::abs(z * 6 + 2);
+ useNAF = gmp::getNAF(siTbl, largest_c);
+ precomputedQcoeffSize = local::getPrecomputeQcoeffSize(siTbl);
+ gmp::getNAF(zReplTbl, gmp::abs(z));
+/*
+ if (isBLS12) {
+ mpz_class z2 = z * z;
+ mpz_class z3 = z2 * z;
+ mpz_class z4 = z3 * z;
+ mpz_class z5 = z4 * z;
+ exp_c0 = z5 - 2 * z4 + 2 * z2 - z + 3;
+ exp_c1 = z4 - 2 * z3 + 2 * z - 1;
+ exp_c2 = z3 - 2 * z2 + z;
+ exp_c3 = z2 - 2 * z + 1;
+ } else {
+ exp_c0 = -2 + z * (-18 + z * (-30 - 36 * z));
+ exp_c1 = 1 + z * (-12 + z * (-18 - 36 * z));
+ exp_c2 = 6 * z * z + 1;
+ }
+*/
+ if (isBLS12) {
+ mapTo.init(0, z, cp.curveType);
+ } else {
+ mapTo.init(2 * p - r, z, cp.curveType);
+ }
+ glv1.init(r, z, isBLS12, cp.curveType);
+ glv2.init(r, z, isBLS12);
+ basePoint.clear();
+ *pb = true;
+ }
+ void initG1only(bool *pb, const mcl::EcParam& para)
+ {
+ Fp::init(pb, para.p);
+ if (!*pb) return;
+ Fr::init(pb, para.n);
+ if (!*pb) return;
+ G1::init(pb, para.a, para.b);
+ if (!*pb) return;
+ G1::setOrder(Fr::getOp().mp);
+ mapTo.init(0, 0, para.curveType);
+ Fp x0, y0;
+ x0.setStr(pb, para.gx);
+ if (!*pb) return;
+ y0.setStr(pb, para.gy);
+ basePoint.set(pb, x0, y0);
+ }
+#ifndef CYBOZU_DONT_USE_EXCEPTION
+ void init(const mcl::CurveParam& cp, fp::Mode mode)
+ {
+ bool b;
+ init(&b, cp, mode);
+ if (!b) throw cybozu::Exception("Param:init");
+ }
+#endif
+};
+
+template<size_t dummyImpl = 0>
+struct StaticVar {
+ static local::Param param;
+};
+
+template<size_t dummyImpl>
+local::Param StaticVar<dummyImpl>::param;
+
+} // mcl::bn::local
+
+namespace BN {
+
+static const local::Param& param = local::StaticVar<>::param;
+
+} // mcl::bn::BN
+
+namespace local {
+
+inline void mulArrayGLV1(G1& z, const G1& x, const mcl::fp::Unit *y, size_t yn, bool isNegative, bool constTime)
+{
+ mpz_class s;
+ bool b;
+ mcl::gmp::setArray(&b, s, y, yn);
+ assert(b);
+ if (isNegative) s = -s;
+ BN::param.glv1.mul(z, x, s, constTime);
+}
+inline void mulArrayGLV2(G2& z, const G2& x, const mcl::fp::Unit *y, size_t yn, bool isNegative, bool constTime)
+{
+ mpz_class s;
+ bool b;
+ mcl::gmp::setArray(&b, s, y, yn);
+ assert(b);
+ if (isNegative) s = -s;
+ BN::param.glv2.mul(z, x, s, constTime);
+}
+inline void powArrayGLV2(Fp12& z, const Fp12& x, const mcl::fp::Unit *y, size_t yn, bool isNegative, bool constTime)
+{
+ mpz_class s;
+ bool b;
+ mcl::gmp::setArray(&b, s, y, yn);
+ assert(b);
+ if (isNegative) s = -s;
+ BN::param.glv2.pow(z, x, s, constTime);
+}
+
+/*
+ Faster Squaring in the Cyclotomic Subgroup of Sixth Degree Extensions
+ Robert Granger, Michael Scott
+*/
+inline void sqrFp4(Fp2& z0, Fp2& z1, const Fp2& x0, const Fp2& x1)
+{
+#if 1
+ Fp2Dbl T0, T1, T2;
+ Fp2Dbl::sqrPre(T0, x0);
+ Fp2Dbl::sqrPre(T1, x1);
+ Fp2Dbl::mul_xi(T2, T1);
+ Fp2Dbl::add(T2, T2, T0);
+ Fp2::add(z1, x0, x1);
+ Fp2Dbl::mod(z0, T2);
+ Fp2Dbl::sqrPre(T2, z1);
+ Fp2Dbl::sub(T2, T2, T0);
+ Fp2Dbl::sub(T2, T2, T1);
+ Fp2Dbl::mod(z1, T2);
+#else
+ Fp2 t0, t1, t2;
+ Fp2::sqr(t0, x0);
+ Fp2::sqr(t1, x1);
+ Fp2::mul_xi(z0, t1);
+ z0 += t0;
+ Fp2::add(z1, x0, x1);
+ Fp2::sqr(z1, z1);
+ z1 -= t0;
+ z1 -= t1;
+#endif
+}
+
+inline void fasterSqr(Fp12& y, const Fp12& x)
+{
+#if 0
+ Fp12::sqr(y, x);
+#else
+ const Fp2& x0(x.a.a);
+ const Fp2& x4(x.a.b);
+ const Fp2& x3(x.a.c);
+ const Fp2& x2(x.b.a);
+ const Fp2& x1(x.b.b);
+ const Fp2& x5(x.b.c);
+ Fp2& y0(y.a.a);
+ Fp2& y4(y.a.b);
+ Fp2& y3(y.a.c);
+ Fp2& y2(y.b.a);
+ Fp2& y1(y.b.b);
+ Fp2& y5(y.b.c);
+ Fp2 t0, t1;
+ sqrFp4(t0, t1, x0, x1);
+ Fp2::sub(y0, t0, x0);
+ y0 += y0;
+ y0 += t0;
+ Fp2::add(y1, t1, x1);
+ y1 += y1;
+ y1 += t1;
+ Fp2 t2, t3;
+ sqrFp4(t0, t1, x2, x3);
+ sqrFp4(t2, t3, x4, x5);
+ Fp2::sub(y4, t0, x4);
+ y4 += y4;
+ y4 += t0;
+ Fp2::add(y5, t1, x5);
+ y5 += y5;
+ y5 += t1;
+ Fp2::mul_xi(t0, t3);
+ Fp2::add(y2, t0, x2);
+ y2 += y2;
+ y2 += t0;
+ Fp2::sub(y3, t2, x3);
+ y3 += y3;
+ y3 += t2;
+#endif
+}
+
+/*
+ y = x^z if z > 0
+ = unitaryInv(x^(-z)) if z < 0
+*/
+inline void pow_z(Fp12& y, const Fp12& x)
+{
+#if 1
+ if (BN::param.cp.curveType == MCL_BN254) {
+ Compress::fixed_power(y, x);
+ } else {
+ Fp12 orgX = x;
+ y = x;
+ Fp12 conj;
+ conj.a = x.a;
+ Fp6::neg(conj.b, x.b);
+ for (size_t i = 1; i < BN::param.zReplTbl.size(); i++) {
+ fasterSqr(y, y);
+ if (BN::param.zReplTbl[i] > 0) {
+ y *= orgX;
+ } else if (BN::param.zReplTbl[i] < 0) {
+ y *= conj;
+ }
+ }
+ }
+#else
+ Fp12::pow(y, x, param.abs_z);
+#endif
+ if (BN::param.isNegative) {
+ Fp12::unitaryInv(y, y);
+ }
+}
+inline void mul_twist_b(Fp2& y, const Fp2& x)
+{
+ switch (BN::param.twist_b_type) {
+ case local::tb_1m1i:
+ /*
+ b / xi = 1 - 1i
+ (a + bi)(1 - 1i) = (a + b) + (b - a)i
+ */
+ {
+ Fp t;
+ Fp::add(t, x.a, x.b);
+ Fp::sub(y.b, x.b, x.a);
+ y.a = t;
+ }
+ return;
+ case local::tb_1m2i:
+ /*
+ b / xi = 1 - 2i
+ (a + bi)(1 - 2i) = (a + 2b) + (b - 2a)i
+ */
+ {
+ Fp t;
+ Fp::sub(t, x.b, x.a);
+ t -= x.a;
+ Fp::add(y.a, x.a, x.b);
+ y.a += x.b;
+ y.b = t;
+ }
+ return;
+ case local::tb_generic:
+ Fp2::mul(y, x, BN::param.twist_b);
+ return;
+ }
+}
+
+inline void dblLineWithoutP(Fp6& l, G2& Q)
+{
+ Fp2 t0, t1, t2, t3, t4, t5;
+ Fp2Dbl T0, T1;
+ Fp2::sqr(t0, Q.z);
+ Fp2::mul(t4, Q.x, Q.y);
+ Fp2::sqr(t1, Q.y);
+ Fp2::add(t3, t0, t0);
+ Fp2::divBy2(t4, t4);
+ Fp2::add(t5, t0, t1);
+ t0 += t3;
+ mul_twist_b(t2, t0);
+ Fp2::sqr(t0, Q.x);
+ Fp2::add(t3, t2, t2);
+ t3 += t2;
+ Fp2::sub(Q.x, t1, t3);
+ t3 += t1;
+ Q.x *= t4;
+ Fp2::divBy2(t3, t3);
+ Fp2Dbl::sqrPre(T0, t3);
+ Fp2Dbl::sqrPre(T1, t2);
+ Fp2Dbl::sub(T0, T0, T1);
+ Fp2Dbl::add(T1, T1, T1);
+ Fp2Dbl::sub(T0, T0, T1);
+ Fp2::add(t3, Q.y, Q.z);
+ Fp2Dbl::mod(Q.y, T0);
+ Fp2::sqr(t3, t3);
+ t3 -= t5;
+ Fp2::mul(Q.z, t1, t3);
+ Fp2::sub(l.a, t2, t1);
+ l.c = t0;
+ l.b = t3;
+}
+inline void addLineWithoutP(Fp6& l, G2& R, const G2& Q)
+{
+ Fp2 t1, t2, t3, t4;
+ Fp2Dbl T1, T2;
+ Fp2::mul(t1, R.z, Q.x);
+ Fp2::mul(t2, R.z, Q.y);
+ Fp2::sub(t1, R.x, t1);
+ Fp2::sub(t2, R.y, t2);
+ Fp2::sqr(t3, t1);
+ Fp2::mul(R.x, t3, R.x);
+ Fp2::sqr(t4, t2);
+ t3 *= t1;
+ t4 *= R.z;
+ t4 += t3;
+ t4 -= R.x;
+ t4 -= R.x;
+ R.x -= t4;
+ Fp2Dbl::mulPre(T1, t2, R.x);
+ Fp2Dbl::mulPre(T2, t3, R.y);
+ Fp2Dbl::sub(T2, T1, T2);
+ Fp2Dbl::mod(R.y, T2);
+ Fp2::mul(R.x, t1, t4);
+ Fp2::mul(R.z, t3, R.z);
+ Fp2::neg(l.c, t2);
+ Fp2Dbl::mulPre(T1, t2, Q.x);
+ Fp2Dbl::mulPre(T2, t1, Q.y);
+ Fp2Dbl::sub(T1, T1, T2);
+ l.b = t1;
+ Fp2Dbl::mod(l.a, T1);
+}
+inline void dblLine(Fp6& l, G2& Q, const G1& P)
+{
+ dblLineWithoutP(l, Q);
+ local::updateLine(l, P);
+}
+inline void addLine(Fp6& l, G2& R, const G2& Q, const G1& P)
+{
+ addLineWithoutP(l, R, Q);
+ local::updateLine(l, P);
+}
+inline void mulFp6cb_by_G1xy(Fp6& y, const Fp6& x, const G1& P)
+{
+ assert(P.isNormalized());
+ if (&y != &x) y.a = x.a;
+ Fp2::mulFp(y.c, x.c, P.x);
+ Fp2::mulFp(y.b, x.b, P.y);
+}
+
+/*
+ x = a + bv + cv^2
+ y = (y0, y4, y2) -> (y0, 0, y2, 0, y4, 0)
+ z = xy = (a + bv + cv^2)(d + ev)
+ = (ad + ce xi) + ((a + b)(d + e) - ad - be)v + (be + cd)v^2
+*/
+inline void Fp6mul_01(Fp6& z, const Fp6& x, const Fp2& d, const Fp2& e)
+{
+ const Fp2& a = x.a;
+ const Fp2& b = x.b;
+ const Fp2& c = x.c;
+ Fp2 t0, t1;
+ Fp2Dbl AD, CE, BE, CD, T;
+ Fp2Dbl::mulPre(AD, a, d);
+ Fp2Dbl::mulPre(CE, c, e);
+ Fp2Dbl::mulPre(BE, b, e);
+ Fp2Dbl::mulPre(CD, c, d);
+ Fp2::add(t0, a, b);
+ Fp2::add(t1, d, e);
+ Fp2Dbl::mulPre(T, t0, t1);
+ T -= AD;
+ T -= BE;
+ Fp2Dbl::mod(z.b, T);
+ Fp2Dbl::mul_xi(CE, CE);
+ AD += CE;
+ Fp2Dbl::mod(z.a, AD);
+ BE += CD;
+ Fp2Dbl::mod(z.c, BE);
+}
+/*
+ input
+ z = (z0 + z1v + z2v^2) + (z3 + z4v + z5v^2)w = Z0 + Z1w
+ 0 3 4
+ x = (a, b, c) -> (b, 0, 0, c, a, 0) = X0 + X1w
+ X0 = b = (b, 0, 0)
+ X1 = c + av = (c, a, 0)
+ w^2 = v, v^3 = xi
+ output
+ z <- zx = (Z0X0 + Z1X1v) + ((Z0 + Z1)(X0 + X1) - Z0X0 - Z1X1)w
+ Z0X0 = Z0 b
+ Z1X1 = Z1 (c, a, 0)
+ (Z0 + Z1)(X0 + X1) = (Z0 + Z1) (b + c, a, 0)
+*/
+inline void mul_403(Fp12& z, const Fp6& x)
+{
+ const Fp2& a = x.a;
+ const Fp2& b = x.b;
+ const Fp2& c = x.c;
+#if 1
+ Fp6& z0 = z.a;
+ Fp6& z1 = z.b;
+ Fp6 z0x0, z1x1, t0;
+ Fp2 t1;
+ Fp2::add(t1, x.b, c);
+ Fp6::add(t0, z0, z1);
+ Fp2::mul(z0x0.a, z0.a, b);
+ Fp2::mul(z0x0.b, z0.b, b);
+ Fp2::mul(z0x0.c, z0.c, b);
+ Fp6mul_01(z1x1, z1, c, a);
+ Fp6mul_01(t0, t0, t1, a);
+ Fp6::sub(z.b, t0, z0x0);
+ z.b -= z1x1;
+ // a + bv + cv^2 = cxi + av + bv^2
+ Fp2::mul_xi(z1x1.c, z1x1.c);
+ Fp2::add(z.a.a, z0x0.a, z1x1.c);
+ Fp2::add(z.a.b, z0x0.b, z1x1.a);
+ Fp2::add(z.a.c, z0x0.c, z1x1.b);
+#else
+ Fp2& z0 = z.a.a;
+ Fp2& z1 = z.a.b;
+ Fp2& z2 = z.a.c;
+ Fp2& z3 = z.b.a;
+ Fp2& z4 = z.b.b;
+ Fp2& z5 = z.b.c;
+ Fp2Dbl Z0B, Z1B, Z2B, Z3C, Z4C, Z5C;
+ Fp2Dbl T0, T1, T2, T3, T4, T5;
+ Fp2 bc, t;
+ Fp2::addPre(bc, b, c);
+ Fp2::addPre(t, z5, z2);
+ Fp2Dbl::mulPre(T5, t, bc);
+ Fp2Dbl::mulPre(Z5C, z5, c);
+ Fp2Dbl::mulPre(Z2B, z2, b);
+ Fp2Dbl::sub(T5, T5, Z5C);
+ Fp2Dbl::sub(T5, T5, Z2B);
+ Fp2Dbl::mulPre(T0, z1, a);
+ T5 += T0;
+
+ Fp2::addPre(t, z4, z1);
+ Fp2Dbl::mulPre(T4, t, bc);
+ Fp2Dbl::mulPre(Z4C, z4, c);
+ Fp2Dbl::mulPre(Z1B, z1, b);
+ Fp2Dbl::sub(T4, T4, Z4C);
+ Fp2Dbl::sub(T4, T4, Z1B);
+ Fp2Dbl::mulPre(T0, z0, a);
+ T4 += T0;
+
+ Fp2::addPre(t, z3, z0);
+ Fp2Dbl::mulPre(T3, t, bc);
+ Fp2Dbl::mulPre(Z3C, z3, c);
+ Fp2Dbl::mulPre(Z0B, z0, b);
+ Fp2Dbl::sub(T3, T3, Z3C);
+ Fp2Dbl::sub(T3, T3, Z0B);
+ Fp2::mul_xi(t, z2);
+ Fp2Dbl::mulPre(T0, t, a);
+ T3 += T0;
+
+ Fp2Dbl::mulPre(T2, z3, a);
+ T2 += Z2B;
+ T2 += Z4C;
+
+ Fp2::mul_xi(t, z5);
+ Fp2Dbl::mulPre(T1, t, a);
+ T1 += Z1B;
+ T1 += Z3C;
+
+ Fp2Dbl::mulPre(T0, z4, a);
+ T0 += Z5C;
+ Fp2Dbl::mul_xi(T0, T0);
+ T0 += Z0B;
+
+ Fp2Dbl::mod(z0, T0);
+ Fp2Dbl::mod(z1, T1);
+ Fp2Dbl::mod(z2, T2);
+ Fp2Dbl::mod(z3, T3);
+ Fp2Dbl::mod(z4, T4);
+ Fp2Dbl::mod(z5, T5);
+#endif
+}
+/*
+ input
+ z = (z0 + z1v + z2v^2) + (z3 + z4v + z5v^2)w = Z0 + Z1w
+ 0 1 4
+ x = (a, b, c) -> (a, c, 0, 0, b, 0) = X0 + X1w
+ X0 = (a, c, 0)
+ X1 = (0, b, 0)
+ w^2 = v, v^3 = xi
+ output
+ z <- zx = (Z0X0 + Z1X1v) + ((Z0 + Z1)(X0 + X1) - Z0X0 - Z1X1)w
+ Z0X0 = Z0 (a, c, 0)
+ Z1X1 = Z1 (0, b, 0) = Z1 bv
+ (Z0 + Z1)(X0 + X1) = (Z0 + Z1) (a, b + c, 0)
+
+ (a + bv + cv^2)v = c xi + av + bv^2
+*/
+inline void mul_041(Fp12& z, const Fp6& x)
+{
+ const Fp2& a = x.a;
+ const Fp2& b = x.b;
+ const Fp2& c = x.c;
+ Fp6& z0 = z.a;
+ Fp6& z1 = z.b;
+ Fp6 z0x0, z1x1, t0;
+ Fp2 t1;
+ Fp2::mul(z1x1.a, z1.c, b);
+ Fp2::mul_xi(z1x1.a, z1x1.a);
+ Fp2::mul(z1x1.b, z1.a, b);
+ Fp2::mul(z1x1.c, z1.b, b);
+ Fp2::add(t1, x.b, c);
+ Fp6::add(t0, z0, z1);
+ Fp6mul_01(z0x0, z0, a, c);
+ Fp6mul_01(t0, t0, a, t1);
+ Fp6::sub(z.b, t0, z0x0);
+ z.b -= z1x1;
+ // a + bv + cv^2 = cxi + av + bv^2
+ Fp2::mul_xi(z1x1.c, z1x1.c);
+ Fp2::add(z.a.a, z0x0.a, z1x1.c);
+ Fp2::add(z.a.b, z0x0.b, z1x1.a);
+ Fp2::add(z.a.c, z0x0.c, z1x1.b);
+}
+inline void mulSparse(Fp12& z, const Fp6& x)
+{
+ if (BN::param.cp.isMtype) {
+ mul_041(z, x);
+ } else {
+ mul_403(z, x);
+ }
+}
+inline void convertFp6toFp12(Fp12& y, const Fp6& x)
+{
+ if (BN::param.cp.isMtype) {
+ // (a, b, c) -> (a, c, 0, 0, b, 0)
+ y.a.a = x.a;
+ y.b.b = x.b;
+ y.a.b = x.c;
+ y.a.c.clear();
+ y.b.a.clear();
+ y.b.c.clear();
+ } else {
+ // (a, b, c) -> (b, 0, 0, c, a, 0)
+ y.b.b = x.a;
+ y.a.a = x.b;
+ y.b.a = x.c;
+ y.a.b.clear();
+ y.a.c.clear();
+ y.b.c.clear();
+ }
+}
+inline void mulSparse2(Fp12& z, const Fp6& x, const Fp6& y)
+{
+ convertFp6toFp12(z, x);
+ mulSparse(z, y);
+}
+inline void mapToCyclotomic(Fp12& y, const Fp12& x)
+{
+ Fp12 z;
+ Fp12::Frobenius2(z, x); // z = x^(p^2)
+ z *= x; // x^(p^2 + 1)
+ Fp12::inv(y, z);
+ Fp6::neg(z.b, z.b); // z^(p^6) = conjugate of z
+ y *= z;
+}
+/*
+ Implementing Pairings at the 192-bit Security Level
+ D.F.Aranha, L.F.Castaneda, E.Knapp, A.Menezes, F.R.Henriquez
+ Section 4
+*/
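+/*
+ illustrative summary, collected from the step-by-step comments in expHardPartBLS12 below:
+ the addition chain computes y = x^(c0 + c1 p + c2 p^2 + c3 p^3) where
+ c0 = z^5 - 2z^4 + 2z^2 - z + 3
+ c1 = z^4 - 2z^3 - 1
+ c2 = z^3 - 2z^2 + z
+ c3 = z^2 - 2z + 1
+ i.e. a fixed multiple (3x) of the hard part (p^4 - p^2 + 1)/r, cf. the disabled reference branch.
+*/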
+inline void expHardPartBLS12(Fp12& y, const Fp12& x)
+{
+#if 0
+ const mpz_class& p = param.p;
+ mpz_class p2 = p * p;
+ mpz_class p4 = p2 * p2;
+ Fp12::pow(y, x, (p4 - p2 + 1) / param.r * 3);
+ return;
+#endif
+#if 1
+ Fp12 a0, a1, a2, a3, a4, a5, a6, a7;
+ Fp12::unitaryInv(a0, x); // a0 = x^-1
+ fasterSqr(a1, a0); // x^-2
+ pow_z(a2, x); // x^z
+ fasterSqr(a3, a2); // x^2z
+ a1 *= a2; // a1 = x^(z-2)
+ pow_z(a7, a1); // a7 = x^(z^2-2z)
+ pow_z(a4, a7); // a4 = x^(z^3-2z^2)
+ pow_z(a5, a4); // a5 = x^(z^4-2z^3)
+ a3 *= a5; // a3 = x^(z^4-2z^3+2z)
+ pow_z(a6, a3); // a6 = x^(z^5-2z^4+2z^2)
+
+ Fp12::unitaryInv(a1, a1); // x^(2-z)
+ a1 *= a6; // x^(z^5-2z^4+2z^2-z+2)
+ a1 *= x; // x^(z^5-2z^4+2z^2-z+3) = x^c0
+ a3 *= a0; // x^(z^4-2z^3-1) = x^c1
+ Fp12::Frobenius(a3, a3); // x^(c1 p)
+ a1 *= a3; // x^(c0 + c1 p)
+ a4 *= a2; // x^(z^3-2z^2+z) = x^c2
+ Fp12::Frobenius2(a4, a4); // x^(c2 p^2)
+ a1 *= a4; // x^(c0 + c1 p + c2 p^2)
+ a7 *= x; // x^(z^2-2z+1) = x^c3
+ Fp12::Frobenius3(y, a7);
+ y *= a1;
+#else
+ Fp12 t1, t2, t3;
+ Fp12::Frobenius(t1, x);
+ Fp12::Frobenius(t2, t1);
+ Fp12::Frobenius(t3, t2);
+ Fp12::pow(t1, t1, param.exp_c1);
+ Fp12::pow(t2, t2, param.exp_c2);
+ Fp12::pow(t3, t3, param.exp_c3);
+ Fp12::pow(y, x, param.exp_c0);
+ y *= t1;
+ y *= t2;
+ y *= t3;
+#endif
+}
+/*
+ Faster Hashing to G2
+ Laura Fuentes-Castaneda, Edward Knapp, Francisco Rodriguez-Henriquez
+ section 4.1
+ y = x^(d 2z(6z^2 + 3z + 1)) where
+ p = p(z) = 36z^4 + 36z^3 + 24z^2 + 6z + 1
+ r = r(z) = 36z^4 + 36z^3 + 18z^2 + 6z + 1
+ d = (p^4 - p^2 + 1) / r
+ d1 = d 2z(6z^2 + 3z + 1)
+ = c0 + c1 p + c2 p^2 + c3 p^3
+
+ c0 = 1 + 6z + 12z^2 + 12z^3
+ c1 = 4z + 6z^2 + 12z^3
+ c2 = 6z + 6z^2 + 12z^3
+ c3 = -1 + 4z + 6z^2 + 12z^3
+ x -> x^z -> x^2z -> x^4z -> x^6z -> x^(6z^2) -> x^(12z^2) -> x^(12z^3)
+ a = x^(6z) x^(6z^2) x^(12z^3)
+ b = a / (x^2z)
+ x^d1 = (a x^(6z^2) x) b^p a^(p^2) (b / x)^(p^3)
+*/
+inline void expHardPartBN(Fp12& y, const Fp12& x)
+{
+#if 0
+ const mpz_class& p = param.p;
+ mpz_class p2 = p * p;
+ mpz_class p4 = p2 * p2;
+ Fp12::pow(y, x, (p4 - p2 + 1) / param.r);
+ return;
+#endif
+#if 1
+ Fp12 a, b;
+ Fp12 a2, a3;
+ pow_z(b, x); // x^z
+ fasterSqr(b, b); // x^2z
+ fasterSqr(a, b); // x^4z
+ a *= b; // x^6z
+ pow_z(a2, a); // x^(6z^2)
+ a *= a2;
+ fasterSqr(a3, a2); // x^(12z^2)
+ pow_z(a3, a3); // x^(12z^3)
+ a *= a3;
+ Fp12::unitaryInv(b, b);
+ b *= a;
+ a2 *= a;
+ Fp12::Frobenius2(a, a);
+ a *= a2;
+ a *= x;
+ Fp12::unitaryInv(y, x);
+ y *= b;
+ Fp12::Frobenius(b, b);
+ a *= b;
+ Fp12::Frobenius3(y, y);
+ y *= a;
+#else
+ Fp12 t1, t2, t3;
+ Fp12::Frobenius(t1, x);
+ Fp12::Frobenius(t2, t1);
+ Fp12::Frobenius(t3, t2);
+ Fp12::pow(t1, t1, param.exp_c1);
+ Fp12::pow(t2, t2, param.exp_c2);
+ Fp12::pow(y, x, param.exp_c0);
+ y *= t1;
+ y *= t2;
+ y *= t3;
+#endif
+}
+/*
+ remark : the returned value is NOT on the curve; it is (3x, -y, 1), a precomputed helper for the line-evaluation functions
+*/
+inline G1 makeAdjP(const G1& P)
+{
+ G1 adjP;
+ Fp::add(adjP.x, P.x, P.x);
+ adjP.x += P.x;
+ Fp::neg(adjP.y, P.y);
+ adjP.z = 1;
+ return adjP;
+}
+
+} // mcl::bn::local
+
+/*
+ y = x^((p^12 - 1) / r)
+ (p^12 - 1) / r = (p^2 + 1) (p^6 - 1) (p^4 - p^2 + 1)/r
+ (a + bw)^(p^6) = a - bw in Fp12
+ (p^4 - p^2 + 1)/r = c0 + c1 p + c2 p^2 + p^3
+*/
+inline void finalExp(Fp12& y, const Fp12& x)
+{
+#if 1
+ mapToCyclotomic(y, x);
+#else
+ const mpz_class& p = param.p;
+ mpz_class p2 = p * p;
+ mpz_class p4 = p2 * p2;
+ Fp12::pow(y, x, p2 + 1);
+ Fp12::pow(y, y, p4 * p2 - 1);
+#endif
+ if (BN::param.isBLS12) {
+ expHardPartBLS12(y, y);
+ } else {
+ expHardPartBN(y, y);
+ }
+}
+inline void millerLoop(Fp12& f, const G1& P_, const G2& Q_)
+{
+ G1 P(P_);
+ G2 Q(Q_);
+ P.normalize();
+ Q.normalize();
+ if (Q.isZero()) {
+ f = 1;
+ return;
+ }
+ assert(BN::param.siTbl[1] == 1);
+ G2 T = Q;
+ G2 negQ;
+ if (BN::param.useNAF) {
+ G2::neg(negQ, Q);
+ }
+ Fp6 d, e, l;
+ d = e = l = 1;
+ G1 adjP = makeAdjP(P);
+ dblLine(d, T, adjP);
+ addLine(l, T, Q, P);
+ mulSparse2(f, d, l);
+ for (size_t i = 2; i < BN::param.siTbl.size(); i++) {
+ dblLine(l, T, adjP);
+ Fp12::sqr(f, f);
+ mulSparse(f, l);
+ if (BN::param.siTbl[i]) {
+ if (BN::param.siTbl[i] > 0) {
+ addLine(l, T, Q, P);
+ } else {
+ addLine(l, T, negQ, P);
+ }
+ mulSparse(f, l);
+ }
+ }
+ if (BN::param.z < 0) {
+ G2::neg(T, T);
+ Fp6::neg(f.b, f.b);
+ }
+ if (BN::param.isBLS12) return;
+ G2 Q1, Q2;
+ Frobenius(Q1, Q);
+ Frobenius(Q2, Q1);
+ G2::neg(Q2, Q2);
+ addLine(d, T, Q1, P);
+ addLine(e, T, Q2, P);
+ Fp12 ft;
+ mulSparse2(ft, d, e);
+ f *= ft;
+}
+inline void pairing(Fp12& f, const G1& P, const G2& Q)
+{
+ millerLoop(f, P, Q);
+ finalExp(f, f);
+}
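+/*
+ usage sketch (illustrative only; assumes initPairing() was called and a, b are mpz_class scalars):
+   Fp12 e1, e2;
+   G1 aP; G1::mul(aP, P, a);
+   G2 bQ; G2::mul(bQ, Q, b);
+   pairing(e1, aP, bQ);      // e(aP, bQ)
+   pairing(e2, P, Q);
+   Fp12::pow(e2, e2, a * b); // e(P, Q)^(ab) == e(aP, bQ) by bilinearity
+*/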
+/*
+ allocate param.precomputedQcoeffSize elements of Fp6 for Qcoeff
+*/
+inline void precomputeG2(Fp6 *Qcoeff, const G2& Q_)
+{
+ size_t idx = 0;
+ G2 Q(Q_);
+ Q.normalize();
+ if (Q.isZero()) {
+ for (size_t i = 0; i < BN::param.precomputedQcoeffSize; i++) {
+ Qcoeff[i] = 1;
+ }
+ return;
+ }
+ G2 T = Q;
+ G2 negQ;
+ if (BN::param.useNAF) {
+ G2::neg(negQ, Q);
+ }
+ assert(BN::param.siTbl[1] == 1);
+ dblLineWithoutP(Qcoeff[idx++], T);
+ addLineWithoutP(Qcoeff[idx++], T, Q);
+ for (size_t i = 2; i < BN::param.siTbl.size(); i++) {
+ dblLineWithoutP(Qcoeff[idx++], T);
+ if (BN::param.siTbl[i]) {
+ if (BN::param.siTbl[i] > 0) {
+ addLineWithoutP(Qcoeff[idx++], T, Q);
+ } else {
+ addLineWithoutP(Qcoeff[idx++], T, negQ);
+ }
+ }
+ }
+ if (BN::param.z < 0) {
+ G2::neg(T, T);
+ }
+ if (BN::param.isBLS12) return;
+ G2 Q1, Q2;
+ Frobenius(Q1, Q);
+ Frobenius(Q2, Q1);
+ G2::neg(Q2, Q2);
+ addLineWithoutP(Qcoeff[idx++], T, Q1);
+ addLineWithoutP(Qcoeff[idx++], T, Q2);
+ assert(idx == BN::param.precomputedQcoeffSize);
+}
+/*
+ millerLoop(e, P, Q) is the same as the following
+ std::vector<Fp6> Qcoeff;
+ precomputeG2(Qcoeff, Q);
+ precomputedMillerLoop(e, P, Qcoeff);
+*/
+#ifndef CYBOZU_DONT_USE_EXCEPTION
+inline void precomputeG2(std::vector<Fp6>& Qcoeff, const G2& Q)
+{
+ Qcoeff.resize(BN::param.precomputedQcoeffSize);
+ precomputeG2(Qcoeff.data(), Q);
+}
+#endif
+template<class Array>
+void precomputeG2(bool *pb, Array& Qcoeff, const G2& Q)
+{
+ *pb = Qcoeff.resize(BN::param.precomputedQcoeffSize);
+ if (!*pb) return;
+ precomputeG2(Qcoeff.data(), Q);
+}
+
+inline void precomputedMillerLoop(Fp12& f, const G1& P_, const Fp6* Qcoeff)
+{
+ G1 P(P_);
+ P.normalize();
+ G1 adjP = makeAdjP(P);
+ size_t idx = 0;
+ Fp6 d, e, l;
+ mulFp6cb_by_G1xy(d, Qcoeff[idx], adjP);
+ idx++;
+
+ mulFp6cb_by_G1xy(e, Qcoeff[idx], P);
+ idx++;
+ mulSparse2(f, d, e);
+ for (size_t i = 2; i < BN::param.siTbl.size(); i++) {
+ mulFp6cb_by_G1xy(l, Qcoeff[idx], adjP);
+ idx++;
+ Fp12::sqr(f, f);
+ mulSparse(f, l);
+ if (BN::param.siTbl[i]) {
+ mulFp6cb_by_G1xy(l, Qcoeff[idx], P);
+ idx++;
+ mulSparse(f, l);
+ }
+ }
+ if (BN::param.z < 0) {
+ Fp6::neg(f.b, f.b);
+ }
+ if (BN::param.isBLS12) return;
+ mulFp6cb_by_G1xy(d, Qcoeff[idx], P);
+ idx++;
+ mulFp6cb_by_G1xy(e, Qcoeff[idx], P);
+ idx++;
+ Fp12 ft;
+ mulSparse2(ft, d, e);
+ f *= ft;
+}
+#ifndef CYBOZU_DONT_USE_EXCEPTION
+inline void precomputedMillerLoop(Fp12& f, const G1& P, const std::vector<Fp6>& Qcoeff)
+{
+ precomputedMillerLoop(f, P, Qcoeff.data());
+}
+#endif
+/*
+ f = MillerLoop(P1, Q1) x MillerLoop(P2, Q2)
+ Q2coeff : precomputed Q2
+*/
+inline void precomputedMillerLoop2mixed(Fp12& f, const G1& P1_, const G2& Q1_, const G1& P2_, const Fp6* Q2coeff)
+{
+ G1 P1(P1_), P2(P2_);
+ G2 Q1(Q1_);
+ P1.normalize();
+ P2.normalize();
+ Q1.normalize();
+ if (Q1.isZero()) {
+ precomputedMillerLoop(f, P2_, Q2coeff);
+ return;
+ }
+ G2 T = Q1;
+ G2 negQ1;
+ if (BN::param.useNAF) {
+ G2::neg(negQ1, Q1);
+ }
+ G1 adjP1 = makeAdjP(P1);
+ G1 adjP2 = makeAdjP(P2);
+ size_t idx = 0;
+ Fp6 d1, d2, e1, e2, l1, l2;
+ dblLine(d1, T, adjP1);
+ mulFp6cb_by_G1xy(d2, Q2coeff[idx], adjP2);
+ idx++;
+
+ Fp12 f1, f2;
+ e1 = 1;
+ addLine(e1, T, Q1, P1);
+ mulSparse2(f1, d1, e1);
+
+ mulFp6cb_by_G1xy(e2, Q2coeff[idx], P2);
+ mulSparse2(f2, d2, e2);
+ Fp12::mul(f, f1, f2);
+ idx++;
+ for (size_t i = 2; i < BN::param.siTbl.size(); i++) {
+ dblLine(l1, T, adjP1);
+ mulFp6cb_by_G1xy(l2, Q2coeff[idx], adjP2);
+ idx++;
+ Fp12::sqr(f, f);
+ mulSparse2(f1, l1, l2);
+ f *= f1;
+ if (BN::param.siTbl[i]) {
+ if (BN::param.siTbl[i] > 0) {
+ addLine(l1, T, Q1, P1);
+ } else {
+ addLine(l1, T, negQ1, P1);
+ }
+ mulFp6cb_by_G1xy(l2, Q2coeff[idx], P2);
+ idx++;
+ mulSparse2(f1, l1, l2);
+ f *= f1;
+ }
+ }
+ if (BN::param.z < 0) {
+ G2::neg(T, T);
+ Fp6::neg(f.b, f.b);
+ }
+ if (BN::param.isBLS12) return;
+ G2 Q11, Q12;
+ Frobenius(Q11, Q1);
+ Frobenius(Q12, Q11);
+ G2::neg(Q12, Q12);
+ addLine(d1, T, Q11, P1);
+ mulFp6cb_by_G1xy(d2, Q2coeff[idx], P2);
+ idx++;
+ addLine(e1, T, Q12, P1);
+ mulFp6cb_by_G1xy(e2, Q2coeff[idx], P2);
+ idx++;
+ mulSparse2(f1, d1, e1);
+ mulSparse2(f2, d2, e2);
+ f *= f1;
+ f *= f2;
+}
+/*
+ f = MillerLoop(P1, Q1) x MillerLoop(P2, Q2)
+ Q1coeff, Q2coeff : precomputed Q1, Q2
+*/
+inline void precomputedMillerLoop2(Fp12& f, const G1& P1_, const Fp6* Q1coeff, const G1& P2_, const Fp6* Q2coeff)
+{
+ G1 P1(P1_), P2(P2_);
+ P1.normalize();
+ P2.normalize();
+ G1 adjP1 = makeAdjP(P1);
+ G1 adjP2 = makeAdjP(P2);
+ size_t idx = 0;
+ Fp6 d1, d2, e1, e2, l1, l2;
+ mulFp6cb_by_G1xy(d1, Q1coeff[idx], adjP1);
+ mulFp6cb_by_G1xy(d2, Q2coeff[idx], adjP2);
+ idx++;
+
+ Fp12 f1, f2;
+ mulFp6cb_by_G1xy(e1, Q1coeff[idx], P1);
+ mulSparse2(f1, d1, e1);
+
+ mulFp6cb_by_G1xy(e2, Q2coeff[idx], P2);
+ mulSparse2(f2, d2, e2);
+ Fp12::mul(f, f1, f2);
+ idx++;
+ for (size_t i = 2; i < BN::param.siTbl.size(); i++) {
+ mulFp6cb_by_G1xy(l1, Q1coeff[idx], adjP1);
+ mulFp6cb_by_G1xy(l2, Q2coeff[idx], adjP2);
+ idx++;
+ Fp12::sqr(f, f);
+ mulSparse2(f1, l1, l2);
+ f *= f1;
+ if (BN::param.siTbl[i]) {
+ mulFp6cb_by_G1xy(l1, Q1coeff[idx], P1);
+ mulFp6cb_by_G1xy(l2, Q2coeff[idx], P2);
+ idx++;
+ mulSparse2(f1, l1, l2);
+ f *= f1;
+ }
+ }
+ if (BN::param.z < 0) {
+ Fp6::neg(f.b, f.b);
+ }
+ if (BN::param.isBLS12) return;
+ mulFp6cb_by_G1xy(d1, Q1coeff[idx], P1);
+ mulFp6cb_by_G1xy(d2, Q2coeff[idx], P2);
+ idx++;
+ mulFp6cb_by_G1xy(e1, Q1coeff[idx], P1);
+ mulFp6cb_by_G1xy(e2, Q2coeff[idx], P2);
+ idx++;
+ mulSparse2(f1, d1, e1);
+ mulSparse2(f2, d2, e2);
+ f *= f1;
+ f *= f2;
+}
+#ifndef CYBOZU_DONT_USE_EXCEPTION
+inline void precomputedMillerLoop2(Fp12& f, const G1& P1, const std::vector<Fp6>& Q1coeff, const G1& P2, const std::vector<Fp6>& Q2coeff)
+{
+ precomputedMillerLoop2(f, P1, Q1coeff.data(), P2, Q2coeff.data());
+}
+inline void precomputedMillerLoop2mixed(Fp12& f, const G1& P1, const G2& Q1, const G1& P2, const std::vector<Fp6>& Q2coeff)
+{
+ precomputedMillerLoop2mixed(f, P1, Q1, P2, Q2coeff.data());
+}
+#endif
+inline void mapToG1(bool *pb, G1& P, const Fp& x) { *pb = BN::param.mapTo.calcG1(P, x); }
+inline void mapToG2(bool *pb, G2& P, const Fp2& x) { *pb = BN::param.mapTo.calcG2(P, x); }
+#ifndef CYBOZU_DONT_USE_EXCEPTION
+inline void mapToG1(G1& P, const Fp& x)
+{
+ bool b;
+ mapToG1(&b, P, x);
+ if (!b) throw cybozu::Exception("mapToG1:bad value") << x;
+}
+inline void mapToG2(G2& P, const Fp2& x)
+{
+ bool b;
+ mapToG2(&b, P, x);
+ if (!b) throw cybozu::Exception("mapToG2:bad value") << x;
+}
+#endif
+inline void hashAndMapToG1(G1& P, const void *buf, size_t bufSize)
+{
+ Fp t;
+ t.setHashOf(buf, bufSize);
+ bool b;
+ mapToG1(&b, P, t);
+ // the hashed value never equals the special value rejected by the map, so this cannot fail
+ assert(b);
+ (void)b;
+}
+inline void hashAndMapToG2(G2& P, const void *buf, size_t bufSize)
+{
+ Fp2 t;
+ t.a.setHashOf(buf, bufSize);
+ t.b.clear();
+ bool b;
+ mapToG2(&b, P, t);
+ // the hashed value never equals the special value rejected by the map, so this cannot fail
+ assert(b);
+ (void)b;
+}
+#ifndef CYBOZU_DONT_USE_STRING
+inline void hashAndMapToG1(G1& P, const std::string& str)
+{
+ hashAndMapToG1(P, str.c_str(), str.size());
+}
+inline void hashAndMapToG2(G2& P, const std::string& str)
+{
+ hashAndMapToG2(P, str.c_str(), str.size());
+}
+#endif
+inline void verifyOrderG1(bool doVerify)
+{
+ if (BN::param.isBLS12) {
+ G1::setOrder(doVerify ? BN::param.r : 0);
+ }
+}
+inline void verifyOrderG2(bool doVerify)
+{
+ G2::setOrder(doVerify ? BN::param.r : 0);
+}
+
+// backward compatibility
+using mcl::CurveParam;
+static const CurveParam& CurveFp254BNb = BN254;
+static const CurveParam& CurveFp382_1 = BN381_1;
+static const CurveParam& CurveFp382_2 = BN381_2;
+static const CurveParam& CurveFp462 = BN462;
+static const CurveParam& CurveSNARK1 = BN_SNARK1;
+
+/*
+ FrobeniusOnTwist for Dtype
+ p mod 6 = 1, w^6 = xi
+ Frob(x', y') = phi Frob phi^-1(x', y')
+ = phi Frob (x' w^2, y' w^3)
+ = phi (x'^p w^2p, y'^p w^3p)
+ = (F(x') w^2(p - 1), F(y') w^3(p - 1))
+ = (F(x') g^2, F(y') g^3)
+
+ FrobeniusOnTwist for Mtype
+ use (1/g) instead of g
+*/
+inline void Frobenius(G2& D, const G2& S)
+{
+ Fp2::Frobenius(D.x, S.x);
+ Fp2::Frobenius(D.y, S.y);
+ Fp2::Frobenius(D.z, S.z);
+ D.x *= BN::param.g2;
+ D.y *= BN::param.g3;
+}
+inline void Frobenius2(G2& D, const G2& S)
+{
+ Frobenius(D, S);
+ Frobenius(D, D);
+}
+inline void Frobenius3(G2& D, const G2& S)
+{
+ Frobenius(D, S);
+ Frobenius(D, D);
+ Frobenius(D, D);
+}
+
+namespace BN {
+
+using namespace mcl::bn; // backward compatibility
+
+inline void init(bool *pb, const mcl::CurveParam& cp = mcl::BN254, fp::Mode mode = fp::FP_AUTO)
+{
+ local::StaticVar<>::param.init(pb, cp, mode);
+ if (!*pb) return;
+ G1::setMulArrayGLV(local::mulArrayGLV1);
+ G2::setMulArrayGLV(local::mulArrayGLV2);
+ Fp12::setPowArrayGLV(local::powArrayGLV2);
+ G1::setCompressedExpression();
+ G2::setCompressedExpression();
+ *pb = true;
+}
+
+#ifndef CYBOZU_DONT_USE_EXCEPTION
+inline void init(const mcl::CurveParam& cp = mcl::BN254, fp::Mode mode = fp::FP_AUTO)
+{
+ bool b;
+ init(&b, cp, mode);
+ if (!b) throw cybozu::Exception("BN:init");
+}
+#endif
+
+} // mcl::bn::BN
+
+inline void initPairing(bool *pb, const mcl::CurveParam& cp = mcl::BN254, fp::Mode mode = fp::FP_AUTO)
+{
+ BN::init(pb, cp, mode);
+}
+
+#ifndef CYBOZU_DONT_USE_EXCEPTION
+inline void initPairing(const mcl::CurveParam& cp = mcl::BN254, fp::Mode mode = fp::FP_AUTO)
+{
+ bool b;
+ BN::init(&b, cp, mode);
+ if (!b) throw cybozu::Exception("bn:initPairing");
+}
+#endif
+
+inline void initG1only(bool *pb, const mcl::EcParam& para)
+{
+ local::StaticVar<>::param.initG1only(pb, para);
+ if (!*pb) return;
+ G1::setMulArrayGLV(0);
+ G2::setMulArrayGLV(0);
+ Fp12::setPowArrayGLV(0);
+ G1::setCompressedExpression();
+ G2::setCompressedExpression();
+}
+
+inline const G1& getG1basePoint()
+{
+ return local::StaticVar<>::param.basePoint;
+}
+
+} } // mcl::bn
+
diff --git a/vendor/github.com/tangerine-network/mcl/include/mcl/bn256.hpp b/vendor/github.com/tangerine-network/mcl/include/mcl/bn256.hpp
new file mode 100644
index 000000000..7a5da7a05
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/mcl/bn256.hpp
@@ -0,0 +1,15 @@
+#pragma once
+/**
+ @file
+ @brief preset class for 256-bit optimal ate pairing over BN curves
+ @author MITSUNARI Shigeo(@herumi)
+ @license modified new BSD license
+ http://opensource.org/licenses/BSD-3-Clause
+*/
+#define MCL_MAX_FP_BIT_SIZE 256
+#include <mcl/bn.hpp>
+
+namespace mcl { namespace bn256 {
+using namespace mcl::bn;
+} }
+
diff --git a/vendor/github.com/tangerine-network/mcl/include/mcl/bn384.hpp b/vendor/github.com/tangerine-network/mcl/include/mcl/bn384.hpp
new file mode 100644
index 000000000..8aa14fe5c
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/mcl/bn384.hpp
@@ -0,0 +1,15 @@
+#pragma once
+/**
+ @file
+ @brief preset class for 384-bit optimal ate pairing over BN curves
+ @author MITSUNARI Shigeo(@herumi)
+ @license modified new BSD license
+ http://opensource.org/licenses/BSD-3-Clause
+*/
+#define MCL_MAX_FP_BIT_SIZE 384
+#include <mcl/bn.hpp>
+// #define MCL_MAX_FR_BIT_SIZE 256 // can set if BLS12_381
+
+namespace mcl { namespace bn384 {
+using namespace mcl::bn;
+} }
diff --git a/vendor/github.com/tangerine-network/mcl/include/mcl/bn512.hpp b/vendor/github.com/tangerine-network/mcl/include/mcl/bn512.hpp
new file mode 100644
index 000000000..c87ad9035
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/mcl/bn512.hpp
@@ -0,0 +1,14 @@
+#pragma once
+/**
+ @file
+ @brief preset class for 512-bit optimal ate pairing over BN curves
+ @author MITSUNARI Shigeo(@herumi)
+ @license modified new BSD license
+ http://opensource.org/licenses/BSD-3-Clause
+*/
+#define MCL_MAX_FP_BIT_SIZE 512
+#include <mcl/bn.hpp>
+
+namespace mcl { namespace bn512 {
+using namespace mcl::bn;
+} }
diff --git a/vendor/github.com/tangerine-network/mcl/include/mcl/conversion.hpp b/vendor/github.com/tangerine-network/mcl/include/mcl/conversion.hpp
new file mode 100644
index 000000000..7a04b7fa2
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/mcl/conversion.hpp
@@ -0,0 +1,495 @@
+#pragma once
+#include <cybozu/itoa.hpp>
+#include <cybozu/stream.hpp>
+/**
+ @file
+ @brief conversion bin/dec/hex <=> array
+ @author MITSUNARI Shigeo(@herumi)
+ @license modified new BSD license
+ http://opensource.org/licenses/BSD-3-Clause
+*/
+#ifdef _MSC_VER
+ #pragma warning(push)
+ #pragma warning(disable : 4127)
+#endif
+
+namespace mcl { namespace fp {
+
+namespace local {
+
+inline bool isSpace(char c)
+{
+ return c == ' ' || c == '\t' || c == '\r' || c == '\n';
+}
+template<class InputStream>
+bool skipSpace(char *c, InputStream& is)
+{
+ for (;;) {
+ if (!cybozu::readChar(c, is)) return false;
+ if (!isSpace(*c)) return true;
+ }
+}
+
+#ifndef CYBOZU_DONT_USE_STRING
+template<class InputStream>
+void loadWord(std::string& s, InputStream& is)
+{
+ s.clear();
+ char c;
+ if (!skipSpace(&c, is)) return;
+ s = c;
+ for (;;) {
+ if (!cybozu::readChar(&c, is)) return;
+ if (isSpace(c)) break;
+ s += c;
+ }
+}
+#endif
+
+template<class InputStream>
+size_t loadWord(char *buf, size_t bufSize, InputStream& is)
+{
+ if (bufSize == 0) return 0;
+ char c;
+ if (!skipSpace(&c, is)) return 0;
+ size_t pos = 0;
+ buf[pos++] = c;
+ for (;;) {
+ if (!cybozu::readChar(&c, is)) break;
+ if (isSpace(c)) break;
+ if (pos == bufSize) return 0;
+ buf[pos++] = c;
+ }
+ return pos;
+}
+
+
+/*
+ q = x[] / y
+ @retval r = x[] % y
+ @note accept q == x
+*/
+inline uint32_t divU32(uint32_t *q, const uint32_t *x, size_t xn, uint32_t y)
+{
+ if (xn == 0) return 0;
+ uint32_t r = 0;
+ for (int i = (int)xn - 1; i >= 0; i--) {
+ uint64_t t = (uint64_t(r) << 32) | x[i];
+ q[i] = uint32_t(t / y);
+ r = uint32_t(t % y);
+ }
+ return r;
+}
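+// worked example (illustrative): x = {5, 1} represents 0x1_00000005 = 4294967301;
+// divU32(q, x, 2, 10) sets q = {0x1999999A, 0} and returns r = 1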
+
+/*
+ z[0, xn) = x[0, xn) * y
+ return z[xn]
+ @note accept z == x
+*/
+inline uint32_t mulU32(uint32_t *z, const uint32_t *x, size_t xn, uint32_t y)
+{
+ uint32_t H = 0;
+ for (size_t i = 0; i < xn; i++) {
+ uint32_t t = H;
+ uint64_t v = uint64_t(x[i]) * y;
+ uint32_t L = uint32_t(v);
+ H = uint32_t(v >> 32);
+ z[i] = t + L;
+ if (z[i] < t) {
+ H++;
+ }
+ }
+ return H;
+}
+
+/*
+ x[0, xn) += y
+ return 1 if overflow else 0
+*/
+inline uint32_t addU32(uint32_t *x, size_t xn, uint32_t y)
+{
+ uint32_t t = x[0] + y;
+ x[0] = t;
+ if (t >= y) return 0;
+ for (size_t i = 1; i < xn; i++) {
+ t = x[i] + 1;
+ x[i] = t;
+ if (t != 0) return 0;
+ }
+ return 1;
+}
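+// worked example (illustrative): x = {0xffffffff, 0}, addU32(x, 2, 1) -> x = {0, 1}, returns 0;
+// x = {0xffffffff, 0xffffffff}, addU32(x, 2, 1) -> x = {0, 0}, returns 1 (overflow)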
+
+inline uint32_t decToU32(const char *p, size_t size, bool *pb)
+{
+ assert(0 < size && size <= 9);
+ uint32_t x = 0;
+ for (size_t i = 0; i < size; i++) {
+ char c = p[i];
+ if (c < '0' || c > '9') {
+ *pb = false;
+ return 0;
+ }
+ x = x * 10 + uint32_t(c - '0');
+ }
+ *pb = true;
+ return x;
+}
+
+inline bool hexCharToUint8(uint8_t *v, char _c)
+{
+ uint32_t c = uint8_t(_c); // cast is necessary
+ if (c - '0' <= '9' - '0') {
+ c = c - '0';
+ } else if (c - 'a' <= 'f' - 'a') {
+ c = (c - 'a') + 10;
+ } else if (c - 'A' <= 'F' - 'A') {
+ c = (c - 'A') + 10;
+ } else {
+ return false;
+ }
+ *v = uint8_t(c);
+ return true;
+}
+
+template<class UT>
+bool hexToUint(UT *px, const char *p, size_t size)
+{
+ assert(0 < size && size <= sizeof(UT) * 2);
+ UT x = 0;
+ for (size_t i = 0; i < size; i++) {
+ uint8_t v;
+ if (!hexCharToUint8(&v, p[i])) return false;
+ x = x * 16 + v;
+ }
+ *px = x;
+ return true;
+}
+
+template<class UT>
+bool binToUint(UT *px, const char *p, size_t size)
+{
+ assert(0 < size && size <= sizeof(UT) * 8);
+ UT x = 0;
+ for (size_t i = 0; i < size; i++) {
+ UT c = static_cast<uint8_t>(p[i]);
+ if (c == '0') {
+ x = x * 2;
+ } else if (c == '1') {
+ x = x * 2 + 1;
+ } else {
+ return false;
+ }
+ }
+ *px = x;
+ return true;
+}
+
+inline bool parsePrefix(size_t *readSize, bool *isMinus, int *base, const char *buf, size_t bufSize)
+{
+ if (bufSize == 0) return false;
+ size_t pos = 0;
+ if (*buf == '-') {
+ if (bufSize == 1) return false;
+ *isMinus = true;
+ buf++;
+ pos++;
+ } else {
+ *isMinus = false;
+ }
+ if (buf[0] == '0') {
+ if (bufSize > 1 && buf[1] == 'x') {
+ if (*base == 0 || *base == 16) {
+ *base = 16;
+ pos += 2;
+ } else {
+ return false;
+ }
+ } else if (bufSize > 1 && buf[1] == 'b') {
+ if (*base == 0 || *base == 2) {
+ *base = 2;
+ pos += 2;
+ } else {
+ return false;
+ }
+ }
+ }
+ if (*base == 0) *base = 10;
+ if (pos == bufSize) return false;
+ *readSize = pos;
+ return true;
+}
+
+} // mcl::fp::local
+
+/*
+ convert little endian x[0, xn) to buf
+ return written size if success else 0
+ data is buf[bufSize - retval, bufSize)
+ start "0x" if withPrefix
+*/
+template<class T>
+size_t arrayToHex(char *buf, size_t bufSize, const T *x, size_t n, bool withPrefix = false)
+{
+ size_t fullN = 0;
+ if (n > 1) {
+ size_t pos = n - 1;
+ while (pos > 0) {
+ if (x[pos]) break;
+ pos--;
+ }
+ if (pos > 0) fullN = pos;
+ }
+ const T v = n == 0 ? 0 : x[fullN];
+ const size_t topLen = cybozu::getHexLength(v);
+ const size_t startPos = withPrefix ? 2 : 0;
+ const size_t lenT = sizeof(T) * 2;
+ const size_t totalSize = startPos + fullN * lenT + topLen;
+ if (totalSize > bufSize) return 0;
+ char *const top = buf + bufSize - totalSize;
+ if (withPrefix) {
+ top[0] = '0';
+ top[1] = 'x';
+ }
+ cybozu::itohex(&top[startPos], topLen, v, false);
+ for (size_t i = 0; i < fullN; i++) {
+ cybozu::itohex(&top[startPos + topLen + i * lenT], lenT, x[fullN - 1 - i], false);
+ }
+ return totalSize;
+}
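+// worked example (illustrative, T = uint32_t): x = {0x89abcdef, 0x1}, n = 2
+// writes "189abcdef" into the last 9 bytes of buf and returns 9 ("0x" prefix adds 2 more)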
+
+/*
+ convert little endian x[0, xn) to buf
+ return written size if success else 0
+ data is buf[bufSize - retval, bufSize)
+ start "0b" if withPrefix
+*/
+template<class T>
+size_t arrayToBin(char *buf, size_t bufSize, const T *x, size_t n, bool withPrefix)
+{
+ size_t fullN = 0;
+ if (n > 1) {
+ size_t pos = n - 1;
+ while (pos > 0) {
+ if (x[pos]) break;
+ pos--;
+ }
+ if (pos > 0) fullN = pos;
+ }
+ const T v = n == 0 ? 0 : x[fullN];
+ const size_t topLen = cybozu::getBinLength(v);
+ const size_t startPos = withPrefix ? 2 : 0;
+ const size_t lenT = sizeof(T) * 8;
+ const size_t totalSize = startPos + fullN * lenT + topLen;
+ if (totalSize > bufSize) return 0;
+ char *const top = buf + bufSize - totalSize;
+ if (withPrefix) {
+ top[0] = '0';
+ top[1] = 'b';
+ }
+ cybozu::itobin(&top[startPos], topLen, v);
+ for (size_t i = 0; i < fullN; i++) {
+ cybozu::itobin(&top[startPos + topLen + i * lenT], lenT, x[fullN - 1 - i]);
+ }
+ return totalSize;
+}
+
+/*
+ convert hex string to x[0..xn)
+ hex string = [0-9a-fA-F]+
+*/
+template<class UT>
+inline size_t hexToArray(UT *x, size_t maxN, const char *buf, size_t bufSize)
+{
+ if (bufSize == 0) return 0;
+ const size_t unitLen = sizeof(UT) * 2;
+ const size_t q = bufSize / unitLen;
+ const size_t r = bufSize % unitLen;
+ const size_t requireSize = q + (r ? 1 : 0);
+ if (maxN < requireSize) return 0;
+ for (size_t i = 0; i < q; i++) {
+ if (!local::hexToUint(&x[i], &buf[r + (q - 1 - i) * unitLen], unitLen)) return 0;
+ }
+ if (r) {
+ if (!local::hexToUint(&x[q], buf, r)) return 0;
+ }
+ return requireSize;
+}
+/*
+ convert bin string to x[0..xn)
+ bin string = [01]+
+*/
+template<class UT>
+inline size_t binToArray(UT *x, size_t maxN, const char *buf, size_t bufSize)
+{
+ if (bufSize == 0) return 0;
+ const size_t unitLen = sizeof(UT) * 8;
+ const size_t q = bufSize / unitLen;
+ const size_t r = bufSize % unitLen;
+ const size_t requireSize = q + (r ? 1 : 0);
+ if (maxN < requireSize) return 0;
+ for (size_t i = 0; i < q; i++) {
+ if (!local::binToUint(&x[i], &buf[r + (q - 1 - i) * unitLen], unitLen)) return 0;
+ }
+ if (r) {
+ if (!local::binToUint(&x[q], buf, r)) return 0;
+ }
+ return requireSize;
+}
+
+/*
+ little endian x[0, xn) to buf
+ return written size if success else 0
+ data is buf[bufSize - retval, bufSize)
+*/
+template<class UT>
+inline size_t arrayToDec(char *buf, size_t bufSize, const UT *x, size_t xn)
+{
+ const size_t maxN = 64;
+ uint32_t t[maxN];
+ if (sizeof(UT) == 8) {
+ xn *= 2;
+ }
+ if (xn > maxN) return 0;
+ memcpy(t, x, xn * sizeof(t[0]));
+
+ const size_t width = 9;
+ const uint32_t i1e9 = 1000000000U;
+ size_t pos = 0;
+ for (;;) {
+ uint32_t r = local::divU32(t, t, xn, i1e9);
+ while (xn > 0 && t[xn - 1] == 0) xn--;
+ size_t len = cybozu::itoa_local::uintToDec(buf, bufSize - pos, r);
+ if (len == 0) return 0;
+ assert(0 < len && len <= width);
+ if (xn == 0) return pos + len;
+ // fill (width - len) '0'
+ for (size_t j = 0; j < width - len; j++) {
+ buf[bufSize - pos - width + j] = '0';
+ }
+ pos += width;
+ }
+}
+
+/*
+ convert buf[0, bufSize) to x[0, num)
+ return written num if success else 0
+*/
+template<class UT>
+inline size_t decToArray(UT *_x, size_t maxN, const char *buf, size_t bufSize)
+{
+ assert(sizeof(UT) == 4 || sizeof(UT) == 8);
+ const size_t width = 9;
+ const uint32_t i1e9 = 1000000000U;
+ if (maxN == 0) return 0;
+ if (sizeof(UT) == 8) {
+ maxN *= 2;
+ }
+ uint32_t *x = reinterpret_cast<uint32_t*>(_x);
+ size_t xn = 1;
+ x[0] = 0;
+ while (bufSize > 0) {
+ size_t n = bufSize % width;
+ if (n == 0) n = width;
+ bool b;
+ uint32_t v = local::decToU32(buf, n, &b);
+ if (!b) return 0;
+ uint32_t H = local::mulU32(x, x, xn, i1e9);
+ if (H > 0) {
+ if (xn == maxN) return 0;
+ x[xn++] = H;
+ }
+ H = local::addU32(x, xn, v);
+ if (H > 0) {
+ if (xn == maxN) return 0;
+ x[xn++] = H;
+ }
+ buf += n;
+ bufSize -= n;
+ }
+ if (sizeof(UT) == 8 && (xn & 1)) {
+ x[xn++] = 0;
+ }
+ return xn / (sizeof(UT) / 4);
+}
+
+/*
+ the return value (retval) is the written size if success else 0
+ REMARK : the top of the string is buf + bufSize - retval
+*/
+template<class UT>
+size_t arrayToStr(char *buf, size_t bufSize, const UT *x, size_t n, int base, bool withPrefix)
+{
+ switch (base) {
+ case 0:
+ case 10:
+ return arrayToDec(buf, bufSize, x, n);
+ case 16:
+ return arrayToHex(buf, bufSize, x, n, withPrefix);
+ case 2:
+ return arrayToBin(buf, bufSize, x, n, withPrefix);
+ default:
+ return 0;
+ }
+}
+
+template<class UT>
+size_t strToArray(bool *pIsMinus, UT *x, size_t xN, const char *buf, size_t bufSize, int ioMode)
+{
+ ioMode &= 31;
+ size_t readSize;
+ if (!local::parsePrefix(&readSize, pIsMinus, &ioMode, buf, bufSize)) return 0;
+ switch (ioMode) {
+ case 10:
+ return decToArray(x, xN, buf + readSize, bufSize - readSize);
+ case 16:
+ return hexToArray(x, xN, buf + readSize, bufSize - readSize);
+ case 2:
+ return binToArray(x, xN, buf + readSize, bufSize - readSize);
+ default:
+ return 0;
+ }
+}
+
+/*
+ convert src[0, n) to (n * 2) byte hex string and write it to os
+ set *pb to true if success else false
+*/
+template<class OutputStream>
+void writeHexStr(bool *pb, OutputStream& os, const void *src, size_t n)
+{
+ const uint8_t *p = (const uint8_t *)src;
+ for (size_t i = 0; i < n; i++) {
+ char hex[2];
+ cybozu::itohex(hex, sizeof(hex), p[i], false);
+ cybozu::write(pb, os, hex, sizeof(hex));
+ if (!*pb) return;
+ }
+ *pb = true;
+}
+/*
+ read hex string from is and convert it to byte array
+ return written buffer size
+*/
+template<class InputStream>
+inline size_t readHexStr(void *buf, size_t n, InputStream& is)
+{
+ bool b;
+ uint8_t *dst = (uint8_t *)buf;
+ for (size_t i = 0; i < n; i++) {
+ uint8_t L, H;
+ char c[2];
+ if (cybozu::readSome(c, sizeof(c), is) != sizeof(c)) return i;
+ b = local::hexCharToUint8(&H, c[0]);
+ if (!b) return i;
+ b = local::hexCharToUint8(&L, c[1]);
+ if (!b) return i;
+ dst[i] = (H << 4) | L;
+ }
+ return n;
+}
+
+} } // mcl::fp
+
+#ifdef _MSC_VER
+ #pragma warning(pop)
+#endif
diff --git a/vendor/github.com/tangerine-network/mcl/include/mcl/curve_type.h b/vendor/github.com/tangerine-network/mcl/include/mcl/curve_type.h
new file mode 100644
index 000000000..9e4a941a0
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/mcl/curve_type.h
@@ -0,0 +1,35 @@
+#pragma once
+/**
+ @file
+ @brief curve type
+ @author MITSUNARI Shigeo(@herumi)
+ @license modified new BSD license
+ http://opensource.org/licenses/BSD-3-Clause
+*/
+
+enum {
+ MCL_BN254 = 0,
+ MCL_BN381_1 = 1,
+ MCL_BN381_2 = 2,
+ MCL_BN462 = 3,
+ MCL_BN_SNARK1 = 4,
+ MCL_BLS12_381 = 5,
+ MCL_BN160 = 6,
+
+ /*
+ for only G1
+ the size of the curve must be less than or equal to MCLBN_FP_UNIT_SIZE
+ */
+ MCL_EC_BEGIN = 100,
+ MCL_SECP192K1 = MCL_EC_BEGIN,
+ MCL_SECP224K1 = 101,
+ MCL_SECP256K1 = 102,
+ MCL_SECP384R1 = 103,
+ MCL_SECP521R1 = 104,
+ MCL_NIST_P192 = 105,
+ MCL_NIST_P224 = 106,
+ MCL_NIST_P256 = 107,
+ MCL_EC_END = MCL_NIST_P256 + 1,
+ MCL_NIST_P384 = MCL_SECP384R1,
+ MCL_NIST_P521 = MCL_SECP521R1
+};
diff --git a/vendor/github.com/tangerine-network/mcl/include/mcl/ec.hpp b/vendor/github.com/tangerine-network/mcl/include/mcl/ec.hpp
new file mode 100644
index 000000000..b8eb10be3
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/mcl/ec.hpp
@@ -0,0 +1,1045 @@
+#pragma once
+/**
+ @file
+ @brief elliptic curve
+ @author MITSUNARI Shigeo(@herumi)
+ @license modified new BSD license
+ http://opensource.org/licenses/BSD-3-Clause
+*/
+#include <stdlib.h>
+#include <cybozu/exception.hpp>
+#include <mcl/op.hpp>
+#include <mcl/util.hpp>
+
+//#define MCL_EC_USE_AFFINE
+
+#ifdef _MSC_VER
+ #pragma warning(push)
+ #pragma warning(disable : 4458)
+#endif
+
+namespace mcl {
+
+namespace ec {
+
+enum Mode {
+ Jacobi = 0,
+ Proj = 1
+};
+
+} // mcl::ec
+
+/*
+ elliptic curve
+ y^2 = x^3 + ax + b (affine)
+ Y^2 = X^3 + aXZ^4 + bZ^6 (Jacobi) x = X/Z^2, y = Y/Z^3
+*/
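+// projective form used in ec::Proj mode (illustrative note, cf. isValidProj below):
+//   Y^2 Z = X^3 + aXZ^2 + bZ^3, x = X/Z, y = Y/Z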
+template<class _Fp>
+class EcT : public fp::Serializable<EcT<_Fp> > {
+ enum {
+ zero,
+ minus3,
+ generic
+ };
+public:
+ typedef _Fp Fp;
+ typedef _Fp BaseFp;
+#ifdef MCL_EC_USE_AFFINE
+ Fp x, y;
+ bool inf_;
+#else
+ Fp x, y, z;
+ static int mode_;
+#endif
+ static Fp a_;
+ static Fp b_;
+ static int specialA_;
+ static int ioMode_;
+ /*
+ order_ is the order of G2 which is the subgroup of EcT<Fp2>.
+ check the order of the elements if verifyOrder_ is true
+ */
+ static bool verifyOrder_;
+ static mpz_class order_;
+ static void (*mulArrayGLV)(EcT& z, const EcT& x, const fp::Unit *y, size_t yn, bool isNegative, bool constTime);
+ /* default constructor is undefined value */
+ EcT() {}
+ EcT(const Fp& _x, const Fp& _y)
+ {
+ set(_x, _y);
+ }
+ bool isNormalized() const
+ {
+#ifdef MCL_EC_USE_AFFINE
+ return true;
+#else
+ return isZero() || z.isOne();
+#endif
+ }
+#ifndef MCL_EC_USE_AFFINE
+private:
+ void normalizeJacobi()
+ {
+ assert(!z.isZero());
+ Fp rz2;
+ Fp::inv(z, z);
+ Fp::sqr(rz2, z);
+ x *= rz2;
+ y *= rz2;
+ y *= z;
+ z = 1;
+ }
+ void normalizeProj()
+ {
+ assert(!z.isZero());
+ Fp::inv(z, z);
+ x *= z;
+ y *= z;
+ z = 1;
+ }
+ // Y^2 == X(X^2 + aZ^4) + bZ^6
+ bool isValidJacobi() const
+ {
+ Fp y2, x2, z2, z4, t;
+ Fp::sqr(x2, x);
+ Fp::sqr(y2, y);
+ Fp::sqr(z2, z);
+ Fp::sqr(z4, z2);
+ Fp::mul(t, z4, a_);
+ t += x2;
+ t *= x;
+ z4 *= z2;
+ z4 *= b_;
+ t += z4;
+ return y2 == t;
+ }
+ // (Y^2 - bZ^2)Z = X(X^2 + aZ^2)
+ bool isValidProj() const
+ {
+ Fp y2, x2, z2, t;
+ Fp::sqr(x2, x);
+ Fp::sqr(y2, y);
+ Fp::sqr(z2, z);
+ Fp::mul(t, a_, z2);
+ t += x2;
+ t *= x;
+ z2 *= b_;
+ y2 -= z2;
+ y2 *= z;
+ return y2 == t;
+ }
+#endif
+ // y^2 == (x^2 + a)x + b
+ static inline bool isValid(const Fp& _x, const Fp& _y)
+ {
+ Fp y2, t;
+ Fp::sqr(y2, _y);
+ Fp::sqr(t, _x);
+ t += a_;
+ t *= _x;
+ t += b_;
+ return y2 == t;
+ }
+public:
+ void normalize()
+ {
+#ifndef MCL_EC_USE_AFFINE
+ if (isNormalized()) return;
+ switch (mode_) {
+ case ec::Jacobi:
+ normalizeJacobi();
+ break;
+ case ec::Proj:
+ normalizeProj();
+ break;
+ }
+#endif
+ }
+ static void normalize(EcT& y, const EcT& x)
+ {
+ y = x;
+ y.normalize();
+ }
+ static inline void init(const Fp& a, const Fp& b, int mode = ec::Jacobi)
+ {
+ a_ = a;
+ b_ = b;
+ if (a_.isZero()) {
+ specialA_ = zero;
+ } else if (a_ == -3) {
+ specialA_ = minus3;
+ } else {
+ specialA_ = generic;
+ }
+ ioMode_ = 0;
+ verifyOrder_ = false;
+ order_ = 0;
+ mulArrayGLV = 0;
+#ifdef MCL_EC_USE_AFFINE
+ cybozu::disable_warning_unused_variable(mode);
+#else
+ assert(mode == ec::Jacobi || mode == ec::Proj);
+ mode_ = mode;
+#endif
+ }
+ /*
+ verify the order of *this is equal to order if order != 0
+ in constructor, set, setStr, operator<<().
+ */
+ static void setOrder(const mpz_class& order)
+ {
+ if (order != 0) {
+ verifyOrder_ = true;
+ order_ = order;
+ } else {
+ verifyOrder_ = false;
+ // don't clear order_ because it is used for isValidOrder()
+ }
+ }
+ static void setMulArrayGLV(void f(EcT& z, const EcT& x, const fp::Unit *y, size_t yn, bool isNegative, bool constTime))
+ {
+ mulArrayGLV = f;
+ }
+ static inline void init(bool *pb, const char *astr, const char *bstr, int mode = ec::Jacobi)
+ {
+ Fp a, b;
+ a.setStr(pb, astr);
+ if (!*pb) return;
+ b.setStr(pb, bstr);
+ if (!*pb) return;
+ init(a, b, mode);
+ }
+ // verify the order
+ bool isValidOrder() const
+ {
+ EcT Q;
+ EcT::mulGeneric(Q, *this, order_);
+ return Q.isZero();
+ }
+ bool isValid() const
+ {
+ if (isZero()) return true;
+ bool isOK = false;
+#ifndef MCL_EC_USE_AFFINE
+ if (!z.isOne()) {
+ switch (mode_) {
+ case ec::Jacobi:
+ isOK = isValidJacobi();
+ break;
+ case ec::Proj:
+ isOK = isValidProj();
+ break;
+ }
+ } else
+#endif
+ {
+ isOK = isValid(x, y);
+ }
+ if (!isOK) return false;
+ if (verifyOrder_) return isValidOrder();
+ return true;
+ }
+ void set(bool *pb, const Fp& _x, const Fp& _y, bool verify = true)
+ {
+ if (verify && !isValid(_x, _y)) {
+ *pb = false;
+ return;
+ }
+ x = _x; y = _y;
+#ifdef MCL_EC_USE_AFFINE
+ inf_ = false;
+#else
+ z = 1;
+#endif
+ if (verify && verifyOrder_ && !isValidOrder()) {
+ *pb = false;
+ } else {
+ *pb = true;
+ }
+ }
+ void clear()
+ {
+#ifdef MCL_EC_USE_AFFINE
+ inf_ = true;
+#else
+ z.clear();
+#endif
+ x.clear();
+ y.clear();
+ }
+#ifndef MCL_EC_USE_AFFINE
+ static inline void dblNoVerifyInfJacobi(EcT& R, const EcT& P)
+ {
+ Fp S, M, t, y2;
+ Fp::sqr(y2, P.y);
+ Fp::mul(S, P.x, y2);
+ const bool isPzOne = P.z.isOne();
+ S += S;
+ S += S;
+ Fp::sqr(M, P.x);
+ switch (specialA_) {
+ case zero:
+ Fp::add(t, M, M);
+ M += t;
+ break;
+ case minus3:
+ if (isPzOne) {
+ M -= P.z;
+ } else {
+ Fp::sqr(t, P.z);
+ Fp::sqr(t, t);
+ M -= t;
+ }
+ Fp::add(t, M, M);
+ M += t;
+ break;
+ case generic:
+ default:
+ if (isPzOne) {
+ t = a_;
+ } else {
+ Fp::sqr(t, P.z);
+ Fp::sqr(t, t);
+ t *= a_;
+ }
+ t += M;
+ M += M;
+ M += t;
+ break;
+ }
+ Fp::sqr(R.x, M);
+ R.x -= S;
+ R.x -= S;
+ if (isPzOne) {
+ R.z = P.y;
+ } else {
+ Fp::mul(R.z, P.y, P.z);
+ }
+ R.z += R.z;
+ Fp::sqr(y2, y2);
+ y2 += y2;
+ y2 += y2;
+ y2 += y2;
+ Fp::sub(R.y, S, R.x);
+ R.y *= M;
+ R.y -= y2;
+ }
+ static inline void dblNoVerifyInfProj(EcT& R, const EcT& P)
+ {
+ const bool isPzOne = P.z.isOne();
+ Fp w, t, h;
+ switch (specialA_) {
+ case zero:
+ Fp::sqr(w, P.x);
+ Fp::add(t, w, w);
+ w += t;
+ break;
+ case minus3:
+ Fp::sqr(w, P.x);
+ if (isPzOne) {
+ w -= P.z;
+ } else {
+ Fp::sqr(t, P.z);
+ w -= t;
+ }
+ Fp::add(t, w, w);
+ w += t;
+ break;
+ case generic:
+ default:
+ if (isPzOne) {
+ w = a_;
+ } else {
+ Fp::sqr(w, P.z);
+ w *= a_;
+ }
+ Fp::sqr(t, P.x);
+ w += t;
+ w += t;
+ w += t; // w = a z^2 + 3x^2
+ break;
+ }
+ if (isPzOne) {
+ R.z = P.y;
+ } else {
+ Fp::mul(R.z, P.y, P.z); // s = yz
+ }
+ Fp::mul(t, R.z, P.x);
+ t *= P.y; // xys
+ t += t;
+ t += t; // 4(xys) ; 4B
+ Fp::sqr(h, w);
+ h -= t;
+ h -= t; // w^2 - 8B
+ Fp::mul(R.x, h, R.z);
+ t -= h; // h is free
+ t *= w;
+ Fp::sqr(w, P.y);
+ R.x += R.x;
+ R.z += R.z;
+ Fp::sqr(h, R.z);
+ w *= h;
+ R.z *= h;
+ Fp::sub(R.y, t, w);
+ R.y -= w;
+ }
+#endif
+ static inline void dblNoVerifyInf(EcT& R, const EcT& P)
+ {
+#ifdef MCL_EC_USE_AFFINE
+ Fp t, s;
+ Fp::sqr(t, P.x);
+ Fp::add(s, t, t);
+ t += s;
+ t += a_;
+ Fp::add(s, P.y, P.y);
+ t /= s;
+ Fp::sqr(s, t);
+ s -= P.x;
+ Fp x3;
+ Fp::sub(x3, s, P.x);
+ Fp::sub(s, P.x, x3);
+ s *= t;
+ Fp::sub(R.y, s, P.y);
+ R.x = x3;
+ R.inf_ = false;
+#else
+ switch (mode_) {
+ case ec::Jacobi:
+ dblNoVerifyInfJacobi(R, P);
+ break;
+ case ec::Proj:
+ dblNoVerifyInfProj(R, P);
+ break;
+ }
+#endif
+ }
+ static inline void dbl(EcT& R, const EcT& P)
+ {
+ if (P.isZero()) {
+ R.clear();
+ return;
+ }
+ dblNoVerifyInf(R, P);
+ }
+#ifndef MCL_EC_USE_AFFINE
+ static inline void addJacobi(EcT& R, const EcT& P, const EcT& Q, bool isPzOne, bool isQzOne)
+ {
+ Fp r, U1, S1, H, H3;
+ if (isPzOne) {
+ // r = 1;
+ } else {
+ Fp::sqr(r, P.z);
+ }
+ if (isQzOne) {
+ U1 = P.x;
+ if (isPzOne) {
+ H = Q.x;
+ } else {
+ Fp::mul(H, Q.x, r);
+ }
+ H -= U1;
+ S1 = P.y;
+ } else {
+ Fp::sqr(S1, Q.z);
+ Fp::mul(U1, P.x, S1);
+ if (isPzOne) {
+ H = Q.x;
+ } else {
+ Fp::mul(H, Q.x, r);
+ }
+ H -= U1;
+ S1 *= Q.z;
+ S1 *= P.y;
+ }
+ if (isPzOne) {
+ r = Q.y;
+ } else {
+ r *= P.z;
+ r *= Q.y;
+ }
+ r -= S1;
+ if (H.isZero()) {
+ if (r.isZero()) {
+ dblNoVerifyInf(R, P);
+ } else {
+ R.clear();
+ }
+ return;
+ }
+ if (isPzOne) {
+ R.z = H;
+ } else {
+ Fp::mul(R.z, P.z, H);
+ }
+ if (!isQzOne) {
+ R.z *= Q.z;
+ }
+ Fp::sqr(H3, H); // H^2
+ Fp::sqr(R.y, r); // r^2
+ U1 *= H3; // U1 H^2
+ H3 *= H; // H^3
+ R.y -= U1;
+ R.y -= U1;
+ Fp::sub(R.x, R.y, H3);
+ U1 -= R.x;
+ U1 *= r;
+ H3 *= S1;
+ Fp::sub(R.y, U1, H3);
+ }
+ static inline void addProj(EcT& R, const EcT& P, const EcT& Q, bool isPzOne, bool isQzOne)
+ {
+ Fp r, PyQz, v, A, vv;
+ if (isQzOne) {
+ r = P.x;
+ PyQz = P.y;
+ } else {
+ Fp::mul(r, P.x, Q.z);
+ Fp::mul(PyQz, P.y, Q.z);
+ }
+ if (isPzOne) {
+ A = Q.y;
+ v = Q.x;
+ } else {
+ Fp::mul(A, Q.y, P.z);
+ Fp::mul(v, Q.x, P.z);
+ }
+ v -= r;
+ if (v.isZero()) {
+ if (A == PyQz) {
+ dblNoVerifyInf(R, P);
+ } else {
+ R.clear();
+ }
+ return;
+ }
+ Fp::sub(R.y, A, PyQz);
+ Fp::sqr(A, R.y);
+ Fp::sqr(vv, v);
+ r *= vv;
+ vv *= v;
+ if (isQzOne) {
+ R.z = P.z;
+ } else {
+ if (isPzOne) {
+ R.z = Q.z;
+ } else {
+ Fp::mul(R.z, P.z, Q.z);
+ }
+ }
+ // R.z = 1 if isPzOne && isQzOne
+ if (isPzOne && isQzOne) {
+ R.z = vv;
+ } else {
+ A *= R.z;
+ R.z *= vv;
+ }
+ A -= vv;
+ vv *= PyQz;
+ A -= r;
+ A -= r;
+ Fp::mul(R.x, v, A);
+ r -= A;
+ R.y *= r;
+ R.y -= vv;
+ }
+#endif
+ static inline void add(EcT& R, const EcT& P, const EcT& Q) {
+ if (P.isZero()) { R = Q; return; }
+ if (Q.isZero()) { R = P; return; }
+ if (&P == &Q) {
+ dblNoVerifyInf(R, P);
+ return;
+ }
+#ifdef MCL_EC_USE_AFFINE
+ Fp t;
+ Fp::neg(t, Q.y);
+ if (P.y == t) { R.clear(); return; }
+ Fp::sub(t, Q.x, P.x);
+ if (t.isZero()) {
+ dblNoVerifyInf(R, P);
+ return;
+ }
+ Fp s;
+ Fp::sub(s, Q.y, P.y);
+ Fp::div(t, s, t);
+ R.inf_ = false;
+ Fp x3;
+ Fp::sqr(x3, t);
+ x3 -= P.x;
+ x3 -= Q.x;
+ Fp::sub(s, P.x, x3);
+ s *= t;
+ Fp::sub(R.y, s, P.y);
+ R.x = x3;
+#else
+ bool isPzOne = P.z.isOne();
+ bool isQzOne = Q.z.isOne();
+ switch (mode_) {
+ case ec::Jacobi:
+ addJacobi(R, P, Q, isPzOne, isQzOne);
+ break;
+ case ec::Proj:
+ addProj(R, P, Q, isPzOne, isQzOne);
+ break;
+ }
+#endif
+ }
+ static inline void sub(EcT& R, const EcT& P, const EcT& Q)
+ {
+ EcT nQ;
+ neg(nQ, Q);
+ add(R, P, nQ);
+ }
+ static inline void neg(EcT& R, const EcT& P)
+ {
+ if (P.isZero()) {
+ R.clear();
+ return;
+ }
+ R.x = P.x;
+ Fp::neg(R.y, P.y);
+#ifdef MCL_EC_USE_AFFINE
+ R.inf_ = false;
+#else
+ R.z = P.z;
+#endif
+ }
+ template<class tag, size_t maxBitSize, template<class _tag, size_t _maxBitSize>class FpT>
+ static inline void mul(EcT& z, const EcT& x, const FpT<tag, maxBitSize>& y)
+ {
+ fp::Block b;
+ y.getBlock(b);
+ mulArray(z, x, b.p, b.n, false);
+ }
+ static inline void mul(EcT& z, const EcT& x, int64_t y)
+ {
+ const uint64_t u = fp::abs_(y);
+#if MCL_SIZEOF_UNIT == 8
+ mulArray(z, x, &u, 1, y < 0);
+#else
+ uint32_t ua[2] = { uint32_t(u), uint32_t(u >> 32) };
+ size_t un = ua[1] ? 2 : 1;
+ mulArray(z, x, ua, un, y < 0);
+#endif
+ }
+ static inline void mul(EcT& z, const EcT& x, const mpz_class& y)
+ {
+ mulArray(z, x, gmp::getUnit(y), gmp::getUnitSize(y), y < 0);
+ }
+ template<class tag, size_t maxBitSize, template<class _tag, size_t _maxBitSize>class FpT>
+ static inline void mulCT(EcT& z, const EcT& x, const FpT<tag, maxBitSize>& y)
+ {
+ fp::Block b;
+ y.getBlock(b);
+ mulArray(z, x, b.p, b.n, false, true);
+ }
+ static inline void mulCT(EcT& z, const EcT& x, const mpz_class& y)
+ {
+ mulArray(z, x, gmp::getUnit(y), gmp::getUnitSize(y), y < 0, true);
+ }
+ /*
+ 0 <= P for any P
+ (Px, Py) <= (P'x, P'y) iff Px < P'x or Px == P'x and Py <= P'y
+ @note compare function calls normalize()
+ */
+ template<class F>
+ static inline int compareFunc(const EcT& P_, const EcT& Q_, F comp)
+ {
+ const bool QisZero = Q_.isZero();
+ if (P_.isZero()) {
+ if (QisZero) return 0;
+ return -1;
+ }
+ if (QisZero) return 1;
+ EcT P(P_), Q(Q_);
+ P.normalize();
+ Q.normalize();
+ int c = comp(P.x, Q.x);
+ if (c > 0) return 1;
+ if (c < 0) return -1;
+ return comp(P.y, Q.y);
+ }
+ static inline int compare(const EcT& P, const EcT& Q)
+ {
+ return compareFunc(P, Q, Fp::compare);
+ }
+ static inline int compareRaw(const EcT& P, const EcT& Q)
+ {
+ return compareFunc(P, Q, Fp::compareRaw);
+ }
+ bool isZero() const
+ {
+#ifdef MCL_EC_USE_AFFINE
+ return inf_;
+#else
+ return z.isZero();
+#endif
+ }
+ static inline bool isMSBserialize()
+ {
+ return !b_.isZero() && (Fp::BaseFp::getBitSize() & 7) != 0;
+ }
+ template<class OutputStream>
+ void save(bool *pb, OutputStream& os, int ioMode) const
+ {
+ const char sep = *fp::getIoSeparator(ioMode);
+ if (ioMode & IoEcProj) {
+ cybozu::writeChar(pb, os, '4'); if (!*pb) return;
+ if (sep) {
+ cybozu::writeChar(pb, os, sep);
+ if (!*pb) return;
+ }
+ x.save(pb, os, ioMode); if (!*pb) return;
+ if (sep) {
+ cybozu::writeChar(pb, os, sep);
+ if (!*pb) return;
+ }
+ y.save(pb, os, ioMode); if (!*pb) return;
+ if (sep) {
+ cybozu::writeChar(pb, os, sep);
+ if (!*pb) return;
+ }
+#ifndef MCL_EC_USE_AFFINE
+ z.save(pb, os, ioMode);
+#endif
+ return;
+ }
+ EcT P(*this);
+ P.normalize();
+ if (ioMode & (IoSerialize | IoSerializeHexStr)) {
+ /*
+ if (isMSBserialize()) {
+ // n bytes
+ x | (y.isOdd ? 0x80 : 0)
+ } else {
+ // n + 1 bytes
+ (y.isOdd ? 3 : 2), x
+ }
+ */
+ const size_t n = Fp::getByteSize();
+ const size_t adj = isMSBserialize() ? 0 : 1;
+ char buf[sizeof(Fp) + 1];
+ if (isZero()) {
+ memset(buf, 0, n + adj);
+ } else {
+ cybozu::MemoryOutputStream mos(buf + adj, n);
+ P.x.save(pb, mos, IoSerialize); if (!*pb) return;
+ if (adj) {
+ buf[0] = P.y.isOdd() ? 3 : 2;
+ } else {
+ if (P.y.isOdd()) {
+ buf[n - 1] |= 0x80;
+ }
+ }
+ }
+ if (ioMode & IoSerializeHexStr) {
+ mcl::fp::writeHexStr(pb, os, buf, n + adj);
+ } else {
+ cybozu::write(pb, os, buf, n + adj);
+ }
+ return;
+ }
+ if (isZero()) {
+ cybozu::writeChar(pb, os, '0');
+ return;
+ }
+ if (ioMode & IoEcCompY) {
+ cybozu::writeChar(pb, os, P.y.isOdd() ? '3' : '2');
+ if (!*pb) return;
+ if (sep) {
+ cybozu::writeChar(pb, os, sep);
+ if (!*pb) return;
+ }
+ P.x.save(pb, os, ioMode);
+ } else {
+ cybozu::writeChar(pb, os, '1'); if (!*pb) return;
+ if (sep) {
+ cybozu::writeChar(pb, os, sep);
+ if (!*pb) return;
+ }
+ P.x.save(pb, os, ioMode); if (!*pb) return;
+ if (sep) {
+ cybozu::writeChar(pb, os, sep);
+ if (!*pb) return;
+ }
+ P.y.save(pb, os, ioMode);
+ }
+ }
+ template<class InputStream>
+ void load(bool *pb, InputStream& is, int ioMode)
+ {
+#ifdef MCL_EC_USE_AFFINE
+ inf_ = false;
+#else
+ z = 1;
+#endif
+ if (ioMode & (IoSerialize | IoSerializeHexStr)) {
+ const size_t n = Fp::getByteSize();
+ const size_t adj = isMSBserialize() ? 0 : 1;
+ const size_t n1 = n + adj;
+ char buf[sizeof(Fp) + 1];
+ size_t readSize;
+ if (ioMode & IoSerializeHexStr) {
+ readSize = mcl::fp::readHexStr(buf, n1, is);
+ } else {
+ readSize = cybozu::readSome(buf, n1, is);
+ }
+ if (readSize != n1) {
+ *pb = false;
+ return;
+ }
+ if (fp::isZeroArray(buf, n1)) {
+ clear();
+ *pb = true;
+ return;
+ }
+ bool isYodd;
+ if (adj) {
+ char c = buf[0];
+ if (c != 2 && c != 3) {
+ *pb = false;
+ return;
+ }
+ isYodd = c == 3;
+ } else {
+ isYodd = (buf[n - 1] >> 7) != 0;
+ buf[n - 1] &= 0x7f;
+ }
+ x.setArray(pb, buf + adj, n);
+ if (!*pb) return;
+ *pb = getYfromX(y, x, isYodd);
+ if (!*pb) return;
+ } else {
+ char c = 0;
+ if (!fp::local::skipSpace(&c, is)) {
+ *pb = false;
+ return;
+ }
+ if (c == '0') {
+ clear();
+ *pb = true;
+ return;
+ }
+ x.load(pb, is, ioMode); if (!*pb) return;
+ if (c == '1') {
+ y.load(pb, is, ioMode); if (!*pb) return;
+ if (!isValid(x, y)) {
+ *pb = false;
+ return;
+ }
+ } else if (c == '2' || c == '3') {
+ bool isYodd = c == '3';
+ *pb = getYfromX(y, x, isYodd);
+ if (!*pb) return;
+ } else if (c == '4') {
+ y.load(pb, is, ioMode); if (!*pb) return;
+#ifndef MCL_EC_USE_AFFINE
+ z.load(pb, is, ioMode); if (!*pb) return;
+#endif
+ } else {
+ *pb = false;
+ return;
+ }
+ }
+ if (verifyOrder_ && !isValidOrder()) {
+ *pb = false;
+ } else {
+ *pb = true;
+ }
+ }
+ // deprecated
+ static void setCompressedExpression(bool compressedExpression = true)
+ {
+ if (compressedExpression) {
+ ioMode_ |= IoEcCompY;
+ } else {
+ ioMode_ &= ~IoEcCompY;
+ }
+ }
+ /*
+ set IoMode for operator<<(), or operator>>()
+ */
+ static void setIoMode(int ioMode)
+ {
+ assert(!(ioMode & 0xff));
+ ioMode_ = ioMode;
+ }
+ static inline int getIoMode() { return Fp::BaseFp::getIoMode() | ioMode_; }
+ static inline void getWeierstrass(Fp& yy, const Fp& x)
+ {
+ Fp t;
+ Fp::sqr(t, x);
+ t += a_;
+ t *= x;
+ Fp::add(yy, t, b_);
+ }
+ static inline bool getYfromX(Fp& y, const Fp& x, bool isYodd)
+ {
+ getWeierstrass(y, x);
+ if (!Fp::squareRoot(y, y)) {
+ return false;
+ }
+ if (y.isOdd() ^ isYodd) {
+ Fp::neg(y, y);
+ }
+ return true;
+ }
+ inline friend EcT operator+(const EcT& x, const EcT& y) { EcT z; add(z, x, y); return z; }
+ inline friend EcT operator-(const EcT& x, const EcT& y) { EcT z; sub(z, x, y); return z; }
+ template<class INT>
+ inline friend EcT operator*(const EcT& x, const INT& y) { EcT z; mul(z, x, y); return z; }
+ EcT& operator+=(const EcT& x) { add(*this, *this, x); return *this; }
+ EcT& operator-=(const EcT& x) { sub(*this, *this, x); return *this; }
+ template<class INT>
+ EcT& operator*=(const INT& x) { mul(*this, *this, x); return *this; }
+ EcT operator-() const { EcT x; neg(x, *this); return x; }
+ bool operator==(const EcT& rhs) const
+ {
+ EcT R;
+ sub(R, *this, rhs); // QQQ : optimized later
+ return R.isZero();
+ }
+ bool operator!=(const EcT& rhs) const { return !operator==(rhs); }
+ bool operator<(const EcT& rhs) const
+ {
+ return compare(*this, rhs) < 0;
+ }
+ bool operator>=(const EcT& rhs) const { return !operator<(rhs); }
+ bool operator>(const EcT& rhs) const { return rhs < *this; }
+ bool operator<=(const EcT& rhs) const { return !operator>(rhs); }
+ static inline void mulArray(EcT& z, const EcT& x, const fp::Unit *y, size_t yn, bool isNegative, bool constTime = false)
+ {
+ if (!constTime && x.isZero()) {
+ z.clear();
+ return;
+ }
+ if (mulArrayGLV && (constTime || yn > 1)) {
+ mulArrayGLV(z, x, y, yn, isNegative, constTime);
+ return;
+ }
+ mulArrayBase(z, x, y, yn, isNegative, constTime);
+ }
+ static inline void mulArrayBase(EcT& z, const EcT& x, const fp::Unit *y, size_t yn, bool isNegative, bool constTime)
+ {
+ EcT tmp;
+ const EcT *px = &x;
+ if (&z == &x) {
+ tmp = x;
+ px = &tmp;
+ }
+ z.clear();
+ fp::powGeneric(z, *px, y, yn, EcT::add, EcT::dbl, EcT::normalize, constTime ? Fp::BaseFp::getBitSize() : 0);
+ if (isNegative) {
+ neg(z, z);
+ }
+ }
+ /*
+ generic mul
+ */
+ static inline void mulGeneric(EcT& z, const EcT& x, const mpz_class& y, bool constTime = false)
+ {
+ mulArrayBase(z, x, gmp::getUnit(y), gmp::getUnitSize(y), y < 0, constTime);
+ }
+#ifndef CYBOZU_DONT_USE_EXCEPTION
+ static inline void init(const std::string& astr, const std::string& bstr, int mode = ec::Jacobi)
+ {
+ bool b;
+ init(&b, astr.c_str(), bstr.c_str(), mode);
+ if (!b) throw cybozu::Exception("mcl:EcT:init");
+ }
+ void set(const Fp& _x, const Fp& _y, bool verify = true)
+ {
+ bool b;
+ set(&b, _x, _y, verify);
+ if (!b) throw cybozu::Exception("ec:EcT:set") << _x << _y;
+ }
+ template<class OutputStream>
+ void save(OutputStream& os, int ioMode = IoSerialize) const
+ {
+ bool b;
+ save(&b, os, ioMode);
+ if (!b) throw cybozu::Exception("EcT:save");
+ }
+ template<class InputStream>
+ void load(InputStream& is, int ioMode = IoSerialize)
+ {
+ bool b;
+ load(&b, is, ioMode);
+ if (!b) throw cybozu::Exception("EcT:load");
+ }
+#endif
+#ifndef CYBOZU_DONT_USE_STRING
+ // backward compatibility
+ static inline void setParam(const std::string& astr, const std::string& bstr, int mode = ec::Jacobi)
+ {
+ init(astr, bstr, mode);
+ }
+ friend inline std::istream& operator>>(std::istream& is, EcT& self)
+ {
+ self.load(is, fp::detectIoMode(getIoMode(), is));
+ return is;
+ }
+ friend inline std::ostream& operator<<(std::ostream& os, const EcT& self)
+ {
+ self.save(os, fp::detectIoMode(getIoMode(), os));
+ return os;
+ }
+#endif
+};
+
+template<class Fp> Fp EcT<Fp>::a_;
+template<class Fp> Fp EcT<Fp>::b_;
+template<class Fp> int EcT<Fp>::specialA_;
+template<class Fp> int EcT<Fp>::ioMode_;
+template<class Fp> bool EcT<Fp>::verifyOrder_;
+template<class Fp> mpz_class EcT<Fp>::order_;
+template<class Fp> void (*EcT<Fp>::mulArrayGLV)(EcT& z, const EcT& x, const fp::Unit *y, size_t yn, bool isNegative, bool constTime);
+#ifndef MCL_EC_USE_AFFINE
+template<class Fp> int EcT<Fp>::mode_;
+#endif
+
+struct EcParam {
+ const char *name;
+ const char *p;
+ const char *a;
+ const char *b;
+ const char *gx;
+ const char *gy;
+ const char *n;
+ size_t bitSize; // bit length of p
+ int curveType;
+};
+
+} // mcl
+
+#ifdef CYBOZU_USE_BOOST
+namespace mcl {
+template<class Fp>
+size_t hash_value(const mcl::EcT<Fp>& P_)
+{
+ if (P_.isZero()) return 0;
+ mcl::EcT<Fp> P(P_); P.normalize();
+ return mcl::hash_value(P.y, mcl::hash_value(P.x));
+}
+
+}
+#else
+namespace std { CYBOZU_NAMESPACE_TR1_BEGIN
+
+template<class Fp>
+struct hash<mcl::EcT<Fp> > {
+ size_t operator()(const mcl::EcT<Fp>& P_) const
+ {
+ if (P_.isZero()) return 0;
+ mcl::EcT<Fp> P(P_); P.normalize();
+ return hash<Fp>()(P.y, hash<Fp>()(P.x));
+ }
+};
+
+CYBOZU_NAMESPACE_TR1_END } // std
+#endif
+
+#ifdef _MSC_VER
+ #pragma warning(pop)
+#endif
diff --git a/vendor/github.com/tangerine-network/mcl/include/mcl/ecdsa.h b/vendor/github.com/tangerine-network/mcl/include/mcl/ecdsa.h
new file mode 100644
index 000000000..daeb6be53
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/mcl/ecdsa.h
@@ -0,0 +1,105 @@
+#pragma once
+/**
+ @file
+ @brief C interface of ECDSA
+ @author MITSUNARI Shigeo(@herumi)
+ @license modified new BSD license
+ http://opensource.org/licenses/BSD-3-Clause
+*/
+#include <stdint.h> // for uint64_t, uint8_t
+#include <stdlib.h> // for size_t
+
+#if defined(_MSC_VER)
+ #ifdef ECDSA_DLL_EXPORT
+ #define ECDSA_DLL_API __declspec(dllexport)
+ #else
+ #define ECDSA_DLL_API __declspec(dllimport)
+ #ifndef ECDSA_NO_AUTOLINK
+ #pragma comment(lib, "mclecdsa.lib")
+ #endif
+ #endif
+#elif defined(__EMSCRIPTEN__)
+ #define ECDSA_DLL_API __attribute__((used))
+#else
+ #define ECDSA_DLL_API
+#endif
+
+#ifndef mclSize
+ #ifdef __EMSCRIPTEN__
+ // avoid 64-bit integer
+ #define mclSize unsigned int
+ #define mclInt int
+ #else
+ // use #define for cgo
+ #define mclSize size_t
+ #define mclInt int64_t
+ #endif
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef ECDSA_NOT_DEFINE_STRUCT
+
+typedef struct ecdsaSecretKey ecdsaSecretKey;
+typedef struct ecdsaPublicKey ecdsaPublicKey;
+typedef struct ecdsaSignature ecdsaSignature;
+
+#else
+
+typedef struct {
+ uint64_t d[4];
+} ecdsaSecretKey;
+
+typedef struct {
+ uint64_t d[4 * 3];
+} ecdsaPublicKey;
+
+typedef struct {
+ uint64_t d[4 * 2];
+} ecdsaSignature;
+
+#endif
+
+struct ecdsaPrecomputedPublicKey;
+
+/*
+ init library
+ return 0 if success
+ @note not threadsafe
+*/
+ECDSA_DLL_API int ecdsaInit(void);
+
+// return written byte size if success else 0
+ECDSA_DLL_API mclSize ecdsaSecretKeySerialize(void *buf, mclSize maxBufSize, const ecdsaSecretKey *sec);
+ECDSA_DLL_API mclSize ecdsaPublicKeySerialize(void *buf, mclSize maxBufSize, const ecdsaPublicKey *pub);
+ECDSA_DLL_API mclSize ecdsaSignatureSerialize(void *buf, mclSize maxBufSize, const ecdsaSignature *sig);
+
+// return read byte size if success else 0
+ECDSA_DLL_API mclSize ecdsaSecretKeyDeserialize(ecdsaSecretKey* sec, const void *buf, mclSize bufSize);
+ECDSA_DLL_API mclSize ecdsaPublicKeyDeserialize(ecdsaPublicKey* pub, const void *buf, mclSize bufSize);
+ECDSA_DLL_API mclSize ecdsaSignatureDeserialize(ecdsaSignature* sig, const void *buf, mclSize bufSize);
+
+// return 0 if success
+ECDSA_DLL_API int ecdsaSecretKeySetByCSPRNG(ecdsaSecretKey *sec);
+
+ECDSA_DLL_API void ecdsaGetPublicKey(ecdsaPublicKey *pub, const ecdsaSecretKey *sec);
+
+ECDSA_DLL_API void ecdsaSign(ecdsaSignature *sig, const ecdsaSecretKey *sec, const void *m, mclSize size);
+
+// return 1 if valid
+ECDSA_DLL_API int ecdsaVerify(const ecdsaSignature *sig, const ecdsaPublicKey *pub, const void *m, mclSize size);
+ECDSA_DLL_API int ecdsaVerifyPrecomputed(const ecdsaSignature *sig, const ecdsaPrecomputedPublicKey *pub, const void *m, mclSize size);
+
+// return a non-null pointer if success
+ECDSA_DLL_API ecdsaPrecomputedPublicKey *ecdsaPrecomputedPublicKeyCreate();
+// call this function to avoid memory leak
+ECDSA_DLL_API void ecdsaPrecomputedPublicKeyDestroy(ecdsaPrecomputedPublicKey *ppub);
+// return 0 if success
+ECDSA_DLL_API int ecdsaPrecomputedPublicKeyInit(ecdsaPrecomputedPublicKey *ppub, const ecdsaPublicKey *pub);
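+
+/*
+ usage sketch (illustrative only, not part of the API contract):
+   ecdsaInit();
+   ecdsaSecretKey sec; ecdsaPublicKey pub; ecdsaSignature sig;
+   ecdsaSecretKeySetByCSPRNG(&sec);
+   ecdsaGetPublicKey(&pub, &sec);
+   ecdsaSign(&sig, &sec, "msg", 3);
+   int ok = ecdsaVerify(&sig, &pub, "msg", 3); // 1 if valid
+*/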
+
+#ifdef __cplusplus
+}
+#endif
+
diff --git a/vendor/github.com/tangerine-network/mcl/include/mcl/ecdsa.hpp b/vendor/github.com/tangerine-network/mcl/include/mcl/ecdsa.hpp
new file mode 100644
index 000000000..cf3ed3f65
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/mcl/ecdsa.hpp
@@ -0,0 +1,257 @@
+#pragma once
+/**
+ @file
+ @brief ECDSA
+ @author MITSUNARI Shigeo(@herumi)
+ @license modified new BSD license
+ http://opensource.org/licenses/BSD-3-Clause
+*/
+#include <mcl/fp.hpp>
+#include <mcl/ec.hpp>
+#include <mcl/ecparam.hpp>
+#include <mcl/window_method.hpp>
+
+namespace mcl { namespace ecdsa {
+
+namespace local {
+
+#ifndef MCLSHE_WIN_SIZE
+ #define MCLSHE_WIN_SIZE 10
+#endif
+static const size_t winSize = MCLSHE_WIN_SIZE;
+
+struct FpTag;
+struct ZnTag;
+
+} // mcl::ecdsa::local
+
+typedef mcl::FpT<local::FpTag, 256> Fp;
+typedef mcl::FpT<local::ZnTag, 256> Zn;
+typedef mcl::EcT<Fp> Ec;
+
+namespace local {
+
+struct Param {
+ mcl::EcParam ecParam;
+ Ec P;
+ mcl::fp::WindowMethod<Ec> Pbase;
+};
+
+inline Param& getParam()
+{
+ static Param p;
+ return p;
+}
+
+inline void be32toZn(Zn& x, const mcl::fp::Unit *buf)
+{
+ const size_t n = 32;
+ const unsigned char *p = (const unsigned char*)buf;
+ unsigned char be[n];
+ for (size_t i = 0; i < n; i++) {
+ be[i] = p[n - 1 - i];
+ }
+ x.setArrayMaskMod(be, n);
+}
+
+/*
+ y = x mod n
+*/
+inline void FpToZn(Zn& y, const Fp& x)
+{
+ fp::Block b;
+ x.getBlock(b);
+ y.setArrayMaskMod(b.p, b.n);
+}
+
+inline void setHashOf(Zn& x, const void *msg, size_t msgSize)
+{
+ mcl::fp::Unit xBuf[256 / 8 / sizeof(mcl::fp::Unit)];
+ uint32_t hashSize = mcl::fp::sha256(xBuf, sizeof(xBuf), msg, (uint32_t)msgSize);
+ assert(hashSize == sizeof(xBuf));
+ (void)hashSize;
+ be32toZn(x, xBuf);
+}
+
+} // mcl::ecdsa::local
+
+const local::Param& param = local::getParam();
+
+inline void init(bool *pb)
+{
+ const mcl::EcParam& ecParam = mcl::ecparam::secp256k1;
+ Zn::init(pb, ecParam.n);
+ if (!*pb) return;
+ Fp::init(pb, ecParam.p);
+ if (!*pb) return;
+ Ec::init(pb, ecParam.a, ecParam.b);
+ if (!*pb) return;
+ Zn::setIoMode(16);
+ Fp::setIoMode(16);
+ Ec::setIoMode(mcl::IoEcAffine);
+ local::Param& p = local::getParam();
+ p.ecParam = ecParam;
+ Fp x, y;
+ x.setStr(pb, ecParam.gx);
+ if (!*pb) return;
+ y.setStr(pb, ecParam.gy);
+ if (!*pb) return;
+ p.P.set(pb, x, y);
+ if (!*pb) return;
+ p.Pbase.init(pb, p.P, ecParam.bitSize, local::winSize);
+}
+
+#ifndef CYBOZU_DONT_USE_EXCEPTION
+inline void init()
+{
+ bool b;
+ init(&b);
+ if (!b) throw cybozu::Exception("ecdsa:init");
+}
+#endif
+
+typedef Zn SecretKey;
+typedef Ec PublicKey;
+
+struct PrecomputedPublicKey {
+ mcl::fp::WindowMethod<Ec> pubBase_;
+ void init(bool *pb, const PublicKey& pub)
+ {
+ pubBase_.init(pb, pub, param.ecParam.bitSize, local::winSize);
+ }
+#ifndef CYBOZU_DONT_USE_EXCEPTION
+ void init(const PublicKey& pub)
+ {
+ bool b;
+ init(&b, pub);
+ if (!b) throw cybozu::Exception("ecdsa:PrecomputedPublicKey:init");
+ }
+#endif
+};
+
+inline void getPublicKey(PublicKey& pub, const SecretKey& sec)
+{
+ Ec::mul(pub, param.P, sec);
+ pub.normalize();
+}
+
+struct Signature : public mcl::fp::Serializable<Signature> {
+ Zn r, s;
+ template<class InputStream>
+ void load(bool *pb, InputStream& is, int ioMode = IoSerialize)
+ {
+ r.load(pb, is, ioMode); if (!*pb) return;
+ s.load(pb, is, ioMode);
+ }
+ template<class OutputStream>
+ void save(bool *pb, OutputStream& os, int ioMode = IoSerialize) const
+ {
+ const char sep = *fp::getIoSeparator(ioMode);
+ r.save(pb, os, ioMode); if (!*pb) return;
+ if (sep) {
+ cybozu::writeChar(pb, os, sep);
+ if (!*pb) return;
+ }
+ s.save(pb, os, ioMode);
+ }
+#ifndef CYBOZU_DONT_USE_EXCEPTION
+ template<class InputStream>
+ void load(InputStream& is, int ioMode = IoSerialize)
+ {
+ bool b;
+ load(&b, is, ioMode);
+ if (!b) throw cybozu::Exception("ecdsa:Signature:load");
+ }
+ template<class OutputStream>
+ void save(OutputStream& os, int ioMode = IoSerialize) const
+ {
+ bool b;
+ save(&b, os, ioMode);
+ if (!b) throw cybozu::Exception("ecdsa:Signature:save");
+ }
+#endif
+#ifndef CYBOZU_DONT_USE_STRING
+ friend std::istream& operator>>(std::istream& is, Signature& self)
+ {
+ self.load(is, fp::detectIoMode(Ec::getIoMode(), is));
+ return is;
+ }
+ friend std::ostream& operator<<(std::ostream& os, const Signature& self)
+ {
+ self.save(os, fp::detectIoMode(Ec::getIoMode(), os));
+ return os;
+ }
+#endif
+};
+
+inline void sign(Signature& sig, const SecretKey& sec, const void *msg, size_t msgSize)
+{
+ Zn& r = sig.r;
+ Zn& s = sig.s;
+ Zn z, k;
+ local::setHashOf(z, msg, msgSize);
+ Ec Q;
+ for (;;) {
+ k.setByCSPRNG();
+ param.Pbase.mul(Q, k);
+ if (Q.isZero()) continue;
+ Q.normalize();
+ local::FpToZn(r, Q.x);
+ if (r.isZero()) continue;
+ Zn::mul(s, r, sec);
+ s += z;
+ if (s.isZero()) continue;
+ s /= k;
+ return;
+ }
+}
+
+namespace local {
+
+inline void mulDispatch(Ec& Q, const PublicKey& pub, const Zn& y)
+{
+ Ec::mul(Q, pub, y);
+}
+
+inline void mulDispatch(Ec& Q, const PrecomputedPublicKey& ppub, const Zn& y)
+{
+ ppub.pubBase_.mul(Q, y);
+}
+
+template<class Pub>
+inline bool verify(const Signature& sig, const Pub& pub, const void *msg, size_t msgSize)
+{
+ const Zn& r = sig.r;
+ const Zn& s = sig.s;
+ if (r.isZero() || s.isZero()) return false;
+ Zn z, w, u1, u2;
+ local::setHashOf(z, msg, msgSize);
+ Zn::inv(w, s);
+ Zn::mul(u1, z, w);
+ Zn::mul(u2, r, w);
+ Ec Q1, Q2;
+ param.Pbase.mul(Q1, u1);
+// Ec::mul(Q2, pub, u2);
+ local::mulDispatch(Q2, pub, u2);
+ Q1 += Q2;
+ if (Q1.isZero()) return false;
+ Q1.normalize();
+ Zn x;
+ local::FpToZn(x, Q1.x);
+ return r == x;
+}
+
+} // mcl::ecdsa::local
+
+inline bool verify(const Signature& sig, const PublicKey& pub, const void *msg, size_t msgSize)
+{
+ return local::verify(sig, pub, msg, msgSize);
+}
+
+inline bool verify(const Signature& sig, const PrecomputedPublicKey& ppub, const void *msg, size_t msgSize)
+{
+ return local::verify(sig, ppub, msg, msgSize);
+}
+
+} } // mcl::ecdsa
+
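A corresponding C++ sketch of the header above (secp256k1 only), assuming exceptions are enabled, i.e. CYBOZU_DONT_USE_EXCEPTION is not defined; all names are the ones defined in this file:

#include <mcl/ecdsa.hpp>
#include <cassert>
#include <string>

int main()
{
	using namespace mcl::ecdsa;
	init(); // sets up Fp, Zn, the secp256k1 base point and its window table
	SecretKey sec;
	sec.setByCSPRNG();
	PublicKey pub;
	getPublicKey(pub, sec);
	const std::string msg = "hello";
	Signature sig;
	sign(sig, sec, msg.data(), msg.size());
	assert(verify(sig, pub, msg.data(), msg.size()));
	// PrecomputedPublicKey trades one-time table setup for a faster verify()
	PrecomputedPublicKey ppub;
	ppub.init(pub);
	assert(verify(sig, ppub, msg.data(), msg.size()));
	return 0;
}
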
diff --git a/vendor/github.com/tangerine-network/mcl/include/mcl/ecparam.hpp b/vendor/github.com/tangerine-network/mcl/include/mcl/ecparam.hpp
new file mode 100644
index 000000000..087bf8b6c
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/mcl/ecparam.hpp
@@ -0,0 +1,191 @@
+#pragma once
+/**
+ @file
+ @brief Elliptic curve parameter
+ @author MITSUNARI Shigeo(@herumi)
+ @license modified new BSD license
+ http://opensource.org/licenses/BSD-3-Clause
+*/
+#include <mcl/ec.hpp>
+#include <mcl/curve_type.h>
+
+namespace mcl { namespace ecparam {
+
+const struct mcl::EcParam secp160k1 = {
+ "secp160k1",
+ "0xfffffffffffffffffffffffffffffffeffffac73",
+ "0",
+ "7",
+ "0x3b4c382ce37aa192a4019e763036f4f5dd4d7ebb",
+ "0x938cf935318fdced6bc28286531733c3f03c4fee",
+ "0x100000000000000000001b8fa16dfab9aca16b6b3",
+ 160,
+ -1
+};
+// p=2^160 + 7
+const struct mcl::EcParam p160_1 = {
+ "p160_1",
+ "0x10000000000000000000000000000000000000007",
+ "10",
+ "1343632762150092499701637438970764818528075565078",
+ "1",
+ "1236612389951462151661156731535316138439983579284",
+ "1461501637330902918203683518218126812711137002561",
+ 161,
+ -1
+};
+const struct mcl::EcParam secp192k1 = {
+ "secp192k1",
+ "0xfffffffffffffffffffffffffffffffffffffffeffffee37",
+ "0",
+ "3",
+ "0xdb4ff10ec057e9ae26b07d0280b7f4341da5d1b1eae06c7d",
+ "0x9b2f2f6d9c5628a7844163d015be86344082aa88d95e2f9d",
+ "0xfffffffffffffffffffffffe26f2fc170f69466a74defd8d",
+ 192,
+ MCL_SECP192K1
+};
+const struct mcl::EcParam secp224k1 = {
+ "secp224k1",
+ "0xfffffffffffffffffffffffffffffffffffffffffffffffeffffe56d",
+ "0",
+ "5",
+ "0xa1455b334df099df30fc28a169a467e9e47075a90f7e650eb6b7a45c",
+ "0x7e089fed7fba344282cafbd6f7e319f7c0b0bd59e2ca4bdb556d61a5",
+ "0x10000000000000000000000000001dce8d2ec6184caf0a971769fb1f7",
+ 224,
+ MCL_SECP224K1
+};
+const struct mcl::EcParam secp256k1 = {
+ "secp256k1",
+ "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f",
+ "0",
+ "7",
+ "0x79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798",
+ "0x483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8",
+ "0xfffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141",
+ 256,
+ MCL_SECP256K1
+};
+const struct mcl::EcParam secp384r1 = {
+ "secp384r1",
+ "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff",
+ "-3",
+ "0xb3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088f5013875ac656398d8a2ed19d2a85c8edd3ec2aef",
+ "0xaa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741e082542a385502f25dbf55296c3a545e3872760ab7",
+ "0x3617de4a96262c6f5d9e98bf9292dc29f8f41dbd289a147ce9da3113b5f0b8c00a60b1ce1d7e819d7a431d7c90ea0e5f",
+ "0xffffffffffffffffffffffffffffffffffffffffffffffffc7634d81f4372ddf581a0db248b0a77aecec196accc52973",
+ 384,
+ MCL_SECP384R1
+};
+const struct mcl::EcParam secp521r1 = {
+ "secp521r1",
+ "0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "-3",
+ "0x51953eb9618e1c9a1f929a21a0b68540eea2da725b99b315f3b8b489918ef109e156193951ec7e937b1652c0bd3bb1bf073573df883d2c34f1ef451fd46b503f00",
+ "0xc6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f828af606b4d3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf97e7e31c2e5bd66",
+ "0x11839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817afbd17273e662c97ee72995ef42640c550b9013fad0761353c7086a272c24088be94769fd16650",
+ "0x1fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa51868783bf2f966b7fcc0148f709a5d03bb5c9b8899c47aebb6fb71e91386409",
+ 521,
+ MCL_SECP521R1
+};
+const struct mcl::EcParam NIST_P192 = {
+ "NIST_P192",
+ "0xfffffffffffffffffffffffffffffffeffffffffffffffff",
+ "-3",
+ "0x64210519e59c80e70fa7e9ab72243049feb8deecc146b9b1",
+ "0x188da80eb03090f67cbf20eb43a18800f4ff0afd82ff1012",
+ "0x07192b95ffc8da78631011ed6b24cdd573f977a11e794811",
+ "0xffffffffffffffffffffffff99def836146bc9b1b4d22831",
+ 192,
+ MCL_NIST_P192
+};
+const struct mcl::EcParam NIST_P224 = {
+ "NIST_P224",
+ "0xffffffffffffffffffffffffffffffff000000000000000000000001",
+ "-3",
+ "0xb4050a850c04b3abf54132565044b0b7d7bfd8ba270b39432355ffb4",
+ "0xb70e0cbd6bb4bf7f321390b94a03c1d356c21122343280d6115c1d21",
+ "0xbd376388b5f723fb4c22dfe6cd4375a05a07476444d5819985007e34",
+ "0xffffffffffffffffffffffffffff16a2e0b8f03e13dd29455c5c2a3d",
+ 224,
+ MCL_NIST_P224
+};
+const struct mcl::EcParam NIST_P256 = {
+ "NIST_P256",
+ "0xffffffff00000001000000000000000000000000ffffffffffffffffffffffff",
+ "-3",
+ "0x5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b",
+ "0x6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296",
+ "0x4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5",
+ "0xffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551",
+ 256,
+ MCL_NIST_P256
+};
+// same secp384r1
+const struct mcl::EcParam NIST_P384 = {
+ "NIST_P384",
+ "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff",
+ "-3",
+ "0xb3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088f5013875ac656398d8a2ed19d2a85c8edd3ec2aef",
+ "0xaa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741e082542a385502f25dbf55296c3a545e3872760ab7",
+ "0x3617de4a96262c6f5d9e98bf9292dc29f8f41dbd289a147ce9da3113b5f0b8c00a60b1ce1d7e819d7a431d7c90ea0e5f",
+ "0xffffffffffffffffffffffffffffffffffffffffffffffffc7634d81f4372ddf581a0db248b0a77aecec196accc52973",
+ 384,
+ MCL_NIST_P384
+};
+// same secp521r1
+const struct mcl::EcParam NIST_P521 = {
+ "NIST_P521",
+ "0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "-3",
+ "0x051953eb9618e1c9a1f929a21a0b68540eea2da725b99b315f3b8b489918ef109e156193951ec7e937b1652c0bd3bb1bf073573df883d2c34f1ef451fd46b503f00",
+ "0xc6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f828af606b4d3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf97e7e31c2e5bd66",
+ "0x11839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817afbd17273e662c97ee72995ef42640c550b9013fad0761353c7086a272c24088be94769fd16650",
+ "0x1fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa51868783bf2f966b7fcc0148f709a5d03bb5c9b8899c47aebb6fb71e91386409",
+ 521,
+ MCL_NIST_P521
+};
+
+} // mcl::ecparam
+
+#ifndef CYBOZU_DONT_USE_STRING
+static inline const mcl::EcParam* getEcParam(const std::string& name)
+{
+ static const mcl::EcParam *tbl[] = {
+ &ecparam::p160_1,
+ &ecparam::secp160k1,
+ &ecparam::secp192k1,
+ &ecparam::secp224k1,
+ &ecparam::secp256k1,
+ &ecparam::secp384r1,
+ &ecparam::secp521r1,
+
+ &ecparam::NIST_P192,
+ &ecparam::NIST_P224,
+ &ecparam::NIST_P256,
+ &ecparam::NIST_P384,
+ &ecparam::NIST_P521,
+ };
+ for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) {
+ if (name == tbl[i]->name) return tbl[i];
+ }
+ throw cybozu::Exception("mcl::getEcParam:not support name") << name;
+}
+#endif
+
+inline const mcl::EcParam* getEcParam(int curve)
+{
+ switch (curve) {
+ case MCL_SECP192K1: return &ecparam::secp192k1;
+ case MCL_SECP224K1: return &ecparam::secp224k1;
+ case MCL_SECP256K1: return &ecparam::secp256k1;
+ case MCL_SECP384R1: return &ecparam::secp384r1;
+ case MCL_NIST_P192: return &ecparam::NIST_P192;
+ case MCL_NIST_P224: return &ecparam::NIST_P224;
+ case MCL_NIST_P256: return &ecparam::NIST_P256;
+ default: return 0;
+ }
+}
+
+} // mcl
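A sketch of how a parameter set from these tables might be used to initialize field and curve types. The tag structs FTag/ZTag, the type aliases and the 256-bit size are hypothetical choices made only for this sketch; the error-flag style mirrors the calls used in ecdsa.hpp above:

#include <mcl/ecparam.hpp>
#include <cassert>

// hypothetical tags and aliases, introduced only for this sketch
struct FTag; struct ZTag;
typedef mcl::FpT<FTag, 256> Fp;
typedef mcl::FpT<ZTag, 256> Zn;
typedef mcl::EcT<Fp> Ec;

int main()
{
	const mcl::EcParam *para = mcl::getEcParam(MCL_SECP256K1);
	assert(para == &mcl::ecparam::secp256k1);
	bool b;
	Fp::init(&b, para->p); assert(b);
	Zn::init(&b, para->n); assert(b);
	Ec::init(&b, para->a, para->b); assert(b);
	Fp x, y;
	x.setStr(&b, para->gx); assert(b);
	y.setStr(&b, para->gy); assert(b);
	Ec P;
	P.set(&b, x, y); assert(b); // base point of the curve
	return 0;
}
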
diff --git a/vendor/github.com/tangerine-network/mcl/include/mcl/elgamal.hpp b/vendor/github.com/tangerine-network/mcl/include/mcl/elgamal.hpp
new file mode 100644
index 000000000..431148508
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/mcl/elgamal.hpp
@@ -0,0 +1,612 @@
+#pragma once
+/**
+ @file
+ @brief lifted-ElGamal encryption
+ @author MITSUNARI Shigeo(@herumi)
+ @license modified new BSD license
+ http://opensource.org/licenses/BSD-3-Clause
+
+ original:
+ Copyright (c) 2014, National Institute of Advanced Industrial
+ Science and Technology All rights reserved.
+ This source file is subject to BSD 3-Clause license.
+*/
+#include <string>
+#include <sstream>
+#include <cybozu/unordered_map.hpp>
+#ifndef CYBOZU_UNORDERED_MAP_STD
+#include <map>
+#endif
+#include <cybozu/exception.hpp>
+#include <cybozu/itoa.hpp>
+#include <cybozu/atoi.hpp>
+#include <mcl/window_method.hpp>
+
+namespace mcl {
+
+template<class _Ec, class Zn>
+struct ElgamalT {
+ typedef _Ec Ec;
+ struct CipherText {
+ Ec c1;
+ Ec c2;
+ CipherText()
+ {
+ clear();
+ }
+ /*
+			(c1, c2) = (0, 0) is a trivially valid ciphertext for m = 0
+ */
+ void clear()
+ {
+ c1.clear();
+ c2.clear();
+ }
+ /*
+ add encoded message with encoded message
+ input : this = Enc(m1), c = Enc(m2)
+ output : this = Enc(m1 + m2)
+ */
+ void add(const CipherText& c)
+ {
+ Ec::add(c1, c1, c.c1);
+ Ec::add(c2, c2, c.c2);
+ }
+ /*
+ mul by x
+ input : this = Enc(m), x
+ output : this = Enc(m x)
+ */
+ template<class N>
+ void mul(const N& x)
+ {
+ Ec::mul(c1, c1, x);
+ Ec::mul(c2, c2, x);
+ }
+ /*
+			negate encoded message
+ input : this = Enc(m)
+ output : this = Enc(-m)
+ */
+ void neg()
+ {
+ Ec::neg(c1, c1);
+ Ec::neg(c2, c2);
+ }
+ template<class InputStream>
+ void load(InputStream& is, int ioMode = IoSerialize)
+ {
+ c1.load(is, ioMode);
+ c2.load(is, ioMode);
+ }
+ template<class OutputStream>
+ void save(OutputStream& os, int ioMode = IoSerialize) const
+ {
+ const char sep = *fp::getIoSeparator(ioMode);
+ c1.save(os, ioMode);
+ if (sep) cybozu::writeChar(os, sep);
+ c2.save(os, ioMode);
+ }
+ void getStr(std::string& str, int ioMode = 0) const
+ {
+ str.clear();
+ cybozu::StringOutputStream os(str);
+ save(os, ioMode);
+ }
+ std::string getStr(int ioMode = 0) const
+ {
+ std::string str;
+ getStr(str, ioMode);
+ return str;
+ }
+ void setStr(const std::string& str, int ioMode = 0)
+ {
+ cybozu::StringInputStream is(str);
+ load(is, ioMode);
+ }
+ friend inline std::ostream& operator<<(std::ostream& os, const CipherText& self)
+ {
+ self.save(os, fp::detectIoMode(Ec::getIoMode(), os));
+ return os;
+ }
+ friend inline std::istream& operator>>(std::istream& is, CipherText& self)
+ {
+ self.load(is, fp::detectIoMode(Ec::getIoMode(), is));
+ return is;
+ }
+ // obsolete
+ std::string toStr() const { return getStr(); }
+ void fromStr(const std::string& str) { setStr(str); }
+ };
+ /*
+ Zero Knowledge Proof
+ cipher text with ZKP to ensure m = 0 or 1
+ http://dx.doi.org/10.1587/transfun.E96.A.1156
+ */
+ struct Zkp {
+ Zn c0, c1, s0, s1;
+ template<class InputStream>
+ void load(InputStream& is, int ioMode = IoSerialize)
+ {
+ c0.load(is, ioMode);
+ c1.load(is, ioMode);
+ s0.load(is, ioMode);
+ s1.load(is, ioMode);
+ }
+ template<class OutputStream>
+ void save(OutputStream& os, int ioMode = IoSerialize) const
+ {
+ const char sep = *fp::getIoSeparator(ioMode);
+ c0.save(os, ioMode);
+ if (sep) cybozu::writeChar(os, sep);
+ c1.save(os, ioMode);
+ if (sep) cybozu::writeChar(os, sep);
+ s0.save(os, ioMode);
+ if (sep) cybozu::writeChar(os, sep);
+ s1.save(os, ioMode);
+ }
+ void getStr(std::string& str, int ioMode = 0) const
+ {
+ str.clear();
+ cybozu::StringOutputStream os(str);
+ save(os, ioMode);
+ }
+ std::string getStr(int ioMode = 0) const
+ {
+ std::string str;
+ getStr(str, ioMode);
+ return str;
+ }
+ void setStr(const std::string& str, int ioMode = 0)
+ {
+ cybozu::StringInputStream is(str);
+ load(is, ioMode);
+ }
+ friend inline std::ostream& operator<<(std::ostream& os, const Zkp& self)
+ {
+ self.save(os, fp::detectIoMode(Ec::getIoMode(), os));
+ return os;
+ }
+ friend inline std::istream& operator>>(std::istream& is, Zkp& self)
+ {
+ self.load(is, fp::detectIoMode(Ec::getIoMode(), is));
+ return is;
+ }
+ // obsolete
+ std::string toStr() const { return getStr(); }
+ void fromStr(const std::string& str) { setStr(str); }
+ };
+
+ class PublicKey {
+ size_t bitSize;
+ Ec f;
+ Ec g;
+ Ec h;
+ bool enableWindowMethod_;
+ fp::WindowMethod<Ec> wm_f;
+ fp::WindowMethod<Ec> wm_g;
+ fp::WindowMethod<Ec> wm_h;
+ template<class N>
+ void mulDispatch(Ec& z, const Ec& x, const N& n, const fp::WindowMethod<Ec>& pw) const
+ {
+ if (enableWindowMethod_) {
+ pw.mul(z, n);
+ } else {
+ Ec::mul(z, x, n);
+ }
+ }
+ template<class N>
+ void mulF(Ec& z, const N& n) const { mulDispatch(z, f, n, wm_f); }
+ template<class N>
+ void mulG(Ec& z, const N& n) const { mulDispatch(z, g, n, wm_g); }
+ template<class N>
+ void mulH(Ec& z, const N& n) const { mulDispatch(z, h, n, wm_h); }
+ public:
+ PublicKey()
+ : bitSize(0)
+ , enableWindowMethod_(false)
+ {
+ }
+ void enableWindowMethod(size_t winSize = 10)
+ {
+ wm_f.init(f, bitSize, winSize);
+ wm_g.init(g, bitSize, winSize);
+ wm_h.init(h, bitSize, winSize);
+ enableWindowMethod_ = true;
+ }
+ const Ec& getF() const { return f; }
+ void init(size_t bitSize, const Ec& f, const Ec& g, const Ec& h)
+ {
+ this->bitSize = bitSize;
+ this->f = f;
+ this->g = g;
+ this->h = h;
+ enableWindowMethod_ = false;
+ enableWindowMethod();
+ }
+ /*
+ encode message
+ input : m
+ output : c = (c1, c2) = (g^u, h^u f^m)
+ */
+ void enc(CipherText& c, const Zn& m, fp::RandGen rg = fp::RandGen()) const
+ {
+ Zn u;
+ u.setRand(rg);
+ mulG(c.c1, u);
+ mulH(c.c2, u);
+ Ec t;
+ mulF(t, m);
+ Ec::add(c.c2, c.c2, t);
+ }
+ /*
+ encode message
+ input : m = 0 or 1
+ output : c (c1, c2), zkp
+ */
+ void encWithZkp(CipherText& c, Zkp& zkp, int m, fp::RandGen rg = fp::RandGen()) const
+ {
+ if (m != 0 && m != 1) {
+ throw cybozu::Exception("elgamal:PublicKey:encWithZkp") << m;
+ }
+ Zn u;
+ u.setRand(rg);
+ mulG(c.c1, u);
+ mulH(c.c2, u);
+ if (m) {
+ Ec::add(c.c2, c.c2, f);
+ Zn r1;
+ r1.setRand(rg);
+ zkp.c0.setRand(rg);
+ zkp.s0.setRand(rg);
+ Ec R01, R02, R11, R12;
+ Ec t1, t2;
+ mulG(t1, zkp.s0);
+ Ec::mul(t2, c.c1, zkp.c0);
+ Ec::sub(R01, t1, t2);
+ mulH(t1, zkp.s0);
+ Ec::mul(t2, c.c2, zkp.c0);
+ Ec::sub(R02, t1, t2);
+ mulG(R11, r1);
+ mulH(R12, r1);
+ std::ostringstream os;
+ os << R01 << R02 << R11 << R12 << c.c1 << c.c2 << f << g << h;
+ Zn cc;
+ cc.setHashOf(os.str());
+ zkp.c1 = cc - zkp.c0;
+ zkp.s1 = r1 + zkp.c1 * u;
+ } else {
+ Zn r0;
+ r0.setRand(rg);
+ zkp.c1.setRand(rg);
+ zkp.s1.setRand(rg);
+ Ec R01, R02, R11, R12;
+ mulG(R01, r0);
+ mulH(R02, r0);
+ Ec t1, t2;
+ mulG(t1, zkp.s1);
+ Ec::mul(t2, c.c1, zkp.c1);
+ Ec::sub(R11, t1, t2);
+ mulH(t1, zkp.s1);
+ Ec::sub(t2, c.c2, f);
+ Ec::mul(t2, t2, zkp.c1);
+ Ec::sub(R12, t1, t2);
+ std::ostringstream os;
+ os << R01 << R02 << R11 << R12 << c.c1 << c.c2 << f << g << h;
+ Zn cc;
+ cc.setHashOf(os.str());
+ zkp.c0 = cc - zkp.c1;
+ zkp.s0 = r0 + zkp.c0 * u;
+ }
+ }
+ /*
+ verify cipher text with ZKP
+ */
+ bool verify(const CipherText& c, const Zkp& zkp) const
+ {
+ Ec R01, R02, R11, R12;
+ Ec t1, t2;
+ mulG(t1, zkp.s0);
+ Ec::mul(t2, c.c1, zkp.c0);
+ Ec::sub(R01, t1, t2);
+ mulH(t1, zkp.s0);
+ Ec::mul(t2, c.c2, zkp.c0);
+ Ec::sub(R02, t1, t2);
+ mulG(t1, zkp.s1);
+ Ec::mul(t2, c.c1, zkp.c1);
+ Ec::sub(R11, t1, t2);
+ mulH(t1, zkp.s1);
+ Ec::sub(t2, c.c2, f);
+ Ec::mul(t2, t2, zkp.c1);
+ Ec::sub(R12, t1, t2);
+ std::ostringstream os;
+ os << R01 << R02 << R11 << R12 << c.c1 << c.c2 << f << g << h;
+ Zn cc;
+ cc.setHashOf(os.str());
+ return cc == zkp.c0 + zkp.c1;
+ }
+ /*
+ rerandomize encoded message
+ input : c = (c1, c2)
+ output : c = (c1 g^v, c2 h^v)
+ */
+ void rerandomize(CipherText& c, fp::RandGen rg = fp::RandGen()) const
+ {
+ Zn v;
+ v.setRand(rg);
+ Ec t;
+ mulG(t, v);
+ Ec::add(c.c1, c.c1, t);
+ mulH(t, v);
+ Ec::add(c.c2, c.c2, t);
+ }
+ /*
+ add encoded message with plain message
+ input : c = Enc(m1) = (c1, c2), m2
+			output : c = Enc(m1 + m2) = (c1, c2 f^m2)
+ */
+ template<class N>
+ void add(CipherText& c, const N& m) const
+ {
+ Ec fm;
+ mulF(fm, m);
+ Ec::add(c.c2, c.c2, fm);
+ }
+ template<class InputStream>
+ void load(InputStream& is, int ioMode = IoSerialize)
+ {
+ std::string s;
+ mcl::fp::local::loadWord(s, is);
+ bitSize = cybozu::atoi(s);
+ f.load(is, ioMode);
+ g.load(is, ioMode);
+ h.load(is, ioMode);
+ init(bitSize, f, g, h);
+ }
+ template<class OutputStream>
+ void save(OutputStream& os, int ioMode = IoSerialize) const
+ {
+ std::string s = cybozu::itoa(bitSize);
+ cybozu::write(os, s.c_str(), s.size());
+ cybozu::writeChar(os, ' ');
+
+ const char sep = *fp::getIoSeparator(ioMode);
+ f.save(os, ioMode);
+ if (sep) cybozu::writeChar(os, sep);
+ g.save(os, ioMode);
+ if (sep) cybozu::writeChar(os, sep);
+ h.save(os, ioMode);
+ if (sep) cybozu::writeChar(os, sep);
+ }
+ void getStr(std::string& str, int ioMode = 0) const
+ {
+ str.clear();
+ cybozu::StringOutputStream os(str);
+ save(os, ioMode);
+ }
+ std::string getStr(int ioMode = 0) const
+ {
+ std::string str;
+ getStr(str, ioMode);
+ return str;
+ }
+ void setStr(const std::string& str, int ioMode = 0)
+ {
+ cybozu::StringInputStream is(str);
+ load(is, ioMode);
+ }
+ friend inline std::ostream& operator<<(std::ostream& os, const PublicKey& self)
+ {
+ self.save(os, fp::detectIoMode(Ec::getIoMode(), os));
+ return os;
+ }
+ friend inline std::istream& operator>>(std::istream& is, PublicKey& self)
+ {
+ self.load(is, fp::detectIoMode(Ec::getIoMode(), is));
+ return is;
+ }
+ // obsolete
+ std::string toStr() const { return getStr(); }
+ void fromStr(const std::string& str) { setStr(str); }
+ };
+ /*
+ create table f^i for i in [rangeMin, rangeMax]
+ */
+ struct PowerCache {
+#if (CYBOZU_CPP_VERSION > CYBOZU_CPP_VERSION_CPP03)
+ typedef CYBOZU_NAMESPACE_STD::unordered_map<Ec, int> Cache;
+#else
+ typedef std::map<Ec, int> Cache;
+#endif
+ Cache cache;
+ void init(const Ec& f, int rangeMin, int rangeMax)
+ {
+ if (rangeMin > rangeMax) throw cybozu::Exception("mcl:ElgamalT:PowerCache:bad range") << rangeMin << rangeMax;
+ Ec x;
+ x.clear();
+ cache[x] = 0;
+ for (int i = 1; i <= rangeMax; i++) {
+ Ec::add(x, x, f);
+ cache[x] = i;
+ }
+ Ec nf;
+ Ec::neg(nf, f);
+ x.clear();
+ for (int i = -1; i >= rangeMin; i--) {
+ Ec::add(x, x, nf);
+ cache[x] = i;
+ }
+ }
+ /*
+ return m such that f^m = g
+ */
+ int getExponent(const Ec& g, bool *b = 0) const
+ {
+ typename Cache::const_iterator i = cache.find(g);
+ if (i == cache.end()) {
+ if (b) {
+ *b = false;
+ return 0;
+ }
+ throw cybozu::Exception("Elgamal:PowerCache:getExponent:not found") << g;
+ }
+ if (b) *b = true;
+ return i->second;
+ }
+ void clear()
+ {
+ cache.clear();
+ }
+ bool isEmpty() const
+ {
+ return cache.empty();
+ }
+ };
+ class PrivateKey {
+ PublicKey pub;
+ Zn z;
+ PowerCache cache;
+ public:
+ /*
+ init
+ input : f
+ output : (g, h, z)
+ Ec = <f>
+ g in Ec
+ h = g^z
+ */
+ void init(const Ec& f, size_t bitSize, fp::RandGen rg = fp::RandGen())
+ {
+ Ec g, h;
+ z.setRand(rg);
+ Ec::mul(g, f, z);
+ z.setRand(rg);
+ Ec::mul(h, g, z);
+ pub.init(bitSize, f, g, h);
+ }
+ const PublicKey& getPublicKey() const { return pub; }
+ /*
+ decode message by brute-force attack
+ input : c = (c1, c2)
+ output : m
+ M = c2 / c1^z
+ find m such that M = f^m and |m| < limit
+ @memo 7sec@core i3 for m = 1e6
+ */
+ void dec(Zn& m, const CipherText& c, int limit = 100000) const
+ {
+ const Ec& f = pub.getF();
+ Ec c1z;
+ Ec::mul(c1z, c.c1, z);
+ if (c1z == c.c2) {
+ m = 0;
+ return;
+ }
+ Ec t1(c1z);
+ Ec t2(c.c2);
+ for (int i = 1; i < limit; i++) {
+ Ec::add(t1, t1, f);
+ if (t1 == c.c2) {
+ m = i;
+ return;
+ }
+ Ec::add(t2, t2, f);
+ if (t2 == c1z) {
+ m = -i;
+ return;
+ }
+ }
+ throw cybozu::Exception("elgamal:PrivateKey:dec:overflow");
+ }
+ /*
+ powfm = c2 / c1^z = f^m
+ */
+ void getPowerf(Ec& powfm, const CipherText& c) const
+ {
+ Ec c1z;
+ Ec::mul(c1z, c.c1, z);
+ Ec::sub(powfm, c.c2, c1z);
+ }
+ /*
+ set range of message to decode quickly
+ */
+ void setCache(int rangeMin, int rangeMax)
+ {
+ cache.init(pub.getF(), rangeMin, rangeMax);
+ }
+ /*
+ clear cache
+ */
+ void clearCache()
+ {
+ cache.clear();
+ }
+ /*
+ decode message by lookup table if !cache.isEmpty()
+ brute-force attack otherwise
+ input : c = (c1, c2)
+ b : set false if not found
+ return m
+ */
+ int dec(const CipherText& c, bool *b = 0) const
+ {
+ Ec powfm;
+ getPowerf(powfm, c);
+ return cache.getExponent(powfm, b);
+ }
+ /*
+ check whether c is encrypted zero message
+ */
+ bool isZeroMessage(const CipherText& c) const
+ {
+ Ec c1z;
+ Ec::mul(c1z, c.c1, z);
+ return c.c2 == c1z;
+ }
+ template<class InputStream>
+ void load(InputStream& is, int ioMode = IoSerialize)
+ {
+ pub.load(is, ioMode);
+ z.load(is, ioMode);
+ }
+ template<class OutputStream>
+ void save(OutputStream& os, int ioMode = IoSerialize) const
+ {
+ const char sep = *fp::getIoSeparator(ioMode);
+ pub.save(os, ioMode);
+ if (sep) cybozu::writeChar(os, sep);
+ z.save(os, ioMode);
+ }
+ void getStr(std::string& str, int ioMode = 0) const
+ {
+ str.clear();
+ cybozu::StringOutputStream os(str);
+ save(os, ioMode);
+ }
+ std::string getStr(int ioMode = 0) const
+ {
+ std::string str;
+ getStr(str, ioMode);
+ return str;
+ }
+ void setStr(const std::string& str, int ioMode = 0)
+ {
+ cybozu::StringInputStream is(str);
+ load(is, ioMode);
+ }
+ friend inline std::ostream& operator<<(std::ostream& os, const PrivateKey& self)
+ {
+ self.save(os, fp::detectIoMode(Ec::getIoMode(), os));
+ return os;
+ }
+ friend inline std::istream& operator>>(std::istream& is, PrivateKey& self)
+ {
+ self.load(is, fp::detectIoMode(Ec::getIoMode(), is));
+ return is;
+ }
+ std::string toStr() const { return getStr(); }
+ void fromStr(const std::string& str) { setStr(str); }
+ };
+};
+
+} // mcl
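A minimal sketch of the lifted-ElGamal API above over secp192k1. The tag structs and the Elgamal alias are hypothetical, introduced only for this sketch; decryption uses the brute-force search in PrivateKey::dec, so plaintexts must stay small:

#include <mcl/elgamal.hpp>
#include <mcl/ecparam.hpp>
#include <cassert>

// hypothetical tags and aliases, introduced only for this sketch
struct FTag; struct ZTag;
typedef mcl::FpT<FTag, 256> Fp;
typedef mcl::FpT<ZTag, 256> Zn;
typedef mcl::EcT<Fp> Ec;
typedef mcl::ElgamalT<Ec, Zn> Elgamal;

int main()
{
	const mcl::EcParam& para = mcl::ecparam::secp192k1;
	bool b;
	Fp::init(&b, para.p); assert(b);
	Zn::init(&b, para.n); assert(b);
	Ec::init(&b, para.a, para.b); assert(b);
	Fp x, y;
	x.setStr(&b, para.gx); assert(b);
	y.setStr(&b, para.gy); assert(b);
	Ec f;
	f.set(&b, x, y); assert(b); // base point used as the message generator
	Elgamal::PrivateKey prv;
	prv.init(f, para.bitSize);
	const Elgamal::PublicKey& pub = prv.getPublicKey();
	Elgamal::CipherText c1, c2;
	pub.enc(c1, 3);
	pub.enc(c2, 4);
	c1.add(c2); // additively homomorphic: Enc(3) + Enc(4) -> Enc(7)
	Zn m;
	prv.dec(m, c1); // brute-force search near zero
	assert(m == 7);
	return 0;
}
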
diff --git a/vendor/github.com/tangerine-network/mcl/include/mcl/fp.hpp b/vendor/github.com/tangerine-network/mcl/include/mcl/fp.hpp
new file mode 100644
index 000000000..2e69729dd
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/mcl/fp.hpp
@@ -0,0 +1,661 @@
+#pragma once
+/**
+ @file
+ @brief finite field class
+ @author MITSUNARI Shigeo(@herumi)
+ @license modified new BSD license
+ http://opensource.org/licenses/BSD-3-Clause
+*/
+#ifndef CYBOZU_DONT_USE_STRING
+#include <iosfwd>
+#endif
+#ifdef _MSC_VER
+ #pragma warning(push)
+ #pragma warning(disable : 4127)
+ #pragma warning(disable : 4458)
+ #ifndef NOMINMAX
+ #define NOMINMAX
+ #endif
+ #ifndef MCL_NO_AUTOLINK
+ #ifdef NDEBUG
+ #pragma comment(lib, "mcl.lib")
+ #else
+ #pragma comment(lib, "mcl.lib")
+ #endif
+ #endif
+#endif
+#include <cybozu/hash.hpp>
+#include <cybozu/stream.hpp>
+#include <mcl/op.hpp>
+#include <mcl/util.hpp>
+#include <mcl/operator.hpp>
+#include <mcl/conversion.hpp>
+
+namespace mcl {
+
+struct FpTag;
+struct ZnTag;
+
+namespace fp {
+
+// copy src to dst as little endian
+void copyUnitToByteAsLE(uint8_t *dst, const Unit *src, size_t byteSize);
+// copy src to dst as little endian
+void copyByteToUnitAsLE(Unit *dst, const uint8_t *src, size_t byteSize);
+
+bool copyAndMask(Unit *y, const void *x, size_t xByteSize, const Op& op, MaskMode maskMode);
+
+uint64_t getUint64(bool *pb, const fp::Block& b);
+int64_t getInt64(bool *pb, fp::Block& b, const fp::Op& op);
+
+const char *ModeToStr(Mode mode);
+
+Mode StrToMode(const char *s);
+
+#ifndef CYBOZU_DONT_USE_STRING
+inline Mode StrToMode(const std::string& s)
+{
+ return StrToMode(s.c_str());
+}
+#endif
+
+inline void dumpUnit(Unit x)
+{
+#if MCL_SIZEOF_UNIT == 4
+ printf("%08x", (uint32_t)x);
+#else
+ printf("%016llx", (unsigned long long)x);
+#endif
+}
+
+bool isEnableJIT(); // 1st call is not threadsafe
+
+uint32_t sha256(void *out, uint32_t maxOutSize, const void *msg, uint32_t msgSize);
+uint32_t sha512(void *out, uint32_t maxOutSize, const void *msg, uint32_t msgSize);
+
+} // mcl::fp
+
+template<class tag = FpTag, size_t maxBitSize = MCL_MAX_BIT_SIZE>
+class FpT : public fp::Serializable<FpT<tag, maxBitSize>,
+ fp::Operator<FpT<tag, maxBitSize> > > {
+ typedef fp::Unit Unit;
+ typedef fp::Operator<FpT<tag, maxBitSize> > Operator;
+ typedef fp::Serializable<FpT<tag, maxBitSize>, Operator> Serializer;
+public:
+ static const size_t maxSize = (maxBitSize + fp::UnitBitSize - 1) / fp::UnitBitSize;
+private:
+ template<class tag2, size_t maxBitSize2> friend class FpT;
+ Unit v_[maxSize];
+ static fp::Op op_;
+ static FpT<tag, maxBitSize> inv2_;
+ static int ioMode_;
+ template<class Fp> friend class FpDblT;
+ template<class Fp> friend class Fp2T;
+ template<class Fp> friend struct Fp6T;
+public:
+ typedef FpT<tag, maxBitSize> BaseFp;
+ // return pointer to array v_[]
+ const Unit *getUnit() const { return v_; }
+ FpT* getFp0() { return this; }
+ const FpT* getFp0() const { return this; }
+ static inline size_t getUnitSize() { return op_.N; }
+ static inline size_t getBitSize() { return op_.bitSize; }
+ static inline size_t getByteSize() { return (op_.bitSize + 7) / 8; }
+ static inline const fp::Op& getOp() { return op_; }
+ void dump() const
+ {
+ const size_t N = op_.N;
+ for (size_t i = 0; i < N; i++) {
+ fp::dumpUnit(v_[N - 1 - i]);
+ }
+ printf("\n");
+ }
+ /*
+ xi_a is used for Fp2::mul_xi(), where xi = xi_a + i and i^2 = -1
+ if xi_a = 0 then asm functions for Fp2 are not generated.
+ */
+ static inline void init(bool *pb, int xi_a, const mpz_class& p, fp::Mode mode = fp::FP_AUTO)
+ {
+ assert(maxBitSize <= MCL_MAX_BIT_SIZE);
+ *pb = op_.init(p, maxBitSize, xi_a, mode);
+ if (!*pb) return;
+ { // set oneRep
+ FpT& one = *reinterpret_cast<FpT*>(op_.oneRep);
+ one.clear();
+ one.v_[0] = 1;
+ one.toMont();
+ }
+ { // set half
+ mpz_class half = (op_.mp + 1) / 2;
+ gmp::getArray(pb, op_.half, op_.N, half);
+ if (!*pb) return;
+ }
+ inv(inv2_, 2);
+#ifdef MCL_XBYAK_DIRECT_CALL
+ add = fp::func_ptr_cast<void (*)(FpT& z, const FpT& x, const FpT& y)>(op_.fp_addA_);
+ if (add == 0) add = addC;
+ sub = fp::func_ptr_cast<void (*)(FpT& z, const FpT& x, const FpT& y)>(op_.fp_subA_);
+ if (sub == 0) sub = subC;
+ neg = fp::func_ptr_cast<void (*)(FpT& y, const FpT& x)>(op_.fp_negA_);
+ if (neg == 0) neg = negC;
+ mul = fp::func_ptr_cast<void (*)(FpT& z, const FpT& x, const FpT& y)>(op_.fp_mulA_);
+ if (mul == 0) mul = mulC;
+ sqr = fp::func_ptr_cast<void (*)(FpT& y, const FpT& x)>(op_.fp_sqrA_);
+ if (sqr == 0) sqr = sqrC;
+#endif
+ *pb = true;
+ }
+ static inline void init(bool *pb, const mpz_class& p, fp::Mode mode = fp::FP_AUTO)
+ {
+ init(pb, 0, p, mode);
+ }
+ static inline void init(bool *pb, const char *mstr, fp::Mode mode = fp::FP_AUTO)
+ {
+ mpz_class p;
+ gmp::setStr(pb, p, mstr);
+ if (!*pb) return;
+ init(pb, p, mode);
+ }
+ static inline size_t getModulo(char *buf, size_t bufSize)
+ {
+ return gmp::getStr(buf, bufSize, op_.mp);
+ }
+ static inline bool isFullBit() { return op_.isFullBit; }
+ /*
+		binary pattern of p
+		@note as an Fp value this represents zero, since p = 0 (mod p)
+ */
+ static inline const FpT& getP()
+ {
+ return *reinterpret_cast<const FpT*>(op_.p);
+ }
+ bool isOdd() const
+ {
+ fp::Block b;
+ getBlock(b);
+ return (b.p[0] & 1) == 1;
+ }
+ static inline bool squareRoot(FpT& y, const FpT& x)
+ {
+ if (isMont()) return op_.sq.get(y, x);
+ mpz_class mx, my;
+ bool b = false;
+ x.getMpz(&b, mx);
+ if (!b) return false;
+ b = op_.sq.get(my, mx);
+ if (!b) return false;
+ y.setMpz(&b, my);
+ return b;
+ }
+ FpT() {}
+ FpT(const FpT& x)
+ {
+ op_.fp_copy(v_, x.v_);
+ }
+ FpT& operator=(const FpT& x)
+ {
+ op_.fp_copy(v_, x.v_);
+ return *this;
+ }
+ void clear()
+ {
+ op_.fp_clear(v_);
+ }
+ FpT(int64_t x) { operator=(x); }
+ FpT& operator=(int64_t x)
+ {
+ if (x == 1) {
+ op_.fp_copy(v_, op_.oneRep);
+ } else {
+ clear();
+ if (x) {
+ int64_t y = x < 0 ? -x : x;
+ if (sizeof(Unit) == 8) {
+ v_[0] = y;
+ } else {
+ v_[0] = (uint32_t)y;
+ v_[1] = (uint32_t)(y >> 32);
+ }
+ if (x < 0) neg(*this, *this);
+ toMont();
+ }
+ }
+ return *this;
+ }
+ static inline bool isMont() { return op_.isMont; }
+ /*
+ convert normal value to Montgomery value
+		do nothing if !isMont()
+ */
+ void toMont()
+ {
+ if (isMont()) op_.toMont(v_, v_);
+ }
+ /*
+ convert Montgomery value to normal value
+		do nothing if !isMont()
+ */
+ void fromMont()
+ {
+ if (isMont()) op_.fromMont(v_, v_);
+ }
+ template<class InputStream>
+ void load(bool *pb, InputStream& is, int ioMode)
+ {
+ bool isMinus = false;
+ *pb = false;
+ if (ioMode & (IoArray | IoArrayRaw | IoSerialize | IoSerializeHexStr)) {
+ const size_t n = getByteSize();
+ v_[op_.N - 1] = 0;
+ size_t readSize;
+ if (ioMode & IoSerializeHexStr) {
+ readSize = mcl::fp::readHexStr(v_, n, is);
+ } else {
+ readSize = cybozu::readSome(v_, n, is);
+ }
+ if (readSize != n) return;
+ } else {
+ char buf[1024];
+ size_t n = fp::local::loadWord(buf, sizeof(buf), is);
+ if (n == 0) return;
+ n = fp::strToArray(&isMinus, v_, op_.N, buf, n, ioMode);
+ if (n == 0) return;
+ for (size_t i = n; i < op_.N; i++) v_[i] = 0;
+ }
+ if (fp::isGreaterOrEqualArray(v_, op_.p, op_.N)) {
+ return;
+ }
+ if (isMinus) {
+ neg(*this, *this);
+ }
+ if (!(ioMode & IoArrayRaw)) {
+ toMont();
+ }
+ *pb = true;
+ }
+ template<class OutputStream>
+ void save(bool *pb, OutputStream& os, int ioMode) const
+ {
+ const size_t n = getByteSize();
+ if (ioMode & (IoArray | IoArrayRaw | IoSerialize | IoSerializeHexStr)) {
+ if (ioMode & IoArrayRaw) {
+ cybozu::write(pb, os, v_, n);
+ } else {
+ fp::Block b;
+ getBlock(b);
+ if (ioMode & IoSerializeHexStr) {
+ mcl::fp::writeHexStr(pb, os, b.p, n);
+ } else {
+ cybozu::write(pb, os, b.p, n);
+ }
+ }
+ return;
+ }
+ fp::Block b;
+ getBlock(b);
+ // use low 8-bit ioMode for (base, withPrefix)
+ char buf[2048];
+ size_t len = mcl::fp::arrayToStr(buf, sizeof(buf), b.p, b.n, ioMode & 31, (ioMode & IoPrefix) != 0);
+ if (len == 0) {
+ *pb = false;
+ return;
+ }
+ cybozu::write(pb, os, buf + sizeof(buf) - len, len);
+ }
+ /*
+ mode = Mod : set x mod p if sizeof(S) * n <= 64 else error
+ */
+ template<class S>
+ void setArray(bool *pb, const S *x, size_t n, mcl::fp::MaskMode mode = fp::NoMask)
+ {
+ *pb = fp::copyAndMask(v_, x, sizeof(S) * n, op_, mode);
+ toMont();
+ }
+ /*
+ mask x with (1 << bitLen) and subtract p if x >= p
+ */
+ template<class S>
+ void setArrayMaskMod(const S *x, size_t n)
+ {
+ fp::copyAndMask(v_, x, sizeof(S) * n, op_, fp::MaskAndMod);
+ toMont();
+ }
+
+ /*
+ mask x with (1 << (bitLen - 1)) - 1 if x >= p
+ */
+ template<class S>
+ void setArrayMask(const S *x, size_t n)
+ {
+ fp::copyAndMask(v_, x, sizeof(S) * n, op_, fp::SmallMask);
+ toMont();
+ }
+ void getBlock(fp::Block& b) const
+ {
+ b.n = op_.N;
+ if (isMont()) {
+ op_.fromMont(b.v_, v_);
+ b.p = &b.v_[0];
+ } else {
+ b.p = &v_[0];
+ }
+ }
+ void setByCSPRNG(bool *pb, fp::RandGen rg = fp::RandGen())
+ {
+ if (rg.isZero()) rg = fp::RandGen::get();
+ rg.read(pb, v_, op_.N * sizeof(Unit)); // byte size
+		if (!*pb) return;
+ setArrayMask(v_, op_.N);
+ }
+#ifndef CYBOZU_DONT_USE_EXCEPTION
+ void setByCSPRNG(fp::RandGen rg = fp::RandGen())
+ {
+ bool b;
+ setByCSPRNG(&b, rg);
+ if (!b) throw cybozu::Exception("setByCSPRNG");
+ }
+#endif
+ void setRand(fp::RandGen rg = fp::RandGen()) // old api
+ {
+ setByCSPRNG(rg);
+ }
+ /*
+ hash msg and mask with (1 << (bitLen - 1)) - 1
+ */
+ void setHashOf(const void *msg, size_t msgSize)
+ {
+ char buf[MCL_MAX_HASH_BIT_SIZE / 8];
+ uint32_t size = op_.hash(buf, static_cast<uint32_t>(sizeof(buf)), msg, static_cast<uint32_t>(msgSize));
+ setArrayMask(buf, size);
+ }
+ void getMpz(bool *pb, mpz_class& x) const
+ {
+ fp::Block b;
+ getBlock(b);
+ gmp::setArray(pb, x, b.p, b.n);
+ }
+ void setMpz(bool *pb, const mpz_class& x)
+ {
+ if (x < 0) {
+ *pb = false;
+ return;
+ }
+ setArray(pb, gmp::getUnit(x), gmp::getUnitSize(x));
+ }
+#ifdef MCL_XBYAK_DIRECT_CALL
+ static void (*add)(FpT& z, const FpT& x, const FpT& y);
+ static inline void addC(FpT& z, const FpT& x, const FpT& y) { op_.fp_add(z.v_, x.v_, y.v_, op_.p); }
+ static void (*sub)(FpT& z, const FpT& x, const FpT& y);
+ static inline void subC(FpT& z, const FpT& x, const FpT& y) { op_.fp_sub(z.v_, x.v_, y.v_, op_.p); }
+ static void (*neg)(FpT& y, const FpT& x);
+ static inline void negC(FpT& y, const FpT& x) { op_.fp_neg(y.v_, x.v_, op_.p); }
+ static void (*mul)(FpT& z, const FpT& x, const FpT& y);
+ static inline void mulC(FpT& z, const FpT& x, const FpT& y) { op_.fp_mul(z.v_, x.v_, y.v_, op_.p); }
+ static void (*sqr)(FpT& y, const FpT& x);
+ static inline void sqrC(FpT& y, const FpT& x) { op_.fp_sqr(y.v_, x.v_, op_.p); }
+#else
+ static inline void add(FpT& z, const FpT& x, const FpT& y) { op_.fp_add(z.v_, x.v_, y.v_, op_.p); }
+ static inline void sub(FpT& z, const FpT& x, const FpT& y) { op_.fp_sub(z.v_, x.v_, y.v_, op_.p); }
+ static inline void neg(FpT& y, const FpT& x) { op_.fp_neg(y.v_, x.v_, op_.p); }
+ static inline void mul(FpT& z, const FpT& x, const FpT& y) { op_.fp_mul(z.v_, x.v_, y.v_, op_.p); }
+ static inline void sqr(FpT& y, const FpT& x) { op_.fp_sqr(y.v_, x.v_, op_.p); }
+#endif
+ static inline void addPre(FpT& z, const FpT& x, const FpT& y) { op_.fp_addPre(z.v_, x.v_, y.v_); }
+ static inline void subPre(FpT& z, const FpT& x, const FpT& y) { op_.fp_subPre(z.v_, x.v_, y.v_); }
+ static inline void mulUnit(FpT& z, const FpT& x, const Unit y)
+ {
+ if (mulSmallUnit(z, x, y)) return;
+ op_.fp_mulUnit(z.v_, x.v_, y, op_.p);
+ }
+ static inline void inv(FpT& y, const FpT& x) { op_.fp_invOp(y.v_, x.v_, op_); }
+ static inline void divBy2(FpT& y, const FpT& x)
+ {
+#if 0
+ mul(y, x, inv2_);
+#else
+ bool odd = (x.v_[0] & 1) != 0;
+ op_.fp_shr1(y.v_, x.v_);
+ if (odd) {
+ op_.fp_addPre(y.v_, y.v_, op_.half);
+ }
+#endif
+ }
+ static inline void divBy4(FpT& y, const FpT& x)
+ {
+ divBy2(y, x); // QQQ : optimize later
+ divBy2(y, y);
+ }
+ bool isZero() const { return op_.fp_isZero(v_); }
+ bool isOne() const { return fp::isEqualArray(v_, op_.oneRep, op_.N); }
+ static const inline FpT& one() { return *reinterpret_cast<const FpT*>(op_.oneRep); }
+ /*
+ half = (p + 1) / 2
+ return true if half <= x < p
+ return false if 0 <= x < half
+ */
+ bool isNegative() const
+ {
+ fp::Block b;
+ getBlock(b);
+ return fp::isGreaterOrEqualArray(b.p, op_.half, op_.N);
+ }
+ bool isValid() const
+ {
+ return fp::isLessArray(v_, op_.p, op_.N);
+ }
+ uint64_t getUint64(bool *pb) const
+ {
+ fp::Block b;
+ getBlock(b);
+ return fp::getUint64(pb, b);
+ }
+ int64_t getInt64(bool *pb) const
+ {
+ fp::Block b;
+ getBlock(b);
+ return fp::getInt64(pb, b, op_);
+ }
+ bool operator==(const FpT& rhs) const { return fp::isEqualArray(v_, rhs.v_, op_.N); }
+ bool operator!=(const FpT& rhs) const { return !operator==(rhs); }
+ /*
+ @note
+		this compare function is slow because it calls mul when isMont is true.
+ */
+ static inline int compare(const FpT& x, const FpT& y)
+ {
+ fp::Block xb, yb;
+ x.getBlock(xb);
+ y.getBlock(yb);
+ return fp::compareArray(xb.p, yb.p, op_.N);
+ }
+ bool isLess(const FpT& rhs) const
+ {
+ fp::Block xb, yb;
+ getBlock(xb);
+ rhs.getBlock(yb);
+ return fp::isLessArray(xb.p, yb.p, op_.N);
+ }
+ bool operator<(const FpT& rhs) const { return isLess(rhs); }
+ bool operator>=(const FpT& rhs) const { return !operator<(rhs); }
+ bool operator>(const FpT& rhs) const { return rhs < *this; }
+ bool operator<=(const FpT& rhs) const { return !operator>(rhs); }
+ /*
+ @note
+ return unexpected order if isMont is set.
+ */
+ static inline int compareRaw(const FpT& x, const FpT& y)
+ {
+ return fp::compareArray(x.v_, y.v_, op_.N);
+ }
+ bool isLessRaw(const FpT& rhs) const
+ {
+ return fp::isLessArray(v_, rhs.v_, op_.N);
+ }
+ /*
+ set IoMode for operator<<(), or operator>>()
+ */
+ static inline void setIoMode(int ioMode)
+ {
+ ioMode_ = ioMode;
+ }
+ static inline int getIoMode() { return ioMode_; }
+ static inline size_t getModBitLen() { return getBitSize(); }
+ static inline void setHashFunc(uint32_t hash(void *out, uint32_t maxOutSize, const void *msg, uint32_t msgSize))
+ {
+ op_.hash = hash;
+ }
+#ifndef CYBOZU_DONT_USE_STRING
+ explicit FpT(const std::string& str, int base = 0)
+ {
+ Serializer::setStr(str, base);
+ }
+ static inline void getModulo(std::string& pstr)
+ {
+ gmp::getStr(pstr, op_.mp);
+ }
+ static std::string getModulo()
+ {
+ std::string s;
+ getModulo(s);
+ return s;
+ }
+ void setHashOf(const std::string& msg)
+ {
+ setHashOf(msg.data(), msg.size());
+ }
+ // backward compatibility
+ static inline void setModulo(const std::string& mstr, fp::Mode mode = fp::FP_AUTO)
+ {
+ init(mstr, mode);
+ }
+ friend inline std::ostream& operator<<(std::ostream& os, const FpT& self)
+ {
+ self.save(os, fp::detectIoMode(getIoMode(), os));
+ return os;
+ }
+ friend inline std::istream& operator>>(std::istream& is, FpT& self)
+ {
+ self.load(is, fp::detectIoMode(getIoMode(), is));
+ return is;
+ }
+#endif
+#ifndef CYBOZU_DONT_USE_EXCEPTION
+ static inline void init(int xi_a, const mpz_class& p, fp::Mode mode = fp::FP_AUTO)
+ {
+ bool b;
+ init(&b, xi_a, p, mode);
+ if (!b) throw cybozu::Exception("Fp:init");
+ }
+ static inline void init(int xi_a, const std::string& mstr, fp::Mode mode = fp::FP_AUTO)
+ {
+ mpz_class p;
+ gmp::setStr(p, mstr);
+ init(xi_a, p, mode);
+ }
+ static inline void init(const mpz_class& p, fp::Mode mode = fp::FP_AUTO)
+ {
+ init(0, p, mode);
+ }
+ static inline void init(const std::string& mstr, fp::Mode mode = fp::FP_AUTO)
+ {
+ init(0, mstr, mode);
+ }
+ template<class OutputStream>
+ void save(OutputStream& os, int ioMode = IoSerialize) const
+ {
+ bool b;
+ save(&b, os, ioMode);
+ if (!b) throw cybozu::Exception("fp:save") << ioMode;
+ }
+ template<class InputStream>
+ void load(InputStream& is, int ioMode = IoSerialize)
+ {
+ bool b;
+ load(&b, is, ioMode);
+ if (!b) throw cybozu::Exception("fp:load") << ioMode;
+ }
+ /*
+ throw exception if x >= p
+ */
+ template<class S>
+ void setArray(const S *x, size_t n)
+ {
+ bool b;
+ setArray(&b, x, n);
+ if (!b) throw cybozu::Exception("Fp:setArray");
+ }
+ void setMpz(const mpz_class& x)
+ {
+ bool b;
+ setMpz(&b, x);
+ if (!b) throw cybozu::Exception("Fp:setMpz");
+ }
+ uint64_t getUint64() const
+ {
+ bool b;
+ uint64_t v = getUint64(&b);
+ if (!b) throw cybozu::Exception("Fp:getUint64:large value");
+ return v;
+ }
+ int64_t getInt64() const
+ {
+ bool b;
+ int64_t v = getInt64(&b);
+ if (!b) throw cybozu::Exception("Fp:getInt64:large value");
+ return v;
+ }
+ void getMpz(mpz_class& x) const
+ {
+ bool b;
+ getMpz(&b, x);
+ if (!b) throw cybozu::Exception("Fp:getMpz");
+ }
+ mpz_class getMpz() const
+ {
+ mpz_class x;
+ getMpz(x);
+ return x;
+ }
+#endif
+};
+
+template<class tag, size_t maxBitSize> fp::Op FpT<tag, maxBitSize>::op_;
+template<class tag, size_t maxBitSize> FpT<tag, maxBitSize> FpT<tag, maxBitSize>::inv2_;
+template<class tag, size_t maxBitSize> int FpT<tag, maxBitSize>::ioMode_ = IoAuto;
+#ifdef MCL_XBYAK_DIRECT_CALL
+template<class tag, size_t maxBitSize> void (*FpT<tag, maxBitSize>::add)(FpT& z, const FpT& x, const FpT& y);
+template<class tag, size_t maxBitSize> void (*FpT<tag, maxBitSize>::sub)(FpT& z, const FpT& x, const FpT& y);
+template<class tag, size_t maxBitSize> void (*FpT<tag, maxBitSize>::neg)(FpT& y, const FpT& x);
+template<class tag, size_t maxBitSize> void (*FpT<tag, maxBitSize>::mul)(FpT& z, const FpT& x, const FpT& y);
+template<class tag, size_t maxBitSize> void (*FpT<tag, maxBitSize>::sqr)(FpT& y, const FpT& x);
+#endif
+
+} // mcl
+
+#ifdef CYBOZU_USE_BOOST
+namespace mcl {
+
+template<class tag, size_t maxBitSize>
+size_t hash_value(const mcl::FpT<tag, maxBitSize>& x, size_t v = 0)
+{
+ return static_cast<size_t>(cybozu::hash64(x.getUnit(), x.getUnitSize(), v));
+}
+
+}
+#else
+namespace std { CYBOZU_NAMESPACE_TR1_BEGIN
+
+template<class tag, size_t maxBitSize>
+struct hash<mcl::FpT<tag, maxBitSize> > {
+ size_t operator()(const mcl::FpT<tag, maxBitSize>& x, uint64_t v = 0) const
+ {
+ return static_cast<size_t>(cybozu::hash64(x.getUnit(), x.getUnitSize(), v));
+ }
+};
+
+CYBOZU_NAMESPACE_TR1_END } // std::tr1
+#endif
+
+#ifdef _MSC_VER
+ #pragma warning(pop)
+#endif
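A minimal sketch of instantiating FpT as a concrete prime field. MyTag is a hypothetical tag type, present only so that this Fp keeps its own static modulus; the modulus string is the secp256k1 prime from ecparam.hpp above:

#include <mcl/fp.hpp>
#include <cassert>

struct MyTag; // hypothetical tag: each tag owns its own static context
typedef mcl::FpT<MyTag, 256> Fp;

int main()
{
	Fp::init("0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f");
	Fp a = 12, b;
	Fp::inv(b, a); // b = 1/a mod p
	Fp c;
	Fp::mul(c, a, b);
	assert(c.isOne());
	assert(Fp::squareRoot(b, Fp(4))); // b = +/-2 mod p
	return 0;
}
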
diff --git a/vendor/github.com/tangerine-network/mcl/include/mcl/fp_tower.hpp b/vendor/github.com/tangerine-network/mcl/include/mcl/fp_tower.hpp
new file mode 100644
index 000000000..95722e2d5
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/mcl/fp_tower.hpp
@@ -0,0 +1,1364 @@
+#pragma once
+/**
+ @file
+ @brief finite field extension class
+ @author MITSUNARI Shigeo(@herumi)
+ @license modified new BSD license
+ http://opensource.org/licenses/BSD-3-Clause
+*/
+#include <mcl/fp.hpp>
+
+namespace mcl {
+
+template<class Fp>
+class FpDblT : public fp::Serializable<FpDblT<Fp> > {
+ typedef fp::Unit Unit;
+ Unit v_[Fp::maxSize * 2];
+public:
+ static size_t getUnitSize() { return Fp::op_.N * 2; }
+ FpDblT() : v_()
+ {
+ }
+ FpDblT(const FpDblT& rhs)
+ {
+ const size_t n = getUnitSize();
+ for (size_t i = 0; i < n; i++) {
+ v_[i] = rhs.v_[i];
+ }
+ }
+ void dump() const
+ {
+ const size_t n = getUnitSize();
+ for (size_t i = 0; i < n; i++) {
+ mcl::fp::dumpUnit(v_[n - 1 - i]);
+ }
+ printf("\n");
+ }
+ template<class OutputStream>
+ void save(bool *pb, OutputStream& os, int) const
+ {
+ char buf[1024];
+ size_t n = mcl::fp::arrayToHex(buf, sizeof(buf), v_, getUnitSize());
+ if (n == 0) {
+ *pb = false;
+ return;
+ }
+		cybozu::write(pb, os, buf + sizeof(buf) - n, n); // write only the n hex characters produced
+ }
+ template<class InputStream>
+ void load(bool *pb, InputStream& is, int)
+ {
+ char buf[1024];
+ *pb = false;
+ size_t n = fp::local::loadWord(buf, sizeof(buf), is);
+ if (n == 0) return;
+ n = fp::hexToArray(v_, getUnitSize(), buf, n);
+ if (n == 0) return;
+ for (size_t i = n; i < getUnitSize(); i++) v_[i] = 0;
+ *pb = true;
+ }
+#ifndef CYBOZU_DONT_USE_EXCEPTION
+ template<class OutputStream>
+ void save(OutputStream& os, int ioMode = IoSerialize) const
+ {
+ bool b;
+ save(&b, os, ioMode);
+ if (!b) throw cybozu::Exception("FpDblT:save") << ioMode;
+ }
+ template<class InputStream>
+ void load(InputStream& is, int ioMode = IoSerialize)
+ {
+ bool b;
+ load(&b, is, ioMode);
+ if (!b) throw cybozu::Exception("FpDblT:load") << ioMode;
+ }
+ void getMpz(mpz_class& x) const
+ {
+ bool b;
+ getMpz(&b, x);
+ if (!b) throw cybozu::Exception("FpDblT:getMpz");
+ }
+ mpz_class getMpz() const
+ {
+ mpz_class x;
+ getMpz(x);
+ return x;
+ }
+#endif
+ void clear()
+ {
+ const size_t n = getUnitSize();
+ for (size_t i = 0; i < n; i++) {
+ v_[i] = 0;
+ }
+ }
+ FpDblT& operator=(const FpDblT& rhs)
+ {
+ const size_t n = getUnitSize();
+ for (size_t i = 0; i < n; i++) {
+ v_[i] = rhs.v_[i];
+ }
+ return *this;
+ }
+	// QQQ : does not check range of x strictly (use for debug)
+ void setMpz(const mpz_class& x)
+ {
+ assert(x >= 0);
+ const size_t xn = gmp::getUnitSize(x);
+ const size_t N2 = getUnitSize();
+ if (xn > N2) {
+ assert(0);
+ return;
+ }
+ memcpy(v_, gmp::getUnit(x), xn * sizeof(Unit));
+ memset(v_ + xn, 0, (N2 - xn) * sizeof(Unit));
+ }
+ void getMpz(bool *pb, mpz_class& x) const
+ {
+ gmp::setArray(pb, x, v_, Fp::op_.N * 2);
+ }
+#ifdef MCL_XBYAK_DIRECT_CALL
+ static void (*add)(FpDblT& z, const FpDblT& x, const FpDblT& y);
+ static void (*sub)(FpDblT& z, const FpDblT& x, const FpDblT& y);
+ static void (*mod)(Fp& z, const FpDblT& xy);
+ static void (*addPre)(FpDblT& z, const FpDblT& x, const FpDblT& y);
+ static void (*subPre)(FpDblT& z, const FpDblT& x, const FpDblT& y);
+ static void addC(FpDblT& z, const FpDblT& x, const FpDblT& y) { Fp::op_.fpDbl_add(z.v_, x.v_, y.v_, Fp::op_.p); }
+ static void subC(FpDblT& z, const FpDblT& x, const FpDblT& y) { Fp::op_.fpDbl_sub(z.v_, x.v_, y.v_, Fp::op_.p); }
+ static void modC(Fp& z, const FpDblT& xy) { Fp::op_.fpDbl_mod(z.v_, xy.v_, Fp::op_.p); }
+ static void addPreC(FpDblT& z, const FpDblT& x, const FpDblT& y) { Fp::op_.fpDbl_addPre(z.v_, x.v_, y.v_); }
+ static void subPreC(FpDblT& z, const FpDblT& x, const FpDblT& y) { Fp::op_.fpDbl_subPre(z.v_, x.v_, y.v_); }
+#else
+ static void add(FpDblT& z, const FpDblT& x, const FpDblT& y) { Fp::op_.fpDbl_add(z.v_, x.v_, y.v_, Fp::op_.p); }
+ static void sub(FpDblT& z, const FpDblT& x, const FpDblT& y) { Fp::op_.fpDbl_sub(z.v_, x.v_, y.v_, Fp::op_.p); }
+ static void mod(Fp& z, const FpDblT& xy) { Fp::op_.fpDbl_mod(z.v_, xy.v_, Fp::op_.p); }
+ static void addPre(FpDblT& z, const FpDblT& x, const FpDblT& y) { Fp::op_.fpDbl_addPre(z.v_, x.v_, y.v_); }
+ static void subPre(FpDblT& z, const FpDblT& x, const FpDblT& y) { Fp::op_.fpDbl_subPre(z.v_, x.v_, y.v_); }
+#endif
+ static void mulPreC(FpDblT& xy, const Fp& x, const Fp& y) { Fp::op_.fpDbl_mulPre(xy.v_, x.v_, y.v_); }
+ static void sqrPreC(FpDblT& xx, const Fp& x) { Fp::op_.fpDbl_sqrPre(xx.v_, x.v_); }
+ /*
+ mul(z, x, y) = mulPre(xy, x, y) + mod(z, xy)
+ */
+ static void (*mulPre)(FpDblT& xy, const Fp& x, const Fp& y);
+ static void (*sqrPre)(FpDblT& xx, const Fp& x);
+ static void mulUnit(FpDblT& z, const FpDblT& x, Unit y)
+ {
+ if (mulSmallUnit(z, x, y)) return;
+ assert(0); // not supported y
+ }
+ static void init()
+ {
+ const mcl::fp::Op& op = Fp::getOp();
+#ifdef MCL_XBYAK_DIRECT_CALL
+ add = fp::func_ptr_cast<void (*)(FpDblT&, const FpDblT&, const FpDblT&)>(op.fpDbl_addA_);
+ if (add == 0) add = addC;
+ sub = fp::func_ptr_cast<void (*)(FpDblT&, const FpDblT&, const FpDblT&)>(op.fpDbl_subA_);
+ if (sub == 0) sub = subC;
+ mod = fp::func_ptr_cast<void (*)(Fp&, const FpDblT&)>(op.fpDbl_modA_);
+ if (mod == 0) mod = modC;
+ addPre = fp::func_ptr_cast<void (*)(FpDblT&, const FpDblT&, const FpDblT&)>(op.fpDbl_addPre);
+ if (addPre == 0) addPre = addPreC;
+ subPre = fp::func_ptr_cast<void (*)(FpDblT&, const FpDblT&, const FpDblT&)>(op.fpDbl_subPre);
+ if (subPre == 0) subPre = subPreC;
+#endif
+ if (op.fpDbl_mulPreA_) {
+ mulPre = fp::func_ptr_cast<void (*)(FpDblT&, const Fp&, const Fp&)>(op.fpDbl_mulPreA_);
+ } else {
+ mulPre = mulPreC;
+ }
+ if (op.fpDbl_sqrPreA_) {
+ sqrPre = fp::func_ptr_cast<void (*)(FpDblT&, const Fp&)>(op.fpDbl_sqrPreA_);
+ } else {
+ sqrPre = sqrPreC;
+ }
+ }
+ void operator+=(const FpDblT& x) { add(*this, *this, x); }
+ void operator-=(const FpDblT& x) { sub(*this, *this, x); }
+};
+
+#ifdef MCL_XBYAK_DIRECT_CALL
+template<class Fp> void (*FpDblT<Fp>::add)(FpDblT&, const FpDblT&, const FpDblT&);
+template<class Fp> void (*FpDblT<Fp>::sub)(FpDblT&, const FpDblT&, const FpDblT&);
+template<class Fp> void (*FpDblT<Fp>::mod)(Fp&, const FpDblT&);
+template<class Fp> void (*FpDblT<Fp>::addPre)(FpDblT&, const FpDblT&, const FpDblT&);
+template<class Fp> void (*FpDblT<Fp>::subPre)(FpDblT&, const FpDblT&, const FpDblT&);
+#endif
+template<class Fp> void (*FpDblT<Fp>::mulPre)(FpDblT&, const Fp&, const Fp&);
+template<class Fp> void (*FpDblT<Fp>::sqrPre)(FpDblT&, const Fp&);
+
+template<class Fp> struct Fp12T;
+template<class Fp> class BNT;
+template<class Fp> struct Fp2DblT;
+/*
+ beta = -1
+ Fp2 = F[i] / (i^2 + 1)
+ x = a + bi
+*/
+template<class _Fp>
+class Fp2T : public fp::Serializable<Fp2T<_Fp>,
+ fp::Operator<Fp2T<_Fp> > > {
+ typedef _Fp Fp;
+ typedef fp::Unit Unit;
+ typedef FpDblT<Fp> FpDbl;
+ typedef Fp2DblT<Fp> Fp2Dbl;
+ static const size_t gN = 5;
+ /*
+ g = xi^((p - 1) / 6)
+ g[] = { g^2, g^4, g^1, g^3, g^5 }
+ */
+ static Fp2T g[gN];
+ static Fp2T g2[gN];
+ static Fp2T g3[gN];
+public:
+ static const Fp2T *get_gTbl() { return &g[0]; }
+ static const Fp2T *get_g2Tbl() { return &g2[0]; }
+ static const Fp2T *get_g3Tbl() { return &g3[0]; }
+ typedef typename Fp::BaseFp BaseFp;
+ static const size_t maxSize = Fp::maxSize * 2;
+ static inline size_t getByteSize() { return Fp::getByteSize() * 2; }
+ void dump() const
+ {
+ a.dump();
+ b.dump();
+ }
+ Fp a, b;
+ Fp2T() { }
+ Fp2T(int64_t a) : a(a), b(0) { }
+ Fp2T(const Fp& a, const Fp& b) : a(a), b(b) { }
+ Fp2T(int64_t a, int64_t b) : a(a), b(b) { }
+ Fp* getFp0() { return &a; }
+ const Fp* getFp0() const { return &a; }
+ const Unit* getUnit() const { return a.getUnit(); }
+ void clear()
+ {
+ a.clear();
+ b.clear();
+ }
+ void set(const Fp &a_, const Fp &b_)
+ {
+ a = a_;
+ b = b_;
+ }
+#ifdef MCL_XBYAK_DIRECT_CALL
+ static void (*add)(Fp2T& z, const Fp2T& x, const Fp2T& y);
+ static void (*sub)(Fp2T& z, const Fp2T& x, const Fp2T& y);
+ static void (*neg)(Fp2T& y, const Fp2T& x);
+ static void (*mul)(Fp2T& z, const Fp2T& x, const Fp2T& y);
+ static void (*sqr)(Fp2T& y, const Fp2T& x);
+#else
+ static void add(Fp2T& z, const Fp2T& x, const Fp2T& y) { addC(z, x, y); }
+ static void sub(Fp2T& z, const Fp2T& x, const Fp2T& y) { subC(z, x, y); }
+ static void neg(Fp2T& y, const Fp2T& x) { negC(y, x); }
+ static void mul(Fp2T& z, const Fp2T& x, const Fp2T& y) { mulC(z, x, y); }
+ static void sqr(Fp2T& y, const Fp2T& x) { sqrC(y, x); }
+#endif
+ static void (*mul_xi)(Fp2T& y, const Fp2T& x);
+ static void addPre(Fp2T& z, const Fp2T& x, const Fp2T& y) { Fp::addPre(z.a, x.a, y.a); Fp::addPre(z.b, x.b, y.b); }
+ static void inv(Fp2T& y, const Fp2T& x) { Fp::op_.fp2_inv(y.a.v_, x.a.v_); }
+ static void divBy2(Fp2T& y, const Fp2T& x)
+ {
+ Fp::divBy2(y.a, x.a);
+ Fp::divBy2(y.b, x.b);
+ }
+ static void divBy4(Fp2T& y, const Fp2T& x)
+ {
+ Fp::divBy4(y.a, x.a);
+ Fp::divBy4(y.b, x.b);
+ }
+ static void mulFp(Fp2T& z, const Fp2T& x, const Fp& y)
+ {
+ Fp::mul(z.a, x.a, y);
+ Fp::mul(z.b, x.b, y);
+ }
+ template<class S>
+ void setArray(bool *pb, const S *buf, size_t n)
+ {
+ assert((n & 1) == 0);
+ n /= 2;
+ a.setArray(pb, buf, n);
+ if (!*pb) return;
+ b.setArray(pb, buf + n, n);
+ }
+ template<class InputStream>
+ void load(bool *pb, InputStream& is, int ioMode)
+ {
+ a.load(pb, is, ioMode);
+ if (!*pb) return;
+ b.load(pb, is, ioMode);
+ }
+ /*
+ Fp2T = <a> + ' ' + <b>
+ */
+ template<class OutputStream>
+ void save(bool *pb, OutputStream& os, int ioMode) const
+ {
+ const char sep = *fp::getIoSeparator(ioMode);
+ a.save(pb, os, ioMode);
+ if (!*pb) return;
+ if (sep) {
+ cybozu::writeChar(pb, os, sep);
+ if (!*pb) return;
+ }
+ b.save(pb, os, ioMode);
+ }
+ bool isZero() const { return a.isZero() && b.isZero(); }
+ bool isOne() const { return a.isOne() && b.isZero(); }
+ bool operator==(const Fp2T& rhs) const { return a == rhs.a && b == rhs.b; }
+ bool operator!=(const Fp2T& rhs) const { return !operator==(rhs); }
+ /*
+		return true if a is odd (b is not considered)
+		this function is only for the compressed representation of EC
+ isOdd() is not good naming. QQQ
+ */
+ bool isOdd() const { return a.isOdd(); }
+ /*
+		solve (a + bi)^2 = (a^2 - b^2) + 2abi = c + di for a and b:
+		a^2 - b^2 = c, 2ab = d
+		a^2 = (c +/- sqrt(c^2 + d^2)) / 2
+		b = d / (2a)
+ */
+ static inline bool squareRoot(Fp2T& y, const Fp2T& x)
+ {
+ Fp t1, t2;
+ if (x.b.isZero()) {
+ if (Fp::squareRoot(t1, x.a)) {
+ y.a = t1;
+ y.b.clear();
+ } else {
+ bool b = Fp::squareRoot(t1, -x.a);
+ assert(b); (void)b;
+ y.a.clear();
+ y.b = t1;
+ }
+ return true;
+ }
+ Fp::sqr(t1, x.a);
+ Fp::sqr(t2, x.b);
+ t1 += t2; // c^2 + d^2
+ if (!Fp::squareRoot(t1, t1)) return false;
+ Fp::add(t2, x.a, t1);
+ Fp::divBy2(t2, t2);
+ if (!Fp::squareRoot(t2, t2)) {
+ Fp::sub(t2, x.a, t1);
+ Fp::divBy2(t2, t2);
+ bool b = Fp::squareRoot(t2, t2);
+ assert(b); (void)b;
+ }
+ y.a = t2;
+ t2 += t2;
+ Fp::inv(t2, t2);
+ Fp::mul(y.b, x.b, t2);
+ return true;
+ }
+ static void inline norm(Fp& y, const Fp2T& x)
+ {
+ Fp aa, bb;
+ Fp::sqr(aa, x.a);
+ Fp::sqr(bb, x.b);
+ Fp::add(y, aa, bb);
+ }
+ /*
+ Frobenius
+ i^2 = -1
+		(a + bi)^p = a + b i^p, since a^p = a and b^p = b in Fp
+ = a + bi if p = 1 mod 4
+ = a - bi if p = 3 mod 4
+ */
+ static void Frobenius(Fp2T& y, const Fp2T& x)
+ {
+ if (Fp::getOp().pmod4 == 1) {
+ if (&y != &x) {
+ y = x;
+ }
+ } else {
+ if (&y != &x) {
+ y.a = x.a;
+ }
+ Fp::neg(y.b, x.b);
+ }
+ }
+
+ static uint32_t get_xi_a() { return Fp::getOp().xi_a; }
+ static void init()
+ {
+// assert(Fp::maxSize <= 256);
+ mcl::fp::Op& op = Fp::op_;
+ assert(op.xi_a);
+ mul_xi = 0;
+#ifdef MCL_XBYAK_DIRECT_CALL
+ add = fp::func_ptr_cast<void (*)(Fp2T& z, const Fp2T& x, const Fp2T& y)>(op.fp2_addA_);
+ if (add == 0) add = addC;
+ sub = fp::func_ptr_cast<void (*)(Fp2T& z, const Fp2T& x, const Fp2T& y)>(op.fp2_subA_);
+ if (sub == 0) sub = subC;
+ neg = fp::func_ptr_cast<void (*)(Fp2T& y, const Fp2T& x)>(op.fp2_negA_);
+ if (neg == 0) neg = negC;
+ mul = fp::func_ptr_cast<void (*)(Fp2T& z, const Fp2T& x, const Fp2T& y)>(op.fp2_mulA_);
+ if (mul == 0) mul = mulC;
+ sqr = fp::func_ptr_cast<void (*)(Fp2T& y, const Fp2T& x)>(op.fp2_sqrA_);
+ if (sqr == 0) sqr = sqrC;
+ mul_xi = fp::func_ptr_cast<void (*)(Fp2T&, const Fp2T&)>(op.fp2_mul_xiA_);
+#endif
+ op.fp2_inv = fp2_invW;
+ if (mul_xi == 0) {
+ if (op.xi_a == 1) {
+ mul_xi = fp2_mul_xi_1_1iC;
+ } else {
+ mul_xi = fp2_mul_xiC;
+ }
+ }
+ FpDblT<Fp>::init();
+ Fp2DblT<Fp>::init();
+ // call init before Fp2::pow because FpDbl is used in Fp2T
+ const Fp2T xi(op.xi_a, 1);
+ const mpz_class& p = Fp::getOp().mp;
+ Fp2T::pow(g[0], xi, (p - 1) / 6); // g = xi^((p-1)/6)
+ for (size_t i = 1; i < gN; i++) {
+ g[i] = g[i - 1] * g[0];
+ }
+ /*
+			permute [0, 1, 2, 3, 4] => [1, 3, 0, 2, 4]
+ g[0] = g^2
+ g[1] = g^4
+ g[2] = g^1
+ g[3] = g^3
+ g[4] = g^5
+ */
+ {
+ Fp2T t = g[0];
+ g[0] = g[1];
+ g[1] = g[3];
+ g[3] = g[2];
+ g[2] = t;
+ }
+ for (size_t i = 0; i < gN; i++) {
+ Fp2T t(g[i].a, g[i].b);
+ if (Fp::getOp().pmod4 == 3) Fp::neg(t.b, t.b);
+ Fp2T::mul(g2[i], t, g[i]);
+ g3[i] = g[i] * g2[i];
+ }
+ }
+#ifndef CYBOZU_DONT_USE_EXCEPTION
+ template<class InputStream>
+ void load(InputStream& is, int ioMode = IoSerialize)
+ {
+ bool b;
+ load(&b, is, ioMode);
+ if (!b) throw cybozu::Exception("Fp2T:load");
+ }
+ template<class OutputStream>
+ void save(OutputStream& os, int ioMode = IoSerialize) const
+ {
+ bool b;
+ save(&b, os, ioMode);
+ if (!b) throw cybozu::Exception("Fp2T:save");
+ }
+ template<class S>
+ void setArray(const S *buf, size_t n)
+ {
+ bool b;
+ setArray(&b, buf, n);
+ if (!b) throw cybozu::Exception("Fp2T:setArray");
+ }
+#endif
+#ifndef CYBOZU_DONT_USE_STRING
+ Fp2T(const std::string& a, const std::string& b, int base = 0) : a(a, base), b(b, base) {}
+ friend std::istream& operator>>(std::istream& is, Fp2T& self)
+ {
+ self.load(is, fp::detectIoMode(Fp::BaseFp::getIoMode(), is));
+ return is;
+ }
+ friend std::ostream& operator<<(std::ostream& os, const Fp2T& self)
+ {
+ self.save(os, fp::detectIoMode(Fp::BaseFp::getIoMode(), os));
+ return os;
+ }
+#endif
+private:
+ /*
+ default Fp2T operator
+ Fp2T = Fp[i]/(i^2 + 1)
+ */
+ static void addC(Fp2T& z, const Fp2T& x, const Fp2T& y)
+ {
+ Fp::add(z.a, x.a, y.a);
+ Fp::add(z.b, x.b, y.b);
+ }
+ static void subC(Fp2T& z, const Fp2T& x, const Fp2T& y)
+ {
+ Fp::sub(z.a, x.a, y.a);
+ Fp::sub(z.b, x.b, y.b);
+ }
+ static void negC(Fp2T& y, const Fp2T& x)
+ {
+ Fp::neg(y.a, x.a);
+ Fp::neg(y.b, x.b);
+ }
+#if 0
+ /*
+ x = a + bi, y = c + di, i^2 = -1
+ z = xy = (a + bi)(c + di) = (ac - bd) + (ad + bc)i
+ ad+bc = (a + b)(c + d) - ac - bd
+ # of mod = 3
+ */
+ static void fp2_mulW(Unit *z, const Unit *x, const Unit *y)
+ {
+ const Fp *px = reinterpret_cast<const Fp*>(x);
+ const Fp *py = reinterpret_cast<const Fp*>(y);
+ const Fp& a = px[0];
+ const Fp& b = px[1];
+ const Fp& c = py[0];
+ const Fp& d = py[1];
+ Fp *pz = reinterpret_cast<Fp*>(z);
+ Fp t1, t2, ac, bd;
+ Fp::add(t1, a, b);
+ Fp::add(t2, c, d);
+ t1 *= t2; // (a + b)(c + d)
+ Fp::mul(ac, a, c);
+ Fp::mul(bd, b, d);
+ Fp::sub(pz[0], ac, bd); // ac - bd
+ Fp::sub(pz[1], t1, ac);
+ pz[1] -= bd;
+ }
+ static void fp2_mulNFW(Fp2T& z, const Fp2T& x, const Fp2T& y)
+ {
+ const fp::Op& op = Fp::op_;
+ op.fp2_mulNF((Unit*)&z, (const Unit*)&x, (const Unit*)&y, op.p);
+ }
+#endif
+ static void mulC(Fp2T& z, const Fp2T& x, const Fp2T& y)
+ {
+ Fp2Dbl d;
+ Fp2Dbl::mulPre(d, x, y);
+ FpDbl::mod(z.a, d.a);
+ FpDbl::mod(z.b, d.b);
+ }
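+	/*
+	Editor's note (not part of mcl): the 3-multiplication trick used by mulPre
+	(and by the commented-out fp2_mulW above), written over plain integers as a
+	self-contained sketch: (a + bi)(c + di) = (ac - bd) + ((a + b)(c + d) - ac - bd)i.
+	*/
+#if 0
+	static void mul3Sketch(long& zr, long& zi, long a, long b, long c, long d)
+	{
+		const long ac = a * c;
+		const long bd = b * d;
+		zr = ac - bd;                     // real part
+		zi = (a + b) * (c + d) - ac - bd; // = ad + bc, one mul instead of two
+	}
+#endif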
+ /*
+ x = a + bi, i^2 = -1
+ y = x^2 = (a + bi)^2 = (a + b)(a - b) + 2abi
+ */
+ static void sqrC(Fp2T& y, const Fp2T& x)
+ {
+ const Fp& a = x.a;
+ const Fp& b = x.b;
+#if 1 // faster than using FpDbl
+ Fp t1, t2, t3;
+ Fp::add(t1, b, b); // 2b
+ t1 *= a; // 2ab
+ Fp::add(t2, a, b); // a + b
+ Fp::sub(t3, a, b); // a - b
+ Fp::mul(y.a, t2, t3); // (a + b)(a - b)
+ y.b = t1;
+#else
+ Fp t1, t2;
+ FpDbl d1, d2;
+ Fp::addPre(t1, b, b); // 2b
+ FpDbl::mulPre(d2, t1, a); // 2ab
+ Fp::addPre(t1, a, b); // a + b
+ Fp::sub(t2, a, b); // a - b
+ FpDbl::mulPre(d1, t1, t2); // (a + b)(a - b)
+ FpDbl::mod(py[0], d1);
+ FpDbl::mod(py[1], d2);
+#endif
+ }
+ /*
+ xi = xi_a + i
+ x = a + bi
+ y = (a + bi)xi = (a + bi)(xi_a + i)
+	= (a xi_a - b) + (a + b xi_a)i
+ */
+ static void fp2_mul_xiC(Fp2T& y, const Fp2T& x)
+ {
+ const Fp& a = x.a;
+ const Fp& b = x.b;
+ Fp t;
+ Fp::mulUnit(t, a, Fp::getOp().xi_a);
+ t -= b;
+ Fp::mulUnit(y.b, b, Fp::getOp().xi_a);
+ y.b += a;
+ y.a = t;
+ }
+ /*
+ xi = 1 + i ; xi_a = 1
+ y = (a + bi)xi = (a - b) + (a + b)i
+ */
+ static void fp2_mul_xi_1_1iC(Fp2T& y, const Fp2T& x)
+ {
+ const Fp& a = x.a;
+ const Fp& b = x.b;
+ Fp t;
+ Fp::add(t, a, b);
+ Fp::sub(y.a, a, b);
+ y.b = t;
+ }
+ /*
+ x = a + bi
+ 1 / x = (a - bi) / (a^2 + b^2)
+ */
+ static void fp2_invW(Unit *y, const Unit *x)
+ {
+ const Fp *px = reinterpret_cast<const Fp*>(x);
+ Fp *py = reinterpret_cast<Fp*>(y);
+ const Fp& a = px[0];
+ const Fp& b = px[1];
+ Fp aa, bb;
+ Fp::sqr(aa, a);
+ Fp::sqr(bb, b);
+ aa += bb;
+ Fp::inv(aa, aa); // aa = 1 / (a^2 + b^2)
+ Fp::mul(py[0], a, aa);
+ Fp::mul(py[1], b, aa);
+ Fp::neg(py[1], py[1]);
+ }
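+	/*
+	Editor's note (not part of mcl): quick numeric check of the inversion
+	formula above: for x = 1 + 2i, a^2 + b^2 = 5, so 1/x = (1 - 2i)/5 and
+	(1 + 2i)(1 - 2i)/5 = 5/5 = 1.
+	*/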
+};
+
+#ifdef MCL_XBYAK_DIRECT_CALL
+template<class Fp_> void (*Fp2T<Fp_>::add)(Fp2T& z, const Fp2T& x, const Fp2T& y);
+template<class Fp_> void (*Fp2T<Fp_>::sub)(Fp2T& z, const Fp2T& x, const Fp2T& y);
+template<class Fp_> void (*Fp2T<Fp_>::neg)(Fp2T& y, const Fp2T& x);
+template<class Fp_> void (*Fp2T<Fp_>::mul)(Fp2T& z, const Fp2T& x, const Fp2T& y);
+template<class Fp_> void (*Fp2T<Fp_>::sqr)(Fp2T& y, const Fp2T& x);
+#endif
+template<class Fp_> void (*Fp2T<Fp_>::mul_xi)(Fp2T& y, const Fp2T& x);
+
+template<class Fp>
+struct Fp2DblT {
+ typedef FpDblT<Fp> FpDbl;
+ typedef Fp2T<Fp> Fp2;
+ typedef fp::Unit Unit;
+ FpDbl a, b;
+ static void add(Fp2DblT& z, const Fp2DblT& x, const Fp2DblT& y)
+ {
+ FpDbl::add(z.a, x.a, y.a);
+ FpDbl::add(z.b, x.b, y.b);
+ }
+ static void addPre(Fp2DblT& z, const Fp2DblT& x, const Fp2DblT& y)
+ {
+ FpDbl::addPre(z.a, x.a, y.a);
+ FpDbl::addPre(z.b, x.b, y.b);
+ }
+ static void sub(Fp2DblT& z, const Fp2DblT& x, const Fp2DblT& y)
+ {
+ FpDbl::sub(z.a, x.a, y.a);
+ FpDbl::sub(z.b, x.b, y.b);
+ }
+ static void subPre(Fp2DblT& z, const Fp2DblT& x, const Fp2DblT& y)
+ {
+ FpDbl::subPre(z.a, x.a, y.a);
+ FpDbl::subPre(z.b, x.b, y.b);
+ }
+ static void neg(Fp2DblT& y, const Fp2DblT& x)
+ {
+ FpDbl::neg(y.a, x.a);
+ FpDbl::neg(y.b, x.b);
+ }
+ static void mul_xi(Fp2DblT& y, const Fp2DblT& x)
+ {
+ const uint32_t xi_a = Fp2::get_xi_a();
+ if (xi_a == 1) {
+ FpDbl t;
+ FpDbl::add(t, x.a, x.b);
+ FpDbl::sub(y.a, x.a, x.b);
+ y.b = t;
+ } else {
+ FpDbl t;
+ FpDbl::mulUnit(t, x.a, xi_a);
+ FpDbl::sub(t, t, x.b);
+ FpDbl::mulUnit(y.b, x.b, xi_a);
+ FpDbl::add(y.b, y.b, x.a);
+ y.a = t;
+ }
+ }
+ static void (*mulPre)(Fp2DblT&, const Fp2&, const Fp2&);
+ static void (*sqrPre)(Fp2DblT&, const Fp2&);
+ static void mod(Fp2& y, const Fp2DblT& x)
+ {
+ FpDbl::mod(y.a, x.a);
+ FpDbl::mod(y.b, x.b);
+ }
+#ifndef CYBOZU_DONT_USE_STRING
+ friend std::ostream& operator<<(std::ostream& os, const Fp2DblT& x)
+ {
+ return os << x.a << ' ' << x.b;
+ }
+#endif
+ void operator+=(const Fp2DblT& x) { add(*this, *this, x); }
+ void operator-=(const Fp2DblT& x) { sub(*this, *this, x); }
+ static void init()
+ {
+ const mcl::fp::Op& op = Fp::getOp();
+ if (op.fp2Dbl_mulPreA_) {
+ mulPre = fp::func_ptr_cast<void (*)(Fp2DblT&, const Fp2&, const Fp2&)>(op.fp2Dbl_mulPreA_);
+ } else {
+ if (op.isFullBit) {
+ mulPre = fp2Dbl_mulPreW<true>;
+ } else {
+ mulPre = fp2Dbl_mulPreW<false>;
+ }
+ }
+ if (op.fp2Dbl_sqrPreA_) {
+ sqrPre = fp::func_ptr_cast<void (*)(Fp2DblT&, const Fp2&)>(op.fp2Dbl_sqrPreA_);
+ } else {
+ if (op.isFullBit) {
+ sqrPre = fp2Dbl_sqrPreW<true>;
+ } else {
+ sqrPre = fp2Dbl_sqrPreW<false>;
+ }
+ }
+ }
+ /*
+ Fp2Dbl::mulPre by FpDblT
+ @note mod of NIST_P192 is fast
+ */
+ template<bool isFullBit>
+ static void fp2Dbl_mulPreW(Fp2DblT& z, const Fp2& x, const Fp2& y)
+ {
+ const Fp& a = x.a;
+ const Fp& b = x.b;
+ const Fp& c = y.a;
+ const Fp& d = y.b;
+ FpDbl& d0 = z.a;
+ FpDbl& d1 = z.b;
+ FpDbl d2;
+ Fp s, t;
+ if (isFullBit) {
+ Fp::add(s, a, b);
+ Fp::add(t, c, d);
+ } else {
+ Fp::addPre(s, a, b);
+ Fp::addPre(t, c, d);
+ }
+ FpDbl::mulPre(d1, s, t); // (a + b)(c + d)
+ FpDbl::mulPre(d0, a, c);
+ FpDbl::mulPre(d2, b, d);
+ if (isFullBit) {
+ FpDbl::sub(d1, d1, d0); // (a + b)(c + d) - ac
+ FpDbl::sub(d1, d1, d2); // (a + b)(c + d) - ac - bd
+ } else {
+ FpDbl::subPre(d1, d1, d0);
+ FpDbl::subPre(d1, d1, d2);
+ }
+ FpDbl::sub(d0, d0, d2); // ac - bd
+ }
+ template<bool isFullBit>
+ static void fp2Dbl_sqrPreW(Fp2DblT& y, const Fp2& x)
+ {
+ Fp t1, t2;
+ if (isFullBit) {
+ Fp::add(t1, x.b, x.b); // 2b
+ Fp::add(t2, x.a, x.b); // a + b
+ } else {
+ Fp::addPre(t1, x.b, x.b); // 2b
+ Fp::addPre(t2, x.a, x.b); // a + b
+ }
+ FpDbl::mulPre(y.b, t1, x.a); // 2ab
+ Fp::sub(t1, x.a, x.b); // a - b
+ FpDbl::mulPre(y.a, t1, t2); // (a + b)(a - b)
+ }
+};
+
+template<class Fp> void (*Fp2DblT<Fp>::mulPre)(Fp2DblT&, const Fp2T<Fp>&, const Fp2T<Fp>&);
+template<class Fp> void (*Fp2DblT<Fp>::sqrPre)(Fp2DblT&, const Fp2T<Fp>&);
+
+template<class Fp> Fp2T<Fp> Fp2T<Fp>::g[Fp2T<Fp>::gN];
+template<class Fp> Fp2T<Fp> Fp2T<Fp>::g2[Fp2T<Fp>::gN];
+template<class Fp> Fp2T<Fp> Fp2T<Fp>::g3[Fp2T<Fp>::gN];
+
+template<class Fp>
+struct Fp6DblT;
+/*
+ Fp6T = Fp2[v] / (v^3 - xi)
+ x = a + b v + c v^2
+*/
+template<class _Fp>
+struct Fp6T : public fp::Serializable<Fp6T<_Fp>,
+ fp::Operator<Fp6T<_Fp> > > {
+ typedef _Fp Fp;
+ typedef Fp2T<Fp> Fp2;
+ typedef Fp2DblT<Fp> Fp2Dbl;
+ typedef Fp6DblT<Fp> Fp6Dbl;
+ typedef Fp BaseFp;
+ Fp2 a, b, c;
+ Fp6T() { }
+ Fp6T(int64_t a) : a(a) , b(0) , c(0) { }
+ Fp6T(const Fp2& a, const Fp2& b, const Fp2& c) : a(a) , b(b) , c(c) { }
+ void clear()
+ {
+ a.clear();
+ b.clear();
+ c.clear();
+ }
+ Fp* getFp0() { return a.getFp0(); }
+ const Fp* getFp0() const { return a.getFp0(); }
+ Fp2* getFp2() { return &a; }
+ const Fp2* getFp2() const { return &a; }
+ void set(const Fp2 &a_, const Fp2 &b_, const Fp2 &c_)
+ {
+ a = a_;
+ b = b_;
+ c = c_;
+ }
+ bool isZero() const
+ {
+ return a.isZero() && b.isZero() && c.isZero();
+ }
+ bool isOne() const
+ {
+ return a.isOne() && b.isZero() && c.isZero();
+ }
+ bool operator==(const Fp6T& rhs) const
+ {
+ return a == rhs.a && b == rhs.b && c == rhs.c;
+ }
+ bool operator!=(const Fp6T& rhs) const { return !operator==(rhs); }
+ template<class InputStream>
+ void load(bool *pb, InputStream& is, int ioMode)
+ {
+ a.load(pb, is, ioMode); if (!*pb) return;
+ b.load(pb, is, ioMode); if (!*pb) return;
+ c.load(pb, is, ioMode); if (!*pb) return;
+ }
+ template<class OutputStream>
+ void save(bool *pb, OutputStream& os, int ioMode) const
+ {
+ const char sep = *fp::getIoSeparator(ioMode);
+ a.save(pb, os, ioMode); if (!*pb) return;
+ if (sep) {
+ cybozu::writeChar(pb, os, sep);
+ if (!*pb) return;
+ }
+ b.save(pb, os, ioMode); if (!*pb) return;
+ if (sep) {
+ cybozu::writeChar(pb, os, sep);
+ if (!*pb) return;
+ }
+ c.save(pb, os, ioMode);
+ }
+#ifndef CYBOZU_DONT_USE_EXCEPTION
+ template<class InputStream>
+ void load(InputStream& is, int ioMode = IoSerialize)
+ {
+ bool b;
+ load(&b, is, ioMode);
+ if (!b) throw cybozu::Exception("Fp6T:load");
+ }
+ template<class OutputStream>
+ void save(OutputStream& os, int ioMode = IoSerialize) const
+ {
+ bool b;
+ save(&b, os, ioMode);
+ if (!b) throw cybozu::Exception("Fp6T:save");
+ }
+#endif
+#ifndef CYBOZU_DONT_USE_STRING
+ friend std::istream& operator>>(std::istream& is, Fp6T& self)
+ {
+ self.load(is, fp::detectIoMode(Fp::BaseFp::getIoMode(), is));
+ return is;
+ }
+ friend std::ostream& operator<<(std::ostream& os, const Fp6T& self)
+ {
+ self.save(os, fp::detectIoMode(Fp::BaseFp::getIoMode(), os));
+ return os;
+ }
+#endif
+ static void add(Fp6T& z, const Fp6T& x, const Fp6T& y)
+ {
+ Fp2::add(z.a, x.a, y.a);
+ Fp2::add(z.b, x.b, y.b);
+ Fp2::add(z.c, x.c, y.c);
+ }
+ static void sub(Fp6T& z, const Fp6T& x, const Fp6T& y)
+ {
+ Fp2::sub(z.a, x.a, y.a);
+ Fp2::sub(z.b, x.b, y.b);
+ Fp2::sub(z.c, x.c, y.c);
+ }
+ static void neg(Fp6T& y, const Fp6T& x)
+ {
+ Fp2::neg(y.a, x.a);
+ Fp2::neg(y.b, x.b);
+ Fp2::neg(y.c, x.c);
+ }
+ /*
+ x = a + bv + cv^2, v^3 = xi
+ x^2 = (a^2 + 2bc xi) + (c^2 xi + 2ab)v + (b^2 + 2ac)v^2
+
+ b^2 + 2ac = (a + b + c)^2 - a^2 - 2bc - c^2 - 2ab
+ */
+ static void sqr(Fp6T& y, const Fp6T& x)
+ {
+ Fp2 t1, t2, t3;
+ Fp2::mul(t1, x.a, x.b);
+ t1 += t1; // 2ab
+ Fp2::mul(t2, x.b, x.c);
+ t2 += t2; // 2bc
+ Fp2::sqr(t3, x.c); // c^2
+ Fp2::add(y.c, x.a, x.c); // a + c, destroy y.c
+ y.c += x.b; // a + b + c
+ Fp2::sqr(y.b, y.c); // (a + b + c)^2, destroy y.b
+ y.b -= t2; // (a + b + c)^2 - 2bc
+ Fp2::mul_xi(t2, t2); // 2bc xi
+ Fp2::sqr(y.a, x.a); // a^2, destroy y.a
+ y.b -= y.a; // (a + b + c)^2 - 2bc - a^2
+ y.a += t2; // a^2 + 2bc xi
+ Fp2::sub(y.c, y.b, t3); // (a + b + c)^2 - 2bc - a^2 - c^2
+ Fp2::mul_xi(y.b, t3); // c^2 xi
+ y.b += t1; // c^2 xi + 2ab
+ y.c -= t1; // b^2 + 2ac
+ }
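+	/*
+	Editor's note (not part of mcl): the identity used above follows from
+	(a + b + c)^2 = a^2 + b^2 + c^2 + 2ab + 2bc + 2ac; subtracting
+	a^2, c^2, 2ab and 2bc leaves exactly b^2 + 2ac.
+	*/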
+ static inline void mul(Fp6T& z, const Fp6T& x, const Fp6T& y);
+ /*
+ x = a + bv + cv^2, v^3 = xi
+ y = 1/x = p/q where
+ p = (a^2 - bc xi) + (c^2 xi - ab)v + (b^2 - ac)v^2
+ q = c^3 xi^2 + b(b^2 - 3ac)xi + a^3
+ = (a^2 - bc xi)a + ((c^2 xi - ab)c + (b^2 - ac)b) xi
+ */
+ static void inv(Fp6T& y, const Fp6T& x)
+ {
+ const Fp2& a = x.a;
+ const Fp2& b = x.b;
+ const Fp2& c = x.c;
+ Fp2 aa, bb, cc, ab, bc, ac;
+ Fp2::sqr(aa, a);
+ Fp2::sqr(bb, b);
+ Fp2::sqr(cc, c);
+ Fp2::mul(ab, a, b);
+ Fp2::mul(bc, b, c);
+ Fp2::mul(ac, c, a);
+
+ Fp6T p;
+ Fp2::mul_xi(p.a, bc);
+ Fp2::sub(p.a, aa, p.a); // a^2 - bc xi
+ Fp2::mul_xi(p.b, cc);
+ p.b -= ab; // c^2 xi - ab
+ Fp2::sub(p.c, bb, ac); // b^2 - ac
+ Fp2 q, t;
+ Fp2::mul(q, p.b, c);
+ Fp2::mul(t, p.c, b);
+ q += t;
+ Fp2::mul_xi(q, q);
+ Fp2::mul(t, p.a, a);
+ q += t;
+ Fp2::inv(q, q);
+
+ Fp2::mul(y.a, p.a, q);
+ Fp2::mul(y.b, p.b, q);
+ Fp2::mul(y.c, p.c, q);
+ }
+};
+
+template<class Fp>
+struct Fp6DblT {
+ typedef Fp2T<Fp> Fp2;
+ typedef Fp6T<Fp> Fp6;
+ typedef Fp2DblT<Fp> Fp2Dbl;
+ typedef Fp6DblT<Fp> Fp6Dbl;
+ typedef fp::Unit Unit;
+ Fp2Dbl a, b, c;
+ static void add(Fp6Dbl& z, const Fp6Dbl& x, const Fp6Dbl& y)
+ {
+ Fp2Dbl::add(z.a, x.a, y.a);
+ Fp2Dbl::add(z.b, x.b, y.b);
+ Fp2Dbl::add(z.c, x.c, y.c);
+ }
+ static void sub(Fp6Dbl& z, const Fp6Dbl& x, const Fp6Dbl& y)
+ {
+ Fp2Dbl::sub(z.a, x.a, y.a);
+ Fp2Dbl::sub(z.b, x.b, y.b);
+ Fp2Dbl::sub(z.c, x.c, y.c);
+ }
+ /*
+ x = a + bv + cv^2, y = d + ev + fv^2, v^3 = xi
+ xy = (ad + (bf + ce)xi) + ((ae + bd) + cf xi)v + ((af + cd) + be)v^2
+ bf + ce = (b + c)(e + f) - be - cf
+ ae + bd = (a + b)(e + d) - ad - be
+ af + cd = (a + c)(d + f) - ad - cf
+ */
+ static void mulPre(Fp6DblT& z, const Fp6& x, const Fp6& y)
+ {
+//clk.begin();
+ const Fp2& a = x.a;
+ const Fp2& b = x.b;
+ const Fp2& c = x.c;
+ const Fp2& d = y.a;
+ const Fp2& e = y.b;
+ const Fp2& f = y.c;
+ Fp2Dbl& za = z.a;
+ Fp2Dbl& zb = z.b;
+ Fp2Dbl& zc = z.c;
+ Fp2Dbl BE;
+ Fp2Dbl::mulPre(za, a, d);
+ Fp2Dbl::mulPre(BE, b, e);
+ Fp2Dbl::mulPre(zb, c, f);
+
+ Fp2 t1, t2, t3, t4;
+ Fp2::add(t1, b, c);
+ Fp2::add(t2, e, f);
+ Fp2Dbl T1;
+ Fp2Dbl::mulPre(T1, t1, t2);
+ Fp2Dbl::sub(T1, T1, BE);
+ Fp2Dbl::sub(T1, T1, zb);
+ Fp2Dbl::mul_xi(T1, T1);
+
+ Fp2::add(t2, a, b);
+ Fp2::add(t3, e, d);
+ Fp2Dbl T2;
+ Fp2Dbl::mulPre(T2, t2, t3);
+ Fp2Dbl::sub(T2, T2, za);
+ Fp2Dbl::sub(T2, T2, BE);
+
+ Fp2::add(t3, a, c);
+ Fp2::add(t4, d, f);
+ Fp2Dbl::mulPre(zc, t3, t4);
+ Fp2Dbl::sub(zc, zc, za);
+ Fp2Dbl::sub(zc, zc, zb);
+
+ Fp2Dbl::add(za, za, T1);
+ Fp2Dbl::mul_xi(zb, zb);
+ Fp2Dbl::add(zb, zb, T2);
+ Fp2Dbl::add(zc, zc, BE);
+//clk.end();
+ }
+ static void mod(Fp6& y, const Fp6Dbl& x)
+ {
+ Fp2Dbl::mod(y.a, x.a);
+ Fp2Dbl::mod(y.b, x.b);
+ Fp2Dbl::mod(y.c, x.c);
+ }
+};
+
+template<class Fp>
+inline void Fp6T<Fp>::mul(Fp6T<Fp>& z, const Fp6T<Fp>& x, const Fp6T<Fp>& y)
+{
+ Fp6DblT<Fp> Z;
+ Fp6DblT<Fp>::mulPre(Z, x, y);
+ Fp6DblT<Fp>::mod(z, Z);
+}
+
+/*
+ Fp12T = Fp6[w] / (w^2 - v)
+ x = a + b w
+*/
+template<class Fp>
+struct Fp12T : public fp::Serializable<Fp12T<Fp>,
+ fp::Operator<Fp12T<Fp> > > {
+ typedef Fp2T<Fp> Fp2;
+ typedef Fp6T<Fp> Fp6;
+ typedef Fp2DblT<Fp> Fp2Dbl;
+ typedef Fp6DblT<Fp> Fp6Dbl;
+ typedef Fp BaseFp;
+ Fp6 a, b;
+ Fp12T() {}
+ Fp12T(int64_t a) : a(a), b(0) {}
+ Fp12T(const Fp6& a, const Fp6& b) : a(a), b(b) {}
+ void clear()
+ {
+ a.clear();
+ b.clear();
+ }
+ void setOne()
+ {
+ clear();
+ a.a.a = 1;
+ }
+
+ Fp* getFp0() { return a.getFp0(); }
+ const Fp* getFp0() const { return a.getFp0(); }
+ Fp2* getFp2() { return a.getFp2(); }
+ const Fp2* getFp2() const { return a.getFp2(); }
+ void set(const Fp2& v0, const Fp2& v1, const Fp2& v2, const Fp2& v3, const Fp2& v4, const Fp2& v5)
+ {
+ a.set(v0, v1, v2);
+ b.set(v3, v4, v5);
+ }
+
+ bool isZero() const
+ {
+ return a.isZero() && b.isZero();
+ }
+ bool isOne() const
+ {
+ return a.isOne() && b.isZero();
+ }
+ bool operator==(const Fp12T& rhs) const
+ {
+ return a == rhs.a && b == rhs.b;
+ }
+ bool operator!=(const Fp12T& rhs) const { return !operator==(rhs); }
+ static void add(Fp12T& z, const Fp12T& x, const Fp12T& y)
+ {
+ Fp6::add(z.a, x.a, y.a);
+ Fp6::add(z.b, x.b, y.b);
+ }
+ static void sub(Fp12T& z, const Fp12T& x, const Fp12T& y)
+ {
+ Fp6::sub(z.a, x.a, y.a);
+ Fp6::sub(z.b, x.b, y.b);
+ }
+ static void neg(Fp12T& z, const Fp12T& x)
+ {
+ Fp6::neg(z.a, x.a);
+ Fp6::neg(z.b, x.b);
+ }
+ /*
+ z = x v + y
+ in Fp6 : (a + bv + cv^2)v = cv^3 + av + bv^2 = cxi + av + bv^2
+ */
+ static void mulVadd(Fp6& z, const Fp6& x, const Fp6& y)
+ {
+ Fp2 t;
+ Fp2::mul_xi(t, x.c);
+ Fp2::add(z.c, x.b, y.c);
+ Fp2::add(z.b, x.a, y.b);
+ Fp2::add(z.a, t, y.a);
+ }
+ static void mulVadd(Fp6Dbl& z, const Fp6Dbl& x, const Fp6Dbl& y)
+ {
+ Fp2Dbl t;
+ Fp2Dbl::mul_xi(t, x.c);
+ Fp2Dbl::add(z.c, x.b, y.c);
+ Fp2Dbl::add(z.b, x.a, y.b);
+ Fp2Dbl::add(z.a, t, y.a);
+ }
+ /*
+ x = a + bw, y = c + dw, w^2 = v
+ z = xy = (a + bw)(c + dw) = (ac + bdv) + (ad + bc)w
+ ad+bc = (a + b)(c + d) - ac - bd
+
+ in Fp6 : (a + bv + cv^2)v = cv^3 + av + bv^2 = cxi + av + bv^2
+ */
+ static void mul(Fp12T& z, const Fp12T& x, const Fp12T& y)
+ {
+ // 4.7Kclk -> 4.55Kclk
+ const Fp6& a = x.a;
+ const Fp6& b = x.b;
+ const Fp6& c = y.a;
+ const Fp6& d = y.b;
+ Fp6 t1, t2;
+ Fp6::add(t1, a, b);
+ Fp6::add(t2, c, d);
+#if 1
+ Fp6Dbl T, AC, BD;
+ Fp6Dbl::mulPre(AC, a, c);
+ Fp6Dbl::mulPre(BD, b, d);
+ mulVadd(T, BD, AC);
+ Fp6Dbl::mod(z.a, T);
+ Fp6Dbl::mulPre(T, t1, t2); // (a + b)(c + d)
+ Fp6Dbl::sub(T, T, AC);
+ Fp6Dbl::sub(T, T, BD);
+ Fp6Dbl::mod(z.b, T);
+#else
+ Fp6 ac, bd;
+ t1 *= t2; // (a + b)(c + d)
+ Fp6::mul(ac, a, c);
+ Fp6::mul(bd, b, d);
+ mulVadd(z.a, bd, ac);
+ t1 -= ac;
+ Fp6::sub(z.b, t1, bd);
+#endif
+ }
+ /*
+ x = a + bw, w^2 = v
+ y = x^2 = (a + bw)^2 = (a^2 + b^2v) + 2abw
+ a^2 + b^2v = (a + b)(bv + a) - (abv + ab)
+ */
+ static void sqr(Fp12T& y, const Fp12T& x)
+ {
+ const Fp6& a = x.a;
+ const Fp6& b = x.b;
+ Fp6 t0, t1;
+ Fp6::add(t0, a, b); // a + b
+ mulVadd(t1, b, a); // bv + a
+ t0 *= t1; // (a + b)(bv + a)
+ Fp6::mul(t1, a, b); // ab
+ Fp6::add(y.b, t1, t1); // 2ab
+ mulVadd(y.a, t1, t1); // abv + ab
+ Fp6::sub(y.a, t0, y.a);
+ }
+ /*
+ x = a + bw, w^2 = v
+ y = 1/x = (a - bw) / (a^2 - b^2v)
+ */
+ static void inv(Fp12T& y, const Fp12T& x)
+ {
+ const Fp6& a = x.a;
+ const Fp6& b = x.b;
+ Fp6 t0, t1;
+ Fp6::sqr(t0, a);
+ Fp6::sqr(t1, b);
+ Fp2::mul_xi(t1.c, t1.c);
+ t0.a -= t1.c;
+ t0.b -= t1.a;
+ t0.c -= t1.b; // t0 = a^2 - b^2v
+ Fp6::inv(t0, t0);
+ Fp6::mul(y.a, x.a, t0);
+ Fp6::mul(y.b, x.b, t0);
+ Fp6::neg(y.b, y.b);
+ }
+ /*
+ y = 1 / x = conjugate of x if |x| = 1
+ */
+ static void unitaryInv(Fp12T& y, const Fp12T& x)
+ {
+ if (&y != &x) y.a = x.a;
+ Fp6::neg(y.b, x.b);
+ }
+ /*
+ Frobenius
+ i^2 = -1
+ (a + bi)^p = a + bi^p in Fp
+ = a + bi if p = 1 mod 4
+ = a - bi if p = 3 mod 4
+
+ g = xi^(p - 1) / 6
+ v^3 = xi in Fp2
+ v^p = ((v^6) ^ (p-1)/6) v = g^2 v
+ v^2p = g^4 v^2
+ (a + bv + cv^2)^p in Fp6
+ = F(a) + F(b)g^2 v + F(c) g^4 v^2
+
+ w^p = ((w^6) ^ (p-1)/6) w = g w
+ ((a + bv + cv^2)w)^p in Fp12T
+ = (F(a) g + F(b) g^3 v + F(c) g^5 v^2)w
+ */
+ static void Frobenius(Fp12T& y, const Fp12T& x)
+ {
+ for (int i = 0; i < 6; i++) {
+ Fp2::Frobenius(y.getFp2()[i], x.getFp2()[i]);
+ }
+ for (int i = 1; i < 6; i++) {
+ y.getFp2()[i] *= Fp2::get_gTbl()[i - 1];
+ }
+ }
+ static void Frobenius2(Fp12T& y, const Fp12T& x)
+ {
+#if 0
+ Frobenius(y, x);
+ Frobenius(y, y);
+#else
+ y.getFp2()[0] = x.getFp2()[0];
+ if (Fp::getOp().pmod4 == 1) {
+ for (int i = 1; i < 6; i++) {
+ Fp2::mul(y.getFp2()[i], x.getFp2()[i], Fp2::get_g2Tbl()[i]);
+ }
+ } else {
+ for (int i = 1; i < 6; i++) {
+ Fp2::mulFp(y.getFp2()[i], x.getFp2()[i], Fp2::get_g2Tbl()[i - 1].a);
+ }
+ }
+#endif
+ }
+ static void Frobenius3(Fp12T& y, const Fp12T& x)
+ {
+#if 0
+ Frobenius(y, x);
+ Frobenius(y, y);
+ Frobenius(y, y);
+#else
+ Fp2::Frobenius(y.getFp2()[0], x.getFp2()[0]);
+ for (int i = 1; i < 6; i++) {
+ Fp2::Frobenius(y.getFp2()[i], x.getFp2()[i]);
+ y.getFp2()[i] *= Fp2::get_g3Tbl()[i - 1];
+ }
+#endif
+ }
+ template<class InputStream>
+ void load(bool *pb, InputStream& is, int ioMode)
+ {
+ a.load(pb, is, ioMode); if (!*pb) return;
+ b.load(pb, is, ioMode);
+ }
+ template<class OutputStream>
+ void save(bool *pb, OutputStream& os, int ioMode) const
+ {
+ const char sep = *fp::getIoSeparator(ioMode);
+ a.save(pb, os, ioMode); if (!*pb) return;
+ if (sep) {
+ cybozu::writeChar(pb, os, sep);
+ if (!*pb) return;
+ }
+ b.save(pb, os, ioMode);
+ }
+#ifndef CYBOZU_DONT_USE_EXCEPTION
+ template<class InputStream>
+ void load(InputStream& is, int ioMode = IoSerialize)
+ {
+ bool b;
+ load(&b, is, ioMode);
+ if (!b) throw cybozu::Exception("Fp12T:load");
+ }
+ template<class OutputStream>
+ void save(OutputStream& os, int ioMode = IoSerialize) const
+ {
+ bool b;
+ save(&b, os, ioMode);
+ if (!b) throw cybozu::Exception("Fp12T:save");
+ }
+#endif
+#ifndef CYBOZU_DONT_USE_STRING
+ friend std::istream& operator>>(std::istream& is, Fp12T& self)
+ {
+ self.load(is, fp::detectIoMode(Fp::BaseFp::getIoMode(), is));
+ return is;
+ }
+ friend std::ostream& operator<<(std::ostream& os, const Fp12T& self)
+ {
+ self.save(os, fp::detectIoMode(Fp::BaseFp::getIoMode(), os));
+ return os;
+ }
+#endif
+};
+
+/*
+ convert multiplicative group to additive group
+*/
+template<class T>
+struct GroupMtoA : public T {
+ static T& castT(GroupMtoA& x) { return static_cast<T&>(x); }
+ static const T& castT(const GroupMtoA& x) { return static_cast<const T&>(x); }
+ void clear()
+ {
+ castT(*this) = 1;
+ }
+ bool isZero() const { return castT(*this).isOne(); }
+ static void add(GroupMtoA& z, const GroupMtoA& x, const GroupMtoA& y)
+ {
+ T::mul(castT(z), castT(x), castT(y));
+ }
+ static void dbl(GroupMtoA& y, const GroupMtoA& x)
+ {
+ T::sqr(castT(y), castT(x));
+ }
+ static void neg(GroupMtoA& y, const GroupMtoA& x)
+ {
+ // assume Fp12
+ T::unitaryInv(castT(y), castT(x));
+ }
+ static void Frobenus(GroupMtoA& y, const GroupMtoA& x)
+ {
+ T::Frobenius(castT(y), castT(x));
+ }
+ template<class INT>
+ static void mul(GroupMtoA& z, const GroupMtoA& x, const INT& y)
+ {
+ T::pow(castT(z), castT(x), y);
+ }
+ template<class INT>
+ static void mulGeneric(GroupMtoA& z, const GroupMtoA& x, const INT& y)
+ {
+ T::powGeneric(castT(z), castT(x), y);
+ }
+ void operator+=(const GroupMtoA& rhs)
+ {
+ add(*this, *this, rhs);
+ }
+ void normalize() {}
+private:
+ bool isOne() const;
+};
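+/*
+	Editor's sketch (not part of mcl): typical instantiation of GroupMtoA, wrapping
+	a multiplicative group such as Fp12 so that generic additive-group code can
+	drive it; the Fp parameter below is a placeholder.
+*/
+#if 0
+typedef GroupMtoA<Fp12T<Fp> > GT;
+void groupMtoASketch(GT& z, const GT& x, const GT& y, const mpz_class& n)
+{
+	GT::add(z, x, y); // really Fp12::mul
+	GT::dbl(z, z);    // really Fp12::sqr
+	GT::mul(z, z, n); // really Fp12::pow
+}
+#endif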
+
+} // mcl
+
diff --git a/vendor/github.com/tangerine-network/mcl/include/mcl/gmp_util.hpp b/vendor/github.com/tangerine-network/mcl/include/mcl/gmp_util.hpp
new file mode 100644
index 000000000..bcbd91a1e
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/mcl/gmp_util.hpp
@@ -0,0 +1,954 @@
+#pragma once
+/**
+ @file
+ @brief util function for gmp
+ @author MITSUNARI Shigeo(@herumi)
+ @license modified new BSD license
+ http://opensource.org/licenses/BSD-3-Clause
+*/
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <stdint.h>
+#include <cybozu/exception.hpp>
+#include <mcl/randgen.hpp>
+#ifdef _MSC_VER
+ #pragma warning(push)
+ #pragma warning(disable : 4616)
+ #pragma warning(disable : 4800)
+ #pragma warning(disable : 4244)
+ #pragma warning(disable : 4127)
+ #pragma warning(disable : 4512)
+ #pragma warning(disable : 4146)
+#endif
+#if defined(__EMSCRIPTEN__) || defined(__wasm__)
+ #define MCL_USE_VINT
+#endif
+#ifdef MCL_USE_VINT
+#include <mcl/vint.hpp>
+typedef mcl::Vint mpz_class;
+#else
+#include <gmpxx.h>
+#ifdef _MSC_VER
+ #pragma warning(pop)
+ #include <cybozu/link_mpir.hpp>
+#endif
+#endif
+
+#ifndef MCL_SIZEOF_UNIT
+ #if defined(CYBOZU_OS_BIT) && (CYBOZU_OS_BIT == 32)
+ #define MCL_SIZEOF_UNIT 4
+ #else
+ #define MCL_SIZEOF_UNIT 8
+ #endif
+#endif
+
+namespace mcl {
+
+namespace fp {
+
+#if MCL_SIZEOF_UNIT == 8
+typedef uint64_t Unit;
+#else
+typedef uint32_t Unit;
+#endif
+#define MCL_UNIT_BIT_SIZE (MCL_SIZEOF_UNIT * 8)
+
+} // mcl::fp
+
+namespace gmp {
+
+typedef mpz_class ImplType;
+
+// z = [buf[n-1]:..:buf[1]:buf[0]]
+// eg. buf[] = {0x12345678, 0xaabbccdd}; => z = 0xaabbccdd12345678;
+template<class T>
+void setArray(bool *pb, mpz_class& z, const T *buf, size_t n)
+{
+#ifdef MCL_USE_VINT
+ z.setArray(pb, buf, n);
+#else
+ mpz_import(z.get_mpz_t(), n, -1, sizeof(*buf), 0, 0, buf);
+ *pb = true;
+#endif
+}
+/*
+ buf[0, size) = x
+ buf[size, maxSize) with zero
+*/
+template<class T, class U>
+bool getArray_(T *buf, size_t maxSize, const U *x, int xn)//const mpz_srcptr x)
+{
+ const size_t bufByteSize = sizeof(T) * maxSize;
+ if (xn < 0) return false;
+ size_t xByteSize = sizeof(*x) * xn;
+ if (xByteSize > bufByteSize) return false;
+ memcpy(buf, x, xByteSize);
+ memset((char*)buf + xByteSize, 0, bufByteSize - xByteSize);
+ return true;
+}
+template<class T>
+void getArray(bool *pb, T *buf, size_t maxSize, const mpz_class& x)
+{
+#ifdef MCL_USE_VINT
+ *pb = getArray_(buf, maxSize, x.getUnit(), x.getUnitSize());
+#else
+ *pb = getArray_(buf, maxSize, x.get_mpz_t()->_mp_d, x.get_mpz_t()->_mp_size);
+#endif
+}
+inline void set(mpz_class& z, uint64_t x)
+{
+ bool b;
+ setArray(&b, z, &x, 1);
+ assert(b);
+ (void)b;
+}
+inline void setStr(bool *pb, mpz_class& z, const char *str, int base = 0)
+{
+#ifdef MCL_USE_VINT
+ z.setStr(pb, str, base);
+#else
+ *pb = z.set_str(str, base) == 0;
+#endif
+}
+
+/*
+	set buf to the string representation of z (in the given base), terminated by '\0'
+	return strlen(buf) on success, 0 otherwise
+*/
+inline size_t getStr(char *buf, size_t bufSize, const mpz_class& z, int base = 10)
+{
+#ifdef MCL_USE_VINT
+ return z.getStr(buf, bufSize, base);
+#else
+ __gmp_alloc_cstring tmp(mpz_get_str(0, base, z.get_mpz_t()));
+ size_t n = strlen(tmp.str);
+ if (n + 1 > bufSize) return 0;
+ memcpy(buf, tmp.str, n + 1);
+ return n;
+#endif
+}
+
+#ifndef CYBOZU_DONT_USE_STRING
+inline void getStr(std::string& str, const mpz_class& z, int base = 10)
+{
+#ifdef MCL_USE_VINT
+ z.getStr(str, base);
+#else
+ str = z.get_str(base);
+#endif
+}
+inline std::string getStr(const mpz_class& z, int base = 10)
+{
+ std::string s;
+ gmp::getStr(s, z, base);
+ return s;
+}
+#endif
+
+inline void add(mpz_class& z, const mpz_class& x, const mpz_class& y)
+{
+#ifdef MCL_USE_VINT
+ Vint::add(z, x, y);
+#else
+ mpz_add(z.get_mpz_t(), x.get_mpz_t(), y.get_mpz_t());
+#endif
+}
+#ifndef MCL_USE_VINT
+inline void add(mpz_class& z, const mpz_class& x, unsigned int y)
+{
+ mpz_add_ui(z.get_mpz_t(), x.get_mpz_t(), y);
+}
+inline void sub(mpz_class& z, const mpz_class& x, unsigned int y)
+{
+ mpz_sub_ui(z.get_mpz_t(), x.get_mpz_t(), y);
+}
+inline void mul(mpz_class& z, const mpz_class& x, unsigned int y)
+{
+ mpz_mul_ui(z.get_mpz_t(), x.get_mpz_t(), y);
+}
+inline void div(mpz_class& q, const mpz_class& x, unsigned int y)
+{
+ mpz_div_ui(q.get_mpz_t(), x.get_mpz_t(), y);
+}
+inline void mod(mpz_class& r, const mpz_class& x, unsigned int m)
+{
+ mpz_mod_ui(r.get_mpz_t(), x.get_mpz_t(), m);
+}
+inline int compare(const mpz_class& x, int y)
+{
+ return mpz_cmp_si(x.get_mpz_t(), y);
+}
+#endif
+inline void sub(mpz_class& z, const mpz_class& x, const mpz_class& y)
+{
+#ifdef MCL_USE_VINT
+ Vint::sub(z, x, y);
+#else
+ mpz_sub(z.get_mpz_t(), x.get_mpz_t(), y.get_mpz_t());
+#endif
+}
+inline void mul(mpz_class& z, const mpz_class& x, const mpz_class& y)
+{
+#ifdef MCL_USE_VINT
+ Vint::mul(z, x, y);
+#else
+ mpz_mul(z.get_mpz_t(), x.get_mpz_t(), y.get_mpz_t());
+#endif
+}
+inline void sqr(mpz_class& z, const mpz_class& x)
+{
+#ifdef MCL_USE_VINT
+ Vint::mul(z, x, x);
+#else
+ mpz_mul(z.get_mpz_t(), x.get_mpz_t(), x.get_mpz_t());
+#endif
+}
+inline void divmod(mpz_class& q, mpz_class& r, const mpz_class& x, const mpz_class& y)
+{
+#ifdef MCL_USE_VINT
+ Vint::divMod(&q, r, x, y);
+#else
+ mpz_divmod(q.get_mpz_t(), r.get_mpz_t(), x.get_mpz_t(), y.get_mpz_t());
+#endif
+}
+inline void div(mpz_class& q, const mpz_class& x, const mpz_class& y)
+{
+#ifdef MCL_USE_VINT
+ Vint::div(q, x, y);
+#else
+ mpz_div(q.get_mpz_t(), x.get_mpz_t(), y.get_mpz_t());
+#endif
+}
+inline void mod(mpz_class& r, const mpz_class& x, const mpz_class& m)
+{
+#ifdef MCL_USE_VINT
+ Vint::mod(r, x, m);
+#else
+ mpz_mod(r.get_mpz_t(), x.get_mpz_t(), m.get_mpz_t());
+#endif
+}
+inline void clear(mpz_class& z)
+{
+#ifdef MCL_USE_VINT
+ z.clear();
+#else
+ mpz_set_ui(z.get_mpz_t(), 0);
+#endif
+}
+inline bool isZero(const mpz_class& z)
+{
+#ifdef MCL_USE_VINT
+ return z.isZero();
+#else
+ return mpz_sgn(z.get_mpz_t()) == 0;
+#endif
+}
+inline bool isNegative(const mpz_class& z)
+{
+#ifdef MCL_USE_VINT
+ return z.isNegative();
+#else
+ return mpz_sgn(z.get_mpz_t()) < 0;
+#endif
+}
+inline void neg(mpz_class& z, const mpz_class& x)
+{
+#ifdef MCL_USE_VINT
+ Vint::neg(z, x);
+#else
+ mpz_neg(z.get_mpz_t(), x.get_mpz_t());
+#endif
+}
+inline int compare(const mpz_class& x, const mpz_class & y)
+{
+#ifdef MCL_USE_VINT
+ return Vint::compare(x, y);
+#else
+ return mpz_cmp(x.get_mpz_t(), y.get_mpz_t());
+#endif
+}
+template<class T>
+void addMod(mpz_class& z, const mpz_class& x, const T& y, const mpz_class& m)
+{
+ add(z, x, y);
+ if (compare(z, m) >= 0) {
+ sub(z, z, m);
+ }
+}
+template<class T>
+void subMod(mpz_class& z, const mpz_class& x, const T& y, const mpz_class& m)
+{
+ sub(z, x, y);
+ if (!isNegative(z)) return;
+ add(z, z, m);
+}
+template<class T>
+void mulMod(mpz_class& z, const mpz_class& x, const T& y, const mpz_class& m)
+{
+ mul(z, x, y);
+ mod(z, z, m);
+}
+inline void sqrMod(mpz_class& z, const mpz_class& x, const mpz_class& m)
+{
+ sqr(z, x);
+ mod(z, z, m);
+}
+// z = x^y (y >= 0)
+inline void pow(mpz_class& z, const mpz_class& x, unsigned int y)
+{
+#ifdef MCL_USE_VINT
+ Vint::pow(z, x, y);
+#else
+ mpz_pow_ui(z.get_mpz_t(), x.get_mpz_t(), y);
+#endif
+}
+// z = x^y mod m (y >=0)
+inline void powMod(mpz_class& z, const mpz_class& x, const mpz_class& y, const mpz_class& m)
+{
+#ifdef MCL_USE_VINT
+ Vint::powMod(z, x, y, m);
+#else
+ mpz_powm(z.get_mpz_t(), x.get_mpz_t(), y.get_mpz_t(), m.get_mpz_t());
+#endif
+}
+// z = 1/x mod m
+inline void invMod(mpz_class& z, const mpz_class& x, const mpz_class& m)
+{
+#ifdef MCL_USE_VINT
+ Vint::invMod(z, x, m);
+#else
+ mpz_invert(z.get_mpz_t(), x.get_mpz_t(), m.get_mpz_t());
+#endif
+}
+// z = lcm(x, y)
+inline void lcm(mpz_class& z, const mpz_class& x, const mpz_class& y)
+{
+#ifdef MCL_USE_VINT
+ Vint::lcm(z, x, y);
+#else
+ mpz_lcm(z.get_mpz_t(), x.get_mpz_t(), y.get_mpz_t());
+#endif
+}
+inline mpz_class lcm(const mpz_class& x, const mpz_class& y)
+{
+ mpz_class z;
+ lcm(z, x, y);
+ return z;
+}
+// z = gcd(x, y)
+inline void gcd(mpz_class& z, const mpz_class& x, const mpz_class& y)
+{
+#ifdef MCL_USE_VINT
+ Vint::gcd(z, x, y);
+#else
+ mpz_gcd(z.get_mpz_t(), x.get_mpz_t(), y.get_mpz_t());
+#endif
+}
+inline mpz_class gcd(const mpz_class& x, const mpz_class& y)
+{
+ mpz_class z;
+ gcd(z, x, y);
+ return z;
+}
+/*
+	assume p : odd prime
+	return 1 if a is a nonzero square mod p (x^2 = a mod p for some x)
+	return -1 if a is a non-square mod p
+	(return 0 if a = 0 mod p)
+*/
+inline int legendre(const mpz_class& a, const mpz_class& p)
+{
+#ifdef MCL_USE_VINT
+ return Vint::jacobi(a, p);
+#else
+ return mpz_legendre(a.get_mpz_t(), p.get_mpz_t());
+#endif
+}
+inline bool isPrime(bool *pb, const mpz_class& x)
+{
+#ifdef MCL_USE_VINT
+ return x.isPrime(pb, 32);
+#else
+ *pb = true;
+ return mpz_probab_prime_p(x.get_mpz_t(), 32) != 0;
+#endif
+}
+inline size_t getBitSize(const mpz_class& x)
+{
+#ifdef MCL_USE_VINT
+ return x.getBitSize();
+#else
+ return mpz_sizeinbase(x.get_mpz_t(), 2);
+#endif
+}
+inline bool testBit(const mpz_class& x, size_t pos)
+{
+#ifdef MCL_USE_VINT
+ return x.testBit(pos);
+#else
+ return mpz_tstbit(x.get_mpz_t(), pos) != 0;
+#endif
+}
+inline void resetBit(mpz_class& x, size_t pos)
+{
+#ifdef MCL_USE_VINT
+ x.setBit(pos, false);
+#else
+ mpz_clrbit(x.get_mpz_t(), pos);
+#endif
+}
+inline void setBit(mpz_class& x, size_t pos, bool v = true)
+{
+#ifdef MCL_USE_VINT
+ x.setBit(pos, v);
+#else
+ if (v) {
+ mpz_setbit(x.get_mpz_t(), pos);
+ } else {
+ resetBit(x, pos);
+ }
+#endif
+}
+inline const fp::Unit *getUnit(const mpz_class& x)
+{
+#ifdef MCL_USE_VINT
+ return x.getUnit();
+#else
+ return reinterpret_cast<const fp::Unit*>(x.get_mpz_t()->_mp_d);
+#endif
+}
+inline fp::Unit getUnit(const mpz_class& x, size_t i)
+{
+ return getUnit(x)[i];
+}
+inline size_t getUnitSize(const mpz_class& x)
+{
+#ifdef MCL_USE_VINT
+ return x.getUnitSize();
+#else
+ return std::abs(x.get_mpz_t()->_mp_size);
+#endif
+}
+inline mpz_class abs(const mpz_class& x)
+{
+#ifdef MCL_USE_VINT
+ return Vint::abs(x);
+#else
+ return ::abs(x);
+#endif
+}
+
+inline void getRand(bool *pb, mpz_class& z, size_t bitSize, fp::RandGen rg = fp::RandGen())
+{
+ if (rg.isZero()) rg = fp::RandGen::get();
+ assert(bitSize > 1);
+ const size_t rem = bitSize & 31;
+ const size_t n = (bitSize + 31) / 32;
+ uint32_t buf[128];
+ assert(n <= CYBOZU_NUM_OF_ARRAY(buf));
+ if (n > CYBOZU_NUM_OF_ARRAY(buf)) {
+ *pb = false;
+ return;
+ }
+ rg.read(pb, buf, n * sizeof(buf[0]));
+ if (!*pb) return;
+ uint32_t v = buf[n - 1];
+ if (rem == 0) {
+ v |= 1U << 31;
+ } else {
+ v &= (1U << rem) - 1;
+ v |= 1U << (rem - 1);
+ }
+ buf[n - 1] = v;
+ setArray(pb, z, buf, n);
+}
+
+inline void getRandPrime(bool *pb, mpz_class& z, size_t bitSize, fp::RandGen rg = fp::RandGen(), bool setSecondBit = false, bool mustBe3mod4 = false)
+{
+ if (rg.isZero()) rg = fp::RandGen::get();
+ assert(bitSize > 2);
+ for (;;) {
+ getRand(pb, z, bitSize, rg);
+ if (!*pb) return;
+ if (setSecondBit) {
+ z |= mpz_class(1) << (bitSize - 2);
+ }
+ if (mustBe3mod4) {
+ z |= 3;
+ }
+ bool ret = isPrime(pb, z);
+ if (!*pb) return;
+ if (ret) return;
+ }
+}
+inline mpz_class getQuadraticNonResidue(const mpz_class& p)
+{
+ mpz_class g = 2;
+ while (legendre(g, p) > 0) {
+ ++g;
+ }
+ return g;
+}
+
+namespace impl {
+
+template<class Vec>
+void convertToBinary(Vec& v, const mpz_class& x)
+{
+ const size_t len = gmp::getBitSize(x);
+ v.resize(len);
+ for (size_t i = 0; i < len; i++) {
+ v[i] = gmp::testBit(x, len - 1 - i) ? 1 : 0;
+ }
+}
+
+template<class Vec>
+size_t getContinuousVal(const Vec& v, size_t pos, int val)
+{
+ while (pos >= 2) {
+ if (v[pos] != val) break;
+ pos--;
+ }
+ return pos;
+}
+
+template<class Vec>
+void convertToNAF(Vec& v, const Vec& in)
+{
+ v.copy(in);
+ size_t pos = v.size() - 1;
+ for (;;) {
+ size_t p = getContinuousVal(v, pos, 0);
+ if (p == 1) return;
+ assert(v[p] == 1);
+ size_t q = getContinuousVal(v, p, 1);
+ if (q == 1) return;
+ assert(v[q] == 0);
+ if (p - q <= 1) {
+ pos = p - 1;
+ continue;
+ }
+ v[q] = 1;
+ for (size_t i = q + 1; i < p; i++) {
+ v[i] = 0;
+ }
+ v[p] = -1;
+ pos = q;
+ }
+}
+
+template<class Vec>
+size_t getNumOfNonZeroElement(const Vec& v)
+{
+ size_t w = 0;
+ for (size_t i = 0; i < v.size(); i++) {
+ if (v[i]) w++;
+ }
+ return w;
+}
+
+} // impl
+
+/*
+	compute a signed-digit representation of x with smaller Hamming weight.
+	return true if the NAF form is selected
+*/
+template<class Vec>
+bool getNAF(Vec& v, const mpz_class& x)
+{
+ Vec bin;
+ impl::convertToBinary(bin, x);
+ Vec naf;
+ impl::convertToNAF(naf, bin);
+ const size_t binW = impl::getNumOfNonZeroElement(bin);
+ const size_t nafW = impl::getNumOfNonZeroElement(naf);
+ if (nafW < binW) {
+ v.swap(naf);
+ return true;
+ } else {
+ v.swap(bin);
+ return false;
+ }
+}
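+/*
+	Editor's note (not part of mcl): a worked example of the rewrite above.
+	x = 312 = 100111000 in binary has Hamming weight 4; the inner run of ones is
+	replaced by a borrow, giving the signed digits 1 0 1 0 0 -1 0 0 0
+	(256 + 64 - 8, weight 3), so getNAF() selects the NAF form and returns true.
+*/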
+
+#ifndef CYBOZU_DONT_USE_EXCEPTION
+inline void setStr(mpz_class& z, const std::string& str, int base = 0)
+{
+ bool b;
+ setStr(&b, z, str.c_str(), base);
+ if (!b) throw cybozu::Exception("gmp:setStr");
+}
+template<class T>
+void setArray(mpz_class& z, const T *buf, size_t n)
+{
+ bool b;
+ setArray(&b, z, buf, n);
+ if (!b) throw cybozu::Exception("gmp:setArray");
+}
+template<class T>
+void getArray(T *buf, size_t maxSize, const mpz_class& x)
+{
+ bool b;
+ getArray(&b, buf, maxSize, x);
+ if (!b) throw cybozu::Exception("gmp:getArray");
+}
+inline bool isPrime(const mpz_class& x)
+{
+ bool b;
+ bool ret = isPrime(&b, x);
+ if (!b) throw cybozu::Exception("gmp:isPrime");
+ return ret;
+}
+inline void getRand(mpz_class& z, size_t bitSize, fp::RandGen rg = fp::RandGen())
+{
+ bool b;
+ getRand(&b, z, bitSize, rg);
+ if (!b) throw cybozu::Exception("gmp:getRand");
+}
+inline void getRandPrime(mpz_class& z, size_t bitSize, fp::RandGen rg = fp::RandGen(), bool setSecondBit = false, bool mustBe3mod4 = false)
+{
+ bool b;
+ getRandPrime(&b, z, bitSize, rg, setSecondBit, mustBe3mod4);
+ if (!b) throw cybozu::Exception("gmp:getRandPrime");
+}
+#endif
+
+
+} // mcl::gmp
+
+/*
+ Tonelli-Shanks
+*/
+class SquareRoot {
+ bool isPrecomputed_;
+ bool isPrime;
+ mpz_class p;
+ mpz_class g;
+ int r;
+ mpz_class q; // p - 1 = 2^r q
+ mpz_class s; // s = g^q
+ mpz_class q_add_1_div_2;
+ struct Tbl {
+ const char *p;
+ const char *g;
+ int r;
+ const char *q;
+ const char *s;
+ const char *q_add_1_div_2;
+ };
+ bool setIfPrecomputed(const mpz_class& p_)
+ {
+ static const Tbl tbl[] = {
+ { // BN254.p
+ "2523648240000001ba344d80000000086121000000000013a700000000000013",
+ "2",
+ 1,
+ "1291b24120000000dd1a26c0000000043090800000000009d380000000000009",
+ "2523648240000001ba344d80000000086121000000000013a700000000000012",
+ "948d920900000006e8d1360000000021848400000000004e9c0000000000005",
+ },
+ { // BN254.r
+ "2523648240000001ba344d8000000007ff9f800000000010a10000000000000d",
+ "2",
+ 2,
+ "948d920900000006e8d136000000001ffe7e000000000042840000000000003",
+ "9366c4800000000555150000000000122400000000000015",
+ "4a46c9048000000374689b000000000fff3f000000000021420000000000002",
+ },
+ { // BLS12_381,p
+ "1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaab",
+ "2",
+ 1,
+ "d0088f51cbff34d258dd3db21a5d66bb23ba5c279c2895fb39869507b587b120f55ffff58a9ffffdcff7fffffffd555",
+ "1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaaa",
+ "680447a8e5ff9a692c6e9ed90d2eb35d91dd2e13ce144afd9cc34a83dac3d8907aaffffac54ffffee7fbfffffffeaab",
+ },
+ { // BLS12_381.r
+ "73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001",
+ "5",
+ 32,
+ "73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff",
+ "212d79e5b416b6f0fd56dc8d168d6c0c4024ff270b3e0941b788f500b912f1f",
+ "39f6d3a994cebea4199cec0404d0ec02a9ded2017fff2dff80000000",
+ },
+ };
+ for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) {
+ mpz_class targetPrime;
+ bool b;
+ mcl::gmp::setStr(&b, targetPrime, tbl[i].p, 16);
+ if (!b) continue;
+ if (targetPrime != p_) continue;
+ isPrime = true;
+ p = p_;
+ mcl::gmp::setStr(&b, g, tbl[i].g, 16);
+ if (!b) continue;
+ r = tbl[i].r;
+ mcl::gmp::setStr(&b, q, tbl[i].q, 16);
+ if (!b) continue;
+ mcl::gmp::setStr(&b, s, tbl[i].s, 16);
+ if (!b) continue;
+ mcl::gmp::setStr(&b, q_add_1_div_2, tbl[i].q_add_1_div_2, 16);
+ if (!b) continue;
+ isPrecomputed_ = true;
+ return true;
+ }
+ return false;
+ }
+public:
+ SquareRoot() { clear(); }
+ bool isPrecomputed() const { return isPrecomputed_; }
+ void clear()
+ {
+ isPrecomputed_ = false;
+ isPrime = false;
+ p = 0;
+ g = 0;
+ r = 0;
+ q = 0;
+ s = 0;
+ q_add_1_div_2 = 0;
+ }
+#if !defined(CYBOZU_DONT_USE_USE_STRING) && !defined(CYBOZU_DONT_USE_EXCEPTION)
+ void dump() const
+ {
+ printf("\"%s\",\n", mcl::gmp::getStr(p, 16).c_str());
+ printf("\"%s\",\n", mcl::gmp::getStr(g, 16).c_str());
+ printf("%d,\n", r);
+ printf("\"%s\",\n", mcl::gmp::getStr(q, 16).c_str());
+ printf("\"%s\",\n", mcl::gmp::getStr(s, 16).c_str());
+ printf("\"%s\",\n", mcl::gmp::getStr(q_add_1_div_2, 16).c_str());
+ }
+#endif
+ void set(bool *pb, const mpz_class& _p, bool usePrecomputedTable = true)
+ {
+ if (usePrecomputedTable && setIfPrecomputed(_p)) {
+ *pb = true;
+ return;
+ }
+ p = _p;
+ if (p <= 2) {
+ *pb = false;
+ return;
+ }
+ isPrime = gmp::isPrime(pb, p);
+ if (!*pb) return;
+ if (!isPrime) {
+ *pb = false;
+ return;
+ }
+ g = gmp::getQuadraticNonResidue(p);
+ // p - 1 = 2^r q, q is odd
+ r = 0;
+ q = p - 1;
+ while ((q & 1) == 0) {
+ r++;
+ q /= 2;
+ }
+ gmp::powMod(s, g, q, p);
+ q_add_1_div_2 = (q + 1) / 2;
+ *pb = true;
+ }
+ /*
+ solve x^2 = a mod p
+ */
+ bool get(mpz_class& x, const mpz_class& a) const
+ {
+ if (!isPrime) {
+ return false;
+ }
+ if (a == 0) {
+ x = 0;
+ return true;
+ }
+ if (gmp::legendre(a, p) < 0) return false;
+ if (r == 1) {
+ // (p + 1) / 4 = (q + 1) / 2
+ gmp::powMod(x, a, q_add_1_div_2, p);
+ return true;
+ }
+ mpz_class c = s, d;
+ int e = r;
+ gmp::powMod(d, a, q, p);
+ gmp::powMod(x, a, q_add_1_div_2, p); // destroy a if &x == &a
+ mpz_class dd;
+ mpz_class b;
+ while (d != 1) {
+ int i = 1;
+ dd = d * d; dd %= p;
+ while (dd != 1) {
+ dd *= dd; dd %= p;
+ i++;
+ }
+ b = 1;
+ b <<= e - i - 1;
+ gmp::powMod(b, c, b, p);
+ x *= b; x %= p;
+ c = b * b; c %= p;
+ d *= c; d %= p;
+ e = i;
+ }
+ return true;
+ }
+ /*
+ solve x^2 = a in Fp
+ */
+ template<class Fp>
+ bool get(Fp& x, const Fp& a) const
+ {
+ assert(Fp::getOp().mp == p);
+ if (a == 0) {
+ x = 0;
+ return true;
+ }
+ {
+ bool b;
+ mpz_class aa;
+ a.getMpz(&b, aa);
+ assert(b);
+ if (gmp::legendre(aa, p) < 0) return false;
+ }
+ if (r == 1) {
+ // (p + 1) / 4 = (q + 1) / 2
+ Fp::pow(x, a, q_add_1_div_2);
+ return true;
+ }
+ Fp c, d;
+ {
+ bool b;
+ c.setMpz(&b, s);
+ assert(b);
+ }
+ int e = r;
+ Fp::pow(d, a, q);
+ Fp::pow(x, a, q_add_1_div_2); // destroy a if &x == &a
+ Fp dd;
+ Fp b;
+ while (!d.isOne()) {
+ int i = 1;
+ Fp::sqr(dd, d);
+ while (!dd.isOne()) {
+ dd *= dd;
+ i++;
+ }
+ b = 1;
+// b <<= e - i - 1;
+ for (int j = 0; j < e - i - 1; j++) {
+ b += b;
+ }
+ Fp::pow(b, c, b);
+ x *= b;
+ Fp::sqr(c, b);
+ d *= c;
+ e = i;
+ }
+ return true;
+ }
+ bool operator==(const SquareRoot& rhs) const
+ {
+ return isPrime == rhs.isPrime && p == rhs.p && g == rhs.g && r == rhs.r
+ && q == rhs.q && s == rhs.s && q_add_1_div_2 == rhs.q_add_1_div_2;
+ }
+ bool operator!=(const SquareRoot& rhs) const { return !operator==(rhs); }
+#ifndef CYBOZU_DONT_USE_EXCEPTION
+ void set(const mpz_class& _p)
+ {
+ bool b;
+ set(&b, _p);
+ if (!b) throw cybozu::Exception("gmp:SquareRoot:set");
+ }
+#endif
+};
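+/*
+	Editor's sketch (not part of mcl): typical use of SquareRoot via the set/get
+	members above; p stands for any odd prime modulus held in an mpz_class.
+*/
+#if 0
+	mcl::SquareRoot sq;
+	bool ok;
+	sq.set(&ok, p);
+	mpz_class x;
+	if (ok && sq.get(x, mpz_class(4))) {
+		// here (x * x) % p == 4
+	}
+#endif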
+
+/*
+	Barrett reduction, intended for the non-GMP (Vint) build
+	note: GMP's mod is faster than Modp
+*/
+struct Modp {
+ static const size_t unitBitSize = sizeof(mcl::fp::Unit) * 8;
+ mpz_class p_;
+ mpz_class u_;
+ mpz_class a_;
+ size_t pBitSize_;
+ size_t N_;
+ bool initU_; // Is u_ initialized?
+ Modp()
+ : pBitSize_(0)
+ , N_(0)
+ , initU_(false)
+ {
+ }
+	// x &= (1 << (unitBitSize * unitSize)) - 1, i.e. keep only the low unitSize units of x
+ void shrinkSize(mpz_class &x, size_t unitSize) const
+ {
+ size_t u = gmp::getUnitSize(x);
+ if (u < unitSize) return;
+ bool b;
+ gmp::setArray(&b, x, gmp::getUnit(x), unitSize);
+ (void)b;
+ assert(b);
+ }
+ // p_ is set by p and compute (u_, a_) if possible
+ void init(const mpz_class& p)
+ {
+ p_ = p;
+ pBitSize_ = gmp::getBitSize(p);
+ N_ = (pBitSize_ + unitBitSize - 1) / unitBitSize;
+ initU_ = false;
+#if 0
+ u_ = (mpz_class(1) << (unitBitSize * 2 * N_)) / p_;
+#else
+		/*
+			1 << (unitBitSize * 2 * N_) may overflow,
+			so use (1 << (unitBitSize * 2 * N_)) - 1 instead; the resulting u_ is the same.
+		*/
+ uint8_t buf[48 * 2];
+ const size_t byteSize = unitBitSize / 8 * 2 * N_;
+ if (byteSize > sizeof(buf)) return;
+ memset(buf, 0xff, byteSize);
+ bool b;
+ gmp::setArray(&b, u_, buf, byteSize);
+ if (!b) return;
+#endif
+ u_ /= p_;
+ a_ = mpz_class(1) << (unitBitSize * (N_ + 1));
+ initU_ = true;
+ }
+ void modp(mpz_class& r, const mpz_class& t) const
+ {
+ assert(p_ > 0);
+ const size_t tBitSize = gmp::getBitSize(t);
+ // use gmp::mod if init() fails or t is too large
+ if (tBitSize > unitBitSize * 2 * N_ || !initU_) {
+ gmp::mod(r, t, p_);
+ return;
+ }
+ if (tBitSize < pBitSize_) {
+ r = t;
+ return;
+ }
+ // mod is faster than modp if t is small
+ if (tBitSize <= unitBitSize * N_) {
+ gmp::mod(r, t, p_);
+ return;
+ }
+ mpz_class q;
+ q = t;
+ q >>= unitBitSize * (N_ - 1);
+ q *= u_;
+ q >>= unitBitSize * (N_ + 1);
+ q *= p_;
+ shrinkSize(q, N_ + 1);
+ r = t;
+ shrinkSize(r, N_ + 1);
+ r -= q;
+ if (r < 0) {
+ r += a_;
+ }
+ if (r >= p_) {
+ r -= p_;
+ }
+ }
+};
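+/*
+	Editor's sketch (not part of mcl): intended use of Modp; init() precomputes
+	u_ (roughly 2^(2 * unitBitSize * N_) / p) once, then modp() reduces values
+	smaller than p^2 using the precomputed reciprocal where possible.
+*/
+#if 0
+	mcl::Modp modp;
+	modp.init(p);            // p : modulus (mpz_class)
+	mpz_class r, t;
+	t = x * y;               // t < p^2
+	modp.modp(r, t);         // r = t mod p
+#endif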
+
+} // mcl
diff --git a/vendor/github.com/tangerine-network/mcl/include/mcl/impl/bn_c_impl.hpp b/vendor/github.com/tangerine-network/mcl/include/mcl/impl/bn_c_impl.hpp
new file mode 100644
index 000000000..bec2466dd
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/mcl/impl/bn_c_impl.hpp
@@ -0,0 +1,643 @@
+/*
+ This is an internal header
+ Do not include this
+*/
+#define MCLBN_DLL_EXPORT
+#include <mcl/bn.h>
+
+#if MCLBN_FP_UNIT_SIZE == 4 && MCLBN_FR_UNIT_SIZE == 4
+#include <mcl/bn256.hpp>
+#elif MCLBN_FP_UNIT_SIZE == 6 && MCLBN_FR_UNIT_SIZE == 6
+#include <mcl/bn384.hpp>
+#elif MCLBN_FP_UNIT_SIZE == 6 && MCLBN_FR_UNIT_SIZE == 4
+#include <mcl/bls12_381.hpp>
+#elif MCLBN_FP_UNIT_SIZE == 8 && MCLBN_FR_UNIT_SIZE == 8
+#include <mcl/bn512.hpp>
+#else
+ #error "not supported size"
+#endif
+#include <mcl/lagrange.hpp>
+#include <mcl/ecparam.hpp>
+using namespace mcl::bn;
+
+static Fr *cast(mclBnFr *p) { return reinterpret_cast<Fr*>(p); }
+static const Fr *cast(const mclBnFr *p) { return reinterpret_cast<const Fr*>(p); }
+
+static G1 *cast(mclBnG1 *p) { return reinterpret_cast<G1*>(p); }
+static const G1 *cast(const mclBnG1 *p) { return reinterpret_cast<const G1*>(p); }
+
+static G2 *cast(mclBnG2 *p) { return reinterpret_cast<G2*>(p); }
+static const G2 *cast(const mclBnG2 *p) { return reinterpret_cast<const G2*>(p); }
+
+static Fp12 *cast(mclBnGT *p) { return reinterpret_cast<Fp12*>(p); }
+static const Fp12 *cast(const mclBnGT *p) { return reinterpret_cast<const Fp12*>(p); }
+
+static Fp6 *cast(uint64_t *p) { return reinterpret_cast<Fp6*>(p); }
+static const Fp6 *cast(const uint64_t *p) { return reinterpret_cast<const Fp6*>(p); }
+
+static Fp2 *cast(mclBnFp2 *p) { return reinterpret_cast<Fp2*>(p); }
+static const Fp2 *cast(const mclBnFp2 *p) { return reinterpret_cast<const Fp2*>(p); }
+
+static Fp *cast(mclBnFp *p) { return reinterpret_cast<Fp*>(p); }
+static const Fp *cast(const mclBnFp *p) { return reinterpret_cast<const Fp*>(p); }
+
+template<class T>
+int setStr(T *x, const char *buf, mclSize bufSize, int ioMode)
+{
+ size_t n = cast(x)->deserialize(buf, bufSize, ioMode);
+ return n > 0 ? 0 : -1;
+}
+
+#ifdef __EMSCRIPTEN__
+// force these functions to be exported
+extern "C" MCLBN_DLL_API void *mclBnMalloc(size_t n)
+{
+ return malloc(n);
+}
+extern "C" MCLBN_DLL_API void mclBnFree(void *p)
+{
+ free(p);
+}
+#endif
+
+int mclBn_getVersion()
+{
+ return mcl::version;
+}
+
+int mclBn_init(int curve, int compiledTimeVar)
+{
+ if (compiledTimeVar != MCLBN_COMPILED_TIME_VAR) {
+ return -(compiledTimeVar | (MCLBN_COMPILED_TIME_VAR * 100));
+ }
+ if (MCL_EC_BEGIN <= curve && curve < MCL_EC_END) {
+ const mcl::EcParam *para = mcl::getEcParam(curve);
+ if (para == 0) return -2;
+ bool b;
+ initG1only(&b, *para);
+ return b ? 0 : -1;
+ }
+ const mcl::CurveParam& cp = mcl::getCurveParam(curve);
+ bool b;
+ initPairing(&b, cp);
+ return b ? 0 : -1;
+}
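+/*
+	Editor's sketch (not part of this file): minimal client-side use of the C API
+	implemented here; MCL_BN254 is assumed to be one of the curve constants
+	declared in <mcl/bn.h>.
+*/
+#if 0
+	if (mclBn_init(MCL_BN254, MCLBN_COMPILED_TIME_VAR) != 0) return;
+	mclBnG1 P; mclBnG2 Q; mclBnGT e;
+	mclBnG1_hashAndMapTo(&P, "abc", 3);
+	mclBnG2_hashAndMapTo(&Q, "xyz", 3);
+	mclBn_pairing(&e, &P, &Q); // e = e(P, Q)
+#endif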
+
+int mclBn_getOpUnitSize()
+{
+ return (int)Fp::getUnitSize() * sizeof(mcl::fp::Unit) / sizeof(uint64_t);
+}
+
+int mclBn_getG1ByteSize()
+{
+ return mclBn_getFpByteSize();
+}
+
+int mclBn_getFrByteSize()
+{
+ return (int)Fr::getByteSize();
+}
+
+int mclBn_getFpByteSize()
+{
+ return (int)Fp::getByteSize();
+}
+
+mclSize mclBn_getCurveOrder(char *buf, mclSize maxBufSize)
+{
+ return Fr::getModulo(buf, maxBufSize);
+}
+
+mclSize mclBn_getFieldOrder(char *buf, mclSize maxBufSize)
+{
+ return Fp::getModulo(buf, maxBufSize);
+}
+
+////////////////////////////////////////////////
+// set zero
+void mclBnFr_clear(mclBnFr *x)
+{
+ cast(x)->clear();
+}
+
+// set x to y
+void mclBnFr_setInt(mclBnFr *y, mclInt x)
+{
+ *cast(y) = x;
+}
+void mclBnFr_setInt32(mclBnFr *y, int x)
+{
+ *cast(y) = x;
+}
+
+int mclBnFr_setStr(mclBnFr *x, const char *buf, mclSize bufSize, int ioMode)
+{
+ return setStr(x, buf, bufSize, ioMode);
+}
+int mclBnFr_setLittleEndian(mclBnFr *x, const void *buf, mclSize bufSize)
+{
+ cast(x)->setArrayMask((const char *)buf, bufSize);
+ return 0;
+}
+int mclBnFr_setLittleEndianMod(mclBnFr *x, const void *buf, mclSize bufSize)
+{
+ bool b;
+ cast(x)->setArray(&b, (const char *)buf, bufSize, mcl::fp::Mod);
+ return b ? 0 : -1;
+}
+mclSize mclBnFr_deserialize(mclBnFr *x, const void *buf, mclSize bufSize)
+{
+ return (mclSize)cast(x)->deserialize(buf, bufSize);
+}
+// return 1 if true
+int mclBnFr_isValid(const mclBnFr *x)
+{
+ return cast(x)->isValid();
+}
+int mclBnFr_isEqual(const mclBnFr *x, const mclBnFr *y)
+{
+ return *cast(x) == *cast(y);
+}
+int mclBnFr_isZero(const mclBnFr *x)
+{
+ return cast(x)->isZero();
+}
+int mclBnFr_isOne(const mclBnFr *x)
+{
+ return cast(x)->isOne();
+}
+
+#ifndef MCL_DONT_USE_CSRPNG
+int mclBnFr_setByCSPRNG(mclBnFr *x)
+{
+ bool b;
+ cast(x)->setByCSPRNG(&b);
+ return b ? 0 : -1;
+}
+void mclBn_setRandFunc(void *self, unsigned int (*readFunc)(void *self, void *buf, unsigned int bufSize))
+{
+ mcl::fp::RandGen::setRandFunc(self, readFunc);
+}
+#endif
+
+// hash(buf) and set x
+int mclBnFr_setHashOf(mclBnFr *x, const void *buf, mclSize bufSize)
+{
+ cast(x)->setHashOf(buf, bufSize);
+ return 0;
+}
+
+mclSize mclBnFr_getStr(char *buf, mclSize maxBufSize, const mclBnFr *x, int ioMode)
+{
+ return cast(x)->getStr(buf, maxBufSize, ioMode);
+}
+mclSize mclBnFr_serialize(void *buf, mclSize maxBufSize, const mclBnFr *x)
+{
+ return (mclSize)cast(x)->serialize(buf, maxBufSize);
+}
+
+void mclBnFr_neg(mclBnFr *y, const mclBnFr *x)
+{
+ Fr::neg(*cast(y), *cast(x));
+}
+void mclBnFr_inv(mclBnFr *y, const mclBnFr *x)
+{
+ Fr::inv(*cast(y), *cast(x));
+}
+void mclBnFr_sqr(mclBnFr *y, const mclBnFr *x)
+{
+ Fr::sqr(*cast(y), *cast(x));
+}
+void mclBnFr_add(mclBnFr *z, const mclBnFr *x, const mclBnFr *y)
+{
+ Fr::add(*cast(z),*cast(x), *cast(y));
+}
+void mclBnFr_sub(mclBnFr *z, const mclBnFr *x, const mclBnFr *y)
+{
+ Fr::sub(*cast(z),*cast(x), *cast(y));
+}
+void mclBnFr_mul(mclBnFr *z, const mclBnFr *x, const mclBnFr *y)
+{
+ Fr::mul(*cast(z),*cast(x), *cast(y));
+}
+void mclBnFr_div(mclBnFr *z, const mclBnFr *x, const mclBnFr *y)
+{
+ Fr::div(*cast(z),*cast(x), *cast(y));
+}
+
+////////////////////////////////////////////////
+// set zero
+void mclBnG1_clear(mclBnG1 *x)
+{
+ cast(x)->clear();
+}
+
+int mclBnG1_setStr(mclBnG1 *x, const char *buf, mclSize bufSize, int ioMode)
+{
+ return setStr(x, buf, bufSize, ioMode);
+}
+mclSize mclBnG1_deserialize(mclBnG1 *x, const void *buf, mclSize bufSize)
+{
+ return (mclSize)cast(x)->deserialize(buf, bufSize);
+}
+
+// return 1 if true
+int mclBnG1_isValid(const mclBnG1 *x)
+{
+ return cast(x)->isValid();
+}
+int mclBnG1_isEqual(const mclBnG1 *x, const mclBnG1 *y)
+{
+ return *cast(x) == *cast(y);
+}
+int mclBnG1_isZero(const mclBnG1 *x)
+{
+ return cast(x)->isZero();
+}
+int mclBnG1_isValidOrder(const mclBnG1 *x)
+{
+ return cast(x)->isValidOrder();
+}
+
+int mclBnG1_hashAndMapTo(mclBnG1 *x, const void *buf, mclSize bufSize)
+{
+ hashAndMapToG1(*cast(x), buf, bufSize);
+ return 0;
+}
+
+mclSize mclBnG1_getStr(char *buf, mclSize maxBufSize, const mclBnG1 *x, int ioMode)
+{
+ return cast(x)->getStr(buf, maxBufSize, ioMode);
+}
+
+mclSize mclBnG1_serialize(void *buf, mclSize maxBufSize, const mclBnG1 *x)
+{
+ return (mclSize)cast(x)->serialize(buf, maxBufSize);
+}
+
+void mclBnG1_neg(mclBnG1 *y, const mclBnG1 *x)
+{
+ G1::neg(*cast(y), *cast(x));
+}
+void mclBnG1_dbl(mclBnG1 *y, const mclBnG1 *x)
+{
+ G1::dbl(*cast(y), *cast(x));
+}
+void mclBnG1_normalize(mclBnG1 *y, const mclBnG1 *x)
+{
+ G1::normalize(*cast(y), *cast(x));
+}
+void mclBnG1_add(mclBnG1 *z, const mclBnG1 *x, const mclBnG1 *y)
+{
+ G1::add(*cast(z),*cast(x), *cast(y));
+}
+void mclBnG1_sub(mclBnG1 *z, const mclBnG1 *x, const mclBnG1 *y)
+{
+ G1::sub(*cast(z),*cast(x), *cast(y));
+}
+void mclBnG1_mul(mclBnG1 *z, const mclBnG1 *x, const mclBnFr *y)
+{
+ G1::mul(*cast(z),*cast(x), *cast(y));
+}
+void mclBnG1_mulCT(mclBnG1 *z, const mclBnG1 *x, const mclBnFr *y)
+{
+ G1::mulCT(*cast(z),*cast(x), *cast(y));
+}
+
+////////////////////////////////////////////////
+// set zero
+void mclBnG2_clear(mclBnG2 *x)
+{
+ cast(x)->clear();
+}
+
+int mclBnG2_setStr(mclBnG2 *x, const char *buf, mclSize bufSize, int ioMode)
+{
+ return setStr(x, buf, bufSize, ioMode);
+}
+mclSize mclBnG2_deserialize(mclBnG2 *x, const void *buf, mclSize bufSize)
+{
+ return (mclSize)cast(x)->deserialize(buf, bufSize);
+}
+
+// return 1 if true
+int mclBnG2_isValid(const mclBnG2 *x)
+{
+ return cast(x)->isValid();
+}
+int mclBnG2_isEqual(const mclBnG2 *x, const mclBnG2 *y)
+{
+ return *cast(x) == *cast(y);
+}
+int mclBnG2_isZero(const mclBnG2 *x)
+{
+ return cast(x)->isZero();
+}
+int mclBnG2_isValidOrder(const mclBnG2 *x)
+{
+ return cast(x)->isValidOrder();
+}
+
+int mclBnG2_hashAndMapTo(mclBnG2 *x, const void *buf, mclSize bufSize)
+{
+ hashAndMapToG2(*cast(x), buf, bufSize);
+ return 0;
+}
+
+mclSize mclBnG2_getStr(char *buf, mclSize maxBufSize, const mclBnG2 *x, int ioMode)
+{
+ return cast(x)->getStr(buf, maxBufSize, ioMode);
+}
+
+mclSize mclBnG2_serialize(void *buf, mclSize maxBufSize, const mclBnG2 *x)
+{
+ return (mclSize)cast(x)->serialize(buf, maxBufSize);
+}
+
+void mclBnG2_neg(mclBnG2 *y, const mclBnG2 *x)
+{
+ G2::neg(*cast(y), *cast(x));
+}
+void mclBnG2_dbl(mclBnG2 *y, const mclBnG2 *x)
+{
+ G2::dbl(*cast(y), *cast(x));
+}
+void mclBnG2_normalize(mclBnG2 *y, const mclBnG2 *x)
+{
+ G2::normalize(*cast(y), *cast(x));
+}
+void mclBnG2_add(mclBnG2 *z, const mclBnG2 *x, const mclBnG2 *y)
+{
+ G2::add(*cast(z),*cast(x), *cast(y));
+}
+void mclBnG2_sub(mclBnG2 *z, const mclBnG2 *x, const mclBnG2 *y)
+{
+ G2::sub(*cast(z),*cast(x), *cast(y));
+}
+void mclBnG2_mul(mclBnG2 *z, const mclBnG2 *x, const mclBnFr *y)
+{
+ G2::mul(*cast(z),*cast(x), *cast(y));
+}
+void mclBnG2_mulCT(mclBnG2 *z, const mclBnG2 *x, const mclBnFr *y)
+{
+ G2::mulCT(*cast(z),*cast(x), *cast(y));
+}
+
+////////////////////////////////////////////////
+// set zero
+void mclBnGT_clear(mclBnGT *x)
+{
+ cast(x)->clear();
+}
+void mclBnGT_setInt(mclBnGT *y, mclInt x)
+{
+ cast(y)->clear();
+ *(cast(y)->getFp0()) = x;
+}
+void mclBnGT_setInt32(mclBnGT *y, int x)
+{
+ cast(y)->clear();
+ *(cast(y)->getFp0()) = x;
+}
+
+int mclBnGT_setStr(mclBnGT *x, const char *buf, mclSize bufSize, int ioMode)
+{
+ return setStr(x, buf, bufSize, ioMode);
+}
+mclSize mclBnGT_deserialize(mclBnGT *x, const void *buf, mclSize bufSize)
+{
+ return (mclSize)cast(x)->deserialize(buf, bufSize);
+}
+
+// return 1 if true
+int mclBnGT_isEqual(const mclBnGT *x, const mclBnGT *y)
+{
+ return *cast(x) == *cast(y);
+}
+int mclBnGT_isZero(const mclBnGT *x)
+{
+ return cast(x)->isZero();
+}
+int mclBnGT_isOne(const mclBnGT *x)
+{
+ return cast(x)->isOne();
+}
+
+mclSize mclBnGT_getStr(char *buf, mclSize maxBufSize, const mclBnGT *x, int ioMode)
+{
+ return cast(x)->getStr(buf, maxBufSize, ioMode);
+}
+
+mclSize mclBnGT_serialize(void *buf, mclSize maxBufSize, const mclBnGT *x)
+{
+ return (mclSize)cast(x)->serialize(buf, maxBufSize);
+}
+
+void mclBnGT_neg(mclBnGT *y, const mclBnGT *x)
+{
+ Fp12::neg(*cast(y), *cast(x));
+}
+void mclBnGT_inv(mclBnGT *y, const mclBnGT *x)
+{
+ Fp12::inv(*cast(y), *cast(x));
+}
+void mclBnGT_sqr(mclBnGT *y, const mclBnGT *x)
+{
+ Fp12::sqr(*cast(y), *cast(x));
+}
+void mclBnGT_add(mclBnGT *z, const mclBnGT *x, const mclBnGT *y)
+{
+ Fp12::add(*cast(z),*cast(x), *cast(y));
+}
+void mclBnGT_sub(mclBnGT *z, const mclBnGT *x, const mclBnGT *y)
+{
+ Fp12::sub(*cast(z),*cast(x), *cast(y));
+}
+void mclBnGT_mul(mclBnGT *z, const mclBnGT *x, const mclBnGT *y)
+{
+ Fp12::mul(*cast(z),*cast(x), *cast(y));
+}
+void mclBnGT_div(mclBnGT *z, const mclBnGT *x, const mclBnGT *y)
+{
+ Fp12::div(*cast(z),*cast(x), *cast(y));
+}
+
+void mclBnGT_pow(mclBnGT *z, const mclBnGT *x, const mclBnFr *y)
+{
+ Fp12::pow(*cast(z), *cast(x), *cast(y));
+}
+void mclBnGT_powGeneric(mclBnGT *z, const mclBnGT *x, const mclBnFr *y)
+{
+ Fp12::powGeneric(*cast(z), *cast(x), *cast(y));
+}
+
+void mclBn_pairing(mclBnGT *z, const mclBnG1 *x, const mclBnG2 *y)
+{
+ pairing(*cast(z), *cast(x), *cast(y));
+}
+void mclBn_finalExp(mclBnGT *y, const mclBnGT *x)
+{
+ finalExp(*cast(y), *cast(x));
+}
+void mclBn_millerLoop(mclBnGT *z, const mclBnG1 *x, const mclBnG2 *y)
+{
+ millerLoop(*cast(z), *cast(x), *cast(y));
+}
+int mclBn_getUint64NumToPrecompute(void)
+{
+ return int(BN::param.precomputedQcoeffSize * sizeof(Fp6) / sizeof(uint64_t));
+}
+
+void mclBn_precomputeG2(uint64_t *Qbuf, const mclBnG2 *Q)
+{
+ precomputeG2(cast(Qbuf), *cast(Q));
+}
+
+void mclBn_precomputedMillerLoop(mclBnGT *f, const mclBnG1 *P, const uint64_t *Qbuf)
+{
+ precomputedMillerLoop(*cast(f), *cast(P), cast(Qbuf));
+}
+
+void mclBn_precomputedMillerLoop2(mclBnGT *f, const mclBnG1 *P1, const uint64_t *Q1buf, const mclBnG1 *P2, const uint64_t *Q2buf)
+{
+ precomputedMillerLoop2(*cast(f), *cast(P1), cast(Q1buf), *cast(P2), cast(Q2buf));
+}
+
+void mclBn_precomputedMillerLoop2mixed(mclBnGT *f, const mclBnG1 *P1, const mclBnG2 *Q1, const mclBnG1 *P2, const uint64_t *Q2buf)
+{
+ precomputedMillerLoop2mixed(*cast(f), *cast(P1), *cast(Q1), *cast(P2), cast(Q2buf));
+}
+
+int mclBn_FrLagrangeInterpolation(mclBnFr *out, const mclBnFr *xVec, const mclBnFr *yVec, mclSize k)
+{
+ bool b;
+ mcl::LagrangeInterpolation(&b, *cast(out), cast(xVec), cast(yVec), k);
+ return b ? 0 : -1;
+}
+int mclBn_G1LagrangeInterpolation(mclBnG1 *out, const mclBnFr *xVec, const mclBnG1 *yVec, mclSize k)
+{
+ bool b;
+ mcl::LagrangeInterpolation(&b, *cast(out), cast(xVec), cast(yVec), k);
+ return b ? 0 : -1;
+}
+int mclBn_G2LagrangeInterpolation(mclBnG2 *out, const mclBnFr *xVec, const mclBnG2 *yVec, mclSize k)
+{
+ bool b;
+ mcl::LagrangeInterpolation(&b, *cast(out), cast(xVec), cast(yVec), k);
+ return b ? 0 : -1;
+}
+int mclBn_FrEvaluatePolynomial(mclBnFr *out, const mclBnFr *cVec, mclSize cSize, const mclBnFr *x)
+{
+ bool b;
+ mcl::evaluatePolynomial(&b, *cast(out), cast(cVec), cSize, *cast(x));
+ return b ? 0 : -1;
+}
+int mclBn_G1EvaluatePolynomial(mclBnG1 *out, const mclBnG1 *cVec, mclSize cSize, const mclBnFr *x)
+{
+ bool b;
+ mcl::evaluatePolynomial(&b, *cast(out), cast(cVec), cSize, *cast(x));
+ return b ? 0 : -1;
+}
+int mclBn_G2EvaluatePolynomial(mclBnG2 *out, const mclBnG2 *cVec, mclSize cSize, const mclBnFr *x)
+{
+ bool b;
+ mcl::evaluatePolynomial(&b, *cast(out), cast(cVec), cSize, *cast(x));
+ return b ? 0 : -1;
+}
+
+void mclBn_verifyOrderG1(int doVerify)
+{
+ verifyOrderG1(doVerify != 0);
+}
+
+void mclBn_verifyOrderG2(int doVerify)
+{
+ verifyOrderG2(doVerify != 0);
+}
+
+mclSize mclBnFp_getStr(char *buf, mclSize maxBufSize, const mclBnFp *x, int ioMode)
+{
+ return cast(x)->getStr(buf, maxBufSize, ioMode);
+}
+int mclBnFp_setStr(mclBnFp *x, const char *buf, mclSize bufSize, int ioMode)
+{
+ return setStr(x, buf, bufSize, ioMode);
+}
+mclSize mclBnFp_deserialize(mclBnFp *x, const void *buf, mclSize bufSize)
+{
+ return (mclSize)cast(x)->deserialize(buf, bufSize);
+}
+
+mclSize mclBnFp_serialize(void *buf, mclSize maxBufSize, const mclBnFp *x)
+{
+ return (mclSize)cast(x)->serialize(buf, maxBufSize);
+}
+
+void mclBnFp_clear(mclBnFp *x)
+{
+ cast(x)->clear();
+}
+
+int mclBnFp_setLittleEndian(mclBnFp *x, const void *buf, mclSize bufSize)
+{
+ cast(x)->setArrayMask((const char *)buf, bufSize);
+ return 0;
+}
+
+int mclBnFp_setLittleEndianMod(mclBnFp *x, const void *buf, mclSize bufSize)
+{
+ bool b;
+ cast(x)->setArray(&b, (const char *)buf, bufSize, mcl::fp::Mod);
+ return b ? 0 : -1;
+}
+int mclBnFp_isEqual(const mclBnFp *x, const mclBnFp *y)
+{
+ return *cast(x) == *cast(y);
+}
+
+int mclBnFp_setHashOf(mclBnFp *x, const void *buf, mclSize bufSize)
+{
+ cast(x)->setHashOf(buf, bufSize);
+ return 0;
+}
+
+int mclBnFp_mapToG1(mclBnG1 *y, const mclBnFp *x)
+{
+ bool b;
+ mapToG1(&b, *cast(y), *cast(x));
+ return b ? 0 : -1;
+}
+
+mclSize mclBnFp2_deserialize(mclBnFp2 *x, const void *buf, mclSize bufSize)
+{
+ return (mclSize)cast(x)->deserialize(buf, bufSize);
+}
+
+mclSize mclBnFp2_serialize(void *buf, mclSize maxBufSize, const mclBnFp2 *x)
+{
+ return (mclSize)cast(x)->serialize(buf, maxBufSize);
+}
+
+void mclBnFp2_clear(mclBnFp2 *x)
+{
+ cast(x)->clear();
+}
+
+int mclBnFp2_isEqual(const mclBnFp2 *x, const mclBnFp2 *y)
+{
+ return *cast(x) == *cast(y);
+}
+
+int mclBnFp2_mapToG2(mclBnG2 *y, const mclBnFp2 *x)
+{
+ bool b;
+ mapToG2(&b, *cast(y), *cast(x));
+ return b ? 0 : -1;
+}
+
+int mclBnG1_getBasePoint(mclBnG1 *x)
+{
+ *cast(x) = mcl::bn::getG1basePoint();
+ return 0;
+}
+
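Editor's note: the wrappers above expose the pairing-group operations through a plain C ABI. Below is a hedged sketch of the bilinearity check e(aP, bQ) == e(P, Q)^(ab) using this API; mclBn_init, mclBnFr_setByCSPRNG, mclBnFr_mul, mclBnG1_hashAndMapTo, mclBnG2_hashAndMapTo, mclBnG1_mul and mclBnG2_mul are assumed to come from <mcl/bn.h> and are not part of this hunk.

// Hedged sketch: bilinearity check via the C API wrapped above.
#include <mcl/bn.h>
#include <assert.h>

int main()
{
	assert(mclBn_init(MCL_BN254, MCLBN_COMPILED_TIME_VAR) == 0);
	mclBnFr a, b, ab;
	mclBnFr_setByCSPRNG(&a);
	mclBnFr_setByCSPRNG(&b);
	mclBnFr_mul(&ab, &a, &b);
	mclBnG1 P, aP;
	mclBnG2 Q, bQ;
	mclBnG1_hashAndMapTo(&P, "abc", 3);
	mclBnG2_hashAndMapTo(&Q, "abc", 3);
	mclBnG1_mul(&aP, &P, &a);
	mclBnG2_mul(&bQ, &Q, &b);
	mclBnGT e1, e2;
	mclBn_pairing(&e1, &aP, &bQ); // e(aP, bQ)
	mclBn_pairing(&e2, &P, &Q);
	mclBnGT_pow(&e2, &e2, &ab);   // e(P, Q)^(ab)
	return mclBnGT_isEqual(&e1, &e2) ? 0 : 1;
}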
diff --git a/vendor/github.com/tangerine-network/mcl/include/mcl/lagrange.hpp b/vendor/github.com/tangerine-network/mcl/include/mcl/lagrange.hpp
new file mode 100644
index 000000000..18e0597ec
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/mcl/lagrange.hpp
@@ -0,0 +1,107 @@
+#pragma once
+/**
+ @file
+ @brief Lagrange Interpolation
+ @author MITSUNARI Shigeo(@herumi)
+ @license modified new BSD license
+ http://opensource.org/licenses/BSD-3-Clause
+*/
+namespace mcl {
+
+/*
+ recover out = f(0) by { (x, y) | x = S[i], y = f(x) = vec[i] }
+ set *pb = true if succeeded, otherwise false
+*/
+template<class G, class F>
+void LagrangeInterpolation(bool *pb, G& out, const F *S, const G *vec, size_t k)
+{
+ if (k == 0) {
+ *pb = false;
+ return;
+ }
+ if (k == 1) {
+ out = vec[0];
+ *pb = true;
+ return;
+ }
+ /*
+ delta_{i,S}(0) = prod_{j != i} S[j] / (S[j] - S[i]) = a / b
+ where a = prod S[j], b = S[i] * prod_{j != i} (S[j] - S[i])
+ */
+ F a = S[0];
+ for (size_t i = 1; i < k; i++) {
+ a *= S[i];
+ }
+ if (a.isZero()) {
+ *pb = false;
+ return;
+ }
+ /*
+ f(0) = sum_i f(S[i]) delta_{i,S}(0)
+ */
+ G r;
+ r.clear();
+ for (size_t i = 0; i < k; i++) {
+ F b = S[i];
+ for (size_t j = 0; j < k; j++) {
+ if (j != i) {
+ F v = S[j] - S[i];
+ if (v.isZero()) {
+ *pb = false;
+ return;
+ }
+ b *= v;
+ }
+ }
+ G t;
+ G::mul(t, vec[i], a / b);
+ r += t;
+ }
+ out = r;
+ *pb = true;
+}
+
+/*
+ out = f(x) = c[0] + c[1] * x + c[2] * x^2 + ... + c[cSize - 1] * x^(cSize - 1)
+ set *pb = true if succeeded, false if cSize == 0
+*/
+template<class G, class T>
+void evaluatePolynomial(bool *pb, G& out, const G *c, size_t cSize, const T& x)
+{
+ if (cSize == 0) {
+ *pb = false;
+ return;
+ }
+ if (cSize == 1) {
+ out = c[0];
+ *pb = true;
+ return;
+ }
+ G y = c[cSize - 1];
+ for (int i = (int)cSize - 2; i >= 0; i--) {
+ G::mul(y, y, x);
+ G::add(y, y, c[i]);
+ }
+ out = y;
+ *pb = true;
+}
+
+#ifndef CYBOZU_DONT_USE_EXCEPTION
+template<class G, class F>
+void LagrangeInterpolation(G& out, const F *S, const G *vec, size_t k)
+{
+ bool b;
+ LagrangeInterpolation(&b, out, S, vec, k);
+ if (!b) throw cybozu::Exception("LagrangeInterpolation");
+}
+
+template<class G, class T>
+void evaluatePolynomial(G& out, const G *c, size_t cSize, const T& x)
+{
+ bool b;
+ evaluatePolynomial(&b, out, c, cSize, x);
+ if (!b) throw cybozu::Exception("evaluatePolynomial");
+}
+#endif
+
+} // mcl
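Editor's note: lagrange.hpp supplies the two building blocks of a Shamir-style sharing scheme: evaluatePolynomial() produces shares y[i] = f(x[i]) by Horner's rule and LagrangeInterpolation() recovers f(0) from k of them. A hedged round-trip sketch over Fr follows; mcl::bn::Fr and initPairing come from other headers of the library and are assumptions of this example.

// Hedged sketch: share with evaluatePolynomial, recover f(0) with LagrangeInterpolation.
#include <mcl/bn256.hpp>
#include <mcl/lagrange.hpp>
#include <assert.h>

int main()
{
	using namespace mcl::bn;
	initPairing();
	const size_t k = 3;
	Fr c[k]; // random polynomial f of degree < k; c[0] is the secret f(0)
	for (size_t i = 0; i < k; i++) c[i].setRand();
	Fr x[k], y[k];
	bool b;
	for (size_t i = 0; i < k; i++) {
		x[i] = int(i + 1); // distinct nonzero evaluation points
		mcl::evaluatePolynomial(&b, y[i], c, k, x[i]); // y[i] = f(x[i])
		assert(b);
	}
	Fr recovered;
	mcl::LagrangeInterpolation(&b, recovered, x, y, k); // recover f(0)
	assert(b && recovered == c[0]);
	return 0;
}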
diff --git a/vendor/github.com/tangerine-network/mcl/include/mcl/op.hpp b/vendor/github.com/tangerine-network/mcl/include/mcl/op.hpp
new file mode 100644
index 000000000..36d37035e
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/mcl/op.hpp
@@ -0,0 +1,389 @@
+#pragma once
+/**
+ @file
+ @brief definition of Op
+ @author MITSUNARI Shigeo(@herumi)
+ @license modified new BSD license
+ http://opensource.org/licenses/BSD-3-Clause
+*/
+#include <mcl/gmp_util.hpp>
+#include <memory.h>
+#include <mcl/array.hpp>
+
+#ifndef MCL_MAX_BIT_SIZE
+ #define MCL_MAX_BIT_SIZE 521
+#endif
+#if defined(__EMSCRIPTEN__) || defined(__wasm__)
+ #define MCL_DONT_USE_XBYAK
+ #define MCL_DONT_USE_OPENSSL
+#endif
+#if !defined(MCL_DONT_USE_XBYAK) && (defined(_WIN64) || defined(__x86_64__)) && (MCL_SIZEOF_UNIT == 8)
+ #define MCL_USE_XBYAK
+ #define MCL_XBYAK_DIRECT_CALL
+#endif
+
+#define MCL_MAX_HASH_BIT_SIZE 512
+
+namespace mcl {
+
+static const int version = 0x092; /* 0xABC = A.BC */
+
+/*
+ specifies available string format mode for X::setIoMode()
+ // for Fp, Fp2, Fp6, Fp12
+ default(0) : IoDec
+ printable string(zero terminated, variable size)
+ IoBin(2) | IoDec(10) | IoHex(16) | IoBinPrefix | IoHexPrefix
+
+ byte string(not zero terminated, fixed size)
+ IoArray | IoArrayRaw
+ IoArray = IoSerialize
+
+ // for Ec
+ affine(0) | IoEcCompY | IoComp
+ default : affine
+
+ affine and IoEcCompY are available with ioMode for Fp
+ IoSerialize ignores ioMode for Fp
+
+ IoAuto
+ dec or hex according to ios_base::fmtflags
+ IoBin
+ binary number([01]+)
+ IoDec
+ decimal number
+ IoHex
+ hexadecimal number([0-9a-fA-F]+)
+ IoBinPrefix
+ 0b + <binary number>
+ IoHexPrefix
+ 0x + <hexadecimal number>
+ IoArray
+ array of Unit(fixed size = Fp::getByteSize())
+ IoArrayRaw
+ array of Unit(fixed size = Fp::getByteSize()) without Montgomery conversion
+
+ // for Ec::setIoMode()
+ IoEcAffine(default)
+ "0" ; infinity
+ "1 <x> <y>" ; affine coordinate
+
+ IoEcProj
+ "4" <x> <y> <z> ; projective or jacobi coordinate
+
+ IoEcCompY
+ 1-bit y representation of elliptic curve
+ "2 <x>" ; compressed for even y
+ "3 <x>" ; compressed for odd y
+
+ IoSerialize
+ if isMSBserialize(): // p is not full bit
+ size = Fp::getByteSize()
+ use MSB of array of x for 1-bit y for prime p where (p % 8 != 0)
+ [0] ; infinity
+ <x> ; for even y
+ <x>|1 ; for odd y ; |1 means set MSB of x
+ else:
+ size = Fp::getByteSize() + 1
+ [0] ; infinity
+ 2 <x> ; for even y
+ 3 <x> ; for odd y
+*/
+enum IoMode {
+ IoAuto = 0, // dec or hex according to ios_base::fmtflags
+ IoBin = 2, // binary number without prefix
+ IoDec = 10, // decimal number without prefix
+ IoHex = 16, // hexadecimal number without prefix
+ IoArray = 32, // array of Unit(fixed size)
+ IoArrayRaw = 64, // raw array of Unit without Montgomery conversion
+ IoPrefix = 128, // append '0b'(bin) or '0x'(hex)
+ IoBinPrefix = IoBin | IoPrefix,
+ IoHexPrefix = IoHex | IoPrefix,
+ IoEcAffine = 0, // affine coordinate
+ IoEcCompY = 256, // 1-bit y representation of elliptic curve
+ IoSerialize = 512, // use MSB for 1-bit y
+ IoFixedSizeByteSeq = IoSerialize, // obsolete
+ IoEcProj = 1024, // projective or jacobi coordinate
+ IoSerializeHexStr = 2048 // printable hex string
+};
+
+namespace fp {
+
+const size_t UnitBitSize = sizeof(Unit) * 8;
+
+const size_t maxUnitSize = (MCL_MAX_BIT_SIZE + UnitBitSize - 1) / UnitBitSize;
+#define MCL_MAX_UNIT_SIZE ((MCL_MAX_BIT_SIZE + MCL_UNIT_BIT_SIZE - 1) / MCL_UNIT_BIT_SIZE)
+
+struct FpGenerator;
+struct Op;
+
+typedef void (*void1u)(Unit*);
+typedef void (*void2u)(Unit*, const Unit*);
+typedef void (*void2uI)(Unit*, const Unit*, Unit);
+typedef void (*void2uIu)(Unit*, const Unit*, Unit, const Unit*);
+typedef void (*void2uOp)(Unit*, const Unit*, const Op&);
+typedef void (*void3u)(Unit*, const Unit*, const Unit*);
+typedef void (*void4u)(Unit*, const Unit*, const Unit*, const Unit*);
+typedef int (*int2u)(Unit*, const Unit*);
+
+typedef Unit (*u1uII)(Unit*, Unit, Unit);
+typedef Unit (*u3u)(Unit*, const Unit*, const Unit*);
+
+/*
+ disable -Wcast-function-type
+ the number of arguments of some JIT functions is smaller than that of T
+*/
+template<class T, class S>
+T func_ptr_cast(S func)
+{
+ return reinterpret_cast<T>(reinterpret_cast<void*>(func));
+}
+struct Block {
+ const Unit *p; // pointer to original FpT.v_
+ size_t n;
+ Unit v_[maxUnitSize];
+};
+
+enum Mode {
+ FP_AUTO,
+ FP_GMP,
+ FP_GMP_MONT,
+ FP_LLVM,
+ FP_LLVM_MONT,
+ FP_XBYAK
+};
+
+enum PrimeMode {
+ PM_GENERIC = 0,
+ PM_NIST_P192,
+ PM_SECP256K1,
+ PM_NIST_P521
+};
+
+enum MaskMode {
+ NoMask = 0, // throw if greater or equal
+ SmallMask = 1, // 1-bit smaller mask if greater or equal
+ MaskAndMod = 2, // mask and subtract if greater or equal
+ Mod = 3 // mod p
+};
+
+struct Op {
+ /*
+ don't change the layout of rp and p
+ asm code assumes &rp + 1 == p
+ */
+ Unit rp;
+ Unit p[maxUnitSize];
+ mpz_class mp;
+ uint32_t pmod4;
+ mcl::SquareRoot sq;
+ mcl::Modp modp;
+ Unit half[maxUnitSize]; // (p + 1) / 2
+ Unit oneRep[maxUnitSize]; // 1(=inv R if Montgomery)
+ /*
+ for Montgomery
+ one = 1
+ R = (1 << (N * sizeof(Unit) * 8)) % p
+ R2 = (R * R) % p
+ R3 = (R * R * R) % p
+ */
+ Unit one[maxUnitSize];
+ Unit R2[maxUnitSize];
+ Unit R3[maxUnitSize];
+#ifdef MCL_USE_XBYAK
+ FpGenerator *fg;
+ mcl::Array<Unit> invTbl;
+#endif
+ void3u fp_addA_;
+ void3u fp_subA_;
+ void2u fp_negA_;
+ void3u fp_mulA_;
+ void2u fp_sqrA_;
+ void3u fp2_addA_;
+ void3u fp2_subA_;
+ void2u fp2_negA_;
+ void3u fp2_mulA_;
+ void2u fp2_sqrA_;
+ void3u fpDbl_addA_;
+ void3u fpDbl_subA_;
+ void3u fpDbl_mulPreA_;
+ void2u fpDbl_sqrPreA_;
+ void2u fpDbl_modA_;
+ void3u fp2Dbl_mulPreA_;
+ void2u fp2Dbl_sqrPreA_;
+ size_t maxN;
+ size_t N;
+ size_t bitSize;
+ bool (*fp_isZero)(const Unit*);
+ void1u fp_clear;
+ void2u fp_copy;
+ void2u fp_shr1;
+ void3u fp_neg;
+ void4u fp_add;
+ void4u fp_sub;
+ void4u fp_mul;
+ void3u fp_sqr;
+ void2uOp fp_invOp;
+ void2uIu fp_mulUnit; // fpN1_mod + fp_mulUnitPre
+
+ void3u fpDbl_mulPre;
+ void2u fpDbl_sqrPre;
+ int2u fp_preInv;
+ void2uI fp_mulUnitPre; // z[N + 1] = x[N] * y
+ void3u fpN1_mod; // y[N] = x[N + 1] % p[N]
+
+ void4u fpDbl_add;
+ void4u fpDbl_sub;
+ void3u fpDbl_mod;
+
+ u3u fp_addPre; // without modulo p
+ u3u fp_subPre; // without modulo p
+ u3u fpDbl_addPre;
+ u3u fpDbl_subPre;
+ /*
+ for Fp2 = F[u] / (u^2 + 1)
+ x = a + bu
+ */
+ int xi_a; // xi = xi_a + u
+ void4u fp2_mulNF;
+ void2u fp2_inv;
+ void2u fp2_mul_xiA_;
+ uint32_t (*hash)(void *out, uint32_t maxOutSize, const void *msg, uint32_t msgSize);
+
+ PrimeMode primeMode;
+ bool isFullBit; // true if bitSize % UnitBitSize == 0
+ bool isMont; // true if use Montgomery
+ bool isFastMod; // true if modulo is fast
+
+ Op()
+ {
+ clear();
+ }
+ ~Op()
+ {
+#ifdef MCL_USE_XBYAK
+ destroyFpGenerator(fg);
+#endif
+ }
+ void clear()
+ {
+ rp = 0;
+ memset(p, 0, sizeof(p));
+ mp = 0;
+ pmod4 = 0;
+ sq.clear();
+ // fg is not set
+ memset(half, 0, sizeof(half));
+ memset(oneRep, 0, sizeof(oneRep));
+ memset(one, 0, sizeof(one));
+ memset(R2, 0, sizeof(R2));
+ memset(R3, 0, sizeof(R3));
+#ifdef MCL_USE_XBYAK
+ invTbl.clear();
+#endif
+ fp_addA_ = 0;
+ fp_subA_ = 0;
+ fp_negA_ = 0;
+ fp_mulA_ = 0;
+ fp_sqrA_ = 0;
+ fp2_addA_ = 0;
+ fp2_subA_ = 0;
+ fp2_negA_ = 0;
+ fp2_mulA_ = 0;
+ fp2_sqrA_ = 0;
+ fpDbl_addA_ = 0;
+ fpDbl_subA_ = 0;
+ fpDbl_mulPreA_ = 0;
+ fpDbl_sqrPreA_ = 0;
+ fpDbl_modA_ = 0;
+ fp2Dbl_mulPreA_ = 0;
+ fp2Dbl_sqrPreA_ = 0;
+ maxN = 0;
+ N = 0;
+ bitSize = 0;
+ fp_isZero = 0;
+ fp_clear = 0;
+ fp_copy = 0;
+ fp_shr1 = 0;
+ fp_neg = 0;
+ fp_add = 0;
+ fp_sub = 0;
+ fp_mul = 0;
+ fp_sqr = 0;
+ fp_invOp = 0;
+ fp_mulUnit = 0;
+
+ fpDbl_mulPre = 0;
+ fpDbl_sqrPre = 0;
+ fp_preInv = 0;
+ fp_mulUnitPre = 0;
+ fpN1_mod = 0;
+
+ fpDbl_add = 0;
+ fpDbl_sub = 0;
+ fpDbl_mod = 0;
+
+ fp_addPre = 0;
+ fp_subPre = 0;
+ fpDbl_addPre = 0;
+ fpDbl_subPre = 0;
+
+ xi_a = 0;
+ fp2_mulNF = 0;
+ fp2_inv = 0;
+ fp2_mul_xiA_ = 0;
+
+ primeMode = PM_GENERIC;
+ isFullBit = false;
+ isMont = false;
+ isFastMod = false;
+ hash = 0;
+ }
+ void fromMont(Unit* y, const Unit *x) const
+ {
+ /*
+ M(x, y) = xyR^-1
+ y = M(x, 1) = xR^-1
+ */
+ fp_mul(y, x, one, p);
+ }
+ void toMont(Unit* y, const Unit *x) const
+ {
+ /*
+ y = M(x, R2) = xR^2 R^-1 = xR
+ */
+ fp_mul(y, x, R2, p);
+ }
+ bool init(const mpz_class& p, size_t maxBitSize, int xi_a, Mode mode, size_t mclMaxBitSize = MCL_MAX_BIT_SIZE);
+#ifdef MCL_USE_XBYAK
+ static FpGenerator* createFpGenerator();
+ static void destroyFpGenerator(FpGenerator *fg);
+#endif
+private:
+ Op(const Op&);
+ void operator=(const Op&);
+};
+
+inline const char* getIoSeparator(int ioMode)
+{
+ return (ioMode & (IoArray | IoArrayRaw | IoSerialize | IoSerializeHexStr)) ? "" : " ";
+}
+
+inline void dump(const char *s, size_t n)
+{
+ for (size_t i = 0; i < n; i++) {
+ printf("%02x ", (uint8_t)s[i]);
+ }
+ printf("\n");
+}
+
+#ifndef CYBOZU_DONT_USE_STRING
+int detectIoMode(int ioMode, const std::ios_base& ios);
+
+inline void dump(const std::string& s)
+{
+ dump(s.c_str(), s.size());
+}
+#endif
+
+} } // mcl::fp
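Editor's note: the long comment near the top of op.hpp specifies how ioMode selects between printable string formats (IoDec, IoHex, IoBinPrefix, ...) and fixed-size byte formats (IoSerialize, IoArray). A hedged sketch of the effect on a field element follows; mcl::bn::Fr and initPairing are assumed from other headers of the library.

// Hedged sketch: the IoMode values documented above as seen through getStr()/serialize().
#include <mcl/bn256.hpp>
#include <iostream>

int main()
{
	using namespace mcl::bn;
	initPairing();
	Fr x = 12345;
	std::cout << x.getStr(mcl::IoDec) << std::endl;       // "12345"
	std::cout << x.getStr(mcl::IoHexPrefix) << std::endl; // "0x3039"
	std::cout << x.getStr(mcl::IoBin) << std::endl;       // "11000000111001"
	// IoSerialize is a fixed-size byte string, not printable text
	char buf[32];
	size_t n = x.serialize(buf, sizeof(buf)); // Fr::getByteSize() bytes
	std::cout << "serialized " << n << " bytes" << std::endl;
	return 0;
}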
diff --git a/vendor/github.com/tangerine-network/mcl/include/mcl/operator.hpp b/vendor/github.com/tangerine-network/mcl/include/mcl/operator.hpp
new file mode 100644
index 000000000..e9bc506df
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/mcl/operator.hpp
@@ -0,0 +1,177 @@
+#pragma once
+/**
+ @file
+ @brief operator class
+ @author MITSUNARI Shigeo(@herumi)
+ @license modified new BSD license
+ http://opensource.org/licenses/BSD-3-Clause
+*/
+#include <mcl/op.hpp>
+#include <mcl/util.hpp>
+#ifdef _MSC_VER
+ #ifndef MCL_FORCE_INLINE
+ #define MCL_FORCE_INLINE __forceinline
+ #endif
+ #pragma warning(push)
+ #pragma warning(disable : 4714)
+#else
+ #ifndef MCL_FORCE_INLINE
+ #define MCL_FORCE_INLINE __attribute__((always_inline))
+ #endif
+#endif
+
+namespace mcl { namespace fp {
+
+template<class T>
+struct Empty {};
+
+/*
+ T must have add, sub, mul, inv, neg
+*/
+template<class T, class E = Empty<T> >
+struct Operator : public E {
+ template<class S> MCL_FORCE_INLINE T& operator+=(const S& rhs) { T::add(static_cast<T&>(*this), static_cast<const T&>(*this), rhs); return static_cast<T&>(*this); }
+ template<class S> MCL_FORCE_INLINE T& operator-=(const S& rhs) { T::sub(static_cast<T&>(*this), static_cast<const T&>(*this), rhs); return static_cast<T&>(*this); }
+ template<class S> friend MCL_FORCE_INLINE T operator+(const T& a, const S& b) { T c; T::add(c, a, b); return c; }
+ template<class S> friend MCL_FORCE_INLINE T operator-(const T& a, const S& b) { T c; T::sub(c, a, b); return c; }
+ template<class S> MCL_FORCE_INLINE T& operator*=(const S& rhs) { T::mul(static_cast<T&>(*this), static_cast<const T&>(*this), rhs); return static_cast<T&>(*this); }
+ template<class S> friend MCL_FORCE_INLINE T operator*(const T& a, const S& b) { T c; T::mul(c, a, b); return c; }
+ MCL_FORCE_INLINE T& operator/=(const T& rhs) { T c; T::inv(c, rhs); T::mul(static_cast<T&>(*this), static_cast<const T&>(*this), c); return static_cast<T&>(*this); }
+ static MCL_FORCE_INLINE void div(T& c, const T& a, const T& b) { T t; T::inv(t, b); T::mul(c, a, t); }
+ friend MCL_FORCE_INLINE T operator/(const T& a, const T& b) { T c; T::inv(c, b); c *= a; return c; }
+ MCL_FORCE_INLINE T operator-() const { T c; T::neg(c, static_cast<const T&>(*this)); return c; }
+ template<class tag2, size_t maxBitSize2, template<class _tag, size_t _maxBitSize> class FpT>
+ static void pow(T& z, const T& x, const FpT<tag2, maxBitSize2>& y)
+ {
+ fp::Block b;
+ y.getBlock(b);
+ powArray(z, x, b.p, b.n, false, false);
+ }
+ template<class tag2, size_t maxBitSize2, template<class _tag, size_t _maxBitSize> class FpT>
+ static void powGeneric(T& z, const T& x, const FpT<tag2, maxBitSize2>& y)
+ {
+ fp::Block b;
+ y.getBlock(b);
+ powArrayBase(z, x, b.p, b.n, false, false);
+ }
+ template<class tag2, size_t maxBitSize2, template<class _tag, size_t _maxBitSize> class FpT>
+ static void powCT(T& z, const T& x, const FpT<tag2, maxBitSize2>& y)
+ {
+ fp::Block b;
+ y.getBlock(b);
+ powArray(z, x, b.p, b.n, false, true);
+ }
+ static void pow(T& z, const T& x, int64_t y)
+ {
+ const uint64_t u = fp::abs_(y);
+#if MCL_SIZEOF_UNIT == 8
+ powArray(z, x, &u, 1, y < 0, false);
+#else
+ uint32_t ua[2] = { uint32_t(u), uint32_t(u >> 32) };
+ size_t un = ua[1] ? 2 : 1;
+ powArray(z, x, ua, un, y < 0, false);
+#endif
+ }
+ static void pow(T& z, const T& x, const mpz_class& y)
+ {
+ powArray(z, x, gmp::getUnit(y), gmp::getUnitSize(y), y < 0, false);
+ }
+ static void powGeneric(T& z, const T& x, const mpz_class& y)
+ {
+ powArrayBase(z, x, gmp::getUnit(y), gmp::getUnitSize(y), y < 0, false);
+ }
+ static void powCT(T& z, const T& x, const mpz_class& y)
+ {
+ powArray(z, x, gmp::getUnit(y), gmp::getUnitSize(y), y < 0, true);
+ }
+ static void setPowArrayGLV(void f(T& z, const T& x, const Unit *y, size_t yn, bool isNegative, bool constTime))
+ {
+ powArrayGLV = f;
+ }
+private:
+ static void (*powArrayGLV)(T& z, const T& x, const Unit *y, size_t yn, bool isNegative, bool constTime);
+ static void powArray(T& z, const T& x, const Unit *y, size_t yn, bool isNegative, bool constTime)
+ {
+ if (powArrayGLV && (constTime || yn > 1)) {
+ powArrayGLV(z, x, y, yn, isNegative, constTime);
+ return;
+ }
+ powArrayBase(z, x, y, yn, isNegative, constTime);
+ }
+ static void powArrayBase(T& z, const T& x, const Unit *y, size_t yn, bool isNegative, bool constTime)
+ {
+ T tmp;
+ const T *px = &x;
+ if (&z == &x) {
+ tmp = x;
+ px = &tmp;
+ }
+ z = 1;
+ fp::powGeneric(z, *px, y, yn, T::mul, T::sqr, (void (*)(T&, const T&))0, constTime ? T::BaseFp::getBitSize() : 0);
+ if (isNegative) {
+ T::inv(z, z);
+ }
+ }
+};
+
+template<class T, class E>
+void (*Operator<T, E>::powArrayGLV)(T& z, const T& x, const Unit *y, size_t yn, bool isNegative, bool constTime);
+
+/*
+ T must have save and load
+*/
+template<class T, class E = Empty<T> >
+struct Serializable : public E {
+ void setStr(bool *pb, const char *str, int ioMode = 0)
+ {
+ size_t len = strlen(str);
+ size_t n = deserialize(str, len, ioMode);
+ *pb = n > 0 && n == len;
+ }
+ // return strlen(buf) if success else 0
+ size_t getStr(char *buf, size_t maxBufSize, int ioMode = 0) const
+ {
+ size_t n = serialize(buf, maxBufSize, ioMode);
+ if (n == 0 || n == maxBufSize - 1) return 0;
+ buf[n] = '\0';
+ return n;
+ }
+#ifndef CYBOZU_DONT_USE_STRING
+ void setStr(const std::string& str, int ioMode = 0)
+ {
+ cybozu::StringInputStream is(str);
+ static_cast<T&>(*this).load(is, ioMode);
+ }
+ void getStr(std::string& str, int ioMode = 0) const
+ {
+ str.clear();
+ cybozu::StringOutputStream os(str);
+ static_cast<const T&>(*this).save(os, ioMode);
+ }
+ std::string getStr(int ioMode = 0) const
+ {
+ std::string str;
+ getStr(str, ioMode);
+ return str;
+ }
+#endif
+ // return written bytes
+ size_t serialize(void *buf, size_t maxBufSize, int ioMode = IoSerialize) const
+ {
+ cybozu::MemoryOutputStream os(buf, maxBufSize);
+ bool b;
+ static_cast<const T&>(*this).save(&b, os, ioMode);
+ return b ? os.getPos() : 0;
+ }
+ // return read bytes
+ size_t deserialize(const void *buf, size_t bufSize, int ioMode = IoSerialize)
+ {
+ cybozu::MemoryInputStream is(buf, bufSize);
+ bool b;
+ static_cast<T&>(*this).load(&b, is, ioMode);
+ return b ? is.getPos() : 0;
+ }
+};
+
+} } // mcl::fp
+
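Editor's note: Serializable is the mix-in that gives every field and group type the uniform setStr/getStr/serialize/deserialize surface used throughout the library. A hedged round-trip sketch over Fr; initPairing and setByCSPRNG are assumed from other headers.

// Hedged sketch: serialize()/deserialize() round trip via the Serializable mix-in above.
#include <mcl/bn256.hpp>
#include <assert.h>

int main()
{
	using namespace mcl::bn;
	initPairing();
	Fr x;
	x.setByCSPRNG();
	char buf[64];
	size_t n = x.serialize(buf, sizeof(buf)); // written bytes, 0 on error
	assert(n > 0);
	Fr y;
	size_t m = y.deserialize(buf, n);         // read bytes, 0 on error
	assert(m == n && x == y);
	return 0;
}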
diff --git a/vendor/github.com/tangerine-network/mcl/include/mcl/paillier.hpp b/vendor/github.com/tangerine-network/mcl/include/mcl/paillier.hpp
new file mode 100644
index 000000000..03e44cb16
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/mcl/paillier.hpp
@@ -0,0 +1,84 @@
+#pragma once
+/**
+ @file
+ @brief paillier encryption
+ @author MITSUNARI Shigeo(@herumi)
+ @license modified new BSD license
+ http://opensource.org/licenses/BSD-3-Clause
+*/
+#include <mcl/gmp_util.hpp>
+
+namespace mcl { namespace paillier {
+
+class PublicKey {
+ size_t primeBitSize;
+ mpz_class g;
+ mpz_class n;
+ mpz_class n2;
+public:
+ PublicKey() : primeBitSize(0) {}
+ void init(size_t _primeBitSize, const mpz_class& _n)
+ {
+ primeBitSize = _primeBitSize;
+ n = _n;
+ g = 1 + _n;
+ n2 = _n * _n;
+ }
+ void enc(mpz_class& c, const mpz_class& m, mcl::fp::RandGen rg = mcl::fp::RandGen()) const
+ {
+ if (rg.isZero()) rg = mcl::fp::RandGen::get();
+ if (primeBitSize == 0) throw cybozu::Exception("paillier:PublicKey:not init");
+ mpz_class r;
+ mcl::gmp::getRand(r, primeBitSize, rg);
+ mpz_class a, b;
+ mcl::gmp::powMod(a, g, m, n2);
+ mcl::gmp::powMod(b, r, n, n2);
+ c = (a * b) % n2;
+ }
+ /*
+ additive homomorphic encryption
+ cz = cx + cy
+ */
+ void add(mpz_class& cz, mpz_class& cx, mpz_class& cy) const
+ {
+ cz = (cx * cy) % n2;
+ }
+};
+
+class SecretKey {
+ size_t primeBitSize;
+ mpz_class n;
+ mpz_class n2;
+ mpz_class lambda;
+ mpz_class invLambda;
+public:
+ SecretKey() : primeBitSize(0) {}
+ /*
+ the size of prime is half of bitSize
+ */
+ void init(size_t bitSize, mcl::fp::RandGen rg = mcl::fp::RandGen())
+ {
+ if (rg.isZero()) rg = mcl::fp::RandGen::get();
+ primeBitSize = bitSize / 2;
+ mpz_class p, q;
+ mcl::gmp::getRandPrime(p, primeBitSize, rg);
+ mcl::gmp::getRandPrime(q, primeBitSize, rg);
+ lambda = (p - 1) * (q - 1);
+ n = p * q;
+ n2 = n * n;
+ mcl::gmp::invMod(invLambda, lambda, n);
+ }
+ void getPublicKey(PublicKey& pub) const
+ {
+ pub.init(primeBitSize, n);
+ }
+ void dec(mpz_class& m, const mpz_class& c) const
+ {
+ mpz_class L;
+ mcl::gmp::powMod(L, c, lambda, n2);
+ L = ((L - 1) / n) % n;
+ m = (L * invLambda) % n;
+ }
+};
+
+} } // mcl::paillier
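Editor's note: Paillier is additively homomorphic, so multiplying ciphertexts modulo n^2 adds the plaintexts. A hedged sketch using only the two classes above (the 2048-bit modulus is an illustrative choice):

// Hedged sketch: dec(add(enc(m1), enc(m2))) == m1 + m2.
#include <mcl/paillier.hpp>
#include <iostream>

int main()
{
	using namespace mcl::paillier;
	SecretKey sec;
	sec.init(2048);          // two 1024-bit random primes
	PublicKey pub;
	sec.getPublicKey(pub);
	mpz_class c1, c2, c3, d;
	pub.enc(c1, 123);
	pub.enc(c2, 456);
	pub.add(c3, c1, c2);     // ciphertext of 123 + 456
	sec.dec(d, c3);
	std::cout << d << std::endl; // 579
	return 0;
}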
diff --git a/vendor/github.com/tangerine-network/mcl/include/mcl/randgen.hpp b/vendor/github.com/tangerine-network/mcl/include/mcl/randgen.hpp
new file mode 100644
index 000000000..30502fc10
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/mcl/randgen.hpp
@@ -0,0 +1,156 @@
+#pragma once
+/**
+ @file
+ @brief wrapper of cryptographically secure pseudo random number generator
+ @author MITSUNARI Shigeo(@herumi)
+ @license modified new BSD license
+ http://opensource.org/licenses/BSD-3-Clause
+*/
+#ifdef MCL_DONT_USE_CSPRNG
+
+// nothing
+
+#elif defined(MCL_USE_WEB_CRYPTO_API)
+#include <emscripten.h>
+
+namespace mcl {
+struct RandomGeneratorJS {
+ void read(bool *pb, void *buf, uint32_t byteSize)
+ {
+ // cf. https://developer.mozilla.org/en-US/docs/Web/API/Crypto/getRandomValues
+ if (byteSize > 65536) {
+ *pb = false;
+ return;
+ }
+ // use crypto.getRandomValues
+ EM_ASM({Module.cryptoGetRandomValues($0, $1)}, buf, byteSize);
+ *pb = true;
+ }
+};
+} // mcl
+
+#else
+#include <cybozu/random_generator.hpp>
+#if 0 // #if CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11
+#include <random>
+#endif
+#endif
+#ifdef _MSC_VER
+ #pragma warning(push)
+ #pragma warning(disable : 4521)
+#endif
+namespace mcl { namespace fp {
+
+namespace local {
+
+template<class RG>
+uint32_t readWrapper(void *self, void *buf, uint32_t byteSize)
+{
+ bool b;
+ reinterpret_cast<RG*>(self)->read(&b, (uint8_t*)buf, byteSize);
+ if (b) return byteSize;
+ return 0;
+}
+
+#if 0 // #if CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11
+template<>
+inline uint32_t readWrapper<std::random_device>(void *self, void *buf, uint32_t byteSize)
+{
+ const uint32_t keep = byteSize;
+ std::random_device& rg = *reinterpret_cast<std::random_device*>(self);
+ uint8_t *p = reinterpret_cast<uint8_t*>(buf);
+ uint32_t v;
+ while (byteSize >= 4) {
+ v = rg();
+ memcpy(p, &v, 4);
+ p += 4;
+ byteSize -= 4;
+ }
+ if (byteSize > 0) {
+ v = rg();
+ memcpy(p, &v, byteSize);
+ }
+ return keep;
+}
+#endif
+} // local
+/*
+ wrapper of cryptographically secure pseudo random number generator
+*/
+class RandGen {
+ typedef uint32_t (*readFuncType)(void *self, void *buf, uint32_t byteSize);
+ void *self_;
+ readFuncType readFunc_;
+public:
+ RandGen() : self_(0), readFunc_(0) {}
+ RandGen(void *self, readFuncType readFunc) : self_(self) , readFunc_(readFunc) {}
+ RandGen(const RandGen& rhs) : self_(rhs.self_), readFunc_(rhs.readFunc_) {}
+ RandGen(RandGen& rhs) : self_(rhs.self_), readFunc_(rhs.readFunc_) {}
+ RandGen& operator=(const RandGen& rhs)
+ {
+ self_ = rhs.self_;
+ readFunc_ = rhs.readFunc_;
+ return *this;
+ }
+ template<class RG>
+ RandGen(RG& rg)
+ : self_(reinterpret_cast<void*>(&rg))
+ , readFunc_(local::readWrapper<RG>)
+ {
+ }
+ void read(bool *pb, void *out, size_t byteSize)
+ {
+ uint32_t size = readFunc_(self_, out, static_cast<uint32_t>(byteSize));
+ *pb = size == byteSize;
+ }
+#ifdef MCL_DONT_USE_CSPRNG
+ bool isZero() const { return false; } /* return false to avoid copying default rg */
+#else
+ bool isZero() const { return self_ == 0 && readFunc_ == 0; }
+#endif
+ static RandGen& getDefaultRandGen()
+ {
+#ifdef MCL_DONT_USE_CSPRNG
+ static RandGen wrg;
+#elif defined(MCL_USE_WEB_CRYPTO_API)
+ static mcl::RandomGeneratorJS rg;
+ static RandGen wrg(rg);
+#else
+ static cybozu::RandomGenerator rg;
+ static RandGen wrg(rg);
+#endif
+ return wrg;
+ }
+ static RandGen& get()
+ {
+ static RandGen wrg(getDefaultRandGen());
+ return wrg;
+ }
+ /*
+ rg must be thread safe
+ rg.read(void *buf, size_t byteSize);
+ */
+ static void setRandGen(const RandGen& rg)
+ {
+ get() = rg;
+ }
+ /*
+ set rand function
+ if self and readFunc are NULL then set default rand function
+ */
+ static void setRandFunc(void *self, readFuncType readFunc)
+ {
+ if (self == 0 && readFunc == 0) {
+ setRandGen(getDefaultRandGen());
+ } else {
+ RandGen rg(self, readFunc);
+ setRandGen(rg);
+ }
+ }
+};
+
+} } // mcl::fp
+
+#ifdef _MSC_VER
+ #pragma warning(pop)
+#endif
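Editor's note: RandGen type-erases any object exposing read(bool*, buf, byteSize), so callers can swap in their own entropy source. A hedged sketch follows; the xorshift generator is a deliberately toy stand-in for illustration only, and a real application should keep the default CSPRNG.

// Hedged sketch: wrapping a user RNG with the template constructor above.
#include <mcl/randgen.hpp>
#include <stdint.h>

struct ToyRng { // NOT cryptographically secure; illustration only
	uint64_t s;
	ToyRng() : s(0x123456789abcdefULL) {}
	void read(bool *pb, void *buf, uint32_t byteSize)
	{
		uint8_t *p = (uint8_t*)buf;
		for (uint32_t i = 0; i < byteSize; i++) {
			s ^= s << 13; s ^= s >> 7; s ^= s << 17; // xorshift64 step
			p[i] = (uint8_t)s;
		}
		*pb = true;
	}
};

int main()
{
	ToyRng rng;
	mcl::fp::RandGen rg(rng);         // type-erased wrapper
	uint8_t buf[16];
	bool ok;
	rg.read(&ok, buf, sizeof(buf));
	mcl::fp::RandGen::setRandGen(rg); // make it the library-wide generator
	return ok ? 0 : 1;
}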
diff --git a/vendor/github.com/tangerine-network/mcl/include/mcl/she.h b/vendor/github.com/tangerine-network/mcl/include/mcl/she.h
new file mode 100644
index 000000000..60b399c65
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/mcl/she.h
@@ -0,0 +1,270 @@
+#pragma once
+/**
+ @file
+ @brief C api of somewhat homomorphic encryption with one-time multiplication, based on prime-order pairings
+ @author MITSUNARI Shigeo(@herumi)
+ @license modified new BSD license
+ http://opensource.org/licenses/BSD-3-Clause
+*/
+#include <mcl/bn.h>
+
+#ifdef _MSC_VER
+#ifdef MCLSHE_DLL_EXPORT
+#define MCLSHE_DLL_API __declspec(dllexport)
+#else
+#define MCLSHE_DLL_API __declspec(dllimport)
+#ifndef MCLSHE_NO_AUTOLINK
+ #if MCLBN_FP_UNIT_SIZE == 4
+ #pragma comment(lib, "mclshe256.lib")
+ #elif MCLBN_FP_UNIT_SIZE == 6
+ #pragma comment(lib, "mclshe384.lib")
+ #else
+ #pragma comment(lib, "mclshe512.lib")
+ #endif
+#endif
+#endif
+#else
+#ifdef __EMSCRIPTEN__
+ #define MCLSHE_DLL_API __attribute__((used))
+#elif defined(__wasm__)
+ #define MCLSHE_DLL_API __attribute__((visibility("default")))
+#else
+ #define MCLSHE_DLL_API
+#endif
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct {
+ mclBnFr x;
+ mclBnFr y;
+} sheSecretKey;
+
+typedef struct {
+ mclBnG1 xP;
+ mclBnG2 yQ;
+} shePublicKey;
+
+struct shePrecomputedPublicKey;
+
+typedef struct {
+ mclBnG1 S;
+ mclBnG1 T;
+} sheCipherTextG1;
+
+typedef struct {
+ mclBnG2 S;
+ mclBnG2 T;
+} sheCipherTextG2;
+
+typedef struct {
+ mclBnGT g[4];
+} sheCipherTextGT;
+
+typedef struct {
+ mclBnFr d[4];
+} sheZkpBin;
+
+typedef struct {
+ mclBnFr d[4];
+} sheZkpEq;
+
+typedef struct {
+ mclBnFr d[7];
+} sheZkpBinEq;
+/*
+ initialize this library
+ call this once before using the other functions
+ @param curve [in] enum value defined in mcl/bn.h
+ @param compiledTimeVar [in] specify MCLBN_COMPILED_TIME_VAR,
+ which macro is used to make sure that the values
+ are the same when the library is built and used
+ @return 0 if success
+ @note sheInit() is thread safe and serialized if it is called simultaneously
+ but don't call it while using other functions.
+*/
+MCLSHE_DLL_API int sheInit(int curve, int compiledTimeVar);
+
+// return written byte size if success else 0
+MCLSHE_DLL_API mclSize sheSecretKeySerialize(void *buf, mclSize maxBufSize, const sheSecretKey *sec);
+MCLSHE_DLL_API mclSize shePublicKeySerialize(void *buf, mclSize maxBufSize, const shePublicKey *pub);
+MCLSHE_DLL_API mclSize sheCipherTextG1Serialize(void *buf, mclSize maxBufSize, const sheCipherTextG1 *c);
+MCLSHE_DLL_API mclSize sheCipherTextG2Serialize(void *buf, mclSize maxBufSize, const sheCipherTextG2 *c);
+MCLSHE_DLL_API mclSize sheCipherTextGTSerialize(void *buf, mclSize maxBufSize, const sheCipherTextGT *c);
+MCLSHE_DLL_API mclSize sheZkpBinSerialize(void *buf, mclSize maxBufSize, const sheZkpBin *zkp);
+MCLSHE_DLL_API mclSize sheZkpEqSerialize(void *buf, mclSize maxBufSize, const sheZkpEq *zkp);
+MCLSHE_DLL_API mclSize sheZkpBinEqSerialize(void *buf, mclSize maxBufSize, const sheZkpBinEq *zkp);
+
+// return read byte size if success else 0
+MCLSHE_DLL_API mclSize sheSecretKeyDeserialize(sheSecretKey* sec, const void *buf, mclSize bufSize);
+MCLSHE_DLL_API mclSize shePublicKeyDeserialize(shePublicKey* pub, const void *buf, mclSize bufSize);
+MCLSHE_DLL_API mclSize sheCipherTextG1Deserialize(sheCipherTextG1* c, const void *buf, mclSize bufSize);
+MCLSHE_DLL_API mclSize sheCipherTextG2Deserialize(sheCipherTextG2* c, const void *buf, mclSize bufSize);
+MCLSHE_DLL_API mclSize sheCipherTextGTDeserialize(sheCipherTextGT* c, const void *buf, mclSize bufSize);
+MCLSHE_DLL_API mclSize sheZkpBinDeserialize(sheZkpBin* zkp, const void *buf, mclSize bufSize);
+MCLSHE_DLL_API mclSize sheZkpEqDeserialize(sheZkpEq* zkp, const void *buf, mclSize bufSize);
+MCLSHE_DLL_API mclSize sheZkpBinEqDeserialize(sheZkpBinEq* zkp, const void *buf, mclSize bufSize);
+
+/*
+ set secretKey if system has /dev/urandom or CryptGenRandom
+ return 0 if success
+*/
+MCLSHE_DLL_API int sheSecretKeySetByCSPRNG(sheSecretKey *sec);
+
+MCLSHE_DLL_API void sheGetPublicKey(shePublicKey *pub, const sheSecretKey *sec);
+
+/*
+ make table to decode DLP
+ return 0 if success
+*/
+MCLSHE_DLL_API int sheSetRangeForDLP(mclSize hashSize);
+MCLSHE_DLL_API int sheSetRangeForG1DLP(mclSize hashSize);
+MCLSHE_DLL_API int sheSetRangeForG2DLP(mclSize hashSize);
+MCLSHE_DLL_API int sheSetRangeForGTDLP(mclSize hashSize);
+
+/*
+ set tryNum to decode DLP
+*/
+MCLSHE_DLL_API void sheSetTryNum(mclSize tryNum);
+
+/*
+ decode G1 via GT if use != 0
+ @note faster if tryNum >= 300
+*/
+MCLSHE_DLL_API void sheUseDecG1ViaGT(int use);
+/*
+ decode G2 via GT if use != 0
+ @note faster if tryNum >= 100
+*/
+MCLSHE_DLL_API void sheUseDecG2ViaGT(int use);
+/*
+ load table for DLP
+ return read size if success else 0
+*/
+MCLSHE_DLL_API mclSize sheLoadTableForG1DLP(const void *buf, mclSize bufSize);
+MCLSHE_DLL_API mclSize sheLoadTableForG2DLP(const void *buf, mclSize bufSize);
+MCLSHE_DLL_API mclSize sheLoadTableForGTDLP(const void *buf, mclSize bufSize);
+
+/*
+ save table for DLP
+ return written size if success else 0
+*/
+MCLSHE_DLL_API mclSize sheSaveTableForG1DLP(void *buf, mclSize maxBufSize);
+MCLSHE_DLL_API mclSize sheSaveTableForG2DLP(void *buf, mclSize maxBufSize);
+MCLSHE_DLL_API mclSize sheSaveTableForGTDLP(void *buf, mclSize maxBufSize);
+
+// return 0 if success
+MCLSHE_DLL_API int sheEncG1(sheCipherTextG1 *c, const shePublicKey *pub, mclInt m);
+MCLSHE_DLL_API int sheEncG2(sheCipherTextG2 *c, const shePublicKey *pub, mclInt m);
+MCLSHE_DLL_API int sheEncGT(sheCipherTextGT *c, const shePublicKey *pub, mclInt m);
+MCLSHE_DLL_API int shePrecomputedPublicKeyEncG1(sheCipherTextG1 *c, const shePrecomputedPublicKey *ppub, mclInt m);
+MCLSHE_DLL_API int shePrecomputedPublicKeyEncG2(sheCipherTextG2 *c, const shePrecomputedPublicKey *ppub, mclInt m);
+MCLSHE_DLL_API int shePrecomputedPublicKeyEncGT(sheCipherTextGT *c, const shePrecomputedPublicKey *ppub, mclInt m);
+
+/*
+ m must be 0 or 1
+*/
+MCLSHE_DLL_API int sheEncWithZkpBinG1(sheCipherTextG1 *c, sheZkpBin *zkp, const shePublicKey *pub, int m);
+MCLSHE_DLL_API int sheEncWithZkpBinG2(sheCipherTextG2 *c, sheZkpBin *zkp, const shePublicKey *pub, int m);
+MCLSHE_DLL_API int sheEncWithZkpBinEq(sheCipherTextG1 *c1, sheCipherTextG2 *c2, sheZkpBinEq *zkp, const shePublicKey *pub, int m);
+MCLSHE_DLL_API int shePrecomputedPublicKeyEncWithZkpBinG1(sheCipherTextG1 *c, sheZkpBin *zkp, const shePrecomputedPublicKey *ppub, int m);
+MCLSHE_DLL_API int shePrecomputedPublicKeyEncWithZkpBinG2(sheCipherTextG2 *c, sheZkpBin *zkp, const shePrecomputedPublicKey *ppub, int m);
+MCLSHE_DLL_API int shePrecomputedPublicKeyEncWithZkpBinEq(sheCipherTextG1 *c1, sheCipherTextG2 *c2, sheZkpBinEq *zkp, const shePrecomputedPublicKey *ppub, int m);
+
+/*
+ arbitrary m
+*/
+MCLSHE_DLL_API int sheEncWithZkpEq(sheCipherTextG1 *c1, sheCipherTextG2 *c2, sheZkpEq *zkp, const shePublicKey *pub, mclInt m);
+MCLSHE_DLL_API int shePrecomputedPublicKeyEncWithZkpEq(sheCipherTextG1 *c1, sheCipherTextG2 *c2, sheZkpEq *zkp, const shePrecomputedPublicKey *ppub, mclInt m);
+
+/*
+ decode c and set m
+ return 0 if success
+*/
+MCLSHE_DLL_API int sheDecG1(mclInt *m, const sheSecretKey *sec, const sheCipherTextG1 *c);
+MCLSHE_DLL_API int sheDecG2(mclInt *m, const sheSecretKey *sec, const sheCipherTextG2 *c);
+MCLSHE_DLL_API int sheDecGT(mclInt *m, const sheSecretKey *sec, const sheCipherTextGT *c);
+/*
+ verify zkp
+ return 1 if valid
+*/
+MCLSHE_DLL_API int sheVerifyZkpBinG1(const shePublicKey *pub, const sheCipherTextG1 *c, const sheZkpBin *zkp);
+MCLSHE_DLL_API int sheVerifyZkpBinG2(const shePublicKey *pub, const sheCipherTextG2 *c, const sheZkpBin *zkp);
+MCLSHE_DLL_API int sheVerifyZkpEq(const shePublicKey *pub, const sheCipherTextG1 *c1, const sheCipherTextG2 *c2, const sheZkpEq *zkp);
+MCLSHE_DLL_API int sheVerifyZkpBinEq(const shePublicKey *pub, const sheCipherTextG1 *c1, const sheCipherTextG2 *c2, const sheZkpBinEq *zkp);
+MCLSHE_DLL_API int shePrecomputedPublicKeyVerifyZkpBinG1(const shePrecomputedPublicKey *ppub, const sheCipherTextG1 *c, const sheZkpBin *zkp);
+MCLSHE_DLL_API int shePrecomputedPublicKeyVerifyZkpBinG2(const shePrecomputedPublicKey *ppub, const sheCipherTextG2 *c, const sheZkpBin *zkp);
+MCLSHE_DLL_API int shePrecomputedPublicKeyVerifyZkpEq(const shePrecomputedPublicKey *ppub, const sheCipherTextG1 *c1, const sheCipherTextG2 *c2, const sheZkpEq *zkp);
+MCLSHE_DLL_API int shePrecomputedPublicKeyVerifyZkpBinEq(const shePrecomputedPublicKey *ppub, const sheCipherTextG1 *c1, const sheCipherTextG2 *c2, const sheZkpBinEq *zkp);
+/*
+ decode c via GT and set m
+ return 0 if success
+*/
+MCLSHE_DLL_API int sheDecG1ViaGT(mclInt *m, const sheSecretKey *sec, const sheCipherTextG1 *c);
+MCLSHE_DLL_API int sheDecG2ViaGT(mclInt *m, const sheSecretKey *sec, const sheCipherTextG2 *c);
+
+/*
+ return 1 if dec(c) == 0
+*/
+MCLSHE_DLL_API int sheIsZeroG1(const sheSecretKey *sec, const sheCipherTextG1 *c);
+MCLSHE_DLL_API int sheIsZeroG2(const sheSecretKey *sec, const sheCipherTextG2 *c);
+MCLSHE_DLL_API int sheIsZeroGT(const sheSecretKey *sec, const sheCipherTextGT *c);
+
+// return 0 if success
+// y = -x
+MCLSHE_DLL_API int sheNegG1(sheCipherTextG1 *y, const sheCipherTextG1 *x);
+MCLSHE_DLL_API int sheNegG2(sheCipherTextG2 *y, const sheCipherTextG2 *x);
+MCLSHE_DLL_API int sheNegGT(sheCipherTextGT *y, const sheCipherTextGT *x);
+
+// return 0 if success
+// z = x + y
+MCLSHE_DLL_API int sheAddG1(sheCipherTextG1 *z, const sheCipherTextG1 *x, const sheCipherTextG1 *y);
+MCLSHE_DLL_API int sheAddG2(sheCipherTextG2 *z, const sheCipherTextG2 *x, const sheCipherTextG2 *y);
+MCLSHE_DLL_API int sheAddGT(sheCipherTextGT *z, const sheCipherTextGT *x, const sheCipherTextGT *y);
+
+// return 0 if success
+// z = x - y
+MCLSHE_DLL_API int sheSubG1(sheCipherTextG1 *z, const sheCipherTextG1 *x, const sheCipherTextG1 *y);
+MCLSHE_DLL_API int sheSubG2(sheCipherTextG2 *z, const sheCipherTextG2 *x, const sheCipherTextG2 *y);
+MCLSHE_DLL_API int sheSubGT(sheCipherTextGT *z, const sheCipherTextGT *x, const sheCipherTextGT *y);
+
+// return 0 if success
+// z = x * y
+MCLSHE_DLL_API int sheMulG1(sheCipherTextG1 *z, const sheCipherTextG1 *x, mclInt y);
+MCLSHE_DLL_API int sheMulG2(sheCipherTextG2 *z, const sheCipherTextG2 *x, mclInt y);
+MCLSHE_DLL_API int sheMulGT(sheCipherTextGT *z, const sheCipherTextGT *x, mclInt y);
+
+// return 0 if success
+// z = x * y
+MCLSHE_DLL_API int sheMul(sheCipherTextGT *z, const sheCipherTextG1 *x, const sheCipherTextG2 *y);
+/*
+ sheMul(z, x, y) = sheMulML(z, x, y) + sheFinalExpGT(z)
+ @note
+ Mul(x1, y1) + ... + Mul(xn, yn) = finalExp(MulML(x1, y1) + ... + MulML(xn, yn))
+*/
+MCLSHE_DLL_API int sheMulML(sheCipherTextGT *z, const sheCipherTextG1 *x, const sheCipherTextG2 *y);
+MCLSHE_DLL_API int sheFinalExpGT(sheCipherTextGT *y, const sheCipherTextGT *x);
+
+// return 0 if success
+// rerandomize(c)
+MCLSHE_DLL_API int sheReRandG1(sheCipherTextG1 *c, const shePublicKey *pub);
+MCLSHE_DLL_API int sheReRandG2(sheCipherTextG2 *c, const shePublicKey *pub);
+MCLSHE_DLL_API int sheReRandGT(sheCipherTextGT *c, const shePublicKey *pub);
+
+// return 0 if success
+// y = convert(x)
+MCLSHE_DLL_API int sheConvertG1(sheCipherTextGT *y, const shePublicKey *pub, const sheCipherTextG1 *x);
+MCLSHE_DLL_API int sheConvertG2(sheCipherTextGT *y, const shePublicKey *pub, const sheCipherTextG2 *x);
+
+// return nonzero if success
+MCLSHE_DLL_API shePrecomputedPublicKey *shePrecomputedPublicKeyCreate();
+// call this function to avoid memory leak
+MCLSHE_DLL_API void shePrecomputedPublicKeyDestroy(shePrecomputedPublicKey *ppub);
+// return 0 if success
+MCLSHE_DLL_API int shePrecomputedPublicKeyInit(shePrecomputedPublicKey *ppub, const shePublicKey *pub);
+
+#ifdef __cplusplus
+}
+#endif
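Editor's note: the header above declares the full "add freely, multiply once" interface. Below is a hedged sketch of the basic flow: encrypt in G1 and G2, multiply once into GT, keep adding, then decrypt. MCL_BN254 and MCLBN_COMPILED_TIME_VAR are assumed from <mcl/bn.h>.

// Hedged sketch: one multiplication followed by additions in GT.
#include <mcl/she.h>
#include <assert.h>

int main()
{
	assert(sheInit(MCL_BN254, MCLBN_COMPILED_TIME_VAR) == 0);
	sheSecretKey sec;
	sheSecretKeySetByCSPRNG(&sec);
	shePublicKey pub;
	sheGetPublicKey(&pub, &sec);

	sheCipherTextG1 c1;
	sheCipherTextG2 c2;
	sheCipherTextGT ct, ct2;
	sheEncG1(&c1, &pub, 3);
	sheEncG2(&c2, &pub, 5);
	sheMul(&ct, &c1, &c2);    // ciphertext of 3 * 5
	sheAddGT(&ct2, &ct, &ct); // ciphertext of 15 + 15
	mclInt m = 0;
	assert(sheDecGT(&m, &sec, &ct2) == 0 && m == 30);
	return 0;
}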
diff --git a/vendor/github.com/tangerine-network/mcl/include/mcl/she.hpp b/vendor/github.com/tangerine-network/mcl/include/mcl/she.hpp
new file mode 100644
index 000000000..3ce361454
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/mcl/she.hpp
@@ -0,0 +1,1939 @@
+#pragma once
+/**
+ @file
+ @brief somewhat homomorphic encryption with one-time multiplication, based on prime-order pairings
+ @author MITSUNARI Shigeo(@herumi)
+ see https://github.com/herumi/mcl/blob/master/misc/she/she.pdf
+ @license modified new BSD license
+ http://opensource.org/licenses/BSD-3-Clause
+*/
+#include <cmath>
+#include <vector>
+#include <iosfwd>
+#ifndef MCLBN_FP_UNIT_SIZE
+ #define MCLBN_FP_UNIT_SIZE 4
+#endif
+#if MCLBN_FP_UNIT_SIZE == 4
+#include <mcl/bn256.hpp>
+#elif MCLBN_FP_UNIT_SIZE == 6
+#include <mcl/bn384.hpp>
+#elif MCLBN_FP_UNIT_SIZE == 8
+#include <mcl/bn512.hpp>
+#else
+ #error "MCLBN_FP_UNIT_SIZE must be 4, 6, or 8"
+#endif
+
+#include <mcl/window_method.hpp>
+#include <cybozu/endian.hpp>
+#include <cybozu/serializer.hpp>
+
+namespace mcl { namespace she {
+
+using namespace mcl::bn;
+
+namespace local {
+
+#ifndef MCLSHE_WIN_SIZE
+ #define MCLSHE_WIN_SIZE 10
+#endif
+static const size_t winSize = MCLSHE_WIN_SIZE;
+static const size_t defaultTryNum = 2048;
+
+struct KeyCount {
+ uint32_t key;
+ int32_t count; // power
+ bool operator<(const KeyCount& rhs) const
+ {
+ return key < rhs.key;
+ }
+ bool isSame(const KeyCount& rhs) const
+ {
+ return key == rhs.key && count == rhs.count;
+ }
+};
+
+template<class G, bool = true>
+struct InterfaceForHashTable : G {
+ static G& castG(InterfaceForHashTable& x) { return static_cast<G&>(x); }
+ static const G& castG(const InterfaceForHashTable& x) { return static_cast<const G&>(x); }
+ void clear() { clear(castG(*this)); }
+ void normalize() { normalize(castG(*this)); }
+ static bool isOdd(const G& P) { return P.y.isOdd(); }
+ static bool isZero(const G& P) { return P.isZero(); }
+ static bool isSameX(const G& P, const G& Q) { return P.x == Q.x; }
+ static uint32_t getHash(const G& P) { return uint32_t(*P.x.getUnit()); }
+ static void clear(G& P) { P.clear(); }
+ static void normalize(G& P) { P.normalize(); }
+ static void dbl(G& Q, const G& P) { G::dbl(Q, P); }
+ static void neg(G& Q, const G& P) { G::neg(Q, P); }
+ static void add(G& R, const G& P, const G& Q) { G::add(R, P, Q); }
+ template<class INT>
+ static void mul(G& Q, const G& P, const INT& x) { G::mul(Q, P, x); }
+};
+
+/*
+ treat Fp12 as EC
+ unitary inverse of (a, b) = (a, -b)
+ then b.a.a or -b.a.a is odd
+*/
+template<class G>
+struct InterfaceForHashTable<G, false> : G {
+ static G& castG(InterfaceForHashTable& x) { return static_cast<G&>(x); }
+ static const G& castG(const InterfaceForHashTable& x) { return static_cast<const G&>(x); }
+ void clear() { clear(castG(*this)); }
+ void normalize() { normalize(castG(*this)); }
+ static bool isOdd(const G& x) { return x.b.a.a.isOdd(); }
+ static bool isZero(const G& x) { return x.isOne(); }
+ static bool isSameX(const G& x, const G& Q) { return x.a == Q.a; }
+ static uint32_t getHash(const G& x) { return uint32_t(*x.getFp0()->getUnit()); }
+ static void clear(G& x) { x = 1; }
+ static void normalize(G&) { }
+ static void dbl(G& y, const G& x) { G::sqr(y, x); }
+ static void neg(G& Q, const G& P) { G::unitaryInv(Q, P); }
+ static void add(G& z, const G& x, const G& y) { G::mul(z, x, y); }
+ template<class INT>
+ static void mul(G& z, const G& x, const INT& y) { G::pow(z, x, y); }
+};
+
+template<class G>
+char GtoChar();
+template<>char GtoChar<bn::G1>() { return '1'; }
+template<>char GtoChar<bn::G2>() { return '2'; }
+template<>char GtoChar<bn::GT>() { return 'T'; }
+
+/*
+ HashTable<EC, true> or HashTable<Fp12, false>
+*/
+template<class G, bool isEC = true>
+class HashTable {
+ typedef InterfaceForHashTable<G, isEC> I;
+ typedef std::vector<KeyCount> KeyCountVec;
+ KeyCountVec kcv_;
+ G P_;
+ mcl::fp::WindowMethod<I> wm_;
+ G nextP_;
+ G nextNegP_;
+ size_t tryNum_;
+ void setWindowMethod()
+ {
+ const size_t bitSize = G::BaseFp::BaseFp::getBitSize();
+ wm_.init(static_cast<const I&>(P_), bitSize, local::winSize);
+ }
+public:
+ HashTable() : tryNum_(local::defaultTryNum) {}
+ bool operator==(const HashTable& rhs) const
+ {
+ if (kcv_.size() != rhs.kcv_.size()) return false;
+ for (size_t i = 0; i < kcv_.size(); i++) {
+ if (!kcv_[i].isSame(rhs.kcv_[i])) return false;
+ }
+ return P_ == rhs.P_ && nextP_ == rhs.nextP_;
+ }
+ bool operator!=(const HashTable& rhs) const { return !operator==(rhs); }
+ /*
+ compute log_P(xP) for |x| <= hashSize * tryNum
+ */
+ void init(const G& P, size_t hashSize, size_t tryNum = local::defaultTryNum)
+ {
+ if (hashSize == 0) {
+ kcv_.clear();
+ return;
+ }
+ if (hashSize >= 0x80000000u) throw cybozu::Exception("HashTable:init:hashSize is too large");
+ P_ = P;
+ tryNum_ = tryNum;
+ kcv_.resize(hashSize);
+ G xP;
+ I::clear(xP);
+ for (int i = 1; i <= (int)kcv_.size(); i++) {
+ I::add(xP, xP, P_);
+ I::normalize(xP);
+ kcv_[i - 1].key = I::getHash(xP);
+ kcv_[i - 1].count = I::isOdd(xP) ? i : -i;
+ }
+ nextP_ = xP;
+ I::dbl(nextP_, nextP_);
+ I::add(nextP_, nextP_, P_); // nextP = (hashSize * 2 + 1)P
+ I::neg(nextNegP_, nextP_); // nextNegP = -nextP
+ /*
+ ascending order of abs(count) for same key
+ */
+ std::stable_sort(kcv_.begin(), kcv_.end());
+ setWindowMethod();
+ }
+ void setTryNum(size_t tryNum)
+ {
+ this->tryNum_ = tryNum;
+ }
+ /*
+ log_P(xP)
+ find range which has same hash of xP in kcv_,
+ and detect it
+ */
+ int basicLog(G xP, bool *ok = 0) const
+ {
+ if (ok) *ok = true;
+ if (I::isZero(xP)) return 0;
+ typedef KeyCountVec::const_iterator Iter;
+ KeyCount kc;
+ I::normalize(xP);
+ kc.key = I::getHash(xP);
+ kc.count = 0;
+ std::pair<Iter, Iter> p = std::equal_range(kcv_.begin(), kcv_.end(), kc);
+ G Q;
+ I::clear(Q);
+ int prev = 0;
+ /*
+ check range which has same hash
+ */
+ while (p.first != p.second) {
+ int count = p.first->count;
+ int abs_c = std::abs(count);
+ assert(abs_c >= prev); // assume ascending order
+ bool neg = count < 0;
+ G T;
+// I::mul(T, P, abs_c - prev);
+ mulByWindowMethod(T, abs_c - prev);
+ I::add(Q, Q, T);
+ I::normalize(Q);
+ if (I::isSameX(Q, xP)) {
+ bool QisOdd = I::isOdd(Q);
+ bool xPisOdd = I::isOdd(xP);
+ if (QisOdd ^ xPisOdd ^ neg) return -count;
+ return count;
+ }
+ prev = abs_c;
+ ++p.first;
+ }
+ if (ok) {
+ *ok = false;
+ return 0;
+ }
+ throw cybozu::Exception("HashTable:basicLog:not found");
+ }
+ /*
+ compute log_P(xP)
+ call basicLog at most 2 * tryNum
+ */
+ int64_t log(const G& xP) const
+ {
+ bool ok;
+ int c = basicLog(xP, &ok);
+ if (ok) {
+ return c;
+ }
+ G posP = xP, negP = xP;
+ int64_t posCenter = 0;
+ int64_t negCenter = 0;
+ int64_t next = (int64_t)kcv_.size() * 2 + 1;
+ for (size_t i = 1; i < tryNum_; i++) {
+ I::add(posP, posP, nextNegP_);
+ posCenter += next;
+ c = basicLog(posP, &ok);
+ if (ok) {
+ return posCenter + c;
+ }
+ I::add(negP, negP, nextP_);
+ negCenter -= next;
+ c = basicLog(negP, &ok);
+ if (ok) {
+ return negCenter + c;
+ }
+ }
+ throw cybozu::Exception("HashTable:log:not found");
+ }
+ /*
+ remark
+ tryNum is not saved.
+ */
+ template<class OutputStream>
+ void save(OutputStream& os) const
+ {
+ cybozu::save(os, BN::param.cp.curveType);
+ cybozu::writeChar(os, GtoChar<G>());
+ cybozu::save(os, kcv_.size());
+ cybozu::write(os, &kcv_[0], sizeof(kcv_[0]) * kcv_.size());
+ P_.save(os);
+ }
+ size_t save(void *buf, size_t maxBufSize) const
+ {
+ cybozu::MemoryOutputStream os(buf, maxBufSize);
+ save(os);
+ return os.getPos();
+ }
+ /*
+ remark
+ tryNum is not set
+ */
+ template<class InputStream>
+ void load(InputStream& is)
+ {
+ int curveType;
+ cybozu::load(curveType, is);
+ if (curveType != BN::param.cp.curveType) throw cybozu::Exception("HashTable:bad curveType") << curveType;
+ char c = 0;
+ if (!cybozu::readChar(&c, is) || c != GtoChar<G>()) throw cybozu::Exception("HashTable:bad c") << (int)c;
+ size_t kcvSize;
+ cybozu::load(kcvSize, is);
+ kcv_.resize(kcvSize);
+ cybozu::read(&kcv_[0], sizeof(kcv_[0]) * kcvSize, is);
+ P_.load(is);
+ I::mul(nextP_, P_, (kcvSize * 2) + 1);
+ I::neg(nextNegP_, nextP_);
+ setWindowMethod();
+ }
+ size_t load(const void *buf, size_t bufSize)
+ {
+ cybozu::MemoryInputStream is(buf, bufSize);
+ load(is);
+ return is.getPos();
+ }
+ const mcl::fp::WindowMethod<I>& getWM() const { return wm_; }
+ /*
+ mul(x, P, y);
+ */
+ template<class T>
+ void mulByWindowMethod(G& x, const T& y) const
+ {
+ wm_.mul(static_cast<I&>(x), y);
+ }
+};
+
+template<class G>
+int log(const G& P, const G& xP)
+{
+ if (xP.isZero()) return 0;
+ if (xP == P) return 1;
+ G negT;
+ G::neg(negT, P);
+ if (xP == negT) return -1;
+ G T = P;
+ for (int i = 2; i < 100; i++) {
+ T += P;
+ if (xP == T) return i;
+ G::neg(negT, T);
+ if (xP == negT) return -i;
+ }
+ throw cybozu::Exception("she:log:not found");
+}
+
+} // mcl::she::local
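Editor's note: HashTable is the internal DLP decoder used by the decryption routines below: init(P, hashSize) tabulates hashes of P, 2P, ..., hashSize*P, and log(xP) recovers x for |x| <= hashSize * tryNum by stepping in blocks of 2*hashSize + 1 points. A hedged usage sketch; G1, initPairing and hashAndMapToG1 come from the headers included above and are assumptions of this example.

// Hedged sketch: recovering a small exponent with the internal HashTable.
inline int64_t hashTableLogExample()
{
	using namespace mcl::bn;
	initPairing();
	G1 P;
	hashAndMapToG1(P, "0");
	mcl::she::local::HashTable<G1> tbl;
	tbl.init(P, 1024);    // decodes |x| <= 1024 * defaultTryNum
	G1 xP;
	G1::mul(xP, P, 98765);
	return tbl.log(xP);   // 98765
}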
+
+template<size_t dummyInpl = 0>
+struct SHET {
+ class SecretKey;
+ class PublicKey;
+ class PrecomputedPublicKey;
+ // additive HE
+ class CipherTextA; // = CipherTextG1 + CipherTextG2
+ class CipherTextGT; // multiplicative HE
+ class CipherText; // CipherTextA + CipherTextGT
+
+ static G1 P_;
+ static G2 Q_;
+ static GT ePQ_; // e(P, Q)
+ static std::vector<Fp6> Qcoeff_;
+ static local::HashTable<G1> PhashTbl_;
+ static local::HashTable<G2> QhashTbl_;
+ static mcl::fp::WindowMethod<G2> Qwm_;
+ typedef local::InterfaceForHashTable<GT, false> GTasEC;
+ static local::HashTable<GT, false> ePQhashTbl_;
+ static bool useDecG1ViaGT_;
+ static bool useDecG2ViaGT_;
+ static bool isG1only_;
+private:
+ template<class G>
+ class CipherTextAT : public fp::Serializable<CipherTextAT<G> > {
+ G S_, T_;
+ friend class SecretKey;
+ friend class PublicKey;
+ friend class PrecomputedPublicKey;
+ friend class CipherTextA;
+ friend class CipherTextGT;
+ bool isZero(const Fr& x) const
+ {
+ G xT;
+ G::mul(xT, T_, x);
+ return S_ == xT;
+ }
+ public:
+ const G& getS() const { return S_; }
+ const G& getT() const { return T_; }
+ void clear()
+ {
+ S_.clear();
+ T_.clear();
+ }
+ static void add(CipherTextAT& z, const CipherTextAT& x, const CipherTextAT& y)
+ {
+ /*
+ (S, T) + (S', T') = (S + S', T + T')
+ */
+ G::add(z.S_, x.S_, y.S_);
+ G::add(z.T_, x.T_, y.T_);
+ }
+ static void sub(CipherTextAT& z, const CipherTextAT& x, const CipherTextAT& y)
+ {
+ /*
+ (S, T) - (S', T') = (S - S', T - T')
+ */
+ G::sub(z.S_, x.S_, y.S_);
+ G::sub(z.T_, x.T_, y.T_);
+ }
+ // INT = int64_t or Fr
+ template<class INT>
+ static void mul(CipherTextAT& z, const CipherTextAT& x, const INT& y)
+ {
+ G::mul(z.S_, x.S_, y);
+ G::mul(z.T_, x.T_, y);
+ }
+ static void neg(CipherTextAT& y, const CipherTextAT& x)
+ {
+ G::neg(y.S_, x.S_);
+ G::neg(y.T_, x.T_);
+ }
+ void add(const CipherTextAT& c) { add(*this, *this, c); }
+ void sub(const CipherTextAT& c) { sub(*this, *this, c); }
+ template<class InputStream>
+ void load(bool *pb, InputStream& is, int ioMode = IoSerialize)
+ {
+ S_.load(pb, is, ioMode); if (!*pb) return;
+ T_.load(pb, is, ioMode);
+ }
+ template<class OutputStream>
+ void save(bool *pb, OutputStream& os, int ioMode = IoSerialize) const
+ {
+ const char sep = *fp::getIoSeparator(ioMode);
+ S_.save(pb, os, ioMode); if (!*pb) return;
+ if (sep) {
+ cybozu::writeChar(pb, os, sep);
+ if (!*pb) return;
+ }
+ T_.save(pb, os, ioMode);
+ }
+ template<class InputStream>
+ void load(InputStream& is, int ioMode = IoSerialize)
+ {
+ bool b;
+ load(&b, is, ioMode);
+ if (!b) throw cybozu::Exception("she:CipherTextA:load");
+ }
+ template<class OutputStream>
+ void save(OutputStream& os, int ioMode = IoSerialize) const
+ {
+ bool b;
+ save(&b, os, ioMode);
+ if (!b) throw cybozu::Exception("she:CipherTextA:save");
+ }
+ friend std::istream& operator>>(std::istream& is, CipherTextAT& self)
+ {
+ self.load(is, fp::detectIoMode(G::getIoMode(), is));
+ return is;
+ }
+ friend std::ostream& operator<<(std::ostream& os, const CipherTextAT& self)
+ {
+ self.save(os, fp::detectIoMode(G::getIoMode(), os));
+ return os;
+ }
+ bool operator==(const CipherTextAT& rhs) const
+ {
+ return S_ == rhs.S_ && T_ == rhs.T_;
+ }
+ bool operator!=(const CipherTextAT& rhs) const { return !operator==(rhs); }
+ };
+ /*
+ g1 = millerLoop(P1, Q)
+ g2 = millerLoop(P2, Q)
+ */
+ static void doubleMillerLoop(GT& g1, GT& g2, const G1& P1, const G1& P2, const G2& Q)
+ {
+#if 1
+ std::vector<Fp6> Qcoeff;
+ precomputeG2(Qcoeff, Q);
+ precomputedMillerLoop(g1, P1, Qcoeff);
+ precomputedMillerLoop(g2, P2, Qcoeff);
+#else
+ millerLoop(g1, P1, Q);
+ millerLoop(g2, P2, Q);
+#endif
+ }
+ static void finalExp4(GT out[4], const GT in[4])
+ {
+ for (int i = 0; i < 4; i++) {
+ finalExp(out[i], in[i]);
+ }
+ }
+ static void tensorProductML(GT g[4], const G1& S1, const G1& T1, const G2& S2, const G2& T2)
+ {
+ /*
+ (S1, T1) x (S2, T2) = (ML(S1, S2), ML(S1, T2), ML(T1, S2), ML(T1, T2))
+ */
+ doubleMillerLoop(g[0], g[2], S1, T1, S2);
+ doubleMillerLoop(g[1], g[3], S1, T1, T2);
+ }
+ static void tensorProduct(GT g[4], const G1& S1, const G1& T1, const G2& S2, const G2& T2)
+ {
+ /*
+ (S1, T1) x (S2, T2) = (e(S1, S2), e(S1, T2), e(T1, S2), e(T1, T2))
+ */
+ tensorProductML(g,S1, T1, S2,T2);
+ finalExp4(g, g);
+ }
+ template<class Tag, size_t n>
+ struct ZkpT : public fp::Serializable<ZkpT<Tag, n> > {
+ Fr d_[n];
+ template<class InputStream>
+ void load(bool *pb, InputStream& is, int ioMode = IoSerialize)
+ {
+ for (size_t i = 0; i < n; i++) {
+ d_[i].load(pb, is, ioMode); if (!*pb) return;
+ }
+ }
+ template<class OutputStream>
+ void save(bool *pb, OutputStream& os, int ioMode = IoSerialize) const
+ {
+ const char sep = *fp::getIoSeparator(ioMode);
+ d_[0].save(pb, os, ioMode); if (!*pb) return;
+ for (size_t i = 1; i < n; i++) {
+ if (sep) {
+ cybozu::writeChar(pb, os, sep);
+ if (!*pb) return;
+ }
+ d_[i].save(pb, os, ioMode);
+ }
+ }
+ template<class InputStream>
+ void load(InputStream& is, int ioMode = IoSerialize)
+ {
+ bool b;
+ load(&b, is, ioMode);
+ if (!b) throw cybozu::Exception("she:ZkpT:load");
+ }
+ template<class OutputStream>
+ void save(OutputStream& os, int ioMode = IoSerialize) const
+ {
+ bool b;
+ save(&b, os, ioMode);
+ if (!b) throw cybozu::Exception("she:ZkpT:save");
+ }
+ friend std::istream& operator>>(std::istream& is, ZkpT& self)
+ {
+ self.load(is, fp::detectIoMode(Fr::getIoMode(), is));
+ return is;
+ }
+ friend std::ostream& operator<<(std::ostream& os, const ZkpT& self)
+ {
+ self.save(os, fp::detectIoMode(Fr::getIoMode(), os));
+ return os;
+ }
+ };
+ struct ZkpBinTag;
+ struct ZkpEqTag; // d_[] = { c, sp, ss, sm }
+ struct ZkpBinEqTag; // d_[] = { d0, d1, sp0, sp1, ss, sp, sm }
+public:
+ /*
+ Zkp for m = 0 or 1
+ */
+ typedef ZkpT<ZkpBinTag, 4> ZkpBin;
+ /*
+ Zkp for decG1(c1) == decG2(c2)
+ */
+ typedef ZkpT<ZkpEqTag, 4> ZkpEq;
+ /*
+ Zkp for (m = 0 or 1) and decG1(c1) == decG2(c2)
+ */
+ typedef ZkpT<ZkpBinEqTag, 7> ZkpBinEq;
+
+ typedef CipherTextAT<G1> CipherTextG1;
+ typedef CipherTextAT<G2> CipherTextG2;
+
+ static void init(const mcl::CurveParam& cp = mcl::BN254, size_t hashSize = 1024, size_t tryNum = local::defaultTryNum)
+ {
+ initPairing(cp);
+ hashAndMapToG1(P_, "0");
+ hashAndMapToG2(Q_, "0");
+ pairing(ePQ_, P_, Q_);
+ precomputeG2(Qcoeff_, Q_);
+ setRangeForDLP(hashSize);
+ useDecG1ViaGT_ = false;
+ useDecG2ViaGT_ = false;
+ isG1only_ = false;
+ setTryNum(tryNum);
+ }
+ static void init(size_t hashSize, size_t tryNum = local::defaultTryNum)
+ {
+ init(mcl::BN254, hashSize, tryNum);
+ }
+ /*
+ standard lifted ElGamal encryption
+ */
+ static void initG1only(const mcl::EcParam& para, size_t hashSize = 1024, size_t tryNum = local::defaultTryNum)
+ {
+ Fp::init(para.p);
+ Fr::init(para.n);
+ G1::init(para.a, para.b);
+ const Fp x0(para.gx);
+ const Fp y0(para.gy);
+ P_.set(x0, y0);
+
+ setRangeForG1DLP(hashSize);
+ useDecG1ViaGT_ = false;
+ useDecG2ViaGT_ = false;
+ isG1only_ = true;
+ setTryNum(tryNum);
+ }
+ /*
+ set range for G1-DLP
+ */
+ static void setRangeForG1DLP(size_t hashSize)
+ {
+ PhashTbl_.init(P_, hashSize);
+ }
+ /*
+ set range for G2-DLP
+ */
+ static void setRangeForG2DLP(size_t hashSize)
+ {
+ QhashTbl_.init(Q_, hashSize);
+ }
+ /*
+ set range for GT-DLP
+ */
+ static void setRangeForGTDLP(size_t hashSize)
+ {
+ ePQhashTbl_.init(ePQ_, hashSize);
+ }
+ /*
+ set range for G1/G2/GT DLP
+ decode message m for |m| <= hashSize * tryNum
+ decode time = O(log(hashSize) * tryNum)
+ */
+ static void setRangeForDLP(size_t hashSize)
+ {
+ setRangeForG1DLP(hashSize);
+ setRangeForG2DLP(hashSize);
+ setRangeForGTDLP(hashSize);
+ }
+ static void setTryNum(size_t tryNum)
+ {
+ PhashTbl_.setTryNum(tryNum);
+ QhashTbl_.setTryNum(tryNum);
+ ePQhashTbl_.setTryNum(tryNum);
+ }
+ static void useDecG1ViaGT(bool use = true)
+ {
+ useDecG1ViaGT_ = use;
+ }
+ static void useDecG2ViaGT(bool use = true)
+ {
+ useDecG2ViaGT_ = use;
+ }
+ /*
+ only one element is necessary for each G1 and G2.
+ this is better than David Mandell Freeman's algorithm
+ */
+ class SecretKey : public fp::Serializable<SecretKey> {
+ Fr x_, y_;
+ void getPowOfePQ(GT& v, const CipherTextGT& c) const
+ {
+ /*
+ (s, t, u, v) := (e(S, S'), e(S, T'), e(T, S'), e(T, T'))
+ s v^(xy) / (t^y u^x) = s (v^x / t) ^ y / u^x
+ = e(P, Q)^(mm')
+ */
+ GT t, u;
+ GT::unitaryInv(t, c.g_[1]);
+ GT::unitaryInv(u, c.g_[2]);
+ GT::pow(v, c.g_[3], x_);
+ v *= t;
+ GT::pow(v, v, y_);
+ GT::pow(u, u, x_);
+ v *= u;
+ v *= c.g_[0];
+ }
+ public:
+ void setByCSPRNG()
+ {
+ x_.setRand();
+ if (!isG1only_) y_.setRand();
+ }
+ /*
+ set xP and yQ
+ */
+ void getPublicKey(PublicKey& pub) const
+ {
+ pub.set(x_, y_);
+ }
+#if 0
+ // log_x(y)
+ int log(const GT& x, const GT& y) const
+ {
+ if (y == 1) return 0;
+ if (y == x) return 1;
+ GT inv;
+ GT::unitaryInv(inv, x);
+ if (y == inv) return -1;
+ GT t = x;
+ for (int i = 2; i < 100; i++) {
+ t *= x;
+ if (y == t) return i;
+ GT::unitaryInv(inv, t);
+ if (y == inv) return -i;
+ }
+ throw cybozu::Exception("she:dec:log:not found");
+ }
+#endif
+ int64_t dec(const CipherTextG1& c) const
+ {
+ if (useDecG1ViaGT_) return decViaGT(c);
+ /*
+ S = mP + rxP
+ T = rP
+ R = S - xT = mP
+ */
+ G1 R;
+ G1::mul(R, c.T_, x_);
+ G1::sub(R, c.S_, R);
+ return PhashTbl_.log(R);
+ }
+ int64_t dec(const CipherTextG2& c) const
+ {
+ if (useDecG2ViaGT_) return decViaGT(c);
+ G2 R;
+ G2::mul(R, c.T_, y_);
+ G2::sub(R, c.S_, R);
+ return QhashTbl_.log(R);
+ }
+ int64_t dec(const CipherTextA& c) const
+ {
+ return dec(c.c1_);
+ }
+ int64_t dec(const CipherTextGT& c) const
+ {
+ GT v;
+ getPowOfePQ(v, c);
+ return ePQhashTbl_.log(v);
+// return log(g, v);
+ }
+ int64_t decViaGT(const CipherTextG1& c) const
+ {
+ G1 R;
+ G1::mul(R, c.T_, x_);
+ G1::sub(R, c.S_, R);
+ GT v;
+ pairing(v, R, Q_);
+ return ePQhashTbl_.log(v);
+ }
+ int64_t decViaGT(const CipherTextG2& c) const
+ {
+ G2 R;
+ G2::mul(R, c.T_, y_);
+ G2::sub(R, c.S_, R);
+ GT v;
+ pairing(v, P_, R);
+ return ePQhashTbl_.log(v);
+ }
+ int64_t dec(const CipherText& c) const
+ {
+ if (c.isMultiplied()) {
+ return dec(c.m_);
+ } else {
+ return dec(c.a_);
+ }
+ }
+ bool isZero(const CipherTextG1& c) const
+ {
+ return c.isZero(x_);
+ }
+ bool isZero(const CipherTextG2& c) const
+ {
+ return c.isZero(y_);
+ }
+ bool isZero(const CipherTextA& c) const
+ {
+ return c.c1_.isZero(x_);
+ }
+ bool isZero(const CipherTextGT& c) const
+ {
+ GT v;
+ getPowOfePQ(v, c);
+ return v.isOne();
+ }
+ bool isZero(const CipherText& c) const
+ {
+ if (c.isMultiplied()) {
+ return isZero(c.m_);
+ } else {
+ return isZero(c.a_);
+ }
+ }
+ template<class InputStream>
+ void load(bool *pb, InputStream& is, int ioMode = IoSerialize)
+ {
+ x_.load(pb, is, ioMode); if (!*pb) return;
+ if (!isG1only_) y_.load(pb, is, ioMode);
+ }
+ template<class OutputStream>
+ void save(bool *pb, OutputStream& os, int ioMode = IoSerialize) const
+ {
+ const char sep = *fp::getIoSeparator(ioMode);
+ x_.save(pb, os, ioMode); if (!*pb) return;
+ if (isG1only_) return;
+ if (sep) {
+ cybozu::writeChar(pb, os, sep);
+ if (!*pb) return;
+ }
+			y_.save(pb, os, ioMode);
+ }
+ template<class InputStream>
+ void load(InputStream& is, int ioMode = IoSerialize)
+ {
+ bool b;
+ load(&b, is, ioMode);
+ if (!b) throw cybozu::Exception("she:SecretKey:load");
+ }
+ template<class OutputStream>
+ void save(OutputStream& os, int ioMode = IoSerialize) const
+ {
+ bool b;
+ save(&b, os, ioMode);
+ if (!b) throw cybozu::Exception("she:SecretKey:save");
+ }
+ friend std::istream& operator>>(std::istream& is, SecretKey& self)
+ {
+ self.load(is, fp::detectIoMode(Fr::getIoMode(), is));
+ return is;
+ }
+ friend std::ostream& operator<<(std::ostream& os, const SecretKey& self)
+ {
+ self.save(os, fp::detectIoMode(Fr::getIoMode(), os));
+ return os;
+ }
+ bool operator==(const SecretKey& rhs) const
+ {
+ return x_ == rhs.x_ && (isG1only_ || y_ == rhs.y_);
+ }
+ bool operator!=(const SecretKey& rhs) const { return !operator==(rhs); }
+ };
+private:
+ /*
+		simple ElGamal encryption for G1 and G2
+ (S, T) = (m P + r xP, rP)
+ Pmul.mul(X, a) // X = a P
+ xPmul.mul(X, a) // X = a xP
+ use *encRand if encRand is not null
+ */
+ template<class G, class INT, class MulG, class I>
+ static void ElGamalEnc(G& S, G& T, const INT& m, const mcl::fp::WindowMethod<I>& Pmul, const MulG& xPmul, const Fr *encRand = 0)
+ {
+ Fr r;
+ if (encRand) {
+ r = *encRand;
+ } else {
+ r.setRand();
+ }
+ Pmul.mul(static_cast<I&>(T), r);
+ xPmul.mul(S, r); // S = r xP
+ if (m == 0) return;
+ G C;
+ Pmul.mul(static_cast<I&>(C), m);
+ S += C;
+ }
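+	/*
+		sketch of why decryption works (see SecretKey::dec above): with randomness r,
+		  T = r P, S = r xP + m P,
+		so S - x T = m P, and m is recovered from the small-range DLP table PhashTbl_.
+	*/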
+ /*
+ https://github.com/herumi/mcl/blob/master/misc/she/nizkp.pdf
+
+ encRand is a random value used for ElGamalEnc()
+ d[1-m] ; rand
+ s[1-m] ; rand
+ R[0][1-m] = s[1-m] P - d[1-m] T
+ R[1][1-m] = s[1-m] xP - d[1-m] (S - (1-m) P)
+ r ; rand
+ R[0][m] = r P
+ R[1][m] = r xP
+ c = H(S, T, R[0][0], R[0][1], R[1][0], R[1][1])
+ d[m] = c - d[1-m]
+ s[m] = r + d[m] encRand
+ */
+ template<class G, class I, class MulG>
+ static void makeZkpBin(ZkpBin& zkp, const G& S, const G& T, const Fr& encRand, const G& P, int m, const mcl::fp::WindowMethod<I>& Pmul, const MulG& xPmul)
+ {
+ if (m != 0 && m != 1) throw cybozu::Exception("makeZkpBin:bad m") << m;
+ Fr *s = &zkp.d_[0];
+ Fr *d = &zkp.d_[2];
+ G R[2][2];
+ d[1-m].setRand();
+ s[1-m].setRand();
+ G T1, T2;
+ Pmul.mul(static_cast<I&>(T1), s[1-m]); // T1 = s[1-m] P
+ G::mul(T2, T, d[1-m]);
+ G::sub(R[0][1-m], T1, T2); // s[1-m] P - d[1-m]T
+ xPmul.mul(T1, s[1-m]); // T1 = s[1-m] xP
+ if (m == 0) {
+ G::sub(T2, S, P);
+ G::mul(T2, T2, d[1-m]);
+ } else {
+ G::mul(T2, S, d[1-m]);
+ }
+ G::sub(R[1][1-m], T1, T2); // s[1-m] xP - d[1-m](S - (1-m) P)
+ Fr r;
+ r.setRand();
+ Pmul.mul(static_cast<I&>(R[0][m]), r); // R[0][m] = r P
+ xPmul.mul(R[1][m], r); // R[1][m] = r xP
+ char buf[sizeof(G) * 2];
+ cybozu::MemoryOutputStream os(buf, sizeof(buf));
+ S.save(os);
+ T.save(os);
+ R[0][0].save(os);
+ R[0][1].save(os);
+ R[1][0].save(os);
+ R[1][1].save(os);
+ Fr c;
+ c.setHashOf(buf, os.getPos());
+ d[m] = c - d[1-m];
+ s[m] = r + d[m] * encRand;
+ }
+ /*
+ R[0][i] = s[i] P - d[i] T ; i = 0,1
+ R[1][0] = s[0] xP - d[0] S
+ R[1][1] = s[1] xP - d[1](S - P)
+ c = H(S, T, R[0][0], R[0][1], R[1][0], R[1][1])
+ c == d[0] + d[1]
+ */
+ template<class G, class I, class MulG>
+ static bool verifyZkpBin(const G& S, const G& T, const G& P, const ZkpBin& zkp, const mcl::fp::WindowMethod<I>& Pmul, const MulG& xPmul)
+ {
+ const Fr *s = &zkp.d_[0];
+ const Fr *d = &zkp.d_[2];
+ G R[2][2];
+ G T1, T2;
+ for (int i = 0; i < 2; i++) {
+ Pmul.mul(static_cast<I&>(T1), s[i]); // T1 = s[i] P
+ G::mul(T2, T, d[i]);
+ G::sub(R[0][i], T1, T2);
+ }
+ xPmul.mul(T1, s[0]); // T1 = s[0] xP
+ G::mul(T2, S, d[0]);
+ G::sub(R[1][0], T1, T2);
+		xPmul.mul(T1, s[1]); // T1 = s[1] xP
+ G::sub(T2, S, P);
+ G::mul(T2, T2, d[1]);
+ G::sub(R[1][1], T1, T2);
+ char buf[sizeof(G) * 2];
+ cybozu::MemoryOutputStream os(buf, sizeof(buf));
+ S.save(os);
+ T.save(os);
+ R[0][0].save(os);
+ R[0][1].save(os);
+ R[1][0].save(os);
+ R[1][1].save(os);
+ Fr c;
+ c.setHashOf(buf, os.getPos());
+ return c == d[0] + d[1];
+ }
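+	/*
+		completeness sketch for ZkpBin (under the construction in makeZkpBin):
+		for the honest branch m, s[m] = r + d[m] encRand, T = encRand P and
+		S = m P + encRand xP, hence
+		  s[m] P  - d[m] T         = r P  = R[0][m]
+		  s[m] xP - d[m] (S - m P) = r xP = R[1][m]
+		and d[0] + d[1] = c by construction, so the recomputed hash matches.
+	*/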
+ /*
+		encRand1, encRand2 are random values used for ElGamalEnc()
+ */
+ template<class G1, class G2, class INT, class I1, class I2, class MulG1, class MulG2>
+ static void makeZkpEq(ZkpEq& zkp, G1& S1, G1& T1, G2& S2, G2& T2, const INT& m, const mcl::fp::WindowMethod<I1>& Pmul, const MulG1& xPmul, const mcl::fp::WindowMethod<I2>& Qmul, const MulG2& yQmul)
+ {
+ Fr p, s;
+ p.setRand();
+ s.setRand();
+ ElGamalEnc(S1, T1, m, Pmul, xPmul, &p);
+ ElGamalEnc(S2, T2, m, Qmul, yQmul, &s);
+ Fr rp, rs, rm;
+ rp.setRand();
+ rs.setRand();
+ rm.setRand();
+ G1 R1, R2;
+ G2 R3, R4;
+ ElGamalEnc(R1, R2, rm, Pmul, xPmul, &rp);
+ ElGamalEnc(R3, R4, rm, Qmul, yQmul, &rs);
+ char buf[sizeof(G1) * 4 + sizeof(G2) * 4];
+ cybozu::MemoryOutputStream os(buf, sizeof(buf));
+ S1.save(os);
+ T1.save(os);
+ S2.save(os);
+ T2.save(os);
+ R1.save(os);
+ R2.save(os);
+ R3.save(os);
+ R4.save(os);
+ Fr& c = zkp.d_[0];
+ Fr& sp = zkp.d_[1];
+ Fr& ss = zkp.d_[2];
+ Fr& sm = zkp.d_[3];
+ c.setHashOf(buf, os.getPos());
+ Fr::mul(sp, c, p);
+ sp += rp;
+ Fr::mul(ss, c, s);
+ ss += rs;
+ Fr::mul(sm, c, m);
+ sm += rm;
+ }
+ template<class G1, class G2, class I1, class I2, class MulG1, class MulG2>
+ static bool verifyZkpEq(const ZkpEq& zkp, const G1& S1, const G1& T1, const G2& S2, const G2& T2, const mcl::fp::WindowMethod<I1>& Pmul, const MulG1& xPmul, const mcl::fp::WindowMethod<I2>& Qmul, const MulG2& yQmul)
+ {
+ const Fr& c = zkp.d_[0];
+ const Fr& sp = zkp.d_[1];
+ const Fr& ss = zkp.d_[2];
+ const Fr& sm = zkp.d_[3];
+ G1 R1, R2, X1;
+ G2 R3, R4, X2;
+ ElGamalEnc(R1, R2, sm, Pmul, xPmul, &sp);
+ G1::mul(X1, S1, c);
+ R1 -= X1;
+ G1::mul(X1, T1, c);
+ R2 -= X1;
+ ElGamalEnc(R3, R4, sm, Qmul, yQmul, &ss);
+ G2::mul(X2, S2, c);
+ R3 -= X2;
+ G2::mul(X2, T2, c);
+ R4 -= X2;
+ char buf[sizeof(G1) * 4 + sizeof(G2) * 4];
+ cybozu::MemoryOutputStream os(buf, sizeof(buf));
+ S1.save(os);
+ T1.save(os);
+ S2.save(os);
+ T2.save(os);
+ R1.save(os);
+ R2.save(os);
+ R3.save(os);
+ R4.save(os);
+ Fr c2;
+ c2.setHashOf(buf, os.getPos());
+ return c == c2;
+ }
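+	/*
+		completeness sketch for ZkpEq, writing Enc(msg; rand) for ElGamalEnc:
+		Enc is linear in (msg, rand), so with sp = c p + rp, ss = c s + rs, sm = c m + rm,
+		  Enc(sm; sp) - c (S1, T1) = Enc(rm; rp) = (R1, R2)
+		  Enc(sm; ss) - c (S2, T2) = Enc(rm; rs) = (R3, R4)
+		and the recomputed hash equals c.
+	*/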
+ /*
+		encRand1, encRand2 are random values used for ElGamalEnc()
+ */
+ template<class G1, class G2, class I1, class I2, class MulG1, class MulG2>
+ static void makeZkpBinEq(ZkpBinEq& zkp, G1& S1, G1& T1, G2& S2, G2& T2, int m, const mcl::fp::WindowMethod<I1>& Pmul, const MulG1& xPmul, const mcl::fp::WindowMethod<I2>& Qmul, const MulG2& yQmul)
+ {
+ if (m != 0 && m != 1) throw cybozu::Exception("makeZkpBinEq:bad m") << m;
+ Fr *d = &zkp.d_[0];
+ Fr *spm = &zkp.d_[2];
+ Fr& ss = zkp.d_[4];
+ Fr& sp = zkp.d_[5];
+ Fr& sm = zkp.d_[6];
+ Fr p, s;
+ p.setRand();
+ s.setRand();
+ ElGamalEnc(S1, T1, m, Pmul, xPmul, &p);
+ ElGamalEnc(S2, T2, m, Qmul, yQmul, &s);
+ d[1-m].setRand();
+ spm[1-m].setRand();
+ G1 R1[2], R2[2], X1;
+ Pmul.mul(static_cast<I1&>(R1[1-m]), spm[1-m]);
+ G1::mul(X1, T1, d[1-m]);
+ R1[1-m] -= X1;
+ if (m == 0) {
+ G1::sub(X1, S1, P_);
+ G1::mul(X1, X1, d[1-m]);
+ } else {
+ G1::mul(X1, S1, d[1-m]);
+ }
+ xPmul.mul(R2[1-m], spm[1-m]);
+ R2[1-m] -= X1;
+ Fr rpm, rp, rs, rm;
+ rpm.setRand();
+ rp.setRand();
+ rs.setRand();
+ rm.setRand();
+ ElGamalEnc(R2[m], R1[m], 0, Pmul, xPmul, &rpm);
+ G1 R3, R4;
+ G2 R5, R6;
+ ElGamalEnc(R4, R3, rm, Pmul, xPmul, &rp);
+ ElGamalEnc(R6, R5, rm, Qmul, yQmul, &rs);
+ char buf[sizeof(Fr) * 12];
+ cybozu::MemoryOutputStream os(buf, sizeof(buf));
+ S1.save(os);
+ T1.save(os);
+ R1[0].save(os);
+ R1[1].save(os);
+ R2[0].save(os);
+ R2[1].save(os);
+ R3.save(os);
+ R4.save(os);
+ R5.save(os);
+ R6.save(os);
+ Fr c;
+ c.setHashOf(buf, os.getPos());
+ Fr::sub(d[m], c, d[1-m]);
+ Fr::mul(spm[m], d[m], p);
+ spm[m] += rpm;
+ Fr::mul(sp, c, p);
+ sp += rp;
+ Fr::mul(ss, c, s);
+ ss += rs;
+ Fr::mul(sm, c, m);
+ sm += rm;
+ }
+ template<class G1, class G2, class I1, class I2, class MulG1, class MulG2>
+ static bool verifyZkpBinEq(const ZkpBinEq& zkp, const G1& S1, const G1& T1, const G2& S2, const G2& T2, const mcl::fp::WindowMethod<I1>& Pmul, const MulG1& xPmul, const mcl::fp::WindowMethod<I2>& Qmul, const MulG2& yQmul)
+ {
+ const Fr *d = &zkp.d_[0];
+ const Fr *spm = &zkp.d_[2];
+ const Fr& ss = zkp.d_[4];
+ const Fr& sp = zkp.d_[5];
+ const Fr& sm = zkp.d_[6];
+ G1 R1[2], R2[2], X1;
+ for (int i = 0; i < 2; i++) {
+ Pmul.mul(static_cast<I1&>(R1[i]), spm[i]);
+ G1::mul(X1, T1, d[i]);
+ R1[i] -= X1;
+ }
+ xPmul.mul(R2[0], spm[0]);
+ G1::mul(X1, S1, d[0]);
+ R2[0] -= X1;
+ xPmul.mul(R2[1], spm[1]);
+ G1::sub(X1, S1, P_);
+ G1::mul(X1, X1, d[1]);
+ R2[1] -= X1;
+ Fr c;
+ Fr::add(c, d[0], d[1]);
+ G1 R3, R4;
+ G2 R5, R6;
+ ElGamalEnc(R4, R3, sm, Pmul, xPmul, &sp);
+ G1::mul(X1, T1, c);
+ R3 -= X1;
+ G1::mul(X1, S1, c);
+ R4 -= X1;
+ ElGamalEnc(R6, R5, sm, Qmul, yQmul, &ss);
+ G2 X2;
+ G2::mul(X2, T2, c);
+ R5 -= X2;
+ G2::mul(X2, S2, c);
+ R6 -= X2;
+ char buf[sizeof(Fr) * 12];
+ cybozu::MemoryOutputStream os(buf, sizeof(buf));
+ S1.save(os);
+ T1.save(os);
+ R1[0].save(os);
+ R1[1].save(os);
+ R2[0].save(os);
+ R2[1].save(os);
+ R3.save(os);
+ R4.save(os);
+ R5.save(os);
+ R6.save(os);
+ Fr c2;
+ c2.setHashOf(buf, os.getPos());
+ return c == c2;
+ }
+ /*
+ common method for PublicKey and PrecomputedPublicKey
+ */
+ template<class T>
+ struct PublicKeyMethod {
+ /*
+			INT may be int64_t or Fr;
+			note that dec() always returns int64_t.
+ */
+ template<class INT>
+ void enc(CipherTextG1& c, const INT& m) const
+ {
+ static_cast<const T&>(*this).encG1(c, m);
+ }
+ template<class INT>
+ void enc(CipherTextG2& c, const INT& m) const
+ {
+ static_cast<const T&>(*this).encG2(c, m);
+ }
+ template<class INT>
+ void enc(CipherTextA& c, const INT& m) const
+ {
+ enc(c.c1_, m);
+ enc(c.c2_, m);
+ }
+ template<class INT>
+ void enc(CipherTextGT& c, const INT& m) const
+ {
+ static_cast<const T&>(*this).encGT(c, m);
+ }
+ template<class INT>
+ void enc(CipherText& c, const INT& m, bool multiplied = false) const
+ {
+ c.isMultiplied_ = multiplied;
+ if (multiplied) {
+ enc(c.m_, m);
+ } else {
+ enc(c.a_, m);
+ }
+ }
+ /*
+ reRand method is for circuit privacy
+ */
+ template<class CT>
+ void reRandT(CT& c) const
+ {
+ CT c0;
+ static_cast<const T&>(*this).enc(c0, 0);
+ CT::add(c, c, c0);
+ }
+ void reRand(CipherTextG1& c) const { reRandT(c); }
+ void reRand(CipherTextG2& c) const { reRandT(c); }
+ void reRand(CipherTextGT& c) const { reRandT(c); }
+ void reRand(CipherText& c) const
+ {
+ if (c.isMultiplied()) {
+ reRandT(c.m_);
+ } else {
+ reRandT(c.a_);
+ }
+ }
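+		/*
+			sketch: reRand adds a fresh encryption of zero, so the decrypted value is
+			unchanged while the ciphertext components are re-randomized, e.g.
+			  pub.reRand(c); // sec.dec(c) is unchanged, c itself is refreshed
+		*/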
+ /*
+ convert from CipherTextG1 to CipherTextGT
+ */
+ void convert(CipherTextGT& cm, const CipherTextG1& c1) const
+ {
+ /*
+ Enc(1) = (S, T) = (Q + r yQ, rQ) = (Q, 0) if r = 0
+ cm = c1 * (Q, 0) = (S, T) * (Q, 0) = (e(S, Q), 1, e(T, Q), 1)
+ */
+ precomputedMillerLoop(cm.g_[0], c1.getS(), Qcoeff_);
+ finalExp(cm.g_[0], cm.g_[0]);
+ precomputedMillerLoop(cm.g_[2], c1.getT(), Qcoeff_);
+ finalExp(cm.g_[2], cm.g_[2]);
+
+ cm.g_[1] = cm.g_[3] = 1;
+ }
+ /*
+ convert from CipherTextG2 to CipherTextGT
+ */
+ void convert(CipherTextGT& cm, const CipherTextG2& c2) const
+ {
+ /*
+ Enc(1) = (S, T) = (P + r xP, rP) = (P, 0) if r = 0
+ cm = (P, 0) * c2 = (e(P, S), e(P, T), 1, 1)
+ */
+ pairing(cm.g_[0], P_, c2.getS());
+ pairing(cm.g_[1], P_, c2.getT());
+ cm.g_[2] = cm.g_[3] = 1;
+ }
+ void convert(CipherTextGT& cm, const CipherTextA& ca) const
+ {
+ convert(cm, ca.c1_);
+ }
+ void convert(CipherText& cm, const CipherText& ca) const
+ {
+ if (ca.isMultiplied()) throw cybozu::Exception("she:PublicKey:convertCipherText:already isMultiplied");
+ cm.isMultiplied_ = true;
+ convert(cm.m_, ca.a_);
+ }
+ };
+public:
+ class PublicKey : public fp::Serializable<PublicKey,
+ PublicKeyMethod<PublicKey> > {
+ G1 xP_;
+ G2 yQ_;
+ friend class SecretKey;
+ friend class PrecomputedPublicKey;
+ template<class T>
+ friend struct PublicKeyMethod;
+ template<class G>
+ struct MulG {
+ const G& base;
+ MulG(const G& base) : base(base) {}
+ template<class INT>
+ void mul(G& out, const INT& m) const
+ {
+ G::mul(out, base, m);
+ }
+ };
+ void set(const Fr& x, const Fr& y)
+ {
+ G1::mul(xP_, P_, x);
+ if (!isG1only_) G2::mul(yQ_, Q_, y);
+ }
+ template<class INT>
+ void encG1(CipherTextG1& c, const INT& m) const
+ {
+ const MulG<G1> xPmul(xP_);
+ ElGamalEnc(c.S_, c.T_, m, PhashTbl_.getWM(), xPmul);
+ }
+ template<class INT>
+ void encG2(CipherTextG2& c, const INT& m) const
+ {
+ const MulG<G2> yQmul(yQ_);
+ ElGamalEnc(c.S_, c.T_, m, QhashTbl_.getWM(), yQmul);
+ }
+public:
+ void encWithZkpBin(CipherTextG1& c, ZkpBin& zkp, int m) const
+ {
+ Fr encRand;
+ encRand.setRand();
+ const MulG<G1> xPmul(xP_);
+ ElGamalEnc(c.S_, c.T_, m, PhashTbl_.getWM(), xPmul, &encRand);
+ makeZkpBin(zkp, c.S_, c.T_, encRand, P_, m, PhashTbl_.getWM(), xPmul);
+ }
+ void encWithZkpBin(CipherTextG2& c, ZkpBin& zkp, int m) const
+ {
+ Fr encRand;
+ encRand.setRand();
+ const MulG<G2> yQmul(yQ_);
+ ElGamalEnc(c.S_, c.T_, m, QhashTbl_.getWM(), yQmul, &encRand);
+ makeZkpBin(zkp, c.S_, c.T_, encRand, Q_, m, QhashTbl_.getWM(), yQmul);
+ }
+ bool verify(const CipherTextG1& c, const ZkpBin& zkp) const
+ {
+ const MulG<G1> xPmul(xP_);
+ return verifyZkpBin(c.S_, c.T_, P_, zkp, PhashTbl_.getWM(), xPmul);
+ }
+ bool verify(const CipherTextG2& c, const ZkpBin& zkp) const
+ {
+ const MulG<G2> yQmul(yQ_);
+ return verifyZkpBin(c.S_, c.T_, Q_, zkp, QhashTbl_.getWM(), yQmul);
+ }
+ template<class INT>
+ void encWithZkpEq(CipherTextG1& c1, CipherTextG2& c2, ZkpEq& zkp, const INT& m) const
+ {
+ const MulG<G1> xPmul(xP_);
+ const MulG<G2> yQmul(yQ_);
+ makeZkpEq(zkp, c1.S_, c1.T_, c2.S_, c2.T_, m, PhashTbl_.getWM(), xPmul, QhashTbl_.getWM(), yQmul);
+ }
+ bool verify(const CipherTextG1& c1, const CipherTextG2& c2, const ZkpEq& zkp) const
+ {
+ const MulG<G1> xPmul(xP_);
+ const MulG<G2> yQmul(yQ_);
+ return verifyZkpEq(zkp, c1.S_, c1.T_, c2.S_, c2.T_, PhashTbl_.getWM(), xPmul, QhashTbl_.getWM(), yQmul);
+ }
+ void encWithZkpBinEq(CipherTextG1& c1, CipherTextG2& c2, ZkpBinEq& zkp, int m) const
+ {
+ const MulG<G1> xPmul(xP_);
+ const MulG<G2> yQmul(yQ_);
+ makeZkpBinEq(zkp, c1.S_, c1.T_, c2.S_, c2.T_, m, PhashTbl_.getWM(), xPmul, QhashTbl_.getWM(), yQmul);
+ }
+ bool verify(const CipherTextG1& c1, const CipherTextG2& c2, const ZkpBinEq& zkp) const
+ {
+ const MulG<G1> xPmul(xP_);
+ const MulG<G2> yQmul(yQ_);
+ return verifyZkpBinEq(zkp, c1.S_, c1.T_, c2.S_, c2.T_, PhashTbl_.getWM(), xPmul, QhashTbl_.getWM(), yQmul);
+ }
+ template<class INT>
+ void encGT(CipherTextGT& c, const INT& m) const
+ {
+ /*
+ (s, t, u, v) = ((e^x)^a (e^y)^b (e^-xy)^c e^m, e^b, e^a, e^c)
+ s = e(a xP + m P, Q)e(b P - c xP, yQ)
+ */
+ Fr ra, rb, rc;
+ ra.setRand();
+ rb.setRand();
+ rc.setRand();
+ GT e;
+
+ G1 P1, P2;
+ G1::mul(P1, xP_, ra);
+ if (m) {
+// G1::mul(P2, P, m);
+ PhashTbl_.mulByWindowMethod(P2, m);
+ P1 += P2;
+ }
+// millerLoop(c.g[0], P1, Q);
+ precomputedMillerLoop(c.g_[0], P1, Qcoeff_);
+// G1::mul(P1, P, rb);
+ PhashTbl_.mulByWindowMethod(P1, rb);
+ G1::mul(P2, xP_, rc);
+ P1 -= P2;
+ millerLoop(e, P1, yQ_);
+ c.g_[0] *= e;
+ finalExp(c.g_[0], c.g_[0]);
+#if 1
+ ePQhashTbl_.mulByWindowMethod(c.g_[1], rb);
+ ePQhashTbl_.mulByWindowMethod(c.g_[2], ra);
+ ePQhashTbl_.mulByWindowMethod(c.g_[3], rc);
+#else
+ GT::pow(c.g_[1], ePQ_, rb);
+ GT::pow(c.g_[2], ePQ_, ra);
+ GT::pow(c.g_[3], ePQ_, rc);
+#endif
+ }
+ public:
+ template<class InputStream>
+ void load(bool *pb, InputStream& is, int ioMode = IoSerialize)
+ {
+ xP_.load(pb, is, ioMode); if (!*pb) return;
+ if (!isG1only_) yQ_.load(pb, is, ioMode);
+ }
+ template<class OutputStream>
+ void save(bool *pb, OutputStream& os, int ioMode = IoSerialize) const
+ {
+ const char sep = *fp::getIoSeparator(ioMode);
+ xP_.save(pb, os, ioMode); if (!*pb) return;
+ if (isG1only_) return;
+ if (sep) {
+ cybozu::writeChar(pb, os, sep);
+ if (!*pb) return;
+ }
+ yQ_.save(pb, os, ioMode);
+ }
+ template<class InputStream>
+ void load(InputStream& is, int ioMode = IoSerialize)
+ {
+ bool b;
+ load(&b, is, ioMode);
+ if (!b) throw cybozu::Exception("she:PublicKey:load");
+ }
+ template<class OutputStream>
+ void save(OutputStream& os, int ioMode = IoSerialize) const
+ {
+ bool b;
+ save(&b, os, ioMode);
+ if (!b) throw cybozu::Exception("she:PublicKey:save");
+ }
+ friend std::istream& operator>>(std::istream& is, PublicKey& self)
+ {
+ self.load(is, fp::detectIoMode(G1::getIoMode(), is));
+ return is;
+ }
+ friend std::ostream& operator<<(std::ostream& os, const PublicKey& self)
+ {
+ self.save(os, fp::detectIoMode(G1::getIoMode(), os));
+ return os;
+ }
+ bool operator==(const PublicKey& rhs) const
+ {
+ return xP_ == rhs.xP_ && (isG1only_ || yQ_ == rhs.yQ_);
+ }
+ bool operator!=(const PublicKey& rhs) const { return !operator==(rhs); }
+ };
+
+ class PrecomputedPublicKey : public fp::Serializable<PrecomputedPublicKey,
+ PublicKeyMethod<PrecomputedPublicKey> > {
+ typedef local::InterfaceForHashTable<GT, false> GTasEC;
+ typedef mcl::fp::WindowMethod<GTasEC> GTwin;
+ template<class T>
+ friend struct PublicKeyMethod;
+ GT exPQ_;
+ GT eyPQ_;
+ GT exyPQ_;
+ GTwin exPQwm_;
+ GTwin eyPQwm_;
+ GTwin exyPQwm_;
+ mcl::fp::WindowMethod<G1> xPwm_;
+ mcl::fp::WindowMethod<G2> yQwm_;
+ template<class T>
+ void mulByWindowMethod(GT& x, const GTwin& wm, const T& y) const
+ {
+ wm.mul(static_cast<GTasEC&>(x), y);
+ }
+ template<class INT>
+ void encG1(CipherTextG1& c, const INT& m) const
+ {
+ ElGamalEnc(c.S_, c.T_, m, PhashTbl_.getWM(), xPwm_);
+ }
+ template<class INT>
+ void encG2(CipherTextG2& c, const INT& m) const
+ {
+ ElGamalEnc(c.S_, c.T_, m, QhashTbl_.getWM(), yQwm_);
+ }
+ template<class INT>
+ void encGT(CipherTextGT& c, const INT& m) const
+ {
+ /*
+ (s, t, u, v) = (e^m e^(xya), (e^x)^b, (e^y)^c, e^(b + c - a))
+ */
+ Fr ra, rb, rc;
+ ra.setRand();
+ rb.setRand();
+ rc.setRand();
+ GT t;
+ ePQhashTbl_.mulByWindowMethod(c.g_[0], m); // e^m
+ mulByWindowMethod(t, exyPQwm_, ra); // (e^xy)^a
+ c.g_[0] *= t;
+ mulByWindowMethod(c.g_[1], exPQwm_, rb); // (e^x)^b
+ mulByWindowMethod(c.g_[2], eyPQwm_, rc); // (e^y)^c
+ rb += rc;
+ rb -= ra;
+ ePQhashTbl_.mulByWindowMethod(c.g_[3], rb);
+ }
+ public:
+ void init(const PublicKey& pub)
+ {
+ const size_t bitSize = Fr::getBitSize();
+ xPwm_.init(pub.xP_, bitSize, local::winSize);
+ if (isG1only_) return;
+ yQwm_.init(pub.yQ_, bitSize, local::winSize);
+ pairing(exPQ_, pub.xP_, Q_);
+ pairing(eyPQ_, P_, pub.yQ_);
+ pairing(exyPQ_, pub.xP_, pub.yQ_);
+ exPQwm_.init(static_cast<const GTasEC&>(exPQ_), bitSize, local::winSize);
+ eyPQwm_.init(static_cast<const GTasEC&>(eyPQ_), bitSize, local::winSize);
+ exyPQwm_.init(static_cast<const GTasEC&>(exyPQ_), bitSize, local::winSize);
+ }
+ void encWithZkpBin(CipherTextG1& c, ZkpBin& zkp, int m) const
+ {
+ Fr encRand;
+ encRand.setRand();
+ ElGamalEnc(c.S_, c.T_, m, PhashTbl_.getWM(), xPwm_, &encRand);
+ makeZkpBin(zkp, c.S_, c.T_, encRand, P_, m, PhashTbl_.getWM(), xPwm_);
+ }
+ void encWithZkpBin(CipherTextG2& c, ZkpBin& zkp, int m) const
+ {
+ Fr encRand;
+ encRand.setRand();
+ ElGamalEnc(c.S_, c.T_, m, QhashTbl_.getWM(), yQwm_, &encRand);
+ makeZkpBin(zkp, c.S_, c.T_, encRand, Q_, m, QhashTbl_.getWM(), yQwm_);
+ }
+ bool verify(const CipherTextG1& c, const ZkpBin& zkp) const
+ {
+ return verifyZkpBin(c.S_, c.T_, P_, zkp, PhashTbl_.getWM(), xPwm_);
+ }
+ bool verify(const CipherTextG2& c, const ZkpBin& zkp) const
+ {
+ return verifyZkpBin(c.S_, c.T_, Q_, zkp, QhashTbl_.getWM(), yQwm_);
+ }
+ template<class INT>
+ void encWithZkpEq(CipherTextG1& c1, CipherTextG2& c2, ZkpEq& zkp, const INT& m) const
+ {
+ makeZkpEq(zkp, c1.S_, c1.T_, c2.S_, c2.T_, m, PhashTbl_.getWM(), xPwm_, QhashTbl_.getWM(), yQwm_);
+ }
+ bool verify(const CipherTextG1& c1, const CipherTextG2& c2, const ZkpEq& zkp) const
+ {
+ return verifyZkpEq(zkp, c1.S_, c1.T_, c2.S_, c2.T_, PhashTbl_.getWM(), xPwm_, QhashTbl_.getWM(), yQwm_);
+ }
+ void encWithZkpBinEq(CipherTextG1& c1, CipherTextG2& c2, ZkpBinEq& zkp, int m) const
+ {
+ makeZkpBinEq(zkp, c1.S_, c1.T_, c2.S_, c2.T_, m, PhashTbl_.getWM(), xPwm_, QhashTbl_.getWM(), yQwm_);
+ }
+ bool verify(const CipherTextG1& c1, const CipherTextG2& c2, const ZkpBinEq& zkp) const
+ {
+ return verifyZkpBinEq(zkp, c1.S_, c1.T_, c2.S_, c2.T_, PhashTbl_.getWM(), xPwm_, QhashTbl_.getWM(), yQwm_);
+ }
+ };
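+	/*
+		usage sketch (illustrative): PrecomputedPublicKey caches window tables for a
+		fixed public key, which speeds up repeated encryption with that key:
+		  PrecomputedPublicKey ppub;
+		  ppub.init(pub);           // pub is a PublicKey
+		  CipherTextG1 c;
+		  ppub.enc(c, 123);         // same interface as PublicKey (via PublicKeyMethod)
+	*/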
+ class CipherTextA {
+ CipherTextG1 c1_;
+ CipherTextG2 c2_;
+ friend class SecretKey;
+ friend class PublicKey;
+ friend class CipherTextGT;
+ template<class T>
+ friend struct PublicKeyMethod;
+ public:
+ void clear()
+ {
+ c1_.clear();
+ c2_.clear();
+ }
+ static void add(CipherTextA& z, const CipherTextA& x, const CipherTextA& y)
+ {
+ CipherTextG1::add(z.c1_, x.c1_, y.c1_);
+ CipherTextG2::add(z.c2_, x.c2_, y.c2_);
+ }
+ static void sub(CipherTextA& z, const CipherTextA& x, const CipherTextA& y)
+ {
+ CipherTextG1::sub(z.c1_, x.c1_, y.c1_);
+ CipherTextG2::sub(z.c2_, x.c2_, y.c2_);
+ }
+ static void mul(CipherTextA& z, const CipherTextA& x, int64_t y)
+ {
+ CipherTextG1::mul(z.c1_, x.c1_, y);
+ CipherTextG2::mul(z.c2_, x.c2_, y);
+ }
+ static void neg(CipherTextA& y, const CipherTextA& x)
+ {
+ CipherTextG1::neg(y.c1_, x.c1_);
+ CipherTextG2::neg(y.c2_, x.c2_);
+ }
+ void add(const CipherTextA& c) { add(*this, *this, c); }
+ void sub(const CipherTextA& c) { sub(*this, *this, c); }
+ template<class InputStream>
+ void load(bool *pb, InputStream& is, int ioMode = IoSerialize)
+ {
+ c1_.load(pb, is, ioMode); if (!*pb) return;
+ c2_.load(pb, is, ioMode);
+ }
+ template<class OutputStream>
+ void save(bool *pb, OutputStream& os, int ioMode = IoSerialize) const
+ {
+ const char sep = *fp::getIoSeparator(ioMode);
+ c1_.save(pb, os, ioMode); if (!*pb) return;
+ if (sep) {
+ cybozu::writeChar(pb, os, sep);
+ if (!*pb) return;
+ }
+ c2_.save(pb, os, ioMode);
+ }
+ template<class InputStream>
+ void load(InputStream& is, int ioMode = IoSerialize)
+ {
+ bool b;
+ load(&b, is, ioMode);
+ if (!b) throw cybozu::Exception("she:CipherTextA:load");
+ }
+ template<class OutputStream>
+ void save(OutputStream& os, int ioMode = IoSerialize) const
+ {
+ bool b;
+ save(&b, os, ioMode);
+ if (!b) throw cybozu::Exception("she:CipherTextA:save");
+ }
+ friend std::istream& operator>>(std::istream& is, CipherTextA& self)
+ {
+ self.load(is, fp::detectIoMode(G1::getIoMode(), is));
+ return is;
+ }
+ friend std::ostream& operator<<(std::ostream& os, const CipherTextA& self)
+ {
+ self.save(os, fp::detectIoMode(G1::getIoMode(), os));
+ return os;
+ }
+ bool operator==(const CipherTextA& rhs) const
+ {
+ return c1_ == rhs.c1_ && c2_ == rhs.c2_;
+ }
+ bool operator!=(const CipherTextA& rhs) const { return !operator==(rhs); }
+ };
+
+ class CipherTextGT : public fp::Serializable<CipherTextGT> {
+ GT g_[4];
+ friend class SecretKey;
+ friend class PublicKey;
+ friend class PrecomputedPublicKey;
+ friend class CipherTextA;
+ template<class T>
+ friend struct PublicKeyMethod;
+ public:
+ void clear()
+ {
+ for (int i = 0; i < 4; i++) {
+ g_[i].setOne();
+ }
+ }
+ static void neg(CipherTextGT& y, const CipherTextGT& x)
+ {
+ for (int i = 0; i < 4; i++) {
+ GT::unitaryInv(y.g_[i], x.g_[i]);
+ }
+ }
+ static void add(CipherTextGT& z, const CipherTextGT& x, const CipherTextGT& y)
+ {
+ /*
+ (g[i]) + (g'[i]) = (g[i] * g'[i])
+ */
+ for (int i = 0; i < 4; i++) {
+ GT::mul(z.g_[i], x.g_[i], y.g_[i]);
+ }
+ }
+ static void sub(CipherTextGT& z, const CipherTextGT& x, const CipherTextGT& y)
+ {
+ /*
+ (g[i]) - (g'[i]) = (g[i] / g'[i])
+ */
+ GT t;
+ for (size_t i = 0; i < 4; i++) {
+ GT::unitaryInv(t, y.g_[i]);
+ GT::mul(z.g_[i], x.g_[i], t);
+ }
+ }
+ static void mulML(CipherTextGT& z, const CipherTextG1& x, const CipherTextG2& y)
+ {
+ /*
+ (S1, T1) * (S2, T2) = (ML(S1, S2), ML(S1, T2), ML(T1, S2), ML(T1, T2))
+ */
+ tensorProductML(z.g_, x.S_, x.T_, y.S_, y.T_);
+ }
+ static void finalExp(CipherTextGT& y, const CipherTextGT& x)
+ {
+ finalExp4(y.g_, x.g_);
+ }
+ /*
+ mul(x, y) = mulML(x, y) + finalExp
+ mul(c11, c12) + mul(c21, c22)
+ = finalExp(mulML(c11, c12) + mulML(c21, c22)),
+ then one finalExp can be reduced
+ */
+ static void mul(CipherTextGT& z, const CipherTextG1& x, const CipherTextG2& y)
+ {
+ /*
+ (S1, T1) * (S2, T2) = (e(S1, S2), e(S1, T2), e(T1, S2), e(T1, T2))
+ */
+ mulML(z, x, y);
+ finalExp(z, z);
+ }
+ static void mul(CipherTextGT& z, const CipherTextA& x, const CipherTextA& y)
+ {
+ mul(z, x.c1_, y.c2_);
+ }
+ static void mul(CipherTextGT& z, const CipherTextGT& x, int64_t y)
+ {
+ for (int i = 0; i < 4; i++) {
+ GT::pow(z.g_[i], x.g_[i], y);
+ }
+ }
+ void add(const CipherTextGT& c) { add(*this, *this, c); }
+ void sub(const CipherTextGT& c) { sub(*this, *this, c); }
+ template<class InputStream>
+ void load(bool *pb, InputStream& is, int ioMode = IoSerialize)
+ {
+ for (int i = 0; i < 4; i++) {
+ g_[i].load(pb, is, ioMode); if (!*pb) return;
+ }
+ }
+ template<class OutputStream>
+ void save(bool *pb, OutputStream& os, int ioMode = IoSerialize) const
+ {
+ const char sep = *fp::getIoSeparator(ioMode);
+ g_[0].save(pb, os, ioMode); if (!*pb) return;
+ for (int i = 1; i < 4; i++) {
+ if (sep) {
+ cybozu::writeChar(pb, os, sep);
+ if (!*pb) return;
+ }
+ g_[i].save(pb, os, ioMode); if (!*pb) return;
+ }
+ }
+ template<class InputStream>
+ void load(InputStream& is, int ioMode = IoSerialize)
+ {
+ bool b;
+ load(&b, is, ioMode);
+ if (!b) throw cybozu::Exception("she:CipherTextGT:load");
+ }
+ template<class OutputStream>
+ void save(OutputStream& os, int ioMode = IoSerialize) const
+ {
+ bool b;
+ save(&b, os, ioMode);
+ if (!b) throw cybozu::Exception("she:CipherTextGT:save");
+ }
+ friend std::istream& operator>>(std::istream& is, CipherTextGT& self)
+ {
+ self.load(is, fp::detectIoMode(G1::getIoMode(), is));
+ return is;
+ }
+ friend std::ostream& operator<<(std::ostream& os, const CipherTextGT& self)
+ {
+ self.save(os, fp::detectIoMode(G1::getIoMode(), os));
+ return os;
+ }
+ bool operator==(const CipherTextGT& rhs) const
+ {
+ for (int i = 0; i < 4; i++) {
+ if (g_[i] != rhs.g_[i]) return false;
+ }
+ return true;
+ }
+ bool operator!=(const CipherTextGT& rhs) const { return !operator==(rhs); }
+ };
+
+ class CipherText : public fp::Serializable<CipherText> {
+ bool isMultiplied_;
+ CipherTextA a_;
+ CipherTextGT m_;
+ friend class SecretKey;
+ friend class PublicKey;
+ template<class T>
+ friend struct PublicKeyMethod;
+ public:
+ CipherText() : isMultiplied_(false) {}
+ void clearAsAdded()
+ {
+ isMultiplied_ = false;
+ a_.clear();
+ }
+ void clearAsMultiplied()
+ {
+ isMultiplied_ = true;
+ m_.clear();
+ }
+ bool isMultiplied() const { return isMultiplied_; }
+ static void add(CipherText& z, const CipherText& x, const CipherText& y)
+ {
+ if (x.isMultiplied() && y.isMultiplied()) {
+ z.isMultiplied_ = true;
+ CipherTextGT::add(z.m_, x.m_, y.m_);
+ return;
+ }
+ if (!x.isMultiplied() && !y.isMultiplied()) {
+ z.isMultiplied_ = false;
+ CipherTextA::add(z.a_, x.a_, y.a_);
+ return;
+ }
+ throw cybozu::Exception("she:CipherText:add:mixed CipherText");
+ }
+ static void sub(CipherText& z, const CipherText& x, const CipherText& y)
+ {
+ if (x.isMultiplied() && y.isMultiplied()) {
+ z.isMultiplied_ = true;
+ CipherTextGT::sub(z.m_, x.m_, y.m_);
+ return;
+ }
+ if (!x.isMultiplied() && !y.isMultiplied()) {
+ z.isMultiplied_ = false;
+ CipherTextA::sub(z.a_, x.a_, y.a_);
+ return;
+ }
+ throw cybozu::Exception("she:CipherText:sub:mixed CipherText");
+ }
+ static void neg(CipherText& y, const CipherText& x)
+ {
+ if (x.isMultiplied()) {
+ y.isMultiplied_ = true;
+ CipherTextGT::neg(y.m_, x.m_);
+ return;
+ } else {
+ y.isMultiplied_ = false;
+ CipherTextA::neg(y.a_, x.a_);
+ return;
+ }
+ }
+ static void mul(CipherText& z, const CipherText& x, const CipherText& y)
+ {
+ if (x.isMultiplied() || y.isMultiplied()) {
+ throw cybozu::Exception("she:CipherText:mul:mixed CipherText");
+ }
+ z.isMultiplied_ = true;
+ CipherTextGT::mul(z.m_, x.a_, y.a_);
+ }
+ static void mul(CipherText& z, const CipherText& x, int64_t y)
+ {
+ if (x.isMultiplied()) {
+ CipherTextGT::mul(z.m_, x.m_, y);
+ } else {
+ CipherTextA::mul(z.a_, x.a_, y);
+ }
+ }
+ void add(const CipherText& c) { add(*this, *this, c); }
+ void sub(const CipherText& c) { sub(*this, *this, c); }
+ void mul(const CipherText& c) { mul(*this, *this, c); }
+	template<class InputStream>
+	void load(bool *pb, InputStream& is, int ioMode = IoSerialize)
+	{
+		char c;
+		if (!cybozu::readChar(&c, is)) {
+			*pb = false;
+			return;
+		}
+		if (c == '0' || c == '1') {
+			isMultiplied_ = c == '0';
+		} else {
+			*pb = false;
+			return;
+		}
+		if (isMultiplied()) {
+			m_.load(pb, is, ioMode);
+		} else {
+			a_.load(pb, is, ioMode);
+		}
+	}
+	template<class OutputStream>
+	void save(bool *pb, OutputStream& os, int ioMode = IoSerialize) const
+	{
+		cybozu::writeChar(pb, os, isMultiplied_ ? '0' : '1'); if (!*pb) return;
+		if (isMultiplied()) {
+			m_.save(pb, os, ioMode);
+		} else {
+			a_.save(pb, os, ioMode);
+		}
+	}
+ template<class InputStream>
+ void load(InputStream& is, int ioMode = IoSerialize)
+ {
+ bool b;
+ load(&b, is, ioMode);
+ if (!b) throw cybozu::Exception("she:CipherText:load");
+ }
+ template<class OutputStream>
+ void save(OutputStream& os, int ioMode = IoSerialize) const
+ {
+ bool b;
+ save(&b, os, ioMode);
+ if (!b) throw cybozu::Exception("she:CipherText:save");
+ }
+ friend std::istream& operator>>(std::istream& is, CipherText& self)
+ {
+ self.load(is, fp::detectIoMode(G1::getIoMode(), is));
+ return is;
+ }
+ friend std::ostream& operator<<(std::ostream& os, const CipherText& self)
+ {
+ self.save(os, fp::detectIoMode(G1::getIoMode(), os));
+ return os;
+ }
+	bool operator==(const CipherText& rhs) const
+ {
+ if (isMultiplied() != rhs.isMultiplied()) return false;
+ if (isMultiplied()) {
+ return m_ == rhs.m_;
+ }
+ return a_ == rhs.a_;
+ }
+	bool operator!=(const CipherText& rhs) const { return !operator==(rhs); }
+ };
+};
+typedef local::HashTable<G1> HashTableG1;
+typedef local::HashTable<G2> HashTableG2;
+typedef local::HashTable<Fp12, false> HashTableGT;
+
+template<size_t dummyInpl> G1 SHET<dummyInpl>::P_;
+template<size_t dummyInpl> G2 SHET<dummyInpl>::Q_;
+template<size_t dummyInpl> Fp12 SHET<dummyInpl>::ePQ_;
+template<size_t dummyInpl> std::vector<Fp6> SHET<dummyInpl>::Qcoeff_;
+template<size_t dummyInpl> HashTableG1 SHET<dummyInpl>::PhashTbl_;
+template<size_t dummyInpl> HashTableG2 SHET<dummyInpl>::QhashTbl_;
+template<size_t dummyInpl> HashTableGT SHET<dummyInpl>::ePQhashTbl_;
+template<size_t dummyInpl> bool SHET<dummyInpl>::useDecG1ViaGT_;
+template<size_t dummyInpl> bool SHET<dummyInpl>::useDecG2ViaGT_;
+template<size_t dummyInpl> bool SHET<dummyInpl>::isG1only_;
+typedef mcl::she::SHET<> SHE;
+typedef SHE::SecretKey SecretKey;
+typedef SHE::PublicKey PublicKey;
+typedef SHE::PrecomputedPublicKey PrecomputedPublicKey;
+typedef SHE::CipherTextG1 CipherTextG1;
+typedef SHE::CipherTextG2 CipherTextG2;
+typedef SHE::CipherTextGT CipherTextGT;
+typedef SHE::CipherTextA CipherTextA;
+typedef CipherTextGT CipherTextGM; // old class
+typedef SHE::CipherText CipherText;
+typedef SHE::ZkpBin ZkpBin;
+typedef SHE::ZkpEq ZkpEq;
+typedef SHE::ZkpBinEq ZkpBinEq;
+
+inline void init(const mcl::CurveParam& cp = mcl::BN254, size_t hashSize = 1024, size_t tryNum = local::defaultTryNum)
+{
+ SHE::init(cp, hashSize, tryNum);
+}
+inline void initG1only(const mcl::EcParam& para, size_t hashSize = 1024, size_t tryNum = local::defaultTryNum)
+{
+ SHE::initG1only(para, hashSize, tryNum);
+}
+inline void init(size_t hashSize, size_t tryNum = local::defaultTryNum) { SHE::init(hashSize, tryNum); }
+inline void setRangeForG1DLP(size_t hashSize) { SHE::setRangeForG1DLP(hashSize); }
+inline void setRangeForG2DLP(size_t hashSize) { SHE::setRangeForG2DLP(hashSize); }
+inline void setRangeForGTDLP(size_t hashSize) { SHE::setRangeForGTDLP(hashSize); }
+inline void setRangeForDLP(size_t hashSize) { SHE::setRangeForDLP(hashSize); }
+inline void setTryNum(size_t tryNum) { SHE::setTryNum(tryNum); }
+inline void useDecG1ViaGT(bool use = true) { SHE::useDecG1ViaGT(use); }
+inline void useDecG2ViaGT(bool use = true) { SHE::useDecG2ViaGT(use); }
+inline HashTableG1& getHashTableG1() { return SHE::PhashTbl_; }
+inline HashTableG2& getHashTableG2() { return SHE::QhashTbl_; }
+inline HashTableGT& getHashTableGT() { return SHE::ePQhashTbl_; }
+
+inline void add(CipherTextG1& z, const CipherTextG1& x, const CipherTextG1& y) { CipherTextG1::add(z, x, y); }
+inline void add(CipherTextG2& z, const CipherTextG2& x, const CipherTextG2& y) { CipherTextG2::add(z, x, y); }
+inline void add(CipherTextGT& z, const CipherTextGT& x, const CipherTextGT& y) { CipherTextGT::add(z, x, y); }
+inline void add(CipherText& z, const CipherText& x, const CipherText& y) { CipherText::add(z, x, y); }
+
+inline void sub(CipherTextG1& z, const CipherTextG1& x, const CipherTextG1& y) { CipherTextG1::sub(z, x, y); }
+inline void sub(CipherTextG2& z, const CipherTextG2& x, const CipherTextG2& y) { CipherTextG2::sub(z, x, y); }
+inline void sub(CipherTextGT& z, const CipherTextGT& x, const CipherTextGT& y) { CipherTextGT::sub(z, x, y); }
+inline void sub(CipherText& z, const CipherText& x, const CipherText& y) { CipherText::sub(z, x, y); }
+
+inline void neg(CipherTextG1& y, const CipherTextG1& x) { CipherTextG1::neg(y, x); }
+inline void neg(CipherTextG2& y, const CipherTextG2& x) { CipherTextG2::neg(y, x); }
+inline void neg(CipherTextGT& y, const CipherTextGT& x) { CipherTextGT::neg(y, x); }
+inline void neg(CipherText& y, const CipherText& x) { CipherText::neg(y, x); }
+
+template<class INT>
+inline void mul(CipherTextG1& z, const CipherTextG1& x, const INT& y) { CipherTextG1::mul(z, x, y); }
+template<class INT>
+inline void mul(CipherTextG2& z, const CipherTextG2& x, const INT& y) { CipherTextG2::mul(z, x, y); }
+template<class INT>
+inline void mul(CipherTextGT& z, const CipherTextGT& x, const INT& y) { CipherTextGT::mul(z, x, y); }
+template<class INT>
+inline void mul(CipherText& z, const CipherText& x, const INT& y) { CipherText::mul(z, x, y); }
+
+inline void mul(CipherTextGT& z, const CipherTextG1& x, const CipherTextG2& y) { CipherTextGT::mul(z, x, y); }
+inline void mul(CipherText& z, const CipherText& x, const CipherText& y) { CipherText::mul(z, x, y); }
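+
+/*
+	homomorphic-operation sketch (illustrative; sec/pub as in the usage sketch above):
+	  CipherTextG1 c1; pub.enc(c1, 3);
+	  CipherTextG2 c2; pub.enc(c2, 4);
+	  CipherTextGT ct;
+	  mul(ct, c1, c2);          // ct now encrypts 3 * 4 = 12
+	  add(ct, ct, ct);          // ct now encrypts 24
+	  int64_t m = sec.dec(ct);  // m == 24
+*/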
+
+} } // mcl::she
+
diff --git a/vendor/github.com/tangerine-network/mcl/include/mcl/util.hpp b/vendor/github.com/tangerine-network/mcl/include/mcl/util.hpp
new file mode 100644
index 000000000..edef971cb
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/mcl/util.hpp
@@ -0,0 +1,285 @@
+#pragma once
+/**
+ @file
+ @brief functions for T[]
+ @author MITSUNARI Shigeo(@herumi)
+ @license modified new BSD license
+ http://opensource.org/licenses/BSD-3-Clause
+*/
+#include <cybozu/bit_operation.hpp>
+
+#ifdef _MSC_VER
+ #pragma warning(push)
+ #pragma warning(disable : 4456)
+ #pragma warning(disable : 4459)
+#endif
+
+namespace mcl { namespace fp {
+
+template<class T>
+T abs_(T x) { return x < 0 ? -x : x; }
+
+template<class T>
+T min_(T x, T y) { return x < y ? x : y; }
+
+template<class T>
+T max_(T x, T y) { return x < y ? y : x; }
+
+template<class T>
+void swap_(T& x, T& y)
+{
+ T t;
+ t = x;
+ x = y;
+ y = t;
+}
+
+
+/*
+ get pp such that p * pp = -1 mod M,
+	where p is prime and M = 1 << 64 (or 1 << 32).
+ @param pLow [in] p mod M
+*/
+template<class T>
+T getMontgomeryCoeff(T pLow)
+{
+ T ret = 0;
+ T t = 0;
+ T x = 1;
+ for (size_t i = 0; i < sizeof(T) * 8; i++) {
+ if ((t & 1) == 0) {
+ t += pLow;
+ ret += x;
+ }
+ t >>= 1;
+ x <<= 1;
+ }
+ return ret;
+}
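+/*
+	worked example (illustrative, with an 8-bit unit for readability):
+	pLow = 23 gives pp = 89, since 23 * 89 = 2047 = 2048 - 1 ≡ -1 (mod 256).
+*/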
+
+template<class T>
+int compareArray(const T* x, const T* y, size_t n)
+{
+ for (size_t i = n - 1; i != size_t(-1); i--) {
+ T a = x[i];
+ T b = y[i];
+ if (a != b) return a < b ? -1 : 1;
+ }
+ return 0;
+}
+
+template<class T>
+bool isLessArray(const T *x, const T* y, size_t n)
+{
+ for (size_t i = n - 1; i != size_t(-1); i--) {
+ T a = x[i];
+ T b = y[i];
+ if (a != b) return a < b;
+ }
+ return false;
+}
+
+template<class T>
+bool isGreaterOrEqualArray(const T *x, const T* y, size_t n)
+{
+ return !isLessArray(x, y, n);
+}
+
+template<class T>
+bool isLessOrEqualArray(const T *x, const T* y, size_t n)
+{
+ for (size_t i = n - 1; i != size_t(-1); i--) {
+ T a = x[i];
+ T b = y[i];
+ if (a != b) return a < b;
+ }
+ return true;
+}
+
+template<class T>
+bool isGreaterArray(const T *x, const T* y, size_t n)
+{
+ return !isLessOrEqualArray(x, y, n);
+}
+
+template<class T>
+bool isEqualArray(const T* x, const T* y, size_t n)
+{
+ for (size_t i = 0; i < n; i++) {
+ if (x[i] != y[i]) return false;
+ }
+ return true;
+}
+
+template<class T>
+bool isZeroArray(const T *x, size_t n)
+{
+ for (size_t i = 0; i < n; i++) {
+ if (x[i]) return false;
+ }
+ return true;
+}
+
+template<class T>
+void clearArray(T *x, size_t begin, size_t end)
+{
+ for (size_t i = begin; i < end; i++) x[i] = 0;
+}
+
+template<class T>
+void copyArray(T *y, const T *x, size_t n)
+{
+ for (size_t i = 0; i < n; i++) y[i] = x[i];
+}
+
+/*
+ x &= (1 << bitSize) - 1
+*/
+template<class T>
+void maskArray(T *x, size_t n, size_t bitSize)
+{
+ const size_t TbitSize = sizeof(T) * 8;
+ assert(bitSize <= TbitSize * n);
+ const size_t q = bitSize / TbitSize;
+ const size_t r = bitSize % TbitSize;
+ if (r) {
+ x[q] &= (T(1) << r) - 1;
+ clearArray(x, q + 1, n);
+ } else {
+ clearArray(x, q, n);
+ }
+}
+
+/*
+ return non zero size of x[]
+ return 1 if x[] == 0
+*/
+template<class T>
+size_t getNonZeroArraySize(const T *x, size_t n)
+{
+ assert(n > 0);
+ while (n > 0) {
+ if (x[n - 1]) return n;
+ n--;
+ }
+ return 1;
+}
+
+/*
+ @param out [inout] : set element of G ; out = x^y[]
+ @param x [in]
+ @param y [in]
+ @param n [in] size of y[]
+ @param limitBit [in] const time version if the value is positive
+ @note &out != x and out = the unit element of G
+*/
+template<class G, class Mul, class Sqr, class T>
+void powGeneric(G& out, const G& x, const T *y, size_t n, const Mul& mul, const Sqr& sqr, void normalize(G&, const G&), size_t limitBit = 0)
+{
+ assert(&out != &x);
+ G tbl[4]; // tbl = { discard, x, x^2, x^3 }
+ T v;
+ bool constTime = limitBit > 0;
+ int maxBit = 0;
+ int m = 0;
+ while (n > 0) {
+ if (y[n - 1]) break;
+ n--;
+ }
+ if (n == 0) {
+ if (constTime) goto DummyLoop;
+ return;
+ }
+ if (!constTime && n == 1) {
+ switch (y[0]) {
+ case 1:
+ out = x;
+ return;
+ case 2:
+ sqr(out, x);
+ return;
+ case 3:
+ sqr(out, x);
+ mul(out, out, x);
+ return;
+ case 4:
+ sqr(out, x);
+ sqr(out, out);
+ return;
+ }
+ }
+ if (normalize != 0) {
+ normalize(tbl[0], x);
+ } else {
+ tbl[0] = x;
+ }
+ tbl[1] = tbl[0];
+ sqr(tbl[2], tbl[1]);
+ if (normalize != 0) { normalize(tbl[2], tbl[2]); }
+ mul(tbl[3], tbl[2], x);
+ if (normalize != 0) { normalize(tbl[3], tbl[3]); }
+ v = y[n - 1];
+ assert(v);
+ m = cybozu::bsr<T>(v);
+ maxBit = int(m + (n - 1) * sizeof(T) * 8);
+ if (m & 1) {
+ m--;
+ T idx = (v >> m) & 3;
+ assert(idx > 0);
+ out = tbl[idx];
+ } else {
+ out = x;
+ }
+ for (int i = (int)n - 1; i >= 0; i--) {
+ T v = y[i];
+ for (int j = m - 2; j >= 0; j -= 2) {
+ sqr(out, out);
+ sqr(out, out);
+ T idx = (v >> j) & 3;
+ if (idx == 0) {
+ if (constTime) mul(tbl[0], tbl[0], tbl[1]);
+ } else {
+ mul(out, out, tbl[idx]);
+ }
+ }
+ m = (int)sizeof(T) * 8;
+ }
+DummyLoop:
+ if (!constTime) return;
+ G D = out;
+ for (size_t i = maxBit + 1; i < limitBit; i += 2) {
+ sqr(D, D);
+ sqr(D, D);
+ mul(D, D, tbl[1]);
+ }
+}
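+/*
+	example of the 2-bit windowing above (non-constant-time path):
+	y = 13 = 0b1101 -> the top window 0b11 selects tbl[3] = x^3, then two squarings
+	and a multiply by tbl[0b01] = x give (x^3)^4 * x = x^13.
+*/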
+
+/*
+ shortcut of multiplication by Unit
+*/
+template<class T, class U>
+bool mulSmallUnit(T& z, const T& x, U y)
+{
+ switch (y) {
+ case 0: z.clear(); break;
+ case 1: z = x; break;
+ case 2: T::add(z, x, x); break;
+ case 3: { T t; T::add(t, x, x); T::add(z, t, x); break; }
+ case 4: T::add(z, x, x); T::add(z, z, z); break;
+ case 5: { T t; T::add(t, x, x); T::add(t, t, t); T::add(z, t, x); break; }
+ case 6: { T t; T::add(t, x, x); T::add(t, t, x); T::add(z, t, t); break; }
+ case 7: { T t; T::add(t, x, x); T::add(t, t, t); T::add(t, t, t); T::sub(z, t, x); break; }
+ case 8: T::add(z, x, x); T::add(z, z, z); T::add(z, z, z); break;
+ case 9: { T t; T::add(t, x, x); T::add(t, t, t); T::add(t, t, t); T::add(z, t, x); break; }
+ case 10: { T t; T::add(t, x, x); T::add(t, t, t); T::add(t, t, x); T::add(z, t, t); break; }
+ default:
+ return false;
+ }
+ return true;
+}
+
+} } // mcl::fp
+
+#ifdef _MSC_VER
+ #pragma warning(pop)
+#endif
diff --git a/vendor/github.com/tangerine-network/mcl/include/mcl/vint.hpp b/vendor/github.com/tangerine-network/mcl/include/mcl/vint.hpp
new file mode 100644
index 000000000..b087688c3
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/mcl/vint.hpp
@@ -0,0 +1,1987 @@
+#pragma once
+/**
+ emulate mpz_class
+*/
+#include <cybozu/exception.hpp>
+#include <cybozu/bit_operation.hpp>
+#include <cybozu/xorshift.hpp>
+#include <assert.h>
+#ifndef CYBOZU_DONT_USE_STRING
+#include <iostream>
+#endif
+#include <mcl/array.hpp>
+#include <mcl/util.hpp>
+#include <mcl/randgen.hpp>
+#include <mcl/conversion.hpp>
+
+#if defined(__EMSCRIPTEN__) || defined(__wasm__)
+ #define MCL_VINT_64BIT_PORTABLE
+ #define MCL_VINT_FIXED_BUFFER
+#endif
+#ifndef MCL_MAX_BIT_SIZE
+ #define MCL_MAX_BIT_SIZE 384
+#endif
+
+#ifndef MCL_SIZEOF_UNIT
+ #if defined(CYBOZU_OS_BIT) && (CYBOZU_OS_BIT == 32)
+ #define MCL_SIZEOF_UNIT 4
+ #else
+ #define MCL_SIZEOF_UNIT 8
+ #endif
+#endif
+
+namespace mcl {
+
+namespace vint {
+
+#if MCL_SIZEOF_UNIT == 8
+typedef uint64_t Unit;
+#else
+typedef uint32_t Unit;
+#endif
+
+template<class T>
+void dump(const T *x, size_t n, const char *msg = "")
+{
+ const size_t is4byteUnit = sizeof(*x) == 4;
+ if (msg) printf("%s ", msg);
+ for (size_t i = 0; i < n; i++) {
+ if (is4byteUnit) {
+ printf("%08x", (uint32_t)x[n - 1 - i]);
+ } else {
+ printf("%016llx", (unsigned long long)x[n - 1 - i]);
+ }
+ }
+ printf("\n");
+}
+
+inline uint64_t make64(uint32_t H, uint32_t L)
+{
+ return ((uint64_t)H << 32) | L;
+}
+
+inline void split64(uint32_t *H, uint32_t *L, uint64_t x)
+{
+ *H = uint32_t(x >> 32);
+ *L = uint32_t(x);
+}
+
+/*
+ [H:L] <= x * y
+ @return L
+*/
+inline uint32_t mulUnit(uint32_t *pH, uint32_t x, uint32_t y)
+{
+ uint64_t t = uint64_t(x) * y;
+ uint32_t L;
+ split64(pH, &L, t);
+ return L;
+}
+#if MCL_SIZEOF_UNIT == 8
+inline uint64_t mulUnit(uint64_t *pH, uint64_t x, uint64_t y)
+{
+#ifdef MCL_VINT_64BIT_PORTABLE
+ uint32_t a = uint32_t(x >> 32);
+ uint32_t b = uint32_t(x);
+ uint32_t c = uint32_t(y >> 32);
+ uint32_t d = uint32_t(y);
+
+ uint64_t ad = uint64_t(d) * a;
+ uint64_t bd = uint64_t(d) * b;
+ uint64_t L = uint32_t(bd);
+ ad += bd >> 32; // [ad:L]
+
+ uint64_t ac = uint64_t(c) * a;
+ uint64_t bc = uint64_t(c) * b;
+ uint64_t H = uint32_t(bc);
+ ac += bc >> 32; // [ac:H]
+ /*
+ adL
+ acH
+ */
+ uint64_t t = (ac << 32) | H;
+ ac >>= 32;
+ H = t + ad;
+ if (H < t) {
+ ac++;
+ }
+ /*
+ ac:H:L
+ */
+ L |= H << 32;
+ H = (ac << 32) | uint32_t(H >> 32);
+ *pH = H;
+ return L;
+#elif defined(_WIN64) && !defined(__INTEL_COMPILER)
+ return _umul128(x, y, pH);
+#else
+ typedef __attribute__((mode(TI))) unsigned int uint128;
+ uint128 t = uint128(x) * y;
+ *pH = uint64_t(t >> 64);
+ return uint64_t(t);
+#endif
+}
+#endif
+
+template<class T>
+void divNM(T *q, size_t qn, T *r, const T *x, size_t xn, const T *y, size_t yn);
+
+/*
+ q = [H:L] / y
+ r = [H:L] % y
+ return q
+*/
+inline uint32_t divUnit(uint32_t *pr, uint32_t H, uint32_t L, uint32_t y)
+{
+ uint64_t t = make64(H, L);
+ uint32_t q = uint32_t(t / y);
+ *pr = uint32_t(t % y);
+ return q;
+}
+#if MCL_SIZEOF_UNIT == 8
+inline uint64_t divUnit(uint64_t *pr, uint64_t H, uint64_t L, uint64_t y)
+{
+#if defined(MCL_VINT_64BIT_PORTABLE)
+ uint32_t px[4] = { uint32_t(L), uint32_t(L >> 32), uint32_t(H), uint32_t(H >> 32) };
+ uint32_t py[2] = { uint32_t(y), uint32_t(y >> 32) };
+ size_t xn = 4;
+ size_t yn = 2;
+ uint32_t q[4];
+ uint32_t r[2];
+ size_t qn = xn - yn + 1;
+ divNM(q, qn, r, px, xn, py, yn);
+ *pr = make64(r[1], r[0]);
+ return make64(q[1], q[0]);
+#elif defined(_MSC_VER)
+ #error "divUnit for uint64_t is not supported"
+#else
+ typedef __attribute__((mode(TI))) unsigned int uint128;
+ uint128 t = (uint128(H) << 64) | L;
+ uint64_t q = uint64_t(t / y);
+ *pr = uint64_t(t % y);
+ return q;
+#endif
+}
+#endif
+
+/*
+ compare x[] and y[]
+ @retval positive if x > y
+ @retval 0 if x == y
+ @retval negative if x < y
+*/
+template<class T>
+int compareNM(const T *x, size_t xn, const T *y, size_t yn)
+{
+ assert(xn > 0 && yn > 0);
+ if (xn != yn) return xn > yn ? 1 : -1;
+ for (int i = (int)xn - 1; i >= 0; i--) {
+ if (x[i] != y[i]) return x[i] > y[i] ? 1 : -1;
+ }
+ return 0;
+}
+
+template<class T>
+void clearN(T *x, size_t n)
+{
+ for (size_t i = 0; i < n; i++) x[i] = 0;
+}
+
+template<class T>
+void copyN(T *y, const T *x, size_t n)
+{
+ for (size_t i = 0; i < n; i++) y[i] = x[i];
+}
+
+/*
+ z[] = x[n] + y[n]
+	@note returns 1 if there is a carry
+ z may be equal to x or y
+*/
+template<class T>
+T addN(T *z, const T *x, const T *y, size_t n)
+{
+ T c = 0;
+ for (size_t i = 0; i < n; i++) {
+ T xc = x[i] + c;
+ if (xc < c) {
+ // x[i] = Unit(-1) and c = 1
+ z[i] = y[i];
+ } else {
+ xc += y[i];
+ c = y[i] > xc ? 1 : 0;
+ z[i] = xc;
+ }
+ }
+ return c;
+}
+
+/*
+ z[] = x[] + y
+*/
+template<class T>
+T addu1(T *z, const T *x, size_t n, T y)
+{
+ assert(n > 0);
+ T t = x[0] + y;
+ z[0] = t;
+ size_t i = 0;
+ if (t >= y) goto EXIT_0;
+ i = 1;
+ for (; i < n; i++) {
+ t = x[i] + 1;
+ z[i] = t;
+ if (t != 0) goto EXIT_0;
+ }
+ return 1;
+EXIT_0:
+ i++;
+ for (; i < n; i++) {
+ z[i] = x[i];
+ }
+ return 0;
+}
+
+/*
+ x[] += y
+*/
+template<class T>
+T addu1(T *x, size_t n, T y)
+{
+ assert(n > 0);
+ T t = x[0] + y;
+ x[0] = t;
+ size_t i = 0;
+ if (t >= y) return 0;
+ i = 1;
+ for (; i < n; i++) {
+ t = x[i] + 1;
+ x[i] = t;
+ if (t != 0) return 0;
+ }
+ return 1;
+}
+/*
+ z[zn] = x[xn] + y[yn]
+ @note zn = max(xn, yn)
+*/
+template<class T>
+T addNM(T *z, const T *x, size_t xn, const T *y, size_t yn)
+{
+ if (yn > xn) {
+ fp::swap_(xn, yn);
+ fp::swap_(x, y);
+ }
+ assert(xn >= yn);
+ size_t max = xn;
+ size_t min = yn;
+ T c = vint::addN(z, x, y, min);
+ if (max > min) {
+ c = vint::addu1(z + min, x + min, max - min, c);
+ }
+ return c;
+}
+
+/*
+ z[] = x[n] - y[n]
+ z may be equal to x or y
+*/
+template<class T>
+T subN(T *z, const T *x, const T *y, size_t n)
+{
+ assert(n > 0);
+ T c = 0;
+ for (size_t i = 0; i < n; i++) {
+ T yc = y[i] + c;
+ if (yc < c) {
+ // y[i] = T(-1) and c = 1
+ z[i] = x[i];
+ } else {
+ c = x[i] < yc ? 1 : 0;
+ z[i] = x[i] - yc;
+ }
+ }
+ return c;
+}
+
+/*
+	z[] = x[n] - y
+*/
+template<class T>
+T subu1(T *z, const T *x, size_t n, T y)
+{
+ assert(n > 0);
+#if 0
+ T t = x[0];
+ z[0] = t - y;
+ size_t i = 0;
+ if (t >= y) goto EXIT_0;
+ i = 1;
+ for (; i < n; i++ ){
+ t = x[i];
+ z[i] = t - 1;
+ if (t != 0) goto EXIT_0;
+ }
+ return 1;
+EXIT_0:
+ i++;
+ for (; i < n; i++) {
+ z[i] = x[i];
+ }
+ return 0;
+#else
+ T c = x[0] < y ? 1 : 0;
+ z[0] = x[0] - y;
+ for (size_t i = 1; i < n; i++) {
+ if (x[i] < c) {
+ z[i] = T(-1);
+ } else {
+ z[i] = x[i] - c;
+ c = 0;
+ }
+ }
+ return c;
+#endif
+}
+
+/*
+ z[xn] = x[xn] - y[yn]
+ @note xn >= yn
+*/
+template<class T>
+T subNM(T *z, const T *x, size_t xn, const T *y, size_t yn)
+{
+ assert(xn >= yn);
+ T c = vint::subN(z, x, y, yn);
+ if (xn > yn) {
+ c = vint::subu1(z + yn, x + yn, xn - yn, c);
+ }
+ return c;
+}
+
+/*
+ z[0..n) = x[0..n) * y
+ return z[n]
+ @note accept z == x
+*/
+template<class T>
+T mulu1(T *z, const T *x, size_t n, T y)
+{
+ assert(n > 0);
+ T H = 0;
+ for (size_t i = 0; i < n; i++) {
+ T t = H;
+ T L = mulUnit(&H, x[i], y);
+ z[i] = t + L;
+ if (z[i] < t) {
+ H++;
+ }
+ }
+ return H; // z[n]
+}
+
+/*
+	z[xn + yn] = x[xn] * y[yn]
+*/
+template<class T>
+static inline void mulNM(T *z, const T *x, size_t xn, const T *y, size_t yn)
+{
+ assert(xn > 0 && yn > 0);
+ if (yn > xn) {
+ fp::swap_(yn, xn);
+ fp::swap_(x, y);
+ }
+ assert(xn >= yn);
+ if (z == x) {
+ T *p = (T*)CYBOZU_ALLOCA(sizeof(T) * xn);
+ copyN(p, x, xn);
+ x = p;
+ }
+ if (z == y) {
+ T *p = (T*)CYBOZU_ALLOCA(sizeof(T) * yn);
+ copyN(p, y, yn);
+ y = p;
+ }
+ z[xn] = vint::mulu1(&z[0], x, xn, y[0]);
+ clearN(z + xn + 1, yn - 1);
+
+ T *t2 = (T*)CYBOZU_ALLOCA(sizeof(T) * (xn + 1));
+ for (size_t i = 1; i < yn; i++) {
+ t2[xn] = vint::mulu1(&t2[0], x, xn, y[i]);
+ vint::addN(&z[i], &z[i], &t2[0], xn + 1);
+ }
+}
+/*
+ out[xn * 2] = x[xn] * x[xn]
+ QQQ : optimize this
+*/
+template<class T>
+static inline void sqrN(T *y, const T *x, size_t xn)
+{
+ mulNM(y, x, xn, x, xn);
+}
+
+/*
+ q[] = x[] / y
+ @retval r = x[] % y
+ accept q == x
+*/
+template<class T>
+T divu1(T *q, const T *x, size_t n, T y)
+{
+ T r = 0;
+ for (int i = (int)n - 1; i >= 0; i--) {
+ q[i] = divUnit(&r, r, x[i], y);
+ }
+ return r;
+}
+/*
+ q[] = x[] / y
+ @retval r = x[] % y
+*/
+template<class T>
+T modu1(const T *x, size_t n, T y)
+{
+ T r = 0;
+ for (int i = (int)n - 1; i >= 0; i--) {
+ divUnit(&r, r, x[i], y);
+ }
+ return r;
+}
+
+/*
+ y[] = x[] << bit
+ 0 < bit < sizeof(T) * 8
+ accept y == x
+*/
+template<class T>
+T shlBit(T *y, const T *x, size_t xn, size_t bit)
+{
+ assert(0 < bit && bit < sizeof(T) * 8);
+ assert(xn > 0);
+ size_t rBit = sizeof(T) * 8 - bit;
+ T keep = x[xn - 1];
+ T prev = keep;
+ for (size_t i = xn - 1; i > 0; i--) {
+ T t = x[i - 1];
+ y[i] = (prev << bit) | (t >> rBit);
+ prev = t;
+ }
+ y[0] = prev << bit;
+ return keep >> rBit;
+}
+
+/*
+ y[yn] = x[xn] << bit
+	yn = xn + (bit + unitBitSize - 1) / unitBitSize
+ accept y == x
+*/
+template<class T>
+void shlN(T *y, const T *x, size_t xn, size_t bit)
+{
+ assert(xn > 0);
+ const size_t unitBitSize = sizeof(T) * 8;
+ size_t q = bit / unitBitSize;
+ size_t r = bit % unitBitSize;
+ if (r == 0) {
+		// don't use copyN(y + q, x, xn) if the ranges overlap
+ for (size_t i = 0; i < xn; i++) {
+ y[q + xn - 1 - i] = x[xn - 1 - i];
+ }
+ } else {
+ y[q + xn] = shlBit(y + q, x, xn, r);
+ }
+ clearN(y, q);
+}
+
+/*
+ y[] = x[] >> bit
+ 0 < bit < sizeof(T) * 8
+*/
+template<class T>
+void shrBit(T *y, const T *x, size_t xn, size_t bit)
+{
+ assert(0 < bit && bit < sizeof(T) * 8);
+ assert(xn > 0);
+ size_t rBit = sizeof(T) * 8 - bit;
+ T prev = x[0];
+ for (size_t i = 1; i < xn; i++) {
+ T t = x[i];
+ y[i - 1] = (prev >> bit) | (t << rBit);
+ prev = t;
+ }
+ y[xn - 1] = prev >> bit;
+}
+/*
+ y[yn] = x[xn] >> bit
+	yn = xn - bit / unitBitSize
+*/
+template<class T>
+void shrN(T *y, const T *x, size_t xn, size_t bit)
+{
+ assert(xn > 0);
+ const size_t unitBitSize = sizeof(T) * 8;
+ size_t q = bit / unitBitSize;
+ size_t r = bit % unitBitSize;
+ assert(xn >= q);
+ if (r == 0) {
+ copyN(y, x + q, xn - q);
+ } else {
+ shrBit(y, x + q, xn - q, r);
+ }
+}
+
+template<class T>
+size_t getRealSize(const T *x, size_t xn)
+{
+ int i = (int)xn - 1;
+ for (; i > 0; i--) {
+ if (x[i]) {
+ return i + 1;
+ }
+ }
+ return 1;
+}
+
+template<class T>
+size_t getBitSize(const T *x, size_t n)
+{
+ if (n == 1 && x[0] == 0) return 1;
+ T v = x[n - 1];
+ assert(v);
+ return (n - 1) * sizeof(T) * 8 + 1 + cybozu::bsr<Unit>(v);
+}
+
+/*
+	q[qn] = x[xn] / y[yn] ; qn == xn - yn + 1 (required when xn >= yn and q != 0)
+ r[rn] = x[xn] % y[yn] ; rn = yn before getRealSize
+ allow q == 0
+*/
+template<class T>
+void divNM(T *q, size_t qn, T *r, const T *x, size_t xn, const T *y, size_t yn)
+{
+ assert(xn > 0 && yn > 0);
+ assert(xn < yn || (q == 0 || qn == xn - yn + 1));
+ assert(q != r);
+ const size_t rn = yn;
+ xn = getRealSize(x, xn);
+ yn = getRealSize(y, yn);
+ if (x == y) {
+ assert(xn == yn);
+ x_is_y:
+ clearN(r, rn);
+ if (q) {
+ q[0] = 1;
+ clearN(q + 1, qn - 1);
+ }
+ return;
+ }
+ if (yn > xn) {
+ /*
+ if y > x then q = 0 and r = x
+ */
+ q_is_zero:
+ copyN(r, x, xn);
+ clearN(r + xn, rn - xn);
+ if (q) clearN(q, qn);
+ return;
+ }
+ if (yn == 1) {
+ T t;
+ if (q) {
+ if (qn > xn) {
+ clearN(q + xn, qn - xn);
+ }
+ t = divu1(q, x, xn, y[0]);
+ } else {
+ t = modu1(x, xn, y[0]);
+ }
+ r[0] = t;
+ clearN(r + 1, rn - 1);
+ return;
+ }
+ const size_t yTopBit = cybozu::bsr(y[yn - 1]);
+ assert(yn >= 2);
+ if (xn == yn) {
+ const size_t xTopBit = cybozu::bsr(x[xn - 1]);
+ if (xTopBit < yTopBit) goto q_is_zero;
+ if (yTopBit == xTopBit) {
+ int ret = compareNM(x, xn, y, yn);
+ if (ret == 0) goto x_is_y;
+ if (ret < 0) goto q_is_zero;
+ if (r) {
+ subN(r, x, y, yn);
+ }
+ if (q) {
+ q[0] = 1;
+ clearN(q + 1, qn - 1);
+ }
+ return;
+ }
+ assert(xTopBit > yTopBit);
+ // fast reduction for larger than fullbit-3 size p
+ if (yTopBit >= sizeof(T) * 8 - 4) {
+ T *xx = (T*)CYBOZU_ALLOCA(sizeof(T) * xn);
+ T qv = 0;
+ if (yTopBit == sizeof(T) * 8 - 2) {
+ copyN(xx, x, xn);
+ } else {
+ qv = x[xn - 1] >> (yTopBit + 1);
+ mulu1(xx, y, yn, qv);
+ subN(xx, x, xx, xn);
+ xn = getRealSize(xx, xn);
+ }
+ for (;;) {
+ T ret = subN(xx, xx, y, yn);
+ if (ret) {
+ addN(xx, xx, y, yn);
+ break;
+ }
+ qv++;
+ xn = getRealSize(xx, xn);
+ }
+ if (r) {
+ copyN(r, xx, xn);
+ clearN(r + xn, rn - xn);
+ }
+ if (q) {
+ q[0] = qv;
+ clearN(q + 1, qn - 1);
+ }
+ return;
+ }
+ }
+ /*
+ bitwise left shift x and y to adjust MSB of y[yn - 1] = 1
+ */
+ const size_t shift = sizeof(T) * 8 - 1 - yTopBit;
+ T *xx = (T*)CYBOZU_ALLOCA(sizeof(T) * (xn + 1));
+ const T *yy;
+ if (shift) {
+ T v = shlBit(xx, x, xn, shift);
+ if (v) {
+ xx[xn] = v;
+ xn++;
+ }
+ T *yBuf = (T*)CYBOZU_ALLOCA(sizeof(T) * yn);
+ shlBit(yBuf, y, yn ,shift);
+ yy = yBuf;
+ } else {
+ copyN(xx, x, xn);
+ yy = y;
+ }
+ if (q) {
+ clearN(q, qn);
+ }
+ assert((yy[yn - 1] >> (sizeof(T) * 8 - 1)) != 0);
+ T *tt = (T*)CYBOZU_ALLOCA(sizeof(T) * (yn + 1));
+ while (xn > yn) {
+ size_t d = xn - yn;
+ T xTop = xx[xn - 1];
+ T yTop = yy[yn - 1];
+ if (xTop > yTop || (compareNM(xx + d, xn - d, yy, yn) >= 0)) {
+ vint::subN(xx + d, xx + d, yy, yn);
+ xn = getRealSize(xx, xn);
+ if (q) vint::addu1<T>(q + d, qn - d, 1);
+ continue;
+ }
+ if (xTop == 1) {
+ vint::subNM(xx + d - 1, xx + d - 1, xn - d + 1, yy, yn);
+ xn = getRealSize(xx, xn);
+ if (q) vint::addu1<T>(q + d - 1, qn - d + 1, 1);
+ continue;
+ }
+ tt[yn] = vint::mulu1(tt, yy, yn, xTop);
+ vint::subN(xx + d - 1, xx + d - 1, tt, yn + 1);
+ xn = getRealSize(xx, xn);
+ if (q) vint::addu1<T>(q + d - 1, qn - d + 1, xTop);
+ }
+ if (xn == yn && compareNM(xx, xn, yy, yn) >= 0) {
+ subN(xx, xx, yy, yn);
+ xn = getRealSize(xx, xn);
+ if (q) vint::addu1<T>(q, qn, 1);
+ }
+ if (shift) {
+ shrBit(r, xx, xn, shift);
+ } else {
+ copyN(r, xx, xn);
+ }
+ clearN(r + xn, rn - xn);
+}
+
+#ifndef MCL_VINT_FIXED_BUFFER
+template<class T>
+class Buffer {
+ size_t allocSize_;
+ T *ptr_;
+public:
+ typedef T Unit;
+ Buffer() : allocSize_(0), ptr_(0) {}
+ ~Buffer()
+ {
+ clear();
+ }
+ Buffer(const Buffer& rhs)
+ : allocSize_(rhs.allocSize_)
+ , ptr_(0)
+ {
+ ptr_ = (T*)malloc(allocSize_ * sizeof(T));
+ if (ptr_ == 0) throw cybozu::Exception("Buffer:malloc") << rhs.allocSize_;
+ memcpy(ptr_, rhs.ptr_, allocSize_ * sizeof(T));
+ }
+ Buffer& operator=(const Buffer& rhs)
+ {
+ Buffer t(rhs);
+ swap(t);
+ return *this;
+ }
+ void swap(Buffer& rhs)
+#if CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11
+ noexcept
+#endif
+ {
+ fp::swap_(allocSize_, rhs.allocSize_);
+ fp::swap_(ptr_, rhs.ptr_);
+ }
+ void clear()
+ {
+ allocSize_ = 0;
+ free(ptr_);
+ ptr_ = 0;
+ }
+
+ /*
+		@note the extended part of the buffer may not be cleared
+ */
+ void alloc(bool *pb, size_t n)
+ {
+ if (n > allocSize_) {
+ T *p = (T*)malloc(n * sizeof(T));
+ if (p == 0) {
+ *pb = false;
+ return;
+ }
+ copyN(p, ptr_, allocSize_);
+ free(ptr_);
+ ptr_ = p;
+ allocSize_ = n;
+ }
+ *pb = true;
+ }
+#ifndef CYBOZU_DONT_USE_EXCEPTION
+ void alloc(size_t n)
+ {
+ bool b;
+ alloc(&b, n);
+ if (!b) throw cybozu::Exception("Buffer:alloc");
+ }
+#endif
+ /*
+ *this = rhs
+ rhs may be destroyed
+ */
+ const T& operator[](size_t n) const { return ptr_[n]; }
+ T& operator[](size_t n) { return ptr_[n]; }
+};
+#endif
+
+template<class T, size_t BitLen>
+class FixedBuffer {
+ enum {
+ N = (BitLen + sizeof(T) * 8 - 1) / (sizeof(T) * 8)
+ };
+ size_t size_;
+ T v_[N];
+public:
+ typedef T Unit;
+ FixedBuffer()
+ : size_(0)
+ {
+ }
+ FixedBuffer(const FixedBuffer& rhs)
+ {
+ operator=(rhs);
+ }
+ FixedBuffer& operator=(const FixedBuffer& rhs)
+ {
+ size_ = rhs.size_;
+ for (size_t i = 0; i < size_; i++) {
+ v_[i] = rhs.v_[i];
+ }
+ return *this;
+ }
+ void clear() { size_ = 0; }
+ void alloc(bool *pb, size_t n)
+ {
+ if (n > N) {
+ *pb = false;
+ return;
+ }
+ size_ = n;
+ *pb = true;
+ }
+#ifndef CYBOZU_DONT_USE_EXCEPTION
+ void alloc(size_t n)
+ {
+ bool b;
+ alloc(&b, n);
+ if (!b) throw cybozu::Exception("FixedBuffer:alloc");
+ }
+#endif
+ void swap(FixedBuffer& rhs)
+ {
+ FixedBuffer *p1 = this;
+ FixedBuffer *p2 = &rhs;
+ if (p1->size_ < p2->size_) {
+ fp::swap_(p1, p2);
+ }
+ assert(p1->size_ >= p2->size_);
+ for (size_t i = 0; i < p2->size_; i++) {
+ fp::swap_(p1->v_[i], p2->v_[i]);
+ }
+ for (size_t i = p2->size_; i < p1->size_; i++) {
+ p2->v_[i] = p1->v_[i];
+ }
+ fp::swap_(p1->size_, p2->size_);
+ }
+ // to avoid warning of gcc
+ void verify(size_t n) const
+ {
+ assert(n <= N);
+ (void)n;
+ }
+ const T& operator[](size_t n) const { verify(n); return v_[n]; }
+ T& operator[](size_t n) { verify(n); return v_[n]; }
+};
+
+#if MCL_SIZEOF_UNIT == 8
+/*
+ M = 1 << 256
+ a = M mod p = (1 << 32) + 0x3d1
+ [H:L] mod p = H * a + L
+
+ if H = L = M - 1, t = H * a + L = aM + (M - a - 1)
+ H' = a, L' = M - a - 1
+ t' = H' * a + L' = M + (a^2 - a - 1)
+ H'' = 1, L'' = a^2 - a - 1
+ t'' = H'' * a + L'' = a^2 - 1
+*/
+inline void mcl_fpDbl_mod_SECP256K1(Unit *z, const Unit *x, const Unit *p)
+{
+ const Unit a = (uint64_t(1) << 32) + 0x3d1;
+ Unit buf[5];
+ buf[4] = mulu1(buf, x + 4, 4, a); // H * a
+ buf[4] += addN(buf, buf, x, 4); // t = H * a + L
+ Unit x2[2];
+ x2[0] = mulUnit(&x2[1], buf[4], a);
+ Unit x3 = addN(buf, buf, x2, 2);
+ if (x3) {
+ x3 = addu1(buf + 2, buf + 2, 2, Unit(1)); // t' = H' * a + L'
+ if (x3) {
+ x3 = addu1(buf, buf, 4, a);
+ assert(x3 == 0);
+ }
+ }
+ if (fp::isGreaterOrEqualArray(buf, p, 4)) {
+ subN(z, buf, p, 4);
+ } else {
+ fp::copyArray(z, buf, 4);
+ }
+}
+
+inline void mcl_fp_mul_SECP256K1(Unit *z, const Unit *x, const Unit *y, const Unit *p)
+{
+ Unit xy[8];
+ mulNM(xy, x, 4, y, 4);
+ mcl_fpDbl_mod_SECP256K1(z, xy, p);
+}
+inline void mcl_fp_sqr_SECP256K1(Unit *y, const Unit *x, const Unit *p)
+{
+ Unit xx[8];
+ sqrN(xx, x, 4);
+ mcl_fpDbl_mod_SECP256K1(y, xx, p);
+}
+#endif
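+/*
+	reference (illustration only): the secp256k1 prime is
+	p = 2^256 - 2^32 - 977 = 2^256 - 0x1000003d1, so a = 2^256 mod p = 0x1000003d1
+	and any x = H * 2^256 + L satisfies x = H * a + L (mod p),
+	which is the identity used by mcl_fpDbl_mod_SECP256K1 above.
+*/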
+
+} // vint
+
+/**
+ signed integer with variable length
+*/
+template<class _Buffer>
+class VintT {
+public:
+ typedef _Buffer Buffer;
+ typedef typename Buffer::Unit Unit;
+ static const size_t unitBitSize = sizeof(Unit) * 8;
+ static const int invalidVar = -2147483647 - 1; // abs(invalidVar) is not defined
+private:
+ Buffer buf_;
+ size_t size_;
+ bool isNeg_;
+ void trim(size_t n)
+ {
+ assert(n > 0);
+ int i = (int)n - 1;
+ for (; i > 0; i--) {
+ if (buf_[i]) {
+ size_ = i + 1;
+ return;
+ }
+ }
+ size_ = 1;
+ // zero
+ if (buf_[0] == 0) {
+ isNeg_ = false;
+ }
+ }
+ static int ucompare(const Buffer& x, size_t xn, const Buffer& y, size_t yn)
+ {
+ return vint::compareNM(&x[0], xn, &y[0], yn);
+ }
+ static void uadd(VintT& z, const Buffer& x, size_t xn, const Buffer& y, size_t yn)
+ {
+ size_t zn = fp::max_(xn, yn) + 1;
+ bool b;
+ z.buf_.alloc(&b, zn);
+ assert(b); (void)b;
+ z.buf_[zn - 1] = vint::addNM(&z.buf_[0], &x[0], xn, &y[0], yn);
+ z.trim(zn);
+ }
+ static void uadd1(VintT& z, const Buffer& x, size_t xn, Unit y)
+ {
+ size_t zn = xn + 1;
+ bool b;
+ z.buf_.alloc(&b, zn);
+ assert(b); (void)b;
+ z.buf_[zn - 1] = vint::addu1(&z.buf_[0], &x[0], xn, y);
+ z.trim(zn);
+ }
+ static void usub1(VintT& z, const Buffer& x, size_t xn, Unit y)
+ {
+ size_t zn = xn;
+ bool b;
+ z.buf_.alloc(&b, zn);
+ assert(b); (void)b;
+ Unit c = vint::subu1(&z.buf_[0], &x[0], xn, y);
+ (void)c;
+ assert(!c);
+ z.trim(zn);
+ }
+ static void usub(VintT& z, const Buffer& x, size_t xn, const Buffer& y, size_t yn)
+ {
+ assert(xn >= yn);
+ bool b;
+ z.buf_.alloc(&b, xn);
+ assert(b); (void)b;
+ Unit c = vint::subN(&z.buf_[0], &x[0], &y[0], yn);
+ if (xn > yn) {
+ c = vint::subu1(&z.buf_[yn], &x[yn], xn - yn, c);
+ }
+ assert(!c);
+ z.trim(xn);
+ }
+ static void _add(VintT& z, const VintT& x, bool xNeg, const VintT& y, bool yNeg)
+ {
+ if ((xNeg ^ yNeg) == 0) {
+ // same sign
+ uadd(z, x.buf_, x.size(), y.buf_, y.size());
+ z.isNeg_ = xNeg;
+ return;
+ }
+ int r = ucompare(x.buf_, x.size(), y.buf_, y.size());
+ if (r >= 0) {
+ usub(z, x.buf_, x.size(), y.buf_, y.size());
+ z.isNeg_ = xNeg;
+ } else {
+ usub(z, y.buf_, y.size(), x.buf_, x.size());
+ z.isNeg_ = yNeg;
+ }
+ }
+ static void _adds1(VintT& z, const VintT& x, int y, bool yNeg)
+ {
+ assert(y >= 0);
+ if ((x.isNeg_ ^ yNeg) == 0) {
+ // same sign
+ uadd1(z, x.buf_, x.size(), y);
+ z.isNeg_ = yNeg;
+ return;
+ }
+ if (x.size() > 1 || x.buf_[0] >= (Unit)y) {
+ usub1(z, x.buf_, x.size(), y);
+ z.isNeg_ = x.isNeg_;
+ } else {
+ z = y - x.buf_[0];
+ z.isNeg_ = yNeg;
+ }
+ }
+ static void _addu1(VintT& z, const VintT& x, Unit y, bool yNeg)
+ {
+ if ((x.isNeg_ ^ yNeg) == 0) {
+ // same sign
+ uadd1(z, x.buf_, x.size(), y);
+ z.isNeg_ = yNeg;
+ return;
+ }
+ if (x.size() > 1 || x.buf_[0] >= y) {
+ usub1(z, x.buf_, x.size(), y);
+ z.isNeg_ = x.isNeg_;
+ } else {
+ z = y - x.buf_[0];
+ z.isNeg_ = yNeg;
+ }
+ }
+ /**
+ @param q [out] x / y if q != 0
+ @param r [out] x % y
+ */
+ static void udiv(VintT* q, VintT& r, const Buffer& x, size_t xn, const Buffer& y, size_t yn)
+ {
+ assert(q != &r);
+ if (xn < yn) {
+ r.buf_ = x;
+ r.trim(xn);
+ if (q) q->clear();
+ return;
+ }
+ size_t qn = xn - yn + 1;
+ bool b;
+ if (q) {
+ q->buf_.alloc(&b, qn);
+ assert(b); (void)b;
+ }
+ r.buf_.alloc(&b, yn);
+ assert(b); (void)b;
+ vint::divNM(q ? &q->buf_[0] : 0, qn, &r.buf_[0], &x[0], xn, &y[0], yn);
+ if (q) {
+ q->trim(qn);
+ }
+ r.trim(yn);
+ }
+ /*
+ @param x [inout] x <- d
+ @retval s for x = 2^s d where d is odd
+ */
+ static uint32_t countTrailingZero(VintT& x)
+ {
+ uint32_t s = 0;
+ while (x.isEven()) {
+ x >>= 1;
+ s++;
+ }
+ return s;
+ }
+ struct MulMod {
+ const VintT *pm;
+ void operator()(VintT& z, const VintT& x, const VintT& y) const
+ {
+ VintT::mul(z, x, y);
+ z %= *pm;
+ }
+ };
+ struct SqrMod {
+ const VintT *pm;
+ void operator()(VintT& y, const VintT& x) const
+ {
+ VintT::sqr(y, x);
+ y %= *pm;
+ }
+ };
+public:
+ VintT(int x = 0)
+ : size_(0)
+ {
+ *this = x;
+ }
+ VintT(Unit x)
+ : size_(0)
+ {
+ *this = x;
+ }
+ VintT(const VintT& rhs)
+ : buf_(rhs.buf_)
+ , size_(rhs.size_)
+ , isNeg_(rhs.isNeg_)
+ {
+ }
+ VintT& operator=(int x)
+ {
+ assert(x != invalidVar);
+ isNeg_ = x < 0;
+ bool b;
+ buf_.alloc(&b, 1);
+ assert(b); (void)b;
+ buf_[0] = fp::abs_(x);
+ size_ = 1;
+ return *this;
+ }
+ VintT& operator=(Unit x)
+ {
+ isNeg_ = false;
+ bool b;
+ buf_.alloc(&b, 1);
+ assert(b); (void)b;
+ buf_[0] = x;
+ size_ = 1;
+ return *this;
+ }
+ VintT& operator=(const VintT& rhs)
+ {
+ buf_ = rhs.buf_;
+ size_ = rhs.size_;
+ isNeg_ = rhs.isNeg_;
+ return *this;
+ }
+#if CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11
+ VintT(VintT&& rhs)
+ : buf_(rhs.buf_)
+ , size_(rhs.size_)
+ , isNeg_(rhs.isNeg_)
+ {
+ }
+ VintT& operator=(VintT&& rhs)
+ {
+ buf_ = std::move(rhs.buf_);
+ size_ = rhs.size_;
+ isNeg_ = rhs.isNeg_;
+ return *this;
+ }
+#endif
+ void swap(VintT& rhs)
+#if CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11
+ noexcept
+#endif
+ {
+ fp::swap_(buf_, rhs.buf_);
+ fp::swap_(size_, rhs.size_);
+ fp::swap_(isNeg_, rhs.isNeg_);
+ }
+ void dump(const char *msg = "") const
+ {
+ vint::dump(&buf_[0], size_, msg);
+ }
+ /*
+ set positive value
+ @note assume little endian system
+ */
+ template<class S>
+ void setArray(bool *pb, const S *x, size_t size)
+ {
+ isNeg_ = false;
+ if (size == 0) {
+ clear();
+ *pb = true;
+ return;
+ }
+ size_t unitSize = (sizeof(S) * size + sizeof(Unit) - 1) / sizeof(Unit);
+ buf_.alloc(pb, unitSize);
+ if (!*pb) return;
+ char *dst = (char *)&buf_[0];
+ const char *src = (const char *)x;
+ size_t i = 0;
+ for (; i < sizeof(S) * size; i++) {
+ dst[i] = src[i];
+ }
+ for (; i < sizeof(Unit) * unitSize; i++) {
+ dst[i] = 0;
+ }
+ trim(unitSize);
+ }
+ /*
+ set [0, max) randomly
+ */
+ void setRand(bool *pb, const VintT& max, fp::RandGen rg = fp::RandGen())
+ {
+ assert(max > 0);
+ if (rg.isZero()) rg = fp::RandGen::get();
+ size_t n = max.size();
+ buf_.alloc(pb, n);
+ if (!*pb) return;
+ rg.read(pb, &buf_[0], n * sizeof(buf_[0]));
+ if (!*pb) return;
+ trim(n);
+ *this %= max;
+ }
+ /*
+ get abs value
+ buf_[0, size) = x
+ buf_[size, maxSize) with zero
+ @note assume little endian system
+ */
+ void getArray(bool *pb, Unit *x, size_t maxSize) const
+ {
+ size_t n = size();
+ if (n > maxSize) {
+ *pb = false;
+ return;
+ }
+ vint::copyN(x, &buf_[0], n);
+ vint::clearN(x + n, maxSize - n);
+ *pb = true;
+ }
+ void clear() { *this = 0; }
+ template<class OutputStream>
+ void save(bool *pb, OutputStream& os, int base = 10) const
+ {
+ if (isNeg_) cybozu::writeChar(pb, os, '-');
+ char buf[1024];
+ size_t n = mcl::fp::arrayToStr(buf, sizeof(buf), &buf_[0], size_, base, false);
+ if (n == 0) {
+ *pb = false;
+ return;
+ }
+ cybozu::write(pb, os, buf + sizeof(buf) - n, n);
+ }
+ /*
+ set buf with string terminated by '\0'
+ return strlen(buf) if success else 0
+ */
+ size_t getStr(char *buf, size_t bufSize, int base = 10) const
+ {
+ cybozu::MemoryOutputStream os(buf, bufSize);
+ bool b;
+ save(&b, os, base);
+ const size_t n = os.getPos();
+ if (!b || n == bufSize) return 0;
+ buf[n] = '\0';
+ return n;
+ }
+ /*
+ return bitSize(abs(*this))
+ @note return 1 if zero
+ */
+ size_t getBitSize() const
+ {
+ if (isZero()) return 1;
+ size_t n = size();
+ Unit v = buf_[n - 1];
+ assert(v);
+ return (n - 1) * sizeof(Unit) * 8 + 1 + cybozu::bsr<Unit>(v);
+ }
+ // ignore sign
+ bool testBit(size_t i) const
+ {
+ size_t q = i / unitBitSize;
+ size_t r = i % unitBitSize;
+ assert(q <= size());
+ Unit mask = Unit(1) << r;
+ return (buf_[q] & mask) != 0;
+ }
+ void setBit(size_t i, bool v = true)
+ {
+ size_t q = i / unitBitSize;
+ size_t r = i % unitBitSize;
+ assert(q <= size());
+ bool b;
+ buf_.alloc(&b, q + 1);
+ assert(b); (void)b;
+ Unit mask = Unit(1) << r;
+ if (v) {
+ buf_[q] |= mask;
+ } else {
+ buf_[q] &= ~mask;
+ trim(q + 1);
+ }
+ }
+ /*
+ @param str [in] number string
+ @note "0x..." => base = 16
+ "0b..." => base = 2
+ otherwise => base = 10
+ */
+ void setStr(bool *pb, const char *str, int base = 0)
+ {
+ // allow twice the size of MCL_MAX_BIT_SIZE because of multiplication
+ const size_t maxN = (MCL_MAX_BIT_SIZE * 2 + unitBitSize - 1) / unitBitSize;
+ buf_.alloc(pb, maxN);
+ if (!*pb) return;
+ *pb = false;
+ isNeg_ = false;
+ size_t len = strlen(str);
+ size_t n = fp::strToArray(&isNeg_, &buf_[0], maxN, str, len, base);
+ if (n == 0) return;
+ trim(n);
+ *pb = true;
+ }
+ static int compare(const VintT& x, const VintT& y)
+ {
+ if (x.isNeg_ ^ y.isNeg_) {
+ if (x.isZero() && y.isZero()) return 0;
+ return x.isNeg_ ? -1 : 1;
+ } else {
+ // same sign
+ int c = ucompare(x.buf_, x.size(), y.buf_, y.size());
+ if (x.isNeg_) {
+ return -c;
+ }
+ return c;
+ }
+ }
+ static int compares1(const VintT& x, int y)
+ {
+ assert(y != invalidVar);
+ if (x.isNeg_ ^ (y < 0)) {
+ if (x.isZero() && y == 0) return 0;
+ return x.isNeg_ ? -1 : 1;
+ } else {
+ // same sign
+ Unit y0 = fp::abs_(y);
+ int c = vint::compareNM(&x.buf_[0], x.size(), &y0, 1);
+ if (x.isNeg_) {
+ return -c;
+ }
+ return c;
+ }
+ }
+ static int compareu1(const VintT& x, uint32_t y)
+ {
+ if (x.isNeg_) return -1;
+ if (x.size() > 1) return 1;
+ Unit x0 = x.buf_[0];
+ return x0 > y ? 1 : x0 == y ? 0 : -1;
+ }
+ size_t size() const { return size_; }
+ bool isZero() const { return size() == 1 && buf_[0] == 0; }
+ bool isNegative() const { return !isZero() && isNeg_; }
+ uint32_t getLow32bit() const { return (uint32_t)buf_[0]; }
+ bool isOdd() const { return (buf_[0] & 1) == 1; }
+ bool isEven() const { return !isOdd(); }
+ const Unit *getUnit() const { return &buf_[0]; }
+ size_t getUnitSize() const { return size_; }
+ static void add(VintT& z, const VintT& x, const VintT& y)
+ {
+ _add(z, x, x.isNeg_, y, y.isNeg_);
+ }
+ static void sub(VintT& z, const VintT& x, const VintT& y)
+ {
+ _add(z, x, x.isNeg_, y, !y.isNeg_);
+ }
+ static void mul(VintT& z, const VintT& x, const VintT& y)
+ {
+ const size_t xn = x.size();
+ const size_t yn = y.size();
+ size_t zn = xn + yn;
+ bool b;
+ z.buf_.alloc(&b, zn);
+ assert(b); (void)b;
+ vint::mulNM(&z.buf_[0], &x.buf_[0], xn, &y.buf_[0], yn);
+ z.isNeg_ = x.isNeg_ ^ y.isNeg_;
+ z.trim(zn);
+ }
+ static void sqr(VintT& y, const VintT& x)
+ {
+ mul(y, x, x);
+ }
+ static void addu1(VintT& z, const VintT& x, Unit y)
+ {
+ _addu1(z, x, y, false);
+ }
+ static void subu1(VintT& z, const VintT& x, Unit y)
+ {
+ _addu1(z, x, y, true);
+ }
+ static void mulu1(VintT& z, const VintT& x, Unit y)
+ {
+ size_t xn = x.size();
+ size_t zn = xn + 1;
+ bool b;
+ z.buf_.alloc(&b, zn);
+ assert(b); (void)b;
+ z.buf_[zn - 1] = vint::mulu1(&z.buf_[0], &x.buf_[0], xn, y);
+ z.isNeg_ = x.isNeg_;
+ z.trim(zn);
+ }
+ static void divu1(VintT& q, const VintT& x, Unit y)
+ {
+ udivModu1(&q, x, y);
+ }
+ static void modu1(VintT& r, const VintT& x, Unit y)
+ {
+ bool xNeg = x.isNeg_;
+ r = divModu1(0, x, y);
+ r.isNeg_ = xNeg;
+ }
+ static void adds1(VintT& z, const VintT& x, int y)
+ {
+ assert(y != invalidVar);
+ _adds1(z, x, fp::abs_(y), y < 0);
+ }
+ static void subs1(VintT& z, const VintT& x, int y)
+ {
+ assert(y != invalidVar);
+ _adds1(z, x, fp::abs_(y), !(y < 0));
+ }
+ static void muls1(VintT& z, const VintT& x, int y)
+ {
+ assert(y != invalidVar);
+ mulu1(z, x, fp::abs_(y));
+ z.isNeg_ ^= (y < 0);
+ }
+ /*
+ @param q [out] q = x / y if q is not zero
+ @param x [in]
+ @param y [in] must not be zero
+ return x % y
+ */
+ static int divMods1(VintT *q, const VintT& x, int y)
+ {
+ assert(y != invalidVar);
+ bool xNeg = x.isNeg_;
+ bool yNeg = y < 0;
+ Unit absY = fp::abs_(y);
+ size_t xn = x.size();
+ int r;
+ if (q) {
+ q->isNeg_ = xNeg ^ yNeg;
+ bool b;
+ q->buf_.alloc(&b, xn);
+ assert(b); (void)b;
+ r = (int)vint::divu1(&q->buf_[0], &x.buf_[0], xn, absY);
+ q->trim(xn);
+ } else {
+ r = (int)vint::modu1(&x.buf_[0], xn, absY);
+ }
+ return xNeg ? -r : r;
+ }
+ /*
+ like C
+ 13 / 5 = 2 ... 3
+ 13 / -5 = -2 ... 3
+ -13 / 5 = -2 ... -3
+ -13 / -5 = 2 ... -3
+ */
+ static void divMod(VintT *q, VintT& r, const VintT& x, const VintT& y)
+ {
+ bool qsign = x.isNeg_ ^ y.isNeg_;
+ udiv(q, r, x.buf_, x.size(), y.buf_, y.size());
+ r.isNeg_ = x.isNeg_;
+ if (q) q->isNeg_ = qsign;
+ }
+ static void div(VintT& q, const VintT& x, const VintT& y)
+ {
+ VintT r;
+ divMod(&q, r, x, y);
+ }
+ static void mod(VintT& r, const VintT& x, const VintT& y)
+ {
+ divMod(0, r, x, y);
+ }
+ static void divs1(VintT& q, const VintT& x, int y)
+ {
+ divMods1(&q, x, y);
+ }
+ static void mods1(VintT& r, const VintT& x, int y)
+ {
+ bool xNeg = x.isNeg_;
+ r = divMods1(0, x, y);
+ r.isNeg_ = xNeg;
+ }
+ static Unit udivModu1(VintT *q, const VintT& x, Unit y)
+ {
+ assert(!x.isNeg_);
+ size_t xn = x.size();
+ if (q) {
+ bool b;
+ q->buf_.alloc(&b, xn);
+ assert(b); (void)b;
+ }
+ Unit r = vint::divu1(q ? &q->buf_[0] : 0, &x.buf_[0], xn, y);
+ if (q) {
+ q->trim(xn);
+ q->isNeg_ = false;
+ }
+ return r;
+ }
+ /*
+ like Python
+ 13 / 5 = 2 ... 3
+ 13 / -5 = -3 ... -2
+ -13 / 5 = -3 ... 2
+ -13 / -5 = 2 ... -3
+ */
+ static void quotRem(VintT *q, VintT& r, const VintT& x, const VintT& y)
+ {
+ VintT yy = y;
+ bool qsign = x.isNeg_ ^ y.isNeg_;
+ udiv(q, r, x.buf_, x.size(), y.buf_, y.size());
+ r.isNeg_ = y.isNeg_;
+ if (q) q->isNeg_ = qsign;
+ if (!r.isZero() && qsign) {
+ if (q) {
+ uadd1(*q, q->buf_, q->size(), 1);
+ }
+ usub(r, yy.buf_, yy.size(), r.buf_, r.size());
+ }
+ }
+ template<class InputStream>
+ void load(bool *pb, InputStream& is, int ioMode)
+ {
+ *pb = false;
+ char buf[1024];
+ size_t n = fp::local::loadWord(buf, sizeof(buf), is);
+ if (n == 0) return;
+ const size_t maxN = 384 / (sizeof(MCL_SIZEOF_UNIT) * 8);
+ buf_.alloc(pb, maxN);
+ if (!*pb) return;
+ isNeg_ = false;
+ n = fp::strToArray(&isNeg_, &buf_[0], maxN, buf, n, ioMode);
+ if (n == 0) return;
+ trim(n);
+ *pb = true;
+ }
+ // logical left shift (copy sign)
+ static void shl(VintT& y, const VintT& x, size_t shiftBit)
+ {
+ size_t xn = x.size();
+ size_t yn = xn + (shiftBit + unitBitSize - 1) / unitBitSize;
+ bool b;
+ y.buf_.alloc(&b, yn);
+ assert(b); (void)b;
+ vint::shlN(&y.buf_[0], &x.buf_[0], xn, shiftBit);
+ y.isNeg_ = x.isNeg_;
+ y.trim(yn);
+ }
+ // logical right shift (copy sign)
+ static void shr(VintT& y, const VintT& x, size_t shiftBit)
+ {
+ size_t xn = x.size();
+ if (xn * unitBitSize <= shiftBit) {
+ y.clear();
+ return;
+ }
+ size_t yn = xn - shiftBit / unitBitSize;
+ bool b;
+ y.buf_.alloc(&b, yn);
+ assert(b); (void)b;
+ vint::shrN(&y.buf_[0], &x.buf_[0], xn, shiftBit);
+ y.isNeg_ = x.isNeg_;
+ y.trim(yn);
+ }
+ static void neg(VintT& y, const VintT& x)
+ {
+ if (&y != &x) { y = x; }
+ y.isNeg_ = !x.isNeg_;
+ }
+ static void abs(VintT& y, const VintT& x)
+ {
+ if (&y != &x) { y = x; }
+ y.isNeg_ = false;
+ }
+ static VintT abs(const VintT& x)
+ {
+ VintT y = x;
+ abs(y, x);
+ return y;
+ }
+ // accept only non-negative value
+ static void orBit(VintT& z, const VintT& x, const VintT& y)
+ {
+ assert(!x.isNeg_ && !y.isNeg_);
+ const VintT *px = &x, *py = &y;
+ if (x.size() < y.size()) {
+ fp::swap_(px, py);
+ }
+ size_t xn = px->size();
+ size_t yn = py->size();
+ assert(xn >= yn);
+ bool b;
+ z.buf_.alloc(&b, xn);
+ assert(b); (void)b;
+ for (size_t i = 0; i < yn; i++) {
+ z.buf_[i] = x.buf_[i] | y.buf_[i];
+ }
+ vint::copyN(&z.buf_[0] + yn, &px->buf_[0] + yn, xn - yn);
+ z.trim(xn);
+ }
+ static void andBit(VintT& z, const VintT& x, const VintT& y)
+ {
+ assert(!x.isNeg_ && !y.isNeg_);
+ const VintT *px = &x, *py = &y;
+ if (x.size() < y.size()) {
+ fp::swap_(px, py);
+ }
+ size_t yn = py->size();
+ assert(px->size() >= yn);
+ bool b;
+ z.buf_.alloc(&b, yn);
+ assert(b); (void)b;
+ for (size_t i = 0; i < yn; i++) {
+ z.buf_[i] = x.buf_[i] & y.buf_[i];
+ }
+ z.trim(yn);
+ }
+ static void orBitu1(VintT& z, const VintT& x, Unit y)
+ {
+ assert(!x.isNeg_);
+ z = x;
+ z.buf_[0] |= y;
+ }
+ static void andBitu1(VintT& z, const VintT& x, Unit y)
+ {
+ assert(!x.isNeg_);
+ bool b;
+ z.buf_.alloc(&b, 1);
+ assert(b); (void)b;
+ z.buf_[0] = x.buf_[0] & y;
+ z.size_ = 1;
+ z.isNeg_ = false;
+ }
+ /*
+ REMARK y >= 0;
+ */
+ static void pow(VintT& z, const VintT& x, const VintT& y)
+ {
+ assert(!y.isNeg_);
+ const VintT xx = x;
+ z = 1;
+ mcl::fp::powGeneric(z, xx, &y.buf_[0], y.size(), mul, sqr, (void (*)(VintT&, const VintT&))0);
+ }
+ /*
+ REMARK y >= 0;
+ */
+ static void pow(VintT& z, const VintT& x, int64_t y)
+ {
+ assert(y >= 0);
+ const VintT xx = x;
+ z = 1;
+#if MCL_SIZEOF_UNIT == 8
+ Unit ua = fp::abs_(y);
+ mcl::fp::powGeneric(z, xx, &ua, 1, mul, sqr, (void (*)(VintT&, const VintT&))0);
+#else
+ uint64_t ua = fp::abs_(y);
+ Unit u[2] = { uint32_t(ua), uint32_t(ua >> 32) };
+ size_t un = u[1] ? 2 : 1;
+ mcl::fp::powGeneric(z, xx, u, un, mul, sqr, (void (*)(VintT&, const VintT&))0);
+#endif
+ }
+ /*
+ z = x ^ y mod m
+ REMARK y >= 0;
+ */
+ static void powMod(VintT& z, const VintT& x, const VintT& y, const VintT& m)
+ {
+ assert(!y.isNeg_);
+ VintT zz;
+ MulMod mulMod;
+ SqrMod sqrMod;
+ mulMod.pm = &m;
+ sqrMod.pm = &m;
+ zz = 1;
+ mcl::fp::powGeneric(zz, x, &y.buf_[0], y.size(), mulMod, sqrMod, (void (*)(VintT&, const VintT&))0);
+ z.swap(zz);
+ }
+ /*
+ inverse mod
+ y = 1/x mod m
+ REMARK x != 0 and m != 0;
+ */
+ static void invMod(VintT& y, const VintT& x, const VintT& m)
+ {
+ assert(!x.isZero() && !m.isZero());
+ if (x == 1) {
+ y = 1;
+ return;
+ }
+ VintT a = 1;
+ VintT t;
+ VintT q;
+ divMod(&q, t, m, x);
+ VintT s = x;
+ VintT b = -q;
+
+ for (;;) {
+ divMod(&q, s, s, t);
+ if (s.isZero()) {
+ if (b.isNeg_) {
+ b += m;
+ }
+ y = b;
+ return;
+ }
+ a -= b * q;
+
+ divMod(&q, t, t, s);
+ if (t.isZero()) {
+ if (a.isNeg_) {
+ a += m;
+ }
+ y = a;
+ return;
+ }
+ b -= a * q;
+ }
+ }
+ /*
+ Miller-Rabin
+ */
+ static bool isPrime(bool *pb, const VintT& n, int tryNum = 32)
+ {
+ *pb = true;
+ if (n <= 1) return false;
+ if (n == 2 || n == 3) return true;
+ if (n.isEven()) return false;
+ cybozu::XorShift rg;
+ const VintT nm1 = n - 1;
+ VintT d = nm1;
+ uint32_t r = countTrailingZero(d);
+ // n - 1 = 2^r d
+ VintT a, x;
+ for (int i = 0; i < tryNum; i++) {
+ a.setRand(pb, n - 3, rg);
+ if (!*pb) return false;
+ a += 2; // a in [2, n - 2]
+ powMod(x, a, d, n);
+ if (x == 1 || x == nm1) {
+ continue;
+ }
+ for (uint32_t j = 1; j < r; j++) {
+ sqr(x, x);
+ x %= n;
+ if (x == 1) return false;
+ if (x == nm1) goto NEXT_LOOP;
+ }
+ return false;
+ NEXT_LOOP:;
+ }
+ return true;
+ }
+ bool isPrime(bool *pb, int tryNum = 32) const
+ {
+ return isPrime(pb, *this, tryNum);
+ }
+ static void gcd(VintT& z, VintT x, VintT y)
+ {
+ VintT t;
+ for (;;) {
+ if (y.isZero()) {
+ z = x;
+ return;
+ }
+ t = x;
+ x = y;
+ mod(y, t, y);
+ }
+ }
+ static VintT gcd(const VintT& x, const VintT& y)
+ {
+ VintT z;
+ gcd(z, x, y);
+ return z;
+ }
+ static void lcm(VintT& z, const VintT& x, const VintT& y)
+ {
+ VintT c;
+ gcd(c, x, y);
+ div(c, x, c);
+ mul(z, c, y);
+ }
+ static VintT lcm(const VintT& x, const VintT& y)
+ {
+ VintT z;
+ lcm(z, x, y);
+ return z;
+ }
+ /*
+ 1 if m is a quadratic residue modulo n (i.e., there exists an x s.t. x^2 = m mod n)
+ 0 if m = 0 mod n
+ -1 otherwise
+ @note return legendre_symbol(m, p) for m and odd prime p
+ */
+ static int jacobi(VintT m, VintT n)
+ {
+ assert(n.isOdd());
+ if (n == 1) return 1;
+ if (m < 0 || m > n) {
+ quotRem(0, m, m, n); // m = m mod n
+ }
+ if (m.isZero()) return 0;
+ if (m == 1) return 1;
+ if (gcd(m, n) != 1) return 0;
+
+ int j = 1;
+ VintT t;
+ goto START;
+ while (m != 1) {
+ if ((m.getLow32bit() % 4) == 3 && (n.getLow32bit() % 4) == 3) {
+ j = -j;
+ }
+ mod(t, n, m);
+ n = m;
+ m = t;
+ START:
+ int s = countTrailingZero(m);
+ uint32_t nmod8 = n.getLow32bit() % 8;
+ if ((s % 2) && (nmod8 == 3 || nmod8 == 5)) {
+ j = -j;
+ }
+ }
+ return j;
+ }
+#ifndef CYBOZU_DONT_USE_STRING
+ explicit VintT(const std::string& str)
+ : size_(0)
+ {
+ setStr(str);
+ }
+ void getStr(std::string& s, int base = 10) const
+ {
+ s.clear();
+ cybozu::StringOutputStream os(s);
+ save(os, base);
+ }
+ std::string getStr(int base = 10) const
+ {
+ std::string s;
+ getStr(s, base);
+ return s;
+ }
+ inline friend std::ostream& operator<<(std::ostream& os, const VintT& x)
+ {
+ return os << x.getStr(os.flags() & std::ios_base::hex ? 16 : 10);
+ }
+ inline friend std::istream& operator>>(std::istream& is, VintT& x)
+ {
+ x.load(is);
+ return is;
+ }
+#endif
+#ifndef CYBOZU_DONT_USE_EXCEPTION
+ void setStr(const std::string& str, int base = 0)
+ {
+ bool b;
+ setStr(&b, str.c_str(), base);
+ if (!b) throw cybozu::Exception("Vint:setStr") << str;
+ }
+ void setRand(const VintT& max, fp::RandGen rg = fp::RandGen())
+ {
+ bool b;
+ setRand(&b, max, rg);
+ if (!b) throw cybozu::Exception("Vint:setRand");
+ }
+ void getArray(Unit *x, size_t maxSize) const
+ {
+ bool b;
+ getArray(&b, x, maxSize);
+ if (!b) throw cybozu::Exception("Vint:getArray");
+ }
+ template<class InputStream>
+ void load(InputStream& is, int ioMode = 0)
+ {
+ bool b;
+ load(&b, is, ioMode);
+ if (!b) throw cybozu::Exception("Vint:load");
+ }
+ template<class OutputStream>
+ void save(OutputStream& os, int base = 10) const
+ {
+ bool b;
+ save(&b, os, base);
+ if (!b) throw cybozu::Exception("Vint:save");
+ }
+ static bool isPrime(const VintT& n, int tryNum = 32)
+ {
+ bool b;
+ bool ret = isPrime(&b, n, tryNum);
+ if (!b) throw cybozu::Exception("Vint:isPrime");
+ return ret;
+ }
+ bool isPrime(int tryNum = 32) const
+ {
+ bool b;
+ bool ret = isPrime(&b, *this, tryNum);
+ if (!b) throw cybozu::Exception("Vint:isPrime");
+ return ret;
+ }
+ template<class S>
+ void setArray(const S *x, size_t size)
+ {
+ bool b;
+ setArray(&b, x, size);
+ if (!b) throw cybozu::Exception("Vint:setArray");
+ }
+#endif
+ VintT& operator++() { adds1(*this, *this, 1); return *this; }
+ VintT& operator--() { subs1(*this, *this, 1); return *this; }
+ VintT operator++(int) { VintT c = *this; adds1(*this, *this, 1); return c; }
+ VintT operator--(int) { VintT c = *this; subs1(*this, *this, 1); return c; }
+ friend bool operator<(const VintT& x, const VintT& y) { return compare(x, y) < 0; }
+ friend bool operator>=(const VintT& x, const VintT& y) { return !operator<(x, y); }
+ friend bool operator>(const VintT& x, const VintT& y) { return compare(x, y) > 0; }
+ friend bool operator<=(const VintT& x, const VintT& y) { return !operator>(x, y); }
+ friend bool operator==(const VintT& x, const VintT& y) { return compare(x, y) == 0; }
+ friend bool operator!=(const VintT& x, const VintT& y) { return !operator==(x, y); }
+
+ friend bool operator<(const VintT& x, int y) { return compares1(x, y) < 0; }
+ friend bool operator>=(const VintT& x, int y) { return !operator<(x, y); }
+ friend bool operator>(const VintT& x, int y) { return compares1(x, y) > 0; }
+ friend bool operator<=(const VintT& x, int y) { return !operator>(x, y); }
+ friend bool operator==(const VintT& x, int y) { return compares1(x, y) == 0; }
+ friend bool operator!=(const VintT& x, int y) { return !operator==(x, y); }
+
+ friend bool operator<(const VintT& x, uint32_t y) { return compareu1(x, y) < 0; }
+ friend bool operator>=(const VintT& x, uint32_t y) { return !operator<(x, y); }
+ friend bool operator>(const VintT& x, uint32_t y) { return compareu1(x, y) > 0; }
+ friend bool operator<=(const VintT& x, uint32_t y) { return !operator>(x, y); }
+ friend bool operator==(const VintT& x, uint32_t y) { return compareu1(x, y) == 0; }
+ friend bool operator!=(const VintT& x, uint32_t y) { return !operator==(x, y); }
+
+ VintT& operator+=(const VintT& rhs) { add(*this, *this, rhs); return *this; }
+ VintT& operator-=(const VintT& rhs) { sub(*this, *this, rhs); return *this; }
+ VintT& operator*=(const VintT& rhs) { mul(*this, *this, rhs); return *this; }
+ VintT& operator/=(const VintT& rhs) { div(*this, *this, rhs); return *this; }
+ VintT& operator%=(const VintT& rhs) { mod(*this, *this, rhs); return *this; }
+ VintT& operator&=(const VintT& rhs) { andBit(*this, *this, rhs); return *this; }
+ VintT& operator|=(const VintT& rhs) { orBit(*this, *this, rhs); return *this; }
+
+ VintT& operator+=(int rhs) { adds1(*this, *this, rhs); return *this; }
+ VintT& operator-=(int rhs) { subs1(*this, *this, rhs); return *this; }
+ VintT& operator*=(int rhs) { muls1(*this, *this, rhs); return *this; }
+ VintT& operator/=(int rhs) { divs1(*this, *this, rhs); return *this; }
+ VintT& operator%=(int rhs) { mods1(*this, *this, rhs); return *this; }
+ VintT& operator+=(Unit rhs) { addu1(*this, *this, rhs); return *this; }
+ VintT& operator-=(Unit rhs) { subu1(*this, *this, rhs); return *this; }
+ VintT& operator*=(Unit rhs) { mulu1(*this, *this, rhs); return *this; }
+ VintT& operator/=(Unit rhs) { divu1(*this, *this, rhs); return *this; }
+ VintT& operator%=(Unit rhs) { modu1(*this, *this, rhs); return *this; }
+
+ VintT& operator&=(Unit rhs) { andBitu1(*this, *this, rhs); return *this; }
+ VintT& operator|=(Unit rhs) { orBitu1(*this, *this, rhs); return *this; }
+
+ friend VintT operator+(const VintT& a, const VintT& b) { VintT c; add(c, a, b); return c; }
+ friend VintT operator-(const VintT& a, const VintT& b) { VintT c; sub(c, a, b); return c; }
+ friend VintT operator*(const VintT& a, const VintT& b) { VintT c; mul(c, a, b); return c; }
+ friend VintT operator/(const VintT& a, const VintT& b) { VintT c; div(c, a, b); return c; }
+ friend VintT operator%(const VintT& a, const VintT& b) { VintT c; mod(c, a, b); return c; }
+ friend VintT operator&(const VintT& a, const VintT& b) { VintT c; andBit(c, a, b); return c; }
+ friend VintT operator|(const VintT& a, const VintT& b) { VintT c; orBit(c, a, b); return c; }
+
+ friend VintT operator+(const VintT& a, int b) { VintT c; adds1(c, a, b); return c; }
+ friend VintT operator-(const VintT& a, int b) { VintT c; subs1(c, a, b); return c; }
+ friend VintT operator*(const VintT& a, int b) { VintT c; muls1(c, a, b); return c; }
+ friend VintT operator/(const VintT& a, int b) { VintT c; divs1(c, a, b); return c; }
+ friend VintT operator%(const VintT& a, int b) { VintT c; mods1(c, a, b); return c; }
+ friend VintT operator+(const VintT& a, Unit b) { VintT c; addu1(c, a, b); return c; }
+ friend VintT operator-(const VintT& a, Unit b) { VintT c; subu1(c, a, b); return c; }
+ friend VintT operator*(const VintT& a, Unit b) { VintT c; mulu1(c, a, b); return c; }
+ friend VintT operator/(const VintT& a, Unit b) { VintT c; divu1(c, a, b); return c; }
+ friend VintT operator%(const VintT& a, Unit b) { VintT c; modu1(c, a, b); return c; }
+
+ friend VintT operator&(const VintT& a, Unit b) { VintT c; andBitu1(c, a, b); return c; }
+ friend VintT operator|(const VintT& a, Unit b) { VintT c; orBitu1(c, a, b); return c; }
+
+ VintT operator-() const { VintT c; neg(c, *this); return c; }
+ VintT& operator<<=(size_t n) { shl(*this, *this, n); return *this; }
+ VintT& operator>>=(size_t n) { shr(*this, *this, n); return *this; }
+ VintT operator<<(size_t n) const { VintT c = *this; c <<= n; return c; }
+ VintT operator>>(size_t n) const { VintT c = *this; c >>= n; return c; }
+};
+
+#ifdef MCL_VINT_FIXED_BUFFER
+typedef VintT<vint::FixedBuffer<mcl::vint::Unit, MCL_MAX_BIT_SIZE * 2> > Vint;
+#else
+typedef VintT<vint::Buffer<mcl::vint::Unit> > Vint;
+#endif
+
+} // mcl
+
+//typedef mcl::Vint mpz_class;
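+/*
+	usage sketch (illustrative only, not part of the header):
+	  mcl::Vint x, y, z, m;
+	  x.setStr("0x123456789abcdef");
+	  y = 65537;
+	  m.setStr("0xfffffffb");
+	  mcl::Vint::powMod(z, x, y, m); // z = x^y mod m
+	  mcl::Vint::gcd(z, x, y);       // z = gcd(x, y)
+	  std::cout << z << std::endl;
+*/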
diff --git a/vendor/github.com/tangerine-network/mcl/include/mcl/window_method.hpp b/vendor/github.com/tangerine-network/mcl/include/mcl/window_method.hpp
new file mode 100644
index 000000000..cb4fad37e
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/include/mcl/window_method.hpp
@@ -0,0 +1,175 @@
+#pragma once
+/**
+ @file
+ @brief window method
+ @author MITSUNARI Shigeo(@herumi)
+*/
+#include <mcl/array.hpp>
+#include <mcl/fp.hpp>
+
+namespace mcl { namespace fp {
+
+/*
+ iterate over w-bit windows of x[0, bitSize)
+ @param x [in] data
+ @param bitSize [in] data bit length
+ @param w [in] window size (w <= sizeof(T) * 8)
+*/
+template<class T>
+struct ArrayIterator {
+ static const size_t TbitSize = sizeof(T) * 8;
+ ArrayIterator(const T *x, size_t bitSize, size_t w)
+ : x(x)
+ , bitSize(bitSize)
+ , w(w)
+ , pos(0)
+ , mask((w == TbitSize ? 0 : (T(1) << w)) - 1)
+ {
+ assert(w <= TbitSize);
+ }
+ bool hasNext() const { return bitSize > 0; }
+ T getNext()
+ {
+ if (w == TbitSize) {
+ bitSize -= w;
+ return *x++;
+ }
+ if (pos + w < TbitSize) {
+ T v = (*x >> pos) & mask;
+ pos += w;
+ if (bitSize < w) {
+ bitSize = 0;
+ } else {
+ bitSize -= w;
+ }
+ return v;
+ }
+ if (pos + bitSize <= TbitSize) {
+ assert(bitSize <= w);
+ T v = *x >> pos;
+ assert((v >> bitSize) == 0);
+ bitSize = 0;
+ return v & mask;
+ }
+ assert(pos > 0);
+ T v = (x[0] >> pos) | (x[1] << (TbitSize - pos));
+ v &= mask;
+ pos = (pos + w) - TbitSize;
+ bitSize -= w;
+ x++;
+ return v;
+ }
+ const T *x;
+ size_t bitSize;
+ size_t w;
+ size_t pos;
+ T mask;
+};
+
+template<class Ec>
+class WindowMethod {
+public:
+ size_t bitSize_;
+ size_t winSize_;
+ mcl::Array<Ec> tbl_;
+ WindowMethod(const Ec& x, size_t bitSize, size_t winSize)
+ {
+ init(x, bitSize, winSize);
+ }
+ WindowMethod()
+ : bitSize_(0)
+ , winSize_(0)
+ {
+ }
+ /*
+ @param x [in] base element
+ @param bitSize [in] exponent bit length
+ @param winSize [in] window size
+ */
+ void init(bool *pb, const Ec& x, size_t bitSize, size_t winSize)
+ {
+ bitSize_ = bitSize;
+ winSize_ = winSize;
+ const size_t tblNum = (bitSize + winSize - 1) / winSize;
+ const size_t r = size_t(1) << winSize;
+ *pb = tbl_.resize(tblNum * r);
+ if (!*pb) return;
+ Ec t(x);
+ for (size_t i = 0; i < tblNum; i++) {
+ Ec* w = &tbl_[i * r];
+ w[0].clear();
+ for (size_t d = 1; d < r; d *= 2) {
+ for (size_t j = 0; j < d; j++) {
+ Ec::add(w[j + d], w[j], t);
+ }
+ Ec::dbl(t, t);
+ }
+ for (size_t j = 0; j < r; j++) {
+ w[j].normalize();
+ }
+ }
+ }
+#ifndef CYBOZU_DONT_USE_EXCEPTION
+ void init(const Ec& x, size_t bitSize, size_t winSize)
+ {
+ bool b;
+ init(&b, x, bitSize, winSize);
+ if (!b) throw cybozu::Exception("mcl:WindowMethod:init") << bitSize << winSize;
+ }
+#endif
+ /*
+ @param z [out] x multiplied by y
+ @param y [in] exponent
+ */
+ template<class tag2, size_t maxBitSize2>
+ void mul(Ec& z, const FpT<tag2, maxBitSize2>& y) const
+ {
+ fp::Block b;
+ y.getBlock(b);
+ powArray(z, b.p, b.n, false);
+ }
+ void mul(Ec& z, int64_t y) const
+ {
+#if MCL_SIZEOF_UNIT == 8
+ Unit u = fp::abs_(y);
+ powArray(z, &u, 1, y < 0);
+#else
+ uint64_t ua = fp::abs_(y);
+ Unit u[2] = { uint32_t(ua), uint32_t(ua >> 32) };
+ size_t un = u[1] ? 2 : 1;
+ powArray(z, u, un, y < 0);
+#endif
+ }
+ void mul(Ec& z, const mpz_class& y) const
+ {
+ powArray(z, gmp::getUnit(y), gmp::getUnitSize(y), y < 0);
+ }
+ void powArray(Ec& z, const Unit* y, size_t n, bool isNegative) const
+ {
+ z.clear();
+ while (n > 0) {
+ if (y[n - 1]) break;
+ n--;
+ }
+ if (n == 0) return;
+ assert((n << winSize_) <= tbl_.size());
+ if ((n << winSize_) > tbl_.size()) return;
+ assert(y[n - 1]);
+ const size_t bitSize = (n - 1) * UnitBitSize + cybozu::bsr<Unit>(y[n - 1]) + 1;
+ size_t i = 0;
+ ArrayIterator<Unit> ai(y, bitSize, winSize_);
+ do {
+ Unit v = ai.getNext();
+ if (v) {
+ Ec::add(z, z, tbl_[(i << winSize_) + v]);
+ }
+ i++;
+ } while (ai.hasNext());
+ if (isNegative) {
+ Ec::neg(z, z);
+ }
+ }
+};
+
+} } // mcl::fp
+
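+/*
+	table layout: with r = 1 << winSize, tbl_[i * r + v] = v * 2^(i * winSize) * x,
+	so powArray adds one table entry per winSize-bit window of the exponent.
+	usage sketch (illustrative; G1, P and the scalar s are placeholders):
+	  mcl::fp::WindowMethod<G1> wm;
+	  wm.init(P, 256, 4); // table for 256-bit exponents, 4-bit windows
+	  G1 Q;
+	  wm.mul(Q, s);       // Q = s * P
+*/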
diff --git a/vendor/github.com/tangerine-network/mcl/lib/.emptydir b/vendor/github.com/tangerine-network/mcl/lib/.emptydir
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/lib/.emptydir
diff --git a/vendor/github.com/tangerine-network/mcl/mcl.sln b/vendor/github.com/tangerine-network/mcl/mcl.sln
new file mode 100644
index 000000000..7c4fe8f0c
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/mcl.sln
@@ -0,0 +1,57 @@
+Microsoft Visual Studio Solution File, Format Version 12.00
+# Visual Studio 2013
+VisualStudioVersion = 12.0.40629.0
+MinimumVisualStudioVersion = 10.0.40219.1
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "fp_test", "test\proj\fp_test\fp_test.vcxproj", "{51266DE6-B57B-4AE3-B85C-282F170E1728}"
+ ProjectSection(ProjectDependencies) = postProject
+ {1DBB979A-C212-45CD-9563-446A96F87F71} = {1DBB979A-C212-45CD-9563-446A96F87F71}
+ EndProjectSection
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "ec_test", "test\proj\ec_test\ec_test.vcxproj", "{46B6E88E-739A-406B-9F68-BC46C5950FA3}"
+ ProjectSection(ProjectDependencies) = postProject
+ {1DBB979A-C212-45CD-9563-446A96F87F71} = {1DBB979A-C212-45CD-9563-446A96F87F71}
+ EndProjectSection
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mcl", "src\proj\mcl.vcxproj", "{1DBB979A-C212-45CD-9563-446A96F87F71}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "fp_tower_test", "test\proj\fp_tower_test\fp_tower_test.vcxproj", "{733B6250-D249-4A99-B2A6-C8FAF6A90E97}"
+ ProjectSection(ProjectDependencies) = postProject
+ {1DBB979A-C212-45CD-9563-446A96F87F71} = {1DBB979A-C212-45CD-9563-446A96F87F71}
+ EndProjectSection
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "bn_test", "test\proj\bn_test\bn_test.vcxproj", "{9F935350-2F4C-45FA-A1C2-1D5AA0EADC96}"
+ ProjectSection(ProjectDependencies) = postProject
+ {1DBB979A-C212-45CD-9563-446A96F87F71} = {1DBB979A-C212-45CD-9563-446A96F87F71}
+ EndProjectSection
+EndProject
+Global
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution
+ Debug|x64 = Debug|x64
+ Release|x64 = Release|x64
+ EndGlobalSection
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution
+ {51266DE6-B57B-4AE3-B85C-282F170E1728}.Debug|x64.ActiveCfg = Debug|x64
+ {51266DE6-B57B-4AE3-B85C-282F170E1728}.Debug|x64.Build.0 = Debug|x64
+ {51266DE6-B57B-4AE3-B85C-282F170E1728}.Release|x64.ActiveCfg = Release|x64
+ {51266DE6-B57B-4AE3-B85C-282F170E1728}.Release|x64.Build.0 = Release|x64
+ {46B6E88E-739A-406B-9F68-BC46C5950FA3}.Debug|x64.ActiveCfg = Debug|x64
+ {46B6E88E-739A-406B-9F68-BC46C5950FA3}.Debug|x64.Build.0 = Debug|x64
+ {46B6E88E-739A-406B-9F68-BC46C5950FA3}.Release|x64.ActiveCfg = Release|x64
+ {46B6E88E-739A-406B-9F68-BC46C5950FA3}.Release|x64.Build.0 = Release|x64
+ {1DBB979A-C212-45CD-9563-446A96F87F71}.Debug|x64.ActiveCfg = Debug|x64
+ {1DBB979A-C212-45CD-9563-446A96F87F71}.Debug|x64.Build.0 = Debug|x64
+ {1DBB979A-C212-45CD-9563-446A96F87F71}.Release|x64.ActiveCfg = Release|x64
+ {1DBB979A-C212-45CD-9563-446A96F87F71}.Release|x64.Build.0 = Release|x64
+ {733B6250-D249-4A99-B2A6-C8FAF6A90E97}.Debug|x64.ActiveCfg = Debug|x64
+ {733B6250-D249-4A99-B2A6-C8FAF6A90E97}.Debug|x64.Build.0 = Debug|x64
+ {733B6250-D249-4A99-B2A6-C8FAF6A90E97}.Release|x64.ActiveCfg = Release|x64
+ {733B6250-D249-4A99-B2A6-C8FAF6A90E97}.Release|x64.Build.0 = Release|x64
+ {9F935350-2F4C-45FA-A1C2-1D5AA0EADC96}.Debug|x64.ActiveCfg = Debug|x64
+ {9F935350-2F4C-45FA-A1C2-1D5AA0EADC96}.Debug|x64.Build.0 = Debug|x64
+ {9F935350-2F4C-45FA-A1C2-1D5AA0EADC96}.Release|x64.ActiveCfg = Release|x64
+ {9F935350-2F4C-45FA-A1C2-1D5AA0EADC96}.Release|x64.Build.0 = Release|x64
+ EndGlobalSection
+ GlobalSection(SolutionProperties) = preSolution
+ HideSolutionNode = FALSE
+ EndGlobalSection
+EndGlobal
diff --git a/vendor/github.com/tangerine-network/mcl/misc/bench.txt b/vendor/github.com/tangerine-network/mcl/misc/bench.txt
new file mode 100644
index 000000000..3e18e6b44
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/misc/bench.txt
@@ -0,0 +1,21 @@
+Core i7-7700 @ 3.6GHz
+ BN254 BLS12_381
+G1::mul 185.863Kclk 360.723Kclk
+G1::add 812.01 clk 1.540Kclk
+G1::dbl 837.24 clk 1.977Kclk
+G2::mul 340.125Kclk 642.457Kclk
+G2::add 2.233Kclk 4.368Kclk
+G2::dbl 2.134Kclk 4.088Kclk
+GT::pow 615.052Kclk 1.055Mclk
+G1::setStr chk 1.546Kclk 534.376Kclk
+G1::setStr 1.592Kclk 4.000Kclk
+G2::setStr chk 609.195Kclk 1.402Mclk
+G2::setStr 5.444Kclk 8.282Kclk
+hashAndMapToG1 26.997Kclk 336.207Kclk
+hashAndMapToG2 212.800Kclk 775.072Kclk
+pairing 909.076Kclk 2.367Mclk
+millerLoop 549.957Kclk 983.935Kclk
+finalExp 375.203Kclk 1.404Mclk
+precomputeG2 126.000Kclk 236.912Kclk
+precomputedML 427.272Kclk 729.234Kclk
+
diff --git a/vendor/github.com/tangerine-network/mcl/misc/karatsuba.cpp b/vendor/github.com/tangerine-network/mcl/misc/karatsuba.cpp
new file mode 100644
index 000000000..7c150c6e3
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/misc/karatsuba.cpp
@@ -0,0 +1,75 @@
+/*
+ sudo cpufreq-set -c 0 -g performance
+ mycl karatsuba.cpp -DMCL_USE_LLVM=1 ../lib/libmcl.a && ./a.out
+*/
+#include <stdio.h>
+#include <mcl/fp.hpp>
+#include <cybozu/xorshift.hpp>
+#include "../src/proto.hpp"
+#include "../src/low_func.hpp"
+#ifdef MCL_USE_LLVM
+#include "../src/low_func_llvm.hpp"
+#endif
+#include <cybozu/test.hpp>
+#include <cybozu/benchmark.hpp>
+
+typedef mcl::FpT<> Fp;
+
+using namespace mcl::fp;
+
+void dump(const Unit *x, size_t N)
+{
+ for (size_t i = 0; i < N; i++) {
+ printf("%016llx ", (long long)x[N - 1 - i]);
+ }
+ printf("\n");
+}
+
+void gggKara(uint64_t *z, const uint64_t *x, const uint64_t *)
+{
+ SqrPre<8, Gtag>::f(z, x);
+}
+void gggLLVM(uint64_t *z, const uint64_t *x, const uint64_t *y)
+{
+ MulPre<8, Ltag>::f(z, x, y);
+}
+
+template<size_t N>
+void benchKaratsuba()
+{
+ cybozu::XorShift rg;
+ printf("N=%d\n", (int)N);
+ Unit z[N * 2];
+ rg.read(z, N);
+ CYBOZU_BENCH("g:mulPre ", (MulPreCore<N, Gtag>::f), z, z, z);
+// CYBOZU_BENCH("g:mulKara", (MulPre<N, Gtag>::karatsuba), z, z, z);
+ CYBOZU_BENCH("g:sqrPre ", (SqrPreCore<N, Gtag>::f), z, z);
+// CYBOZU_BENCH("g:sqrKara", (SqrPre<N, Gtag>::karatsuba), z, z);
+
+#ifdef MCL_USE_LLVM
+ CYBOZU_BENCH("l:mulPre ", (MulPreCore<N, Ltag>::f), z, z, z);
+ CYBOZU_BENCH("l:sqrPre ", (SqrPreCore<N, Ltag>::f), z, z);
+ CYBOZU_BENCH("l:mulKara", (MulPre<N, Ltag>::karatsuba), z, z, z);
+ CYBOZU_BENCH("l:sqrKara", (SqrPre<N, Ltag>::karatsuba), z, z);
+#endif
+}
+
+CYBOZU_TEST_AUTO(karatsuba)
+{
+ benchKaratsuba<4>();
+ benchKaratsuba<6>();
+ benchKaratsuba<8>();
+#if MCL_MAX_BIT_SIZE >= 640
+ benchKaratsuba<10>();
+#endif
+#if MCL_MAX_BIT_SIZE >= 768
+ benchKaratsuba<12>();
+#endif
+#if MCL_MAX_BIT_SIZE >= 896
+ benchKaratsuba<14>();
+#endif
+#if MCL_MAX_BIT_SIZE >= 1024
+ benchKaratsuba<16>();
+#endif
+}
+
diff --git a/vendor/github.com/tangerine-network/mcl/misc/mul.cpp b/vendor/github.com/tangerine-network/mcl/misc/mul.cpp
new file mode 100644
index 000000000..146ac33a9
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/misc/mul.cpp
@@ -0,0 +1,58 @@
+/*
+ sudo cpufreq-set -c 0 -g performance
+ mycl mul.cpp -DMCL_USE_LLVM=1 ../lib/libmcl.a && ./a.out
+*/
+#include <stdio.h>
+#include <mcl/fp.hpp>
+#include <cybozu/xorshift.hpp>
+#include <cybozu/test.hpp>
+#include <cybozu/benchmark.hpp>
+
+typedef mcl::FpT<> Fp;
+
+using namespace mcl::fp;
+
+void dump(const Unit *x, size_t N)
+{
+ for (size_t i = 0; i < N; i++) {
+ printf("%016llx ", (long long)x[N - 1 - i]);
+ }
+ printf("\n");
+}
+
+CYBOZU_TEST_AUTO(mulPre)
+{
+ cybozu::XorShift rg;
+ const char *pTbl[] = {
+ "0x2523648240000001ba344d80000000086121000000000013a700000000000013",
+ "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff",
+ "6701817056313037086248947066310538444882082605308124576230408038843357549886356779857393369967010764802541005796711440355753503701056323603", // 462 bit
+ "4562440617622195218641171605700291324893228507248559930579192517899275167208677386505912811317371399778642309573594407310688704721375437998252661319722214188251994674360264950082874192246603471", // 640 bit
+ "1552518092300708935148979488462502555256886017116696611139052038026050952686376886330878408828646477950487730697131073206171580044114814391444287275041181139204454976020849905550265285631598444825262999193716468750892846853816057031", // 768 bit
+ };
+ const size_t N = 16;
+ const Mode modeTbl[] = {
+ FP_GMP_MONT,
+#ifdef MCL_USE_LLVM
+ FP_LLVM_MONT,
+#endif
+ };
+ for (size_t j = 0; j < CYBOZU_NUM_OF_ARRAY(modeTbl); j++) {
+ Mode mode = modeTbl[j];
+ printf("%s\n", ModeToStr(mode));
+ for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(pTbl); i++) {
+ const char *p = pTbl[i];
+ Fp::init(p, mode);
+ printf("bitSize=%d\n", (int)Fp::getBitSize());
+ const Op& op = Fp::getOp();
+ Unit x[N], y[N * 2];
+ rg.read(x, N);
+ rg.read(y, N * 2);
+ CYBOZU_BENCH("mul ", op.fp_mul, y, y, x, op.p);
+ CYBOZU_BENCH("sqr ", op.fp_sqr, y, y, op.p);
+ CYBOZU_BENCH("mulPre", op.fpDbl_mulPre, y, y, y);
+ CYBOZU_BENCH("sqrPre", op.fpDbl_sqrPre, y, y);
+ CYBOZU_BENCH("mod ", op.fpDbl_mod, y, y, op.p);
+ }
+ }
+}
diff --git a/vendor/github.com/tangerine-network/mcl/misc/precompute.cpp b/vendor/github.com/tangerine-network/mcl/misc/precompute.cpp
new file mode 100644
index 000000000..63cdd663b
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/misc/precompute.cpp
@@ -0,0 +1,30 @@
+#include <mcl/bn256.hpp>
+#include <iostream>
+
+using namespace mcl::bn;
+
+int main()
+{
+ initPairing(mcl::BN254);
+ G2 Q;
+ mapToG2(Q, 1);
+ std::vector<Fp6> Qcoeff;
+ precomputeG2(Qcoeff, Q);
+ puts("#if MCL_SIZEOF_UNIT == 8");
+ puts("static const uint64_t QcoeffTblBN254[][6][4] = {");
+ for (size_t i = 0; i < Qcoeff.size(); i++) {
+ const Fp6& x6 = Qcoeff[i];
+ puts("\t{");
+ for (size_t j = 0; j < 6; j++) {
+ printf("\t\t{");
+ const Fp& x = x6.getFp0()[j];
+ for (size_t k = 0; k < 4; k++) {
+ printf("0x%016llxull,", (unsigned long long)x.getUnit()[k]);
+ }
+ puts("},");
+ }
+ puts("\t},");
+ }
+ puts("};");
+ puts("#endif");
+}
diff --git a/vendor/github.com/tangerine-network/mcl/misc/she/bench.sh b/vendor/github.com/tangerine-network/mcl/misc/she/bench.sh
new file mode 100644
index 000000000..ced87b4db
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/misc/she/bench.sh
@@ -0,0 +1,6 @@
+for i in 4 6 8
+do echo $i
+touch test/she_test.cpp
+make bin/she_test.exe CFLAGS_USER=-DMCLBN_FP_UNIT_SIZE=$i
+bin/she_test.exe > misc/she/bench$i.txt
+done
diff --git a/vendor/github.com/tangerine-network/mcl/misc/she/bench4.txt b/vendor/github.com/tangerine-network/mcl/misc/she/bench4.txt
new file mode 100644
index 000000000..99b2593c4
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/misc/she/bench4.txt
@@ -0,0 +1,99 @@
+ctest:module=log
+CurveFp254BNb
+ctest:module=HashTable
+ctest:module=GTHashTable
+ctest:module=enc_dec
+ctest:module=add_sub_mul
+ctest:module=add_mul_add_sub
+ctest:module=innerProduct
+ctest:module=io
+ctest:module=bench
+enc 673.772Kclk
+add 8.021Kclk
+mul 4.042Mclk
+dec 2.194Mclk
+add after mul 20.693Kclk
+ctest:module=saveHash
+ctest:module=hashBench
+Kclk
+m=000fffff decG1 1.83e+02
+m=001fffff decG1 1.83e+02
+m=003fffff decG1 1.83e+02
+m=007fffff decG1 1.90e+02
+m=00ffffff decG1 2.04e+02
+m=01ffffff decG1 2.66e+02
+m=03ffffff decG1 4.17e+02
+m=07ffffff decG1 7.15e+02
+m=0fffffff decG1 1.29e+03
+m=1fffffff decG1 2.43e+03
+m=3fffffff decG1 4.70e+03
+m=7fffffff decG1 9.28e+03
+
+m=000fffff decG2 4.09e+02
+m=001fffff decG2 4.11e+02
+m=003fffff decG2 4.09e+02
+m=007fffff decG2 4.23e+02
+m=00ffffff decG2 4.48e+02
+m=01ffffff decG2 5.21e+02
+m=03ffffff decG2 7.25e+02
+m=07ffffff decG2 1.11e+03
+m=0fffffff decG2 1.87e+03
+m=1fffffff decG2 3.36e+03
+m=3fffffff decG2 6.38e+03
+m=7fffffff decG2 1.24e+04
+
+m=000fffff decGT 2.20e+03
+m=001fffff decGT 2.21e+03
+m=003fffff decGT 2.20e+03
+m=007fffff decGT 2.21e+03
+m=00ffffff decGT 2.23e+03
+m=01ffffff decGT 2.28e+03
+m=03ffffff decGT 2.37e+03
+m=07ffffff decGT 2.56e+03
+m=0fffffff decGT 2.94e+03
+m=1fffffff decGT 3.78e+03
+m=3fffffff decGT 5.41e+03
+m=7fffffff decGT 8.69e+03
+large m
+G1::add 7.36e-01
+G1::mul 1.92e+02
+G2::add 3.51e+00
+G2::mul 4.03e+02
+GT::mul 5.47e+00
+GT::pow 7.27e+02
+G1window 1.92e+01
+G2window 6.15e+01
+GTwindow 1.35e+02
+miller 6.69e+02
+finalExp 4.23e+02
+precomML 5.16e+02
+small m = 2097151
+G1::mul 4.52e+01
+G2::mul 1.01e+02
+GT::pow 1.33e+02
+G1window 1.55e+00
+G2window 5.02e+00
+GTwindow 1.55e+01
+encG1 2.10e+02
+encG2 4.82e+02
+encGT 2.47e+03
+encG1pre 5.31e+01
+encG2pre 1.47e+02
+encGTpre 6.01e+02
+decG1 1.84e+02
+decG2 3.96e+02
+degGT 2.20e+03
+mul 4.07e+03
+addG1 1.56e+00
+addG2 4.72e+00
+addGT 2.12e+01
+reRandG1 2.10e+02
+reRandG2 4.71e+02
+reRandGT 2.49e+03
+reRandG1pre 5.16e+01
+reRandG2pre 1.44e+02
+reRandGTpre 6.10e+02
+mulG1 9.03e+01
+mulG2 2.03e+02
+mulGT 5.34e+02
+ctest:name=she_test, module=11, total=2879, ok=2879, ng=0, exception=0
diff --git a/vendor/github.com/tangerine-network/mcl/misc/she/bench6.txt b/vendor/github.com/tangerine-network/mcl/misc/she/bench6.txt
new file mode 100644
index 000000000..863f7129a
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/misc/she/bench6.txt
@@ -0,0 +1,99 @@
+ctest:module=log
+CurveFp382_1
+ctest:module=HashTable
+ctest:module=GTHashTable
+ctest:module=enc_dec
+ctest:module=add_sub_mul
+ctest:module=add_mul_add_sub
+ctest:module=innerProduct
+ctest:module=io
+ctest:module=bench
+enc 2.077Mclk
+add 17.694Kclk
+mul 13.408Mclk
+dec 5.854Mclk
+add after mul 41.570Kclk
+ctest:module=saveHash
+ctest:module=hashBench
+Kclk
+m=000fffff decG1 5.34e+02
+m=001fffff decG1 5.36e+02
+m=003fffff decG1 5.34e+02
+m=007fffff decG1 5.48e+02
+m=00ffffff decG1 5.87e+02
+m=01ffffff decG1 7.11e+02
+m=03ffffff decG1 9.53e+02
+m=07ffffff decG1 1.41e+03
+m=0fffffff decG1 2.30e+03
+m=1fffffff decG1 4.11e+03
+m=3fffffff decG1 7.71e+03
+m=7fffffff decG1 1.50e+04
+
+m=000fffff decG2 1.27e+03
+m=001fffff decG2 1.27e+03
+m=003fffff decG2 1.27e+03
+m=007fffff decG2 1.30e+03
+m=00ffffff decG2 1.35e+03
+m=01ffffff decG2 1.53e+03
+m=03ffffff decG2 1.88e+03
+m=07ffffff decG2 2.55e+03
+m=0fffffff decG2 3.87e+03
+m=1fffffff decG2 6.53e+03
+m=3fffffff decG2 1.18e+04
+m=7fffffff decG2 2.25e+04
+
+m=000fffff decGT 6.01e+03
+m=001fffff decGT 6.03e+03
+m=003fffff decGT 6.01e+03
+m=007fffff decGT 6.04e+03
+m=00ffffff decGT 6.08e+03
+m=01ffffff decGT 6.17e+03
+m=03ffffff decGT 6.39e+03
+m=07ffffff decGT 6.71e+03
+m=0fffffff decGT 7.44e+03
+m=1fffffff decGT 8.95e+03
+m=3fffffff decGT 1.20e+04
+m=7fffffff decGT 1.80e+04
+large m
+G1::add 1.48e+00
+G1::mul 5.44e+02
+G2::add 6.91e+00
+G2::mul 1.28e+03
+GT::mul 1.04e+01
+GT::pow 2.04e+03
+G1window 5.57e+01
+G2window 2.04e+02
+GTwindow 4.03e+02
+miller 2.09e+03
+finalExp 1.50e+03
+precomML 1.63e+03
+small m = 2097151
+G1::mul 8.29e+01
+G2::mul 2.05e+02
+GT::pow 2.66e+02
+G1window 3.18e+00
+G2window 1.14e+01
+GTwindow 3.19e+01
+encG1 6.01e+02
+encG2 1.49e+03
+encGT 7.66e+03
+encG1pre 1.41e+02
+encG2pre 4.71e+02
+encGTpre 1.76e+03
+decG1 5.37e+02
+decG2 1.27e+03
+degGT 6.02e+03
+mul 1.34e+04
+addG1 3.07e+00
+addG2 1.02e+01
+addGT 4.18e+01
+reRandG1 5.99e+02
+reRandG2 1.49e+03
+reRandGT 7.69e+03
+reRandG1pre 1.40e+02
+reRandG2pre 4.68e+02
+reRandGTpre 1.75e+03
+mulG1 1.65e+02
+mulG2 4.14e+02
+mulGT 1.06e+03
+ctest:name=she_test, module=11, total=2879, ok=2879, ng=0, exception=0
diff --git a/vendor/github.com/tangerine-network/mcl/misc/she/bench8.txt b/vendor/github.com/tangerine-network/mcl/misc/she/bench8.txt
new file mode 100644
index 000000000..f8fe8fd75
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/misc/she/bench8.txt
@@ -0,0 +1,99 @@
+ctest:module=log
+CurveFp462
+ctest:module=HashTable
+ctest:module=GTHashTable
+ctest:module=enc_dec
+ctest:module=add_sub_mul
+ctest:module=add_mul_add_sub
+ctest:module=innerProduct
+ctest:module=io
+ctest:module=bench
+enc 5.095Mclk
+add 36.280Kclk
+mul 30.163Mclk
+dec 12.974Mclk
+add after mul 76.646Kclk
+ctest:module=saveHash
+ctest:module=hashBench
+Kclk
+m=000fffff decG1 1.44e+03
+m=001fffff decG1 1.45e+03
+m=003fffff decG1 1.45e+03
+m=007fffff decG1 1.47e+03
+m=00ffffff decG1 1.54e+03
+m=01ffffff decG1 1.70e+03
+m=03ffffff decG1 2.03e+03
+m=07ffffff decG1 2.64e+03
+m=0fffffff decG1 3.88e+03
+m=1fffffff decG1 6.32e+03
+m=3fffffff decG1 1.12e+04
+m=7fffffff decG1 2.11e+04
+
+m=000fffff decG2 2.99e+03
+m=001fffff decG2 3.01e+03
+m=003fffff decG2 2.99e+03
+m=007fffff decG2 3.05e+03
+m=00ffffff decG2 3.15e+03
+m=01ffffff decG2 3.41e+03
+m=03ffffff decG2 3.93e+03
+m=07ffffff decG2 4.95e+03
+m=0fffffff decG2 6.97e+03
+m=1fffffff decG2 1.10e+04
+m=3fffffff decG2 1.91e+04
+m=7fffffff decG2 3.54e+04
+
+m=000fffff decGT 1.31e+04
+m=001fffff decGT 1.31e+04
+m=003fffff decGT 1.31e+04
+m=007fffff decGT 1.31e+04
+m=00ffffff decGT 1.32e+04
+m=01ffffff decGT 1.33e+04
+m=03ffffff decGT 1.36e+04
+m=07ffffff decGT 1.43e+04
+m=0fffffff decGT 1.56e+04
+m=1fffffff decGT 1.82e+04
+m=3fffffff decGT 2.34e+04
+m=7fffffff decGT 3.39e+04
+large m
+G1::add 3.40e+00
+G1::mul 1.41e+03
+G2::add 1.38e+01
+G2::mul 2.93e+03
+GT::mul 1.94e+01
+GT::pow 4.30e+03
+G1window 1.59e+02
+G2window 4.89e+02
+GTwindow 8.96e+02
+miller 4.99e+03
+finalExp 3.26e+03
+precomML 3.71e+03
+small m = 2097151
+G1::mul 1.53e+02
+G2::mul 3.85e+02
+GT::pow 4.88e+02
+G1window 6.96e+00
+G2window 2.17e+01
+GTwindow 5.83e+01
+encG1 1.62e+03
+encG2 3.48e+03
+encGT 1.79e+04
+encG1pre 3.67e+02
+encG2pre 1.09e+03
+encGTpre 3.88e+03
+decG1 1.45e+03
+decG2 3.02e+03
+degGT 1.31e+04
+mul 3.02e+04
+addG1 7.08e+00
+addG2 2.03e+01
+addGT 7.68e+01
+reRandG1 1.63e+03
+reRandG2 3.48e+03
+reRandGT 1.79e+04
+reRandG1pre 3.65e+02
+reRandG2pre 1.08e+03
+reRandGTpre 3.79e+03
+mulG1 3.08e+02
+mulG2 7.65e+02
+mulGT 1.95e+03
+ctest:name=she_test, module=11, total=2879, ok=2879, ng=0, exception=0
diff --git a/vendor/github.com/tangerine-network/mcl/misc/she/nizkp.pdf b/vendor/github.com/tangerine-network/mcl/misc/she/nizkp.pdf
new file mode 100644
index 000000000..7e61b5a64
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/misc/she/nizkp.pdf
Binary files differ
diff --git a/vendor/github.com/tangerine-network/mcl/misc/she/she-api-ja.md b/vendor/github.com/tangerine-network/mcl/misc/she/she-api-ja.md
new file mode 100644
index 000000000..850f11ff3
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/misc/she/she-api-ja.md
@@ -0,0 +1,314 @@
+# she: an L2 (two-level) homomorphic encryption library
+
+# Overview
+she (somewhat homomorphic encryption) is a pairing-based public-key encryption library providing so-called two-level (L2) homomorphic encryption.
+L2 homomorphic encryption means that ciphertexts can be added any number of times, but multiplied only once.
+
+In particular, given two integer vectors x = (x_i) and y = (y_i) whose elements are encrypted element-wise, the inner product of the two vectors can be computed while staying encrypted:
+
+ΣEnc(x_i) Enc(y_i) = Enc(Σx_i y_i).
+
+# Features
+* Implements a state-of-the-art pairing-based algorithm
+ * [Efficient Two-level Homomorphic Encryption in Prime-order Bilinear Groups and A Fast Implementation in WebAssembly : ASIA CCS2018](http://asiaccs2018.org/?page_id=632)
+* The C++ version supports Windows (x64), Linux (x64, ARM64), and OSX (x64)
+* The JavaScript (WebAssembly; "JS" below) version supports Chrome, Firefox, Edge, Safari (including Android and iPhone), and Node.js
+
+# Classes and main features
+
+## Main classes
+* Secret-key class: SecretKey
+* Public-key class: PublicKey
+* Ciphertext classes: CipherTextG1, CipherTextG2, CipherTextGT
+* Zero-knowledge proof classes: ZkpBin, ZkpEq, ZkpBinEq
+
+## Encryption and decryption
+* Create a public key from a secret key
+* Encrypt an integer with the public key to obtain a ciphertext
+* Decrypt a ciphertext with the secret key
+
+## Arithmetic on ciphertexts
+* Ciphertexts of the same class can be added and subtracted
+* Multiplying a CipherTextG1 by a CipherTextG2 yields a CipherTextGT
+
+## Important notes on decryption
+* she has to solve a small discrete logarithm problem (DLP) at decryption time
+* With a DLP table of size s and a ciphertext Enc(m), decryption time is proportional to m/s
+* Set the table size with `setRangeForDLP(s)`
+ * Set the maximum value of `m/s` with `setTryNum(tryNum)` (see the sketch below)
+
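+A minimal C++ sketch of how these settings combine (assuming `using namespace mcl::she;` as in the C++ sample below; the concrete numbers are only an example):
+```
+init();                  // defaults: hashSize = 1024, tryNum = 2048
+setRangeForDLP(1 << 20); // DLP table of 2^20 entries
+setTryNum(64);           // now |m| <= 2^20 * 64, per |m| <= hashSize * tryNum below
+```
+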
+## Zero-knowledge proof classes
+* When encrypting m, a zero-knowledge proof can be generated at the same time
+* The ciphertext, the generated proof, and the public key allow constraints on m to be verified
+
+# JS version
+
+## Loading in Node.js
+
+```
+>npm install she-wasm
+>node
+>const she = require('she-wasm')
+```
+
+## Loading in a browser
+Place the she.js, she\_c.js, and she\_c.wasm files from [she-wasm](https://github.com/herumi/she-wasm/) in the same directory and load she.js
+```
+// HTML
+<script src="she.js"></script>
+```
+
+## JS sample
+
+```
+// initialize the system
+she.init().then(() => {
+ const sec = new she.SecretKey()
+ // initialize the secret key
+ sec.setByCSPRNG()
+
+ // create the public key pub from the secret key sec
+ const pub = sec.getPublicKey()
+
+ const m1 = 1
+ const m2 = 2
+ const m3 = 3
+ const m4 = -1
+
+ // encrypt plaintexts m1 and m2 as CipherTextG1
+ const c11 = pub.encG1(m1)
+ const c12 = pub.encG1(m2)
+
+ // encrypt plaintexts m3 and m4 as CipherTextG2
+ const c21 = pub.encG2(m3)
+ const c22 = pub.encG2(m4)
+
+ // add c11 and c12, and c21 and c22, respectively
+ const c1 = she.add(c11, c12)
+ const c2 = she.add(c21, c22)
+
+ // multiplying c1 and c2 gives a CipherTextGT
+ const ct = she.mul(c1, c2)
+
+ // decrypt the ciphertext ct
+ console.log(`(${m1} + ${m2}) * (${m3} + ${m4}) = ${sec.dec(ct)}`)
+})
+```
+
+# C++ sample
+See [mcl](https://github.com/herumi/mcl/#installation-requirements) for how to build the library
+```
+#include <mcl/she.hpp>
+int main()
+ try
+{
+ using namespace mcl::she;
+ // initialize the system
+ init();
+
+ SecretKey sec;
+
+ // initialize the secret key
+ sec.setByCSPRNG();
+
+ // create the public key pub from the secret key sec
+ PublicKey pub;
+ sec.getPublicKey(pub);
+
+ int m1 = 1;
+ int m2 = 2;
+ int m3 = 3;
+ int m4 = -1;
+
+ // encrypt plaintexts m1 and m2 as CipherTextG1
+ CipherTextG1 c11, c12;
+ pub.enc(c11, m1);
+ pub.enc(c12, m2);
+
+ // encrypt plaintexts m3 and m4 as CipherTextG2
+ CipherTextG2 c21, c22;
+ pub.enc(c21, m3);
+ pub.enc(c22, m4);
+
+ // add c11 and c12, and c21 and c22, respectively
+ CipherTextG1 c1;
+ CipherTextG2 c2;
+ CipherTextG1::add(c1, c11, c12);
+ CipherTextG2::add(c2, c21, c22);
+
+ // multiplying c1 and c2 gives a CipherTextGT
+ CipherTextGT ct;
+ CipherTextGT::mul(ct, c1, c2);
+
+ // decrypt the ciphertext ct
+ printf("(%d + %d) * (%d + %d) = %d\n", m1, m2, m3, m4, (int)sec.dec(ct));
+} catch (std::exception& e) {
+ printf("ERR %s\n", e.what());
+ return 1;
+}
+
+```
+
+# Methods common to all classes
+
+## Serialization (C++)
+
+* `setStr(const std::string& str, int ioMode = 0)`
+ * Set the value from str according to ioMode
+
+* `getStr(std::string& str, int ioMode = 0) const`
+* `std::string getStr(int ioMode = 0) const`
+ * Get the value as a string according to ioMode
+* `size_t serialize(void *buf, size_t maxBufSize) const`
+ * Serialize into buf, which must have maxBufSize bytes available
+ * Returns the number of bytes written to buf
+ * Returns 0 on error
+* `size_t deserialize(const void *buf, size_t bufSize)`
+ * Deserialize by reading at most bufSize bytes from buf
+ * Returns the number of bytes read
+ * Returns 0 on error (see the round-trip sketch below)
+
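+A minimal serialization round-trip sketch in C++ (reusing `pub` from the sample above; the buffer size is only an example):
+```
+CipherTextG1 c, c2;
+pub.enc(c, 123);
+char buf[1024];
+size_t writtenSize = c.serialize(buf, sizeof(buf)); // bytes written, 0 on error
+size_t readSize = c2.deserialize(buf, writtenSize); // bytes read, 0 on error
+```
+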
+## Serialization (JS)
+
+* `deserialize(s)`
+ * Deserialize from a Uint8Array s
+* `serialize()`
+ * Serialize and return a Uint8Array
+* `deserializeHexStr(s)`
+ * Deserialize from a hexadecimal string s
+* `serializeToHexStr()`
+ * Serialize and return a hexadecimal string
+
+## ioMode
+
+* 2 ; binary
+* 10 ; decimal
+* 16 ; hexadecimal
+* IoPrefix ; or'ed with 2 or 16, prepends 0b or 0x
+* IoEcAffine ; (G1, G2 only) affine coordinates
+* IoEcProj ; (G1, G2 only) projective coordinates
+* IoSerialize ; same as serialize()/deserialize()
+
+## Notes
+* The C++ namespace is `mcl::she`
+* Below, CT denotes any of CipherTextG1, CipherTextG2, and CipherTextGT
+* In the JS version, plaintexts are restricted to the range of 32-bit integers
+
+## SecretKey class
+
+* `void setByCSPRNG()` (C++)
+* `void setByCSPRNG()` (JS)
+ * Initialize the secret key with a cryptographically secure pseudorandom number generator
+
+* `int64_t dec(const CT& c) const` (C++)
+* `int dec(CT c)` (JS)
+ * Decrypt the ciphertext c
+* `int64_t decViaGT(const CipherTextG1& c) const` (C++)
+* `int64_t decViaGT(const CipherTextG2& c) const` (C++)
+* `int decViaGT(CT c)` (JS)
+ * Decrypt the ciphertext via GT
+* `bool isZero(const CT& c) const` (C++)
+* `bool isZero(CT c)` (JS)
+ * Returns true if c decrypts to 0
+ * Faster than calling dec and comparing with 0
+
+## PublicKey and PrecomputedPublicKey classes
+PrecomputedPublicKey is a faster version of PublicKey (see the sketch after this section)
+
+* `void PrecomputedPublicKey::init(const PublicKey& pub)` (C++)
+* `void PrecomputedPublicKey::init(pub)` (JS)
+ * Initialize the PrecomputedPublicKey with the public key pub
+
+
+* `PrecomputedPublicKey::destroy()` (JS)
+ * In JavaScript this method must be called once the PrecomputedPublicKey is no longer needed
+ * Otherwise memory will leak
+
+Below, PK = PublicKey or PrecomputedPublicKey
+
+* `void PK::enc(CT& c, int64_t m) const` (C++)
+* `CipherTextG1 PK::encG1(m)` (JS)
+* `CipherTextG2 PK::encG2(m)` (JS)
+* `CipherTextGT PK::encGT(m)` (JS)
+ * Encrypt m and set c to the result (or return it)
+
+* `void PK::reRand(CT& c) const` (C++)
+* `CT PK::reRand(CT c)` (JS)
+ * Re-randomize c
+ * A re-randomized ciphertext cannot be linked to the original ciphertext as an encryption of the same plaintext
+
+* `void convert(CipherTextGT& cm, const CT& ca) const`
+* `CipherTextGT convert(CT ca)`
+ * Convert the ciphertext ca (CipherTextG1 or CipherTextG2) to a CipherTextGT
+
+## CipherTextクラス
+
+* `void CT::add(CT& z, const CT& x const CT& y)`(C++)
+* `CT she.add(CT x, CT y)`(JS)
+ * 暗号文xと暗号文yを足してzにセットする(またはその値を返す)
+* `void CT::sub(CT& z, const CT& x const CT& y)`(C++)
+* `CT she.sub(CT x, CT y)`(JS)
+ * 暗号文xから暗号文yを引いてzにセットする(またはその値を返す)
+* `void CT::neg(CT& y, const CT& x)`(C++)
+* `CT she.neg(CT x)`(JS)
+ * 暗号文xの符号反転をyにセットする(またはその値を返す)
+* `void CT::mul(CT& z, const CT& x, int y)`(C++)
+* `CT she.mulInt(CT x, int y)`(JS)
+ * 暗号文xを整数倍yしてzにセットする(またはその値を返す)
+
+* `void CipherTextGT::mul(CipherTextGT& z, const CipherTextG1& x, const CipherTextG2& y)`(C++)
+* `CipherTextGT she.mul(CipherTextG1 x, CipherTextG2 y)`(JS)
+ * 暗号文xと暗号文yを掛けてzにセットする(またはその値を返す)
+
+* `void CipherTextGT::mulML(CipherTextGT& z, const CipherTextG1& x, const CipherTextG2& y)`(C++)
+ * 暗号文xと暗号文yを掛けて(Millerループだけして)zにセットする(またはその値を返す)
+* `CipherTextGT::finalExp(CipherText& , const CipherTextG1& x, const CipherTextG2& y)`(C++)
+ * mul(a, b) = finalExp(mulML(a, b))
+ * add(mul(a, b), mul(c, d)) = finalExp(add(mulML(a, b), mulML(c, d)))
+ * すなわち積和演算はmulMLしたものを足してから最後に一度finalExpするのがよい
+
+## ゼロ知識証明クラス
+
+### 概要
+* ZkpBin 暗号文encGi(m)(i = 1, 2, T)についてm = 0または1であることを復号せずに検証できる
+* ZkpEq 暗号文encG1(m1), encG2(m2)についてm1 = m2であることを検証できる
+* ZkpBinEq 暗号文encG1(m1), encG2(m2)についてm1 = m2 = 0または1であることを検証できる
+
+### API
+PK = PublicKey or PrecomputedPublicKey
+
+* `void PK::encWithZkpBin(CipherTextG1& c, Zkp& zkp, int m) const`(C++)
+* `void PK::encWithZkpBin(CipherTextG2& c, Zkp& zkp, int m) const`(C++)
+* `[CipherTextG1, ZkpBin] PK::encWithZkpBinG1(m)`(JS)
+* `[CipherTextG2, ZkpBin] PK::encWithZkpBinG2(m)`(JS)
+ * m(=0 or 1)を暗号化して暗号文cとゼロ知識証明zkpをセットする(または[c, zkp]を返す)
+ * mが0でも1でもなければ例外
+* `void PK::encWithZkpEq(CipherTextG1& c1, CipherTextG2& c2, ZkpEq& zkp, const INT& m) const`(C++)
+* `[CipherTextG1, CipherTextG2, ZkpEq] PK::encWithZkpEq(m)`(JS)
+ * mを暗号化して暗号文c1, c2とゼロ知識証明zkpをセットする(または[c1, c2, zkp]を返す)
+* `void PK::encWithZkpBinEq(CipherTextG1& c1, CipherTextG2& c2, ZkpBinEq& zkp, int m) const`(C++)
+* `[CipherTextG1, CipherTextG2, ZkpEqBin] PK::encWithZkpBinEq(m)`(JS)
+ * m(=0 or 1)を暗号化して暗号文c1, c2とゼロ知識証明zkpをセットする(または[c1, c2, zkp]を返す)
+ * mが0でも1でもなければ例外
+
+## グローバル関数
+
+* `void init(const CurveParam& cp, size_t hashSize = 1024, size_t tryNum = 2048)`(C++)
+* `void init(curveType = she.BN254, hashSize = 1024, tryNum = 2048)`(JS)
+ * hashSizeの大きさの復号用テーブルとtryNumを元に初期化する
+ * 復号可能な平文mの範囲は|m| <= hashSize * tryNum
+* `she.loadTableForGTDLP(Uint8Array a)`(JS)
+ * 復号用テーブルを読み込む
+ * 現在は`https://herumi.github.io/she-dlp-table/she-dlp-0-20-gt.bin`のみがある
+* `void useDecG1ViaGT(bool use)`(C++/JS)
+* `void useDecG2ViaGT(bool use)`(C++/JS)
+ * CipherTextG1, CipherTextG2の復号をCipherTextGT経由で行う
+ * 大きな値を復号するときはDLP用の巨大なテーブルをそれぞれに持つよりもGTに集約した方が効率がよい
+
+# ライセンス
+
+このライブラリは[修正BSDライセンス](https://github.com/herumi/mcl/blob/master/COPYRIGHT)で提供されます
+
+# 開発者
+
+光成滋生 MITSUNARI Shigeo(herumi@nifty.com)
diff --git a/vendor/github.com/tangerine-network/mcl/misc/she/she-api.md b/vendor/github.com/tangerine-network/mcl/misc/she/she-api.md
new file mode 100644
index 000000000..af54311e9
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/misc/she/she-api.md
@@ -0,0 +1,322 @@
+# she ; Two-level homomorphic encryption library for browser/Node.js by WebAssembly
+
+# Abstract
+she is a somewhat (two-level) homomorphic encryption library
+based on pairings.
+This library supports polynomially many homomorphic additions and
+one multiplication over encrypted data.
+
+In particular, the inner product of two encrypted integer vectors such as Enc(x) = (Enc(x_i)), Enc(y) = (Enc(y_i))
+can be computed.
+
+Sum_i Enc(x_i) Enc(y_i) = Enc(Sum_i x_i y_i).
+
+# Features
+* supports the latest pairing based algorithm
+ * [Efficient Two-level Homomorphic Encryption in Prime-order Bilinear Groups and A Fast Implementation in WebAssembly : ASIA CCS2018](http://asiaccs2018.org/?page_id=632)
+* supports Windows(x64), Linux(x64, ARM64), OSX(x64)
+* supports JavaScript(WebAssembly), Chrome, Firefox, Safari(including Android and iPhone), Node.js
+
+# Classes
+
+## Main classes
+* secret key class ; SecretKey
+* public key class ; PublicKey
+* ciphertext class ; CipherTextG1, CipherTextG2, CipherTextGT
+* zero-knowledge proof class ; ZkpBin, ZkpEq, ZkpBinEq
+
+## Encryption and decryption
+* create the corresponding public key from a secret key
+* encrypt an integer(plaintext) with a public key
+* decrypt a ciphertext with a secret key
+
+## Homomorphic operations
+* homomorphic addition/subtraction over ciphertexts of the same ciphertext class
+* homomorphic multiplication of a CipherTextG1 ciphertext and a CipherTextG2 ciphertext
+ * The class of the result is CipherTextGT.
+
+## Important notes on decryption
+* This library requires solving a small DLP to decrypt a ciphertext.
+* The decryption time is O(m/s), where s is the size of the table used to solve the DLP and m is the size of the plaintext.
+* Call `setRangeForDLP(s)` to set the table size (see the sketch below).
+ * The maximum `m/s` is set by `setTryNum(tryNum)`.
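+
+The following is a minimal C++ sketch of enlarging the decryption range with these two calls; it assumes `setRangeForDLP` and `setTryNum` are free functions of `mcl::she`, as the notes above suggest, and that the library is built as shown in the C++ sample below.
+```
+#include <mcl/she.hpp>
+int main()
+{
+ using namespace mcl::she;
+ init();
+ // table of 65536 entries; decryption then tries up to 1024 steps,
+ // so plaintexts with |m| <= 65536 * 1024 become decryptable
+ setRangeForDLP(65536);
+ setTryNum(1024);
+ SecretKey sec;
+ sec.setByCSPRNG();
+ PublicKey pub;
+ sec.getPublicKey(pub);
+ CipherTextG1 c;
+ pub.enc(c, 123456);
+ return sec.dec(c) == 123456 ? 0 : 1;
+}
+```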
+
+## Zero-knowledge proof class
+* A zero-knowledge proof is simultaneously created when encrypting a plaintext `m`.
+* A constraint on `m` can be verified with the generated zero-knowledge proof and the public key.
+
+# Setup for JavaScript(JS)
+
+## for Node.js
+
+```
+>npm install she-wasm
+>node
+>const she = require('she-wasm')
+```
+
+## for a browser
+
+Copy `she.js`, `she_c.js`, and `she_c.wasm` from [she-wasm](https://github.com/herumi/she-wasm/) into the same directory,
+and load `she.js`.
+```
+// HTML
+<script src="she.js"></script>
+```
+
+## A sample for JS
+
+```
+// initialize a library
+she.init().then(() => {
+ const sec = new she.SecretKey()
+ // initialize a secret key by CSPRNG(cryptographically secure pseudo random number generator)
+ sec.setByCSPRNG()
+
+ // create a public key from a secret key
+ const pub = sec.getPublicKey()
+
+ const m1 = 1
+ const m2 = 2
+ const m3 = 3
+ const m4 = -1
+
+ // encrypt m1 and m2 as CipherTextG1 class
+ const c11 = pub.encG1(m1)
+ const c12 = pub.encG1(m2)
+
+ // encrypt m3 and m4 as CipherTextG2 class
+ const c21 = pub.encG2(m3)
+ const c22 = pub.encG2(m4)
+
+ // add c11 and c12, c21 and c22 respectively
+ const c1 = she.add(c11, c12)
+ const c2 = she.add(c21, c22)
+
+ // get ct as a CipherTextGT class by multiplying c1 with c2
+ const ct = she.mul(c1, c2)
+
+ // decrypt ct
+ console.log(`(${m1} + ${m2}) * (${m3} + ${m4}) = ${sec.dec(ct)}`)
+})
+```
+
+# A sample for C++
+For how to build the library, see [mcl](https://github.com/herumi/mcl/#installation-requirements).
+```
+#include <mcl/she.hpp>
+int main()
+ try
+{
+ using namespace mcl::she;
+ // initialize a library
+ init();
+
+ SecretKey sec;
+
+ // initialize a secret key by CSPRNG
+ sec.setByCSPRNG();
+
+ // create a public key from a secret key
+ PublicKey pub;
+ sec.getPublicKey(pub);
+
+ int m1 = 1;
+ int m2 = 2;
+ int m3 = 3;
+ int m4 = -1;
+
+ // encrypt m1 and m2 as CipherTextG1 class
+ CipherTextG1 c11, c12;
+ pub.enc(c11, m1);
+ pub.enc(c12, m2);
+
+ // encrypt m3 and m4 as CipherTextG2 class
+ CipherTextG2 c21, c22;
+ pub.enc(c21, m3);
+ pub.enc(c22, m4);
+
+ // add c11 and c12, c21 and c22 respectively
+ CipherTextG1 c1;
+ CipherTextG2 c2;
+ CipherTextG1::add(c1, c11, c12);
+ CipherTextG2::add(c2, c21, c22);
+
+ // get ct as a CipherTextGT class by multiplying c1 with c2
+ CipherTextGT ct;
+ CipherTextGT::mul(ct, c1, c2);
+
+ // decrypt ct
+ printf("(%d + %d) * (%d + %d) = %d\n", m1, m2, m3, m4, (int)sec.dec(ct));
+} catch (std::exception& e) {
+ printf("ERR %s\n", e.what());
+ return 1;
+}
+
+```
+# Common class methods
+
+## Serialization(C++)
+
+* `setStr(const std::string& str, int ioMode = 0)`
+ * set a value by `str` according to `ioMode`
+
+* `getStr(std::string& str, int ioMode = 0) const`
+* `std::string getStr(int ioMode = 0) const`
+ * get a string `str` according to `ioMode`
+* `size_t serialize(void *buf, size_t maxBufSize) const`
+ * serialize the value into `buf`, which is `maxBufSize` bytes long (a round-trip sketch follows after this list)
+ * return the byte size to be written in `buf`
+ * return zero if error
+* `size_t deserialize(const void *buf, size_t bufSize)`
+ * deserialize a value from buf which has bufSize byte size
+ * return the byte size to be read from `buf`
+ * return zero if error
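+
+A short round-trip sketch of `serialize`/`deserialize` for a ciphertext, assuming these common methods apply to `CipherTextG1`:
+```
+#include <mcl/she.hpp>
+#include <stdio.h>
+int main()
+{
+ using namespace mcl::she;
+ init();
+ SecretKey sec;
+ sec.setByCSPRNG();
+ PublicKey pub;
+ sec.getPublicKey(pub);
+ CipherTextG1 c1, c2;
+ pub.enc(c1, 42);
+ char buf[1024];
+ size_t n = c1.serialize(buf, sizeof(buf));
+ if (n == 0 || c2.deserialize(buf, n) == 0) {
+  printf("serialization error\n");
+  return 1;
+ }
+ printf("dec=%d\n", (int)sec.dec(c2)); // 42
+}
+```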
+
+## Serialization(JS)
+
+* `deserialize(s)`
+ * deserialize from `s` as Uint8Array type
+* `serialize()`
+ * serialize a value and return Uint8Array value
+* `deserializeHexStr(s)`
+ * deserialize as a hexadecimal string
+* `serializeToHexStr()`
+ * serialize as a hexadecimal string
+
+## ioMode
+
+* 2 ; binary number
+* 10 ; decimal number
+* 16 ; hexadecimal number (used in the sketch after this list)
+* IoPrefix ; OR this with 2 or 16 to prepend the prefix 0b or 0x respectively
+* IoEcAffine ; affine coordinate (for only G1, G2)
+* IoEcProj ; projective coordinate (for only G1, G2)
+* IoSerialize ; same as serialize()/deserialize()
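+
+For example, a sketch of hexadecimal I/O with `ioMode = 16`, assuming the common `getStr`/`setStr` methods above apply to `PublicKey`:
+```
+#include <mcl/she.hpp>
+#include <string>
+#include <stdio.h>
+int main()
+{
+ using namespace mcl::she;
+ init();
+ SecretKey sec;
+ sec.setByCSPRNG();
+ PublicKey pub, pub2;
+ sec.getPublicKey(pub);
+ std::string s = pub.getStr(16); // 16 = hexadecimal
+ pub2.setStr(s, 16);
+ printf("%s\n", pub.getStr(16) == pub2.getStr(16) ? "same" : "different");
+}
+```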
+
+## Notation
+* the namespace of C++ is `mcl::she`
+* CT means one of CipherTextG1, CipherTextG2, CipherTextGT
+* The range of a plaintext is restricted to a 32-bit integer for JS
+
+## SecretKey class
+
+* `void setByCSPRNG()`(C++)
+* `void setByCSPRNG()`(JS)
+ * set a secret key by CSPRNG(cryptographically secure pseudo random number generator)
+
+* `int64_t dec(const CT& c) const`(C++)
+* `int dec(CT c)`(JS)
+ * decrypt `c`
+* `int64_t decViaGT(const CipherTextG1& c) const`(C++)
+* `int64_t decViaGT(const CipherTextG2& c) const`(C++)
+* `int decViaGT(CT c)`(JS)
+ * decrypt `c` through CipherTextGT
+* `bool isZero(const CT& c) const`(C++)
+* `bool isZero(CT c)`(JS)
+ * return true if the decryption of `c` is zero
+ * this is faster than decrypting `c` and comparing the result with zero (a short sketch follows)
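+
+A short sketch of `isZero` on a ciphertext of zero:
+```
+#include <mcl/she.hpp>
+#include <stdio.h>
+int main()
+{
+ using namespace mcl::she;
+ init();
+ SecretKey sec;
+ sec.setByCSPRNG();
+ PublicKey pub;
+ sec.getPublicKey(pub);
+ CipherTextG1 c, z;
+ pub.enc(c, 3);
+ CipherTextG1::sub(z, c, c); // Enc(3 - 3) = Enc(0)
+ printf("%s\n", sec.isZero(z) ? "zero" : "non-zero");
+}
+```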
+
+## PublicKey, PrecomputedPublicKey class
+`PrecomputedPublicKey` is a faster version of `PublicKey`
+
+* `void PrecomputedPublicKey::init(const PublicKey& pub)`(C++)
+* `void PrecomputedPublicKey::init(pub)`(JS)
+ * initialize `PrecomputedPublicKey` by a public key `pub`
+
+* `PrecomputedPublicKey::destroy()`(JS)
+ * call this method when the instance is no longer needed
+ * otherwise the memory it holds is leaked
+
+PK means PublicKey or PrecomputedPublicKey
+
+* `void PK::enc(CT& c, int64_t m) const`(C++)
+* `CipherTextG1 PK::encG1(m)`(JS)
+* `CipherTextG2 PK::encG2(m)`(JS)
+* `CipherTextGT PK::encGT(m)`(JS)
+ * encrypt `m` and set `c`(or return the value)
+
+* `void PK::reRand(CT& c) const`(C++)
+* `CT PK::reRand(CT c)`(JS)
+ * rerandomize `c` (see the sketch below)
+ * For `c = Enc(m)`, it is hard to tell whether the rerandomized ciphertext was produced by rerandomization
+ or by freshly encrypting `m` again.
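+
+A short sketch of `reRand`; the refreshed ciphertext still decrypts to the same plaintext:
+```
+#include <mcl/she.hpp>
+#include <stdio.h>
+int main()
+{
+ using namespace mcl::she;
+ init();
+ SecretKey sec;
+ sec.setByCSPRNG();
+ PublicKey pub;
+ sec.getPublicKey(pub);
+ CipherTextG1 c;
+ pub.enc(c, 7);
+ pub.reRand(c); // refresh the randomness in place
+ printf("dec=%d\n", (int)sec.dec(c)); // still 7
+}
+```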
+
+* `void convert(CipherTextGT& cm, const CT& ca) const`
+* `CipherTextGT convert(CT ca)`
+ * convert `ca`(CipherTextG1 or CipherTextG2) to `CipherTextGT` class
+
+## CipherText class
+
+* `void CT::add(CT& z, const CT& x const CT& y)`(C++)
+* `CT she.add(CT x, CT y)`(JS)
+ * add `x` and `y` and set the value to `z`(or return the value)
+* `void CT::sub(CT& z, const CT& x const CT& y)`(C++)
+* `CT she.sub(CT x, CT y)`(JS)
+ * subtract `y` from `x` and set the value to `z`(or return the value)
+* `void CT::neg(CT& y, const CT& x)`(C++)
+* `CT she.neg(CT x)`(JS)
+ * negate `x` and set the value to `y`(or return the value)
+* `void CT::mul(CT& z, const CT& x, int y)`(C++)
+* `CT she.mulInt(CT x, int y)`(JS)
+ * multiply `x` by the integer `y` and set the value to `z`(or return the value)
+
+* `void CipherTextGT::mul(CipherTextGT& z, const CipherTextG1& x, const CipherTextG2& y)`(C++)
+* `CipherTextGT she.mul(CipherTextG1 x, CipherTextG2 y)`(JS)
+ * multiply `x` and `y` and set the value to `z`(or return the value)
+
+* `void CipherTextGT::mulML(CipherTextGT& z, const CipherTextG1& x, const CipherTextG2& y)`(C++)
+ * multiply `x` and `y` (Miller loop only) and set the value to `z`(or return the value)
+
+* `CipherTextGT::finalExp(CipherText& , const CipherTextG1& x, const CipherTextG2& y)`(C++)
+ * mul(a, b) = finalExp(mulML(a, b))
+ * add(mul(a, b), mul(c, d)) = finalExp(add(mulML(a, b), mulML(c, d)))
+ * i.e., an inner product can be computed by calling `mulML` for each pair of elements, adding the results, and calling `finalExp` once at the end (see the sketch below)
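+
+A sketch of an encrypted inner product using `mulML` and a single `finalExp`; it assumes `finalExp` has the form `finalExp(CipherTextGT& out, const CipherTextGT& in)`, matching the relation `mul(a, b) = finalExp(mulML(a, b))` above:
+```
+#include <mcl/she.hpp>
+#include <stdio.h>
+int main()
+{
+ using namespace mcl::she;
+ init();
+ SecretKey sec;
+ sec.setByCSPRNG();
+ PublicKey pub;
+ sec.getPublicKey(pub);
+ const int x[] = { 1, 2, 3 };
+ const int y[] = { 4, 5, 6 };
+ CipherTextGT sum, t;
+ for (int i = 0; i < 3; i++) {
+  CipherTextG1 cx;
+  CipherTextG2 cy;
+  pub.enc(cx, x[i]);
+  pub.enc(cy, y[i]);
+  CipherTextGT::mulML(t, cx, cy); // Miller loop only
+  if (i == 0) {
+   sum = t;
+  } else {
+   CipherTextGT::add(sum, sum, t);
+  }
+ }
+ CipherTextGT::finalExp(sum, sum); // one final exponentiation at the end
+ printf("inner product = %d\n", (int)sec.dec(sum)); // 1*4 + 2*5 + 3*6 = 32
+}
+```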
+
+## Zero knowledge proof class
+
+### Abstract
+* ZkpBin ; verify, without decrypting, that `m` is 0 or 1 for a ciphertext `encGi(m)(i = 1, 2, T)`
+* ZkpEq ; verify that `m1 = m2` for ciphertexts `encG1(m1)` and `encG2(m2)`
+* ZkpBinEq ; verify that `m1 = m2 = 0` or `1` for ciphertexts `encG1(m1)` and `encG2(m2)`
+
+### API
+PK = PublicKey or PrecomputedPublicKey
+
+* `void PK::encWithZkpBin(CipherTextG1& c, Zkp& zkp, int m) const`(C++)
+* `void PK::encWithZkpBin(CipherTextG2& c, Zkp& zkp, int m) const`(C++)
+* `[CipherTextG1, ZkpBin] PK::encWithZkpBinG1(m)`(JS)
+* `[CipherTextG2, ZkpBin] PK::encWithZkpBinG2(m)`(JS)
+ * encrypt `m`(=0 or 1) and set the ciphertext `c` and zero-knowledge proof `zkp`(or return [c, zkp]); see the sketch after this list
+ * throw an exception unless m is 0 or 1
+* `void PK::encWithZkpEq(CipherTextG1& c1, CipherTextG2& c2, ZkpEq& zkp, const INT& m) const`(C++)
+* `[CipherTextG1, CipherTextG2, ZkpEq] PK::encWithZkpEq(m)`(JS)
+ * encrypt `m` and set the ciphertexts `c1`, `c2` and zero-knowledge proof `zkp`(or return [c1, c2, zkp])
+* `void PK::encWithZkpBinEq(CipherTextG1& c1, CipherTextG2& c2, ZkpBinEq& zkp, int m) const`(C++)
+* `[CipherTextG1, CipherTextG2, ZkpEqBin] PK::encWithZkpBinEq(m)`(JS)
+ * encrypt `m`(=0 or 1) and set the ciphertexts `c1`, `c2` and zero-knowledge proof `zkp`(or return [c1, c2, zkp])
+ * throw an exception unless m is 0 or 1
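+
+A sketch of encrypting a bit together with its proof. The proof type is written `Zkp` in the signatures above; `ZkpBin` from the class list is used below. Note that the verification call `pub.verify(c, zkp)` is an assumption and is not part of the API listed here:
+```
+#include <mcl/she.hpp>
+#include <stdio.h>
+int main()
+{
+ using namespace mcl::she;
+ init();
+ SecretKey sec;
+ sec.setByCSPRNG();
+ PublicKey pub;
+ sec.getPublicKey(pub);
+ CipherTextG1 c;
+ ZkpBin zkp;
+ pub.encWithZkpBin(c, zkp, 1); // throws unless the plaintext is 0 or 1
+ bool ok = pub.verify(c, zkp); // assumed verification API, see the note above
+ printf("verify %s, dec=%d\n", ok ? "ok" : "ng", (int)sec.dec(c));
+}
+```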
+
+## Global functions
+
+* `void init(const CurveParam& cp, size_t hashSize = 1024, size_t tryNum = 2048)`(C++)
+* `void init(curveType = she.BN254, hashSize = 1024, tryNum = 2048)`(JS)
+ * initialize the decryption table of size `hashSize` for solving the DLP and set the maximum retry count `tryNum`.
+ * a plaintext `m` is decryptable if |m| <= hashSize * tryNum
+* `getHashTableGT().load(InputStream& is)`(C++)
+* `she.loadTableForGTDLP(Uint8Array a)`(JS)
+ * load a DLP table for CipherTextGT
+ * reset the value of `hashSize` used in `init()`
+ * `https://herumi.github.io/she-dlp-table/she-dlp-0-20-gt.bin` is a precomputed table
+* `void useDecG1ViaGT(bool use)`(C++/JS)
+* `void useDecG2ViaGT(bool use)`(C++/JS)
+ * decrypt CipherTextG1 and CipherTextG2 ciphertexts through CipherTextGT (see the sketch below)
+ * this is more efficient when decrypting large values, because one large DLP table for GT can be shared
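+
+A sketch of configuring these globals at start-up; the parameter values are arbitrary examples:
+```
+#include <mcl/she.hpp>
+int main()
+{
+ using namespace mcl::she;
+ // larger decryption range: |m| <= hashSize * tryNum = (1 << 20) * (1 << 11)
+ init(mcl::BN254, 1 << 20, 1 << 11);
+ // share one big DLP table by decrypting G1/G2 ciphertexts through GT
+ useDecG1ViaGT(true);
+ useDecG2ViaGT(true);
+ return 0;
+}
+```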
+
+# License
+
+[modified new BSD License](https://github.com/herumi/mcl/blob/master/COPYRIGHT)
+
+# Author
+
+光成滋生 MITSUNARI Shigeo(herumi@nifty.com)
diff --git a/vendor/github.com/tangerine-network/mcl/misc/she/she.pdf b/vendor/github.com/tangerine-network/mcl/misc/she/she.pdf
new file mode 100644
index 000000000..355a308b3
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/misc/she/she.pdf
Binary files differ
diff --git a/vendor/github.com/tangerine-network/mcl/mk.bat b/vendor/github.com/tangerine-network/mcl/mk.bat
new file mode 100644
index 000000000..19eb84197
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/mk.bat
@@ -0,0 +1,20 @@
+@echo off
+call setvar.bat
+if "%1"=="-s" (
+ echo use static lib
+ set CFLAGS=%CFLAGS% /DMCLBN_DONT_EXPORT
+) else if "%1"=="-d" (
+ echo use dynamic lib
+) else (
+ echo "mk (-s|-d) <source file>"
+ goto exit
+)
+set SRC=%2
+set EXE=%SRC:.cpp=.exe%
+set EXE=%EXE:.c=.exe%
+set EXE=%EXE:test\=bin\%
+set EXE=%EXE:sample\=bin\%
+echo cl %CFLAGS% %2 /Fe:%EXE% /link %LDFLAGS%
+cl %CFLAGS% %2 /Fe:%EXE% /link %LDFLAGS%
+
+:exit
diff --git a/vendor/github.com/tangerine-network/mcl/mklib.bat b/vendor/github.com/tangerine-network/mcl/mklib.bat
new file mode 100644
index 000000000..389b69009
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/mklib.bat
@@ -0,0 +1,39 @@
+@echo off
+call setvar.bat
+if "%1"=="dll" (
+ echo make dynamic library DLL
+) else (
+ echo make static library LIB
+)
+rem nasm -f win64 -D_WIN64 src\asm\low_x86-64.asm
+rem lib /OUT:lib\mcl.lib /nodefaultlib fp.obj src\asm\low_x86-64.obj
+
+echo cl /c %CFLAGS% src\fp.cpp /Foobj\fp.obj
+ cl /c %CFLAGS% src\fp.cpp /Foobj\fp.obj
+echo lib /nologo /OUT:lib\mcl.lib /nodefaultlib obj\fp.obj
+ lib /nologo /OUT:lib\mcl.lib /nodefaultlib obj\fp.obj
+
+if "%1"=="dll" (
+ echo cl /c %CFLAGS% src\bn_c256.cpp /Foobj\bn_c256.obj
+ cl /c %CFLAGS% src\bn_c256.cpp /Foobj\bn_c256.obj /DMCLBN_NO_AUTOLINK
+ echo link /nologo /DLL /OUT:bin\mclbn256.dll obj\bn_c256.obj obj\fp.obj %LDFLAGS% /implib:lib\mclbn256.lib
+ link /nologo /DLL /OUT:bin\mclbn256.dll obj\bn_c256.obj obj\fp.obj %LDFLAGS% /implib:lib\mclbn256.lib
+
+ echo cl /c %CFLAGS% src\bn_c384.cpp /Foobj\bn_c384.obj
+ cl /c %CFLAGS% src\bn_c384.cpp /Foobj\bn_c384.obj /DMCLBN_NO_AUTOLINK
+ echo link /nologo /DLL /OUT:bin\mclbn384.dll obj\bn_c384.obj obj\fp.obj %LDFLAGS% /implib:lib\mclbn384.lib
+ link /nologo /DLL /OUT:bin\mclbn384.dll obj\bn_c384.obj obj\fp.obj %LDFLAGS% /implib:lib\mclbn384.lib
+
+ echo cl /c %CFLAGS% src\she_c256.cpp /Foobj\she_c256.obj /DMCLBN_NO_AUTOLINK
+ cl /c %CFLAGS% src\she_c256.cpp /Foobj\she_c256.obj /DMCLBN_NO_AUTOLINK
+ echo link /nologo /DLL /OUT:bin\mclshe256.dll obj\she_c256.obj obj\fp.obj %LDFLAGS% /implib:lib\mclshe_c256.lib
+ link /nologo /DLL /OUT:bin\mclshe256.dll obj\she_c256.obj obj\fp.obj %LDFLAGS% /implib:lib\mclshe_c256.lib
+) else (
+ echo cl /c %CFLAGS% src\bn_c256.cpp /Foobj\bn_c256.obj
+ cl /c %CFLAGS% src\bn_c256.cpp /Foobj\bn_c256.obj
+ lib /nologo /OUT:lib\mclbn256.lib /nodefaultlib obj\bn_c256.obj lib\mcl.lib
+
+ echo cl /c %CFLAGS% src\bn_c384.cpp /Foobj\bn_c384.obj
+ cl /c %CFLAGS% src\bn_c384.cpp /Foobj\bn_c384.obj
+ lib /nologo /OUT:lib\mclbn384.lib /nodefaultlib obj\bn_c384.obj lib\mcl.lib
+)
diff --git a/vendor/github.com/tangerine-network/mcl/obj/.emptydir b/vendor/github.com/tangerine-network/mcl/obj/.emptydir
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/obj/.emptydir
diff --git a/vendor/github.com/tangerine-network/mcl/readme.md b/vendor/github.com/tangerine-network/mcl/readme.md
new file mode 100644
index 000000000..39b3d4d42
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/readme.md
@@ -0,0 +1,457 @@
+[![Build Status](https://travis-ci.org/herumi/mcl.png)](https://travis-ci.org/herumi/mcl)
+
+# mcl
+
+A portable and fast pairing-based cryptography library.
+
+# Abstract
+
+mcl is a library for pairing-based cryptography.
+The current version supports the optimal Ate pairing over BN curves and BLS12-381 curves.
+
+# News
+* (Break backward compatibility) libmcl_dy.a is renamed to libmcl.a
+ * The option SHARE_BASENAME_SUF is removed
+* 2nd argument of `mclBn_init` is changed from `maxUnitSize` to `compiledTimeVar`, which must be `MCLBN_COMPILED_TIME_VAR`.
+* break backward compatibility of mapToGi for BLS12. A map-to-function for BN is used.
+If `MCL_USE_OLD_MAPTO_FOR_BLS12` is defined, then the old function is used, but this will be removed in the future.
+
+# Supported architectures
+
+* x86-64 Windows + Visual Studio
+* x86, x86-64 Linux + gcc/clang
+* ARM Linux
+* ARM64 Linux
+* (maybe any platform to be supported by LLVM)
+* WebAssembly
+
+# Supported curves
+
+p(z) = 36z^4 + 36z^3 + 24z^2 + 6z + 1.
+
+* BN254 ; a BN curve over the 254-bit prime p(z) where z = -(2^62 + 2^55 + 1).
+* BN\_SNARK1 ; a BN curve over a 254-bit prime p such that n := p + 1 - t has high 2-adicity.
+* BN381\_1 ; a BN curve over the 381-bit prime p(z) where z = -(2^94 + 2^76 + 2^72 + 1).
+* BN462 ; a BN curve over the 462-bit prime p(z) where z = 2^114 + 2^101 - 2^14 - 1.
+* BLS12\_381 ; [a BLS12-381 curve](https://blog.z.cash/new-snark-curve/)
+
+# Benchmark
+
+## The latest benchmark(2018/11/7)
+
+### Intel Core i7-6700 3.4GHz(Skylake), Ubuntu 18.04.1 LTS
+
+curveType | binary|clang-6.0.0|gcc-7.3.0|
+----------|--------------------|-----------|---------|
+BN254 | bin/bn\_test.exe| 882Kclk| 933Kclk|
+BLS12-381 | bin/bls12\_test.exe| 2290Kclk| 2630Kclk|
+
+### Intel Core i7-7700 3.6GHz(Kaby Lake), Ubuntu 18.04.1 LTS on Windows 10 Vmware
+
+curveType | binary|clang-6.0.0|gcc-7.3.0|
+----------|--------------------|-----------|---------|
+BN254 | bin/bn\_test.exe| 900Kclk| 954Kclk|
+BLS12-381 | bin/bls12\_test.exe| 2340Kclk| 2680Kclk|
+
+* now investigating the reason why gcc is slower than clang.
+
+## Higher-bit BN curve benchmark
+
+For JavaScript(WebAssembly), see [ID based encryption demo](https://herumi.github.io/mcl-wasm/ibe-demo.html).
+
+parameter | x64| Firefox on x64|Safari on iPhone7|
+-----------|-----|---------------|-----------------|
+BN254 | 0.25| 2.48| 4.78|
+BN381\_1 | 0.95| 7.91| 11.74|
+BN462 | 2.16| 14.73| 22.77|
+
+* x64 : 'Kaby Lake Core i7-7700(3.6GHz)'.
+* Firefox : 64-bit version 58.
+* iPhone7 : iOS 11.2.1.
+* BN254 is by `test/bn_test.cpp`.
+* BN381\_1 and BN462 are by `test/bn512_test.cpp`.
+* All the timings are given in ms(milliseconds).
+
+The other benchmark results are [bench.txt](bench.txt).
+
+## An old benchmark of a BN curve BN254(2016/12/25).
+
+* x64, x86 ; Intel Core i7-6700 3.4GHz(Skylake) up to 4GHz on Ubuntu 16.04.
+ * `sudo cpufreq-set -g performance`
+* arm ; 900MHz quad-core ARM Cortex-A7 on Raspberry Pi2, Linux 4.4.11-v7+
+* arm64 ; 1.2GHz ARM Cortex-A53 [HiKey](http://www.96boards.org/product/hikey/)
+
+software | x64| x86| arm|arm64(msec)
+---------------------------------------------------------|------|-----|----|-----
+[ate-pairing](https://github.com/herumi/ate-pairing) | 0.21 | - | - | -
+mcl | 0.31 | 1.6 |22.6| 3.9
+[TEPLA](http://www.cipher.risk.tsukuba.ac.jp/tepla/) | 1.76 | 3.7 | 37 | 17.9
+[RELIC](https://github.com/relic-toolkit/relic) PRIME=254| 0.30 | 3.5 | 36 | -
+[MIRACL](https://github.com/miracl/MIRACL) ake12bnx | 4.2 | - | 78 | -
+[NEONabe](http://sandia.cs.cinvestav.mx/Site/NEONabe) | - | - | 16 | -
+
+* compile option for RELIC
+```
+cmake -DARITH=x64-asm-254 -DFP_PRIME=254 -DFPX_METHD="INTEG;INTEG;LAZYR" -DPP_METHD="LAZYR;OATEP"
+```
+
+# Installation Requirements
+
+* [GMP](https://gmplib.org/) and OpenSSL
+```
+apt install libgmp-dev libssl-dev
+```
+
+Create a working directory (e.g., work) and clone the following repositories.
+```
+mkdir work
+cd work
+git clone git://github.com/herumi/mcl
+git clone git://github.com/herumi/cybozulib_ext ; for only Windows
+```
+* Cybozulib\_ext is a prerequisite for running OpenSSL and GMP on VC (Visual C++).
+
+# (Option) Without GMP
+```
+make MCL_USE_GMP=0
+```
+Define `MCL_USE_VINT` before including `bn.hpp`
+
+# (Option) Without OpenSSL
+```
+make MCL_USE_OPENSSL=0
+```
+Define `MCL_DONT_USE_OPENSSL` before including `bn.hpp`
+
+# Build and test on x86-64 Linux, macOS, ARM and ARM64 Linux
+To make lib/libmcl.a and test it:
+```
+cd work/mcl
+make test
+```
+To benchmark a pairing:
+```
+bin/bn_test.exe
+```
+To make sample programs:
+```
+make sample
+```
+
+If you want to change the compiler options for optimization, set `CFLAGS_OPT_USER`.
+```
+make CFLAGS_OPT_USER="-O2"
+```
+
+## Build for 32-bit Linux
+Build OpenSSL and GMP for 32-bit mode and install them under `<lib32>`:
+```
+make ARCH=x86 CFLAGS_USER="-I <lib32>/include" LDFLAGS_USER="-L <lib32>/lib -Wl,-rpath,<lib32>/lib"
+```
+
+## Build for 64-bit Windows
+1) make static library and use it
+
+```
+mklib
+mk -s test\bn_c256_test.cpp
+bin\bn_c256_test.exe
+```
+2) make dynamic library and use it
+
+```
+mklib dll
+mk -d test\bn_c256_test.cpp
+bin\bn_c256_test.exe
+```
+
+Alternatively, open mcl.sln and build it, or if you have msbuild.exe:
+```
+msbuild /p:Configuration=Release
+```
+
+## Build with cmake
+For Linux,
+```
+mkdir build
+cd build
+cmake ..
+make
+```
+For Visual Studio,
+```
+mkdir build
+cd build
+cmake .. -A x64
+msbuild mcl.sln /p:Configuration=Release /m
+```
+## Build for wasm(WebAssembly)
+mcl supports emcc (Emscripten) and `test/bn_test.cpp` runs on browsers such as Firefox, Chrome and Edge.
+
+* [IBE on browser](https://herumi.github.io/mcl-wasm/ibe-demo.html)
+* [SHE on browser](https://herumi.github.io/she-wasm/she-demo.html)
+* [BLS signature on browser](https://herumi.github.io/bls-wasm/bls-demo.html)
+
+The timing of a pairing on `BN254` is 2.8msec on 64-bit Firefox with Skylake 3.4GHz.
+
+### Node.js
+
+* [mcl-wasm](https://www.npmjs.com/package/mcl-wasm) pairing library
+* [bls-wasm](https://www.npmjs.com/package/bls-wasm) BLS signature library
+* [she-wasm](https://www.npmjs.com/package/she-wasm) 2 Level Homomorphic Encryption library
+
+### SELinux
+mcl uses the Xbyak JIT engine if it is available on the x64 architecture;
+otherwise mcl uses slightly slower functions generated by LLVM.
+On CentOS, the SELinux security policy is enabled by default, which disables JIT.
+```
+% sudo setenforce 1
+% getenforce
+Enforcing
+% bin/bn_test.exe
+JIT 0
+pairing 1.496Mclk
+finalExp 581.081Kclk
+
+% sudo setenforce 0
+% getenforce
+Permissive
+% bin/bn_test.exe
+JIT 1
+pairing 1.394Mclk
+finalExp 546.259Kclk
+```
+
+# Libraries
+
+* G1 is defined over Fp and G2 is defined over Fp^2
+* The order of G1 and G2 is r.
+* Use `bn256.hpp` if only BN254 is used.
+
+## C++ library
+
+* libmcl.a ; static C++ library of mcl
+* libmcl.so ; shared C++ library of mcl
+* the default parameter of curveType is BN254
+
+header |support curveType |sizeof Fr|sizeof Fp|
+--------------|-------------------------|---------|---------|
+bn256.hpp |BN254 | 32 | 32 |
+bls12_381.hpp |BLS12_381, BN254 | 32 | 48 |
+bn384.hpp |BN381_1, BLS12_381, BN254| 48 | 48 |
+
+## C library
+
+* Define `MCLBN_FR_UNIT_SIZE` and `MCLBN_FP_UNIT_SIZE` and include bn.h
+* `MCLBN_FR_UNIT_SIZE` defaults to `MCLBN_FP_UNIT_SIZE` unless it is defined explicitly
+
+
+library |MCLBN_FR_UNIT_SIZE|MCLBN_FP_UNIT_SIZE|
+------------------|------------------|------------------|
+sizeof | Fr | Fp |
+libmclbn256.a | 4 | 4 |
+libmclbn384_256.a | 4 | 6 |
+libmclbn384.a | 6 | 6 |
+
+
+* libmclbn*.a ; static C library
+* libmclbn*.so ; shared C library
+
+### 2nd argument of `mclBn_init`
+Specify `MCLBN_COMPILED_TIME_VAR` as the 2nd argument of `mclBn_init`; it
+is defined as `MCLBN_FR_UNIT_SIZE * 10 + MCLBN_FP_UNIT_SIZE`.
+This parameter makes sure that the unit sizes used to build the library match those of the code using it (see the sketch below).
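+
+A minimal sketch of calling `mclBn_init` from C or C++ with this check value; `MCL_BN254` follows `sample/pairing_c.c`, and a non-zero return value is treated as an error here:
+```
+#define MCLBN_FP_UNIT_SIZE 4
+#include <mcl/bn.h>
+#include <stdio.h>
+int main()
+{
+ int err = mclBn_init(MCL_BN254, MCLBN_COMPILED_TIME_VAR);
+ if (err) {
+  printf("mclBn_init failed %d\n", err);
+  return 1;
+ }
+ return 0;
+}
+```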
+
+# How to initialize pairing library
+Call `mcl::bn256::initPairing` before calling any operations.
+```
+#include <mcl/bn256.hpp>
+mcl::bn::CurveParam cp = mcl::BN254; // or mcl::BN_SNARK1
+mcl::bn256::initPairing(cp);
+mcl::bn256::G1 P(...);
+mcl::bn256::G2 Q(...);
+mcl::bn256::Fp12 e;
+mcl::bn256::pairing(e, P, Q);
+```
+1. (BN254) a BN curve over the 254-bit prime p = p(z) where z = -(2^62 + 2^55 + 1).
+2. (BN_SNARK1) a BN curve over a 254-bit prime p such that n := p + 1 - t has high 2-adicity.
+3. BN381_1 with `mcl/bn384.hpp`.
+4. BN462 with `mcl/bn512.hpp`.
+
+See [test/bn_test.cpp](https://github.com/herumi/mcl/blob/master/test/bn_test.cpp).
+
+## Default constructor of Fp, Ec, etc.
+A default constructor does not initialize the instance.
+Set a valid value before referring to it.
+
+## Definition of groups
+
+The curve equation for a BN curve is:
+
+ E/Fp: y^2 = x^3 + b .
+
+* the cyclic group G1 is instantiated as E(Fp)[n] where n := p + 1 - t;
+* the cyclic group G2 is instantiated as the inverse image of E'(Fp^2)[n] under a twisting isomorphism phi from E' to E; and
+* the pairing e: G1 x G2 -> Fp12 is the optimal ate pairing.
+
+The field Fp12 is constructed via the following tower:
+
+* Fp2 = Fp[u] / (u^2 + 1)
+* Fp6 = Fp2[v] / (v^3 - Xi) where Xi = u + 1
+* Fp12 = Fp6[w] / (w^2 - v)
+* GT = { x in Fp12 | x^r = 1 }
+
+
+## Arithmetic operations
+
+G1 and G2 are additive groups with the following operations:
+
+* T::add(T& z, const T& x, const T& y); // z = x + y
+* T::sub(T& z, const T& x, const T& y); // z = x - y
+* T::neg(T& y, const T& x); // y = -x
+* T::mul(T& z, const T& x, const INT& y); // z = scalar multiplication of x by y
+
+Remark: &z == &x or &z == &y is allowed. INT means an integer type such as Fr, int, or mpz_class.
+
+`T::mul` uses the GLV method, so `G2::mul` returns a wrong value if x is not in G2.
+Use `T::mulGeneric(T& z, const T& x, const INT& y)` for x in phi^-1(E'(Fp^2)) - G2.
+
+Fp, Fp2, Fp6 and Fp12 have the following operations:
+
+* T::add(T& z, const T& x, const T& y); // z = x + y
+* T::sub(T& z, const T& x, const T& y); // z = x - y
+* T::mul(T& z, const T& x, const T& y); // z = x * y
+* T::div(T& z, const T& x, const T& y); // z = x / y
+* T::neg(T& y, const T& x); // y = -x
+* T::inv(T& y, const T& x); // y = 1/x
+* T::pow(T& z, const T& x, const INT& y); // z = x^y
+* Fp12::unitaryInv(T& y, const T& x); // y = conjugate of x
+
+Remark: `Fp12::pow` uses the GLV method, so it returns a wrong value if x is not in GT.
+Use `Fp12::powGeneric` for x in Fp12 - GT.
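+
+A short sketch (BN254) exercising a few of the operations above; `hashAndMapToG1`/`hashAndMapToG2` are described in the next section:
+```
+#include <mcl/bn256.hpp>
+using namespace mcl::bn256;
+int main()
+{
+ initPairing();
+ G1 P, P2, Q;
+ hashAndMapToG1(P, "abc", 3);
+ G1::add(P2, P, P);   // P2 = P + P
+ G1::mul(Q, P, 2);    // Q = 2 * P (scalar multiplication)
+ G2 R;
+ hashAndMapToG2(R, "def", 3);
+ Fp12 e, e3;
+ pairing(e, P, R);
+ Fp12::pow(e3, e, 3); // e3 = e^3 in GT
+ return P2 == Q ? 0 : 1;
+}
+```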
+
+## Map To points
+
+Use these functions to make a point of G1 and G2.
+
+* mapToG1(G1& P, const Fp& x); // assume x != 0
+* mapToG2(G2& P, const Fp2& x);
+* hashAndMapToG1(G1& P, const void *buf, size_t bufSize); // set P by the hash value of [buf, bufSize)
+* hashAndMapToG2(G2& P, const void *buf, size_t bufSize);
+
+These functions map x into Gi according to [\[_Faster hashing to G2_\]].
+
+## String format of G1 and G2
+A point of G1 or G2 is represented by three coordinates (x, y, z) in Jacobian coordinates.
+The normalize() method converts it to the affine coordinates (x, y, 1), or to (0, 0, 0) for the point at infinity.
+
+The getStr() method returns one of:
+
+* `0` ; infinity
+* `1 <x> <y>` ; not compressed format
+* `2 <x>` ; compressed format for even y
+* `3 <x>` ; compressed format for odd y
+
+## Generator of G1 and G2
+
+If you want to use the same generators of BLS12-381 with [zkcrypto](https://github.com/zkcrypto/pairing/tree/master/src/bls12_381#g2) then,
+
+```
+// G1 P
+P.setStr('1 3685416753713387016781088315183077757961620795782546409894578378688607592378376318836054947676345821548104185464507 1339506544944476473020471379941921221584933875938349620426543736416511423956333506472724655353366534992391756441569')
+
+// G2 Q
+Q.setStr('1 352701069587466618187139116011060144890029952792775240219908644239793785735715026873347600343865175952761926303160 3059144344244213709971259814753781636986470325476647558659373206291635324768958432433509563104347017837885763365758 1985150602287291935568054521177171638300868978215655730859378665066344726373823718423869104263333984641494340347905 927553665492332455747201965776037880757740193453592970025027978793976877002675564980949289727957565575433344219582')
+```
+
+## Serialization format of G1 and G2
+
+Pseudo-code to serialize a point p:
+```
+if bit-length(p) % 8 != 0:
+ size = Fp::getByteSize()
+ if p is zero:
+ return [0] * size
+ else:
+ s = x.serialize()
+ # x in Fp2 is odd <=> x.a is odd
+ if y is odd:
+ s[byte-length(s) - 1] |= 0x80
+ return s
+else:
+ size = Fp::getByteSize() + 1
+ if p is zero:
+ return [0] * size
+ else:
+ s = x.serialize()
+ if y is odd:
+ return 2:s
+ else:
+ return 3:s
+```
+
+## Verify an element in G2
+`G2::isValid()` checks that the element is on the curve of G2 and that its order is r, to prevent subgroup attacks.
+`G2::set()`, `G2::setStr` and `operator<<` also check the order.
+If you verify this outside of the library, you can disable the check by calling `G2::verifyOrderG2(false)`.
+
+# How to make asm files (optional)
+The asm files generated this way are already included in `src/asm`, so this step is usually unnecessary.
+
+Install [LLVM](http://llvm.org/).
+```
+make MCL_USE_LLVM=1 LLVM_VER=<llvm-version> UPDATE_ASM=1
+```
+For example, specify `-3.8` for `<llvm-version>` if `opt-3.8` and `llc-3.8` are installed.
+
+If you want to use Fp with 1024-bit prime on x86-64, then
+```
+make MCL_USE_LLVM=1 LLVM_VER=<llvm-version> UPDATE_ASM=1 MCL_MAX_BIT_SIZE=1024
+```
+
+# API for Two level homomorphic encryption
+* [_Efficient Two-level Homomorphic Encryption in Prime-order Bilinear Groups and A Fast Implementation in WebAssembly_](https://dl.acm.org/citation.cfm?doid=3196494.3196552), N. Attrapadung, G. Hanaoka, S. Mitsunari, Y. Sakai,
+K. Shimizu, and T. Teruya. ASIACCS 2018
+* [she-api](https://github.com/herumi/mcl/blob/master/misc/she/she-api.md)
+* [she-api(Japanese)](https://github.com/herumi/mcl/blob/master/misc/she/she-api-ja.md)
+
+# Java API
+See [java.md](https://github.com/herumi/mcl/blob/master/java/java.md)
+
+# License
+
+modified new BSD License
+http://opensource.org/licenses/BSD-3-Clause
+
+This library contains parts of the following software, licensed under BSD-3-Clause.
+* [xbyak](https://github.com/herumi/xbyak)
+* [cybozulib](https://github.com/herumi/cybozulib)
+* [Lifted-ElGamal](https://github.com/aistcrypt/Lifted-ElGamal)
+
+# References
+* [ate-pairing](https://github.com/herumi/ate-pairing/)
+* [_Faster Explicit Formulas for Computing Pairings over Ordinary Curves_](http://dx.doi.org/10.1007/978-3-642-20465-4_5),
+ D.F. Aranha, K. Karabina, P. Longa, C.H. Gebotys, J. Lopez,
+ EUROCRYPT 2011, ([preprint](http://eprint.iacr.org/2010/526))
+* [_High-Speed Software Implementation of the Optimal Ate Pairing over Barreto-Naehrig Curves_](http://dx.doi.org/10.1007/978-3-642-17455-1_2),
+ Jean-Luc Beuchat, Jorge Enrique González Díaz, Shigeo Mitsunari, Eiji Okamoto, Francisco Rodríguez-Henríquez, Tadanori Teruya,
+ Pairing 2010, ([preprint](http://eprint.iacr.org/2010/354))
+* [_Faster hashing to G2_](http://dx.doi.org/10.1007/978-3-642-28496-0_25),Laura Fuentes-Castañeda, Edward Knapp, Francisco Rodríguez-Henríquez,
+ SAC 2011, ([preprint](https://eprint.iacr.org/2008/530))
+* [_Skew Frobenius Map and Efficient Scalar Multiplication for Pairing–Based Cryptography_](https://www.researchgate.net/publication/221282560_Skew_Frobenius_Map_and_Efficient_Scalar_Multiplication_for_Pairing-Based_Cryptography),
+Y. Sakemi, Y. Nogami, K. Okeya, Y. Morikawa, CANS 2008.
+
+# History
+
+* 2019/Mar/22 v0.92 shortcut for Ec::mul(Px, P, x) if P = 0
+* 2019/Mar/21 python binding of she256 for Linux/Mac/Windows
+* 2019/Mar/14 v0.91 modp supports mcl-wasm
+* 2019/Mar/12 v0.90 fix Vint::setArray(x) for x == this
+* 2019/Mar/07 add mclBnFr_setLittleEndianMod, mclBnFp_setLittleEndianMod
+* 2019/Feb/20 LagrangeInterpolation sets out = yVec[0] if k = 1
+* 2019/Jan/31 add mclBnFp_mapToG1, mclBnFp2_mapToG2
+* 2019/Jan/31 fix crash on x64-CPU without AVX (thanks to mortdeus)
+
+# Author
+
+光成滋生 MITSUNARI Shigeo(herumi@nifty.com)
diff --git a/vendor/github.com/tangerine-network/mcl/release.props b/vendor/github.com/tangerine-network/mcl/release.props
new file mode 100644
index 000000000..886ce6890
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/release.props
@@ -0,0 +1,12 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ImportGroup Label="PropertySheets" />
+ <PropertyGroup Label="UserMacros" />
+ <PropertyGroup />
+ <ItemDefinitionGroup>
+ <ClCompile>
+ <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
+ </ClCompile>
+ </ItemDefinitionGroup>
+ <ItemGroup />
+</Project> \ No newline at end of file
diff --git a/vendor/github.com/tangerine-network/mcl/sample/bench.cpp b/vendor/github.com/tangerine-network/mcl/sample/bench.cpp
new file mode 100644
index 000000000..0f865b189
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/sample/bench.cpp
@@ -0,0 +1,233 @@
+#include <cybozu/benchmark.hpp>
+#include <cybozu/option.hpp>
+#include <cybozu/xorshift.hpp>
+#include <mcl/fp.hpp>
+#include <mcl/conversion.hpp>
+#include <mcl/ecparam.hpp>
+
+typedef mcl::FpT<> Fp;
+typedef mcl::FpT<mcl::ZnTag> Zn;
+typedef mcl::EcT<Fp> Ec;
+
+void benchFpSub(const char *pStr, const char *xStr, const char *yStr, mcl::fp::Mode mode)
+{
+ const char *s = mcl::fp::ModeToStr(mode);
+ Fp::init(pStr, mode);
+ Fp x(xStr);
+ Fp y(yStr);
+
+ double addT, subT, mulT, sqrT, invT;
+ CYBOZU_BENCH_T(addT, Fp::add, x, x, x);
+ CYBOZU_BENCH_T(subT, Fp::sub, x, x, y);
+ CYBOZU_BENCH_T(mulT, Fp::mul, x, x, x);
+ CYBOZU_BENCH_T(sqrT, Fp::sqr, x, x);
+ CYBOZU_BENCH_T(invT, x += y;Fp::inv, x, x); // avoid same jmp
+ printf("%10s bit % 3d add %8.2f sub %8.2f mul %8.2f sqr %8.2f inv %8.2f\n", s, (int)Fp::getBitSize(), addT, subT, mulT, sqrT, invT);
+}
+
+void benchFp(size_t bitSize, int mode)
+{
+ const struct {
+ size_t bitSize;
+ const char *p;
+ const char *x;
+ const char *y;
+ } tbl[] = {
+ {
+ 192,
+ "0xfffffffffffffffffffffffe26f2fc170f69466a74defd8d",
+ "0x148094810948190412345678901234567900342423332197",
+ "0x7fffffffffffffffffffffe26f2fc170f69466a74defd8d",
+ },
+ {
+ 256,
+ "0x2523648240000001ba344d80000000086121000000000013a700000000000013",
+ "0x1480948109481904123456789234234242423424201234567900342423332197",
+ "0x151342342342341517fffffffffffffffffffffe26f2fc170f69466a74defd8d",
+ },
+ {
+ 384,
+ "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff",
+ "0x19481084109481094820948209482094820984290482212345678901234567900342308472047204720422423332197",
+ "0x209348209481094820984209842094820948204204243123456789012345679003423084720472047204224233321972",
+
+ },
+ {
+ 521,
+ "0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "0x2908209582095820941098410948109482094820984209840294829049240294242498540975555312345678901234567900342308472047204720422423332197",
+ "0x3948384209834029834092384204920349820948205872380573205782385729385729385723985837ffffffffffffffffffffffe26f2fc170f69466a74defd8d",
+
+ },
+ };
+ for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) {
+ if (bitSize != 0 && tbl[i].bitSize != bitSize) continue;
+ if (mode & 1) benchFpSub(tbl[i].p, tbl[i].x, tbl[i].y, mcl::fp::FP_GMP);
+ if (mode & 2) benchFpSub(tbl[i].p, tbl[i].x, tbl[i].y, mcl::fp::FP_GMP_MONT);
+#ifdef MCL_USE_LLVM
+ if (mode & 4) benchFpSub(tbl[i].p, tbl[i].x, tbl[i].y, mcl::fp::FP_LLVM);
+ if (mode & 8) benchFpSub(tbl[i].p, tbl[i].x, tbl[i].y, mcl::fp::FP_LLVM_MONT);
+#endif
+#ifdef MCL_USE_XBYAK
+ if (mode & 16) benchFpSub(tbl[i].p, tbl[i].x, tbl[i].y, mcl::fp::FP_XBYAK);
+#endif
+ }
+}
+
+void benchEcSub(const mcl::EcParam& para, mcl::fp::Mode mode, mcl::ec::Mode ecMode)
+{
+ Fp::init(para.p, mode);
+ Zn::init(para.n);
+ Ec::init(para.a, para.b, ecMode);
+ Fp x(para.gx);
+ Fp y(para.gy);
+ Ec P(x, y);
+ Ec P2; Ec::add(P2, P, P);
+ Ec Q = P + P + P;
+ double addT, add2T, subT, dblT, mulT, mulCTT, mulRandT, mulCTRandT, normT;
+ CYBOZU_BENCH_T(addT, P = P2; Ec::add, Q, P, Q);
+ P.normalize();
+ CYBOZU_BENCH_T(add2T, Ec::add, Q, P, Q);
+ CYBOZU_BENCH_T(subT, Ec::sub, Q, P, Q);
+ CYBOZU_BENCH_T(dblT, Ec::dbl, P, P);
+ Zn z("3");
+ CYBOZU_BENCH_T(mulT, Ec::mul, Q, P, z);
+ CYBOZU_BENCH_T(mulCTT, Ec::mulCT, Q, P, z);
+ cybozu::XorShift rg;
+ z.setRand(rg);
+ CYBOZU_BENCH_T(mulRandT, Ec::mul, Q, P, z);
+ CYBOZU_BENCH_T(mulCTRandT, Ec::mulCT, Q, P, z);
+ CYBOZU_BENCH_T(normT, Q = P; Q.normalize);
+ printf("%10s %10s add %8.2f add2 %8.2f sub %8.2f dbl %8.2f mul(3) %8.2f mulCT(3) %8.2f mul(rand) %8.2f mulCT(rand) %8.2f norm %8.2f\n", para.name, mcl::fp::ModeToStr(mode), addT, add2T, subT, dblT, mulT, mulCTT, mulRandT, mulCTRandT, normT);
+
+}
+void benchEc(size_t bitSize, int mode, mcl::ec::Mode ecMode)
+{
+ const struct mcl::EcParam tbl[] = {
+ mcl::ecparam::p160_1,
+ mcl::ecparam::secp160k1,
+ mcl::ecparam::secp192k1,
+ mcl::ecparam::NIST_P192,
+ mcl::ecparam::secp224k1,
+ mcl::ecparam::secp256k1,
+ mcl::ecparam::NIST_P224,
+ mcl::ecparam::NIST_P256,
+// mcl::ecparam::secp384r1,
+ mcl::ecparam::NIST_P384,
+// mcl::ecparam::secp521r1,
+ mcl::ecparam::NIST_P521,
+ };
+ for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) {
+ if (bitSize != 0 && tbl[i].bitSize != bitSize) continue;
+ benchEcSub(tbl[i], mcl::fp::FP_AUTO, ecMode);
+ if (mode & 1) benchEcSub(tbl[i], mcl::fp::FP_GMP, ecMode);
+ if (mode & 2) benchEcSub(tbl[i], mcl::fp::FP_GMP_MONT, ecMode);
+#ifdef MCL_USE_LLVM
+ if (mode & 4) benchEcSub(tbl[i], mcl::fp::FP_LLVM, ecMode);
+ if (mode & 8) benchEcSub(tbl[i], mcl::fp::FP_LLVM_MONT, ecMode);
+#endif
+#ifdef MCL_USE_XBYAK
+ if (mode & 16) benchEcSub(tbl[i], mcl::fp::FP_XBYAK, ecMode);
+#endif
+ }
+}
+
+void benchToStr16()
+{
+ puts("benchToStr16");
+ const char *tbl[] = {
+ "0x0",
+ "0x5",
+ "0x123",
+ "0x123456789012345679adbc",
+ "0xffffffff26f2fc170f69466a74defd8d",
+ "0x100000000000000000000000000000033",
+ "0x11ee12312312940000000000000000000000000002342343"
+ };
+ Fp::init("0xffffffffffffffffffffffffffffffffffffffffffffff13");
+ for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) {
+ char buf[128];
+ std::string str;
+ Fp x(tbl[i]);
+ CYBOZU_BENCH("fp::arrayToHex", mcl::fp::arrayToHex, buf, sizeof(buf), x.getUnit(), x.getUnitSize(), true);
+ mpz_class y(tbl[i]);
+ CYBOZU_BENCH("gmp:getStr ", mcl::gmp::getStr, str, y, 16);
+ }
+}
+
+void benchFromStr16()
+{
+ puts("benchFromStr16");
+ const char *tbl[] = {
+ "0",
+ "5",
+ "123",
+ "123456789012345679adbc",
+ "ffffffff26f2fc170f69466a74defd8d",
+ "100000000000000000000000000000033",
+ "11ee12312312940000000000000000000000000002342343"
+ };
+ Fp::init("0xffffffffffffffffffffffffffffffffffffffffffffff13");
+ for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) {
+ std::string str = tbl[i];
+ Fp x;
+ const size_t N = 64;
+ mcl::fp::Unit buf[N];
+ CYBOZU_BENCH("fp:hexToArray", mcl::fp::hexToArray, buf, N, str.c_str(), str.size());
+
+ mpz_class y;
+ CYBOZU_BENCH("gmp:setStr ", mcl::gmp::setStr, y, str, 16);
+ }
+}
+
+int main(int argc, char *argv[])
+ try
+{
+ size_t bitSize;
+ int mode;
+ bool ecOnly;
+ bool fpOnly;
+ bool misc;
+ mcl::ec::Mode ecMode;
+ std::string ecModeStr;
+ cybozu::Option opt;
+ opt.appendOpt(&bitSize, 0, "s", ": bitSize");
+ opt.appendOpt(&mode, 0, "m", ": mode(0:all, sum of 1:gmp, 2:gmp+mont, 4:llvm, 8:llvm+mont, 16:xbyak");
+ opt.appendBoolOpt(&ecOnly, "ec", ": ec only");
+ opt.appendBoolOpt(&fpOnly, "fp", ": fp only");
+ opt.appendBoolOpt(&misc, "misc", ": other benchmark");
+ opt.appendOpt(&ecModeStr, "jacobi", "ecmode", ": jacobi or proj");
+ opt.appendHelp("h", ": show this message");
+ if (!opt.parse(argc, argv)) {
+ opt.usage();
+ return 1;
+ }
+ if (ecModeStr == "jacobi") {
+ ecMode = mcl::ec::Jacobi;
+ } else if (ecModeStr == "proj") {
+ ecMode = mcl::ec::Proj;
+ } else {
+ printf("bad ecstr %s\n", ecModeStr.c_str());
+ opt.usage();
+ return 1;
+ }
+ if (mode < 0 || mode > 31) {
+ printf("bad mode %d\n", mode);
+ opt.usage();
+ return 1;
+ }
+ if (mode == 0) mode = 31;
+ if (misc) {
+ benchToStr16();
+ benchFromStr16();
+ } else {
+ if (!ecOnly) benchFp(bitSize, mode);
+ if (!fpOnly) {
+ printf("ecMode=%s\n", ecModeStr.c_str());
+ benchEc(bitSize, mode, ecMode);
+ }
+ }
+} catch (std::exception& e) {
+ printf("ERR %s\n", e.what());
+}
+
diff --git a/vendor/github.com/tangerine-network/mcl/sample/bls_sig.cpp b/vendor/github.com/tangerine-network/mcl/sample/bls_sig.cpp
new file mode 100644
index 000000000..d75f7d427
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/sample/bls_sig.cpp
@@ -0,0 +1,70 @@
+/**
+ @file
+ @brief a sample of BLS signature
+ see https://github.com/herumi/bls
+ @author MITSUNARI Shigeo(@herumi)
+ @license modified new BSD license
+ http://opensource.org/licenses/BSD-3-Clause
+
+*/
+#include <mcl/bn256.hpp>
+#include <iostream>
+
+using namespace mcl::bn256;
+
+void Hash(G1& P, const std::string& m)
+{
+ Fp t;
+ t.setHashOf(m);
+ mapToG1(P, t);
+}
+
+void KeyGen(Fr& s, G2& pub, const G2& Q)
+{
+ s.setRand();
+ G2::mul(pub, Q, s); // pub = sQ
+}
+
+void Sign(G1& sign, const Fr& s, const std::string& m)
+{
+ G1 Hm;
+ Hash(Hm, m);
+ G1::mul(sign, Hm, s); // sign = s H(m)
+}
+
+bool Verify(const G1& sign, const G2& Q, const G2& pub, const std::string& m)
+{
+ Fp12 e1, e2;
+ G1 Hm;
+ Hash(Hm, m);
+ pairing(e1, sign, Q); // e1 = e(sign, Q)
+ pairing(e2, Hm, pub); // e2 = e(Hm, sQ)
+ return e1 == e2;
+}
+
+int main(int argc, char *argv[])
+{
+ std::string m = argc == 1 ? "hello mcl" : argv[1];
+
+ // setup parameter
+ initPairing();
+ G2 Q;
+ mapToG2(Q, 1);
+
+ // generate secret key and public key
+ Fr s;
+ G2 pub;
+ KeyGen(s, pub, Q);
+ std::cout << "secret key " << s << std::endl;
+ std::cout << "public key " << pub << std::endl;
+
+ // sign
+ G1 sign;
+ Sign(sign, s, m);
+ std::cout << "msg " << m << std::endl;
+ std::cout << "sign " << sign << std::endl;
+
+ // verify
+ bool ok = Verify(sign, Q, pub, m);
+ std::cout << "verify " << (ok ? "ok" : "ng") << std::endl;
+}
diff --git a/vendor/github.com/tangerine-network/mcl/sample/ecdh.cpp b/vendor/github.com/tangerine-network/mcl/sample/ecdh.cpp
new file mode 100644
index 000000000..d5c4a31b2
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/sample/ecdh.cpp
@@ -0,0 +1,64 @@
+/*
+ sample of Elliptic Curve Diffie-Hellman key sharing
+*/
+#include <iostream>
+#include <fstream>
+#include <cybozu/random_generator.hpp>
+#include <mcl/fp.hpp>
+#include <mcl/ecparam.hpp>
+
+typedef mcl::FpT<> Fp;
+typedef mcl::FpT<mcl::ZnTag> Zn;
+typedef mcl::EcT<Fp> Ec;
+
+int main()
+{
+ cybozu::RandomGenerator rg;
+ /*
+ system setup with a parameter secp192k1 recommended by SECG
+ Ec is an elliptic curve over Fp
+ the cyclic group of <P> is isomorphic to Zn
+ */
+ const mcl::EcParam& para = mcl::ecparam::secp192k1;
+ Zn::init(para.n);
+ Fp::init(para.p);
+ Ec::init(para.a, para.b);
+ const Ec P(Fp(para.gx), Fp(para.gy));
+
+ /*
+ Alice setups a private key a and public key aP
+ */
+ Zn a;
+ Ec aP;
+
+ a.setRand(rg);
+ Ec::mul(aP, P, a); // aP = a * P;
+
+ std::cout << "aP=" << aP << std::endl;
+
+ /*
+ Bob setups a private key b and public key bP
+ */
+ Zn b;
+ Ec bP;
+
+ b.setRand(rg);
+ Ec::mul(bP, P, b); // bP = b * P;
+
+ std::cout << "bP=" << bP << std::endl;
+
+ Ec abP, baP;
+
+ // Alice uses bP(B's public key) and a(A's private key)
+ Ec::mul(abP, bP, a); // abP = a * (bP)
+
+ // Bob uses aP(A's public key) and b(B's private key)
+ Ec::mul(baP, aP, b); // baP = b * (aP)
+
+ if (abP == baP) {
+ std::cout << "key sharing succeed:" << abP << std::endl;
+ } else {
+ std::cout << "ERR(not here)" << std::endl;
+ }
+}
+
diff --git a/vendor/github.com/tangerine-network/mcl/sample/large.cpp b/vendor/github.com/tangerine-network/mcl/sample/large.cpp
new file mode 100644
index 000000000..60b2ac900
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/sample/large.cpp
@@ -0,0 +1,125 @@
+/*
+ large prime sample for 64-bit arch
+ make MCL_USE_LLVM=1 MCL_MAX_BIT_SIZE=768
+*/
+#include <mcl/fp.hpp>
+#include <cybozu/benchmark.hpp>
+#include <iostream>
+#include "../src/low_func.hpp"
+
+typedef mcl::FpT<> Fp;
+
+using namespace mcl::fp;
+const size_t N = 12;
+
+void testMul()
+{
+ Unit ux[N], uy[N], a[N * 2], b[N * 2];
+ for (size_t i = 0; i < N; i++) {
+ ux[i] = -i * i + 5;
+ uy[i] = -i * i + 9;
+ }
+ MulPreCore<N, Gtag>::f(a, ux, uy);
+ MulPreCore<N, Ltag>::f(b, ux, uy);
+ for (size_t i = 0; i < N * 2; i++) {
+ if (a[i] != b[i]) {
+ printf("ERR %016llx %016llx\n", (long long)a[i], (long long)b[i]);
+ }
+ }
+ puts("end testMul");
+ CYBOZU_BENCH("gmp ", (MulPreCore<N, Gtag>::f), ux, ux, uy);
+ CYBOZU_BENCH("kara", (MulPre<N, Gtag>::karatsuba), ux, ux, uy);
+}
+
+void mulGmp(mpz_class& z, const mpz_class& x, const mpz_class& y, const mpz_class& p)
+{
+ z = (x * y) % p;
+}
+void compareGmp(const std::string& pStr)
+{
+ Fp::init(pStr);
+ std::string xStr = "2104871209348712947120947102843728";
+ std::string s1, s2;
+ {
+ Fp x(xStr);
+ CYBOZU_BENCH_C("mul by mcl", 1000, Fp::mul, x, x, x);
+ std::ostringstream os;
+ os << x;
+ s1 = os.str();
+ }
+ {
+ const mpz_class p(pStr);
+ mpz_class x(xStr);
+ CYBOZU_BENCH_C("mul by GMP", 1000, mulGmp, x, x, x, p);
+ std::ostringstream os;
+ os << x;
+ s2 = os.str();
+ }
+ if (s1 != s2) {
+ puts("ERR");
+ }
+}
+
+void test(const std::string& pStr, mcl::fp::Mode mode)
+{
+ printf("test %s\n", mcl::fp::ModeToStr(mode));
+ Fp::init(pStr, mode);
+ const mcl::fp::Op& op = Fp::getOp();
+ printf("bitSize=%d\n", (int)Fp::getBitSize());
+ mpz_class p(pStr);
+ Fp x = 123456;
+ Fp y;
+ Fp::pow(y, x, p);
+ std::cout << y << std::endl;
+ if (x != y) {
+ std::cout << "err:pow:" << y << std::endl;
+ return;
+ }
+ const size_t N = 24;
+ mcl::fp::Unit ux[N], uy[N];
+ for (size_t i = 0; i < N; i++) {
+ ux[i] = -i * i + 5;
+ uy[i] = -i * i + 9;
+ }
+ CYBOZU_BENCH("mulPre", op.fpDbl_mulPre, ux, ux, uy);
+ CYBOZU_BENCH("sqrPre", op.fpDbl_sqrPre, ux, ux);
+ CYBOZU_BENCH("add", op.fpDbl_add, ux, ux, ux, op.p);
+ CYBOZU_BENCH("sub", op.fpDbl_sub, ux, ux, ux, op.p);
+ if (op.fpDbl_addPre) {
+ CYBOZU_BENCH("addPre", op.fpDbl_addPre, ux, ux, ux);
+ CYBOZU_BENCH("subPre", op.fpDbl_subPre, ux, ux, ux);
+ }
+ CYBOZU_BENCH("mont", op.fpDbl_mod, ux, ux, op.p);
+ CYBOZU_BENCH("mul", Fp::mul, x, x, x);
+ compareGmp(pStr);
+}
+
+void testAll(const std::string& pStr)
+{
+ test(pStr, mcl::fp::FP_GMP);
+ test(pStr, mcl::fp::FP_GMP_MONT);
+#ifdef MCL_USE_LLVM
+ test(pStr, mcl::fp::FP_LLVM);
+ test(pStr, mcl::fp::FP_LLVM_MONT);
+#endif
+ compareGmp(pStr);
+}
+int main()
+ try
+{
+ const char *pTbl[] = {
+ "40347654345107946713373737062547060536401653012956617387979052445947619094013143666088208645002153616185987062074179207",
+ "13407807929942597099574024998205846127479365820592393377723561443721764030073546976801874298166903427690031858186486050853753882811946569946433649006083527",
+ "776259046150354467574489744231251277628443008558348305569526019013025476343188443165439204414323238975243865348565536603085790022057407195722143637520590569602227488010424952775132642815799222412631499596858234375446423426908029627",
+ };
+ for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(pTbl); i++) {
+ testAll(pTbl[i]);
+ }
+ testMul();
+} catch (std::exception& e) {
+ printf("err %s\n", e.what());
+ puts("make clean");
+ puts("make -DMCL_MAX_BIT_SIZE=768");
+ return 1;
+}
+
diff --git a/vendor/github.com/tangerine-network/mcl/sample/pairing.cpp b/vendor/github.com/tangerine-network/mcl/sample/pairing.cpp
new file mode 100644
index 000000000..230583b6e
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/sample/pairing.cpp
@@ -0,0 +1,56 @@
+#include <mcl/bn256.hpp>
+
+using namespace mcl::bn256;
+
+void minimum_sample(const G1& P, const G2& Q)
+{
+ const mpz_class a = 123;
+ const mpz_class b = 456;
+ Fp12 e1, e2;
+ pairing(e1, P, Q);
+ G2 aQ;
+ G1 bP;
+ G2::mul(aQ, Q, a);
+ G1::mul(bP, P, b);
+ pairing(e2, bP, aQ);
+ Fp12::pow(e1, e1, a * b);
+ printf("%s\n", e1 == e2 ? "ok" : "ng");
+}
+
+void miller_and_final_exp(const G1& P, const G2& Q)
+{
+ Fp12 e1, e2;
+ pairing(e1, P, Q);
+
+ millerLoop(e2, P, Q);
+ finalExp(e2, e2);
+ printf("%s\n", e1 == e2 ? "ok" : "ng");
+}
+
+void precomputed(const G1& P, const G2& Q)
+{
+ Fp12 e1, e2;
+ pairing(e1, P, Q);
+ std::vector<Fp6> Qcoeff;
+ precomputeG2(Qcoeff, Q);
+ precomputedMillerLoop(e2, P, Qcoeff);
+ finalExp(e2, e2);
+ printf("%s\n", e1 == e2 ? "ok" : "ng");
+}
+
+int main()
+{
+ const char *aa = "12723517038133731887338407189719511622662176727675373276651903807414909099441";
+ const char *ab = "4168783608814932154536427934509895782246573715297911553964171371032945126671";
+ const char *ba = "13891744915211034074451795021214165905772212241412891944830863846330766296736";
+ const char *bb = "7937318970632701341203597196594272556916396164729705624521405069090520231616";
+
+ initPairing();
+ G2 Q(Fp2(aa, ab), Fp2(ba, bb));
+ G1 P(-1, 1);
+
+ minimum_sample(P, Q);
+ miller_and_final_exp(P, Q);
+ precomputed(P, Q);
+}
+
diff --git a/vendor/github.com/tangerine-network/mcl/sample/pairing_c.c b/vendor/github.com/tangerine-network/mcl/sample/pairing_c.c
new file mode 100644
index 000000000..5c2cd222a
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/sample/pairing_c.c
@@ -0,0 +1,52 @@
+#include <stdio.h>
+#include <string.h>
+#define MCLBN_FP_UNIT_SIZE 4
+#include <mcl/bn.h>
+
+int g_err = 0;
+#define ASSERT(x) { if (!(x)) { printf("err %s:%d\n", __FILE__, __LINE__); g_err++; } }
+
+int main()
+{
+ char buf[1024];
+ const char *aStr = "123";
+ const char *bStr = "456";
+ mclBn_init(MCL_BN254, MCLBN_FP_UNIT_SIZE);
+ mclBnFr a, b, ab;
+ mclBnG1 P, aP;
+ mclBnG2 Q, bQ;
+ mclBnGT e, e1, e2;
+ mclBnFr_setStr(&a, aStr, strlen(aStr), 10);
+ mclBnFr_setStr(&b, bStr, strlen(bStr), 10);
+ mclBnFr_mul(&ab, &a, &b);
+ mclBnFr_getStr(buf, sizeof(buf), &ab, 10);
+ printf("%s x %s = %s\n", aStr, bStr, buf);
+
+ ASSERT(!mclBnG1_hashAndMapTo(&P, "this", 4));
+ ASSERT(!mclBnG2_hashAndMapTo(&Q, "that", 4));
+ mclBnG1_getStr(buf, sizeof(buf), &P, 16);
+ printf("P = %s\n", buf);
+ mclBnG2_getStr(buf, sizeof(buf), &Q, 16);
+ printf("Q = %s\n", buf);
+
+ mclBnG1_mul(&aP, &P, &a);
+ mclBnG2_mul(&bQ, &Q, &b);
+
+ mclBn_pairing(&e, &P, &Q);
+ mclBnGT_getStr(buf, sizeof(buf), &e, 16);
+ printf("e = %s\n", buf);
+ mclBnGT_pow(&e1, &e, &a);
+ mclBn_pairing(&e2, &aP, &Q);
+ ASSERT(mclBnGT_isEqual(&e1, &e2));
+
+ mclBnGT_pow(&e1, &e, &b);
+ mclBn_pairing(&e2, &P, &bQ);
+ ASSERT(mclBnGT_isEqual(&e1, &e2));
+ if (g_err) {
+ printf("err %d\n", g_err);
+ return 1;
+ } else {
+ printf("no err\n");
+ return 0;
+ }
+}
diff --git a/vendor/github.com/tangerine-network/mcl/sample/random.cpp b/vendor/github.com/tangerine-network/mcl/sample/random.cpp
new file mode 100644
index 000000000..a2a3619ad
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/sample/random.cpp
@@ -0,0 +1,29 @@
+#include <mcl/fp.hpp>
+#include <mcl/gmp_util.hpp>
+#include <mcl/ecparam.hpp>
+#include <cybozu/random_generator.hpp>
+#include <map>
+#include <mcl/fp.hpp>
+typedef mcl::FpT<> Fp;
+
+typedef std::map<std::string, int> Map;
+
+int main(int argc, char *argv[])
+{
+ cybozu::RandomGenerator rg;
+ const char *p = mcl::ecparam::secp192k1.p;
+ if (argc == 2) {
+ p = argv[1];
+ }
+ Fp::init(p);
+ Fp x;
+ printf("p=%s\n", p);
+ Map m;
+ for (int i = 0; i < 10000; i++) {
+ x.setRand(rg);
+ m[x.getStr(16)]++;
+ }
+ for (Map::const_iterator i = m.begin(), ie = m.end(); i != ie; ++i) {
+ printf("%s %d\n", i->first.c_str(), i->second);
+ }
+}
diff --git a/vendor/github.com/tangerine-network/mcl/sample/rawbench.cpp b/vendor/github.com/tangerine-network/mcl/sample/rawbench.cpp
new file mode 100644
index 000000000..4d7506ef5
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/sample/rawbench.cpp
@@ -0,0 +1,180 @@
+#define PUT(x) std::cout << #x "=" << (x) << std::endl
+#include <cybozu/benchmark.hpp>
+#include <cybozu/option.hpp>
+#include <cybozu/xorshift.hpp>
+#include <mcl/fp.hpp>
+#include <mcl/fp_tower.hpp>
+
+typedef mcl::FpT<mcl::FpTag> Fp;
+typedef mcl::Fp2T<Fp> Fp2;
+typedef mcl::FpDblT<Fp> FpDbl;
+typedef mcl::Fp6T<Fp> Fp6;
+typedef mcl::Fp12T<Fp> Fp12;
+
+typedef mcl::fp::Unit Unit;
+
+void mul9(const mcl::fp::Op& op, Unit *y, const Unit *x, const Unit *p)
+{
+ const size_t maxN = sizeof(Fp) / sizeof(Unit);
+ Unit tmp[maxN];
+ op.fp_add(tmp, x, x, p); // 2x
+ op.fp_add(tmp, tmp, tmp, p); // 4x
+ op.fp_add(tmp, tmp, tmp, p); // 8x
+ op.fp_add(y, tmp, x, p); // 9x
+}
+
+void benchRaw(const char *p, mcl::fp::Mode mode)
+{
+ Fp::init(1, p, mode);
+ Fp2::init();
+ const size_t maxN = sizeof(Fp) / sizeof(Unit);
+ const mcl::fp::Op& op = Fp::getOp();
+ cybozu::XorShift rg;
+ Fp fx, fy;
+ fx.setRand(rg);
+ fy.setRand(rg);
+ Unit ux[maxN * 2] = {};
+ Unit uy[maxN * 2] = {};
+ Unit uz[maxN * 2] = {};
+ memcpy(ux, fx.getUnit(), sizeof(Unit) * op.N);
+ memcpy(ux + op.N, fx.getUnit(), sizeof(Unit) * op.N);
+ memcpy(uy, fy.getUnit(), sizeof(Unit) * op.N);
+ memcpy(uy + op.N, fy.getUnit(), sizeof(Unit) * op.N);
+ double fp_addT, fp_subT;
+ double fp_addPreT, fp_subPreT;
+ double fp_sqrT, fp_mulT;
+ double fp_mulUnitT;
+ double mul9T;
+ double fp_mulUnitPreT;
+ double fpN1_modT;
+ double fpDbl_addT, fpDbl_subT;
+ double fpDbl_sqrPreT, fpDbl_mulPreT, fpDbl_modT;
+ double fp2_sqrT, fp2_mulT;
+ CYBOZU_BENCH_T(fp_addT, op.fp_add, uz, ux, uy, op.p);
+ CYBOZU_BENCH_T(fp_subT, op.fp_sub, uz, uy, ux, op.p);
+ CYBOZU_BENCH_T(fp_addPreT, op.fp_addPre, uz, ux, uy);
+ CYBOZU_BENCH_T(fp_subPreT, op.fp_subPre, uz, uy, ux);
+ CYBOZU_BENCH_T(fp_sqrT, op.fp_sqr, uz, ux, op.p);
+ CYBOZU_BENCH_T(fp_mulT, op.fp_mul, uz, ux, uy, op.p);
+ CYBOZU_BENCH_T(fp_mulUnitT, op.fp_mulUnit, uz, ux, 9, op.p);
+ CYBOZU_BENCH_T(mul9T, mul9, op, uz, ux, op.p);
+ CYBOZU_BENCH_T(fp_mulUnitPreT, op.fp_mulUnitPre, ux, ux, 9);
+ CYBOZU_BENCH_T(fpN1_modT, op.fpN1_mod, ux, uy, op.p);
+ CYBOZU_BENCH_T(fpDbl_addT, op.fpDbl_add, uz, ux, uy, op.p);
+ CYBOZU_BENCH_T(fpDbl_subT, op.fpDbl_sub, uz, uy, ux, op.p);
+ CYBOZU_BENCH_T(fpDbl_sqrPreT, op.fpDbl_sqrPre, uz, ux);
+ CYBOZU_BENCH_T(fpDbl_mulPreT, op.fpDbl_mulPre, uz, ux, uy);
+ CYBOZU_BENCH_T(fpDbl_modT, op.fpDbl_mod, uz, ux, op.p);
+ Fp2 f2x, f2y;
+ f2x.a = fx;
+ f2x.b = fy;
+ f2y = f2x;
+ CYBOZU_BENCH_T(fp2_sqrT, Fp2::sqr, f2x, f2x);
+ CYBOZU_BENCH_T(fp2_mulT, Fp2::mul, f2x, f2x, f2y);
+ printf("%s\n", mcl::fp::ModeToStr(mode));
+ const char *tStrTbl[] = {
+ "fp_add", "fp_sub",
+ "addPre", "subPre",
+ "fp_sqr", "fp_mul",
+ "mulUnit",
+ "mul9",
+ "mulUnitP",
+ "fpN1_mod",
+ "D_add", "D_sub",
+ "D_sqrPre", "D_mulPre", "D_mod",
+ "fp2_sqr", "fp2_mul",
+ };
+ for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tStrTbl); i++) {
+ printf(" %8s", tStrTbl[i]);
+ }
+ printf("\n");
+ const double tTbl[] = {
+ fp_addT, fp_subT,
+ fp_addPreT, fp_subPreT,
+ fp_sqrT, fp_mulT,
+ fp_mulUnitT,
+ mul9T,
+ fp_mulUnitPreT,
+ fpN1_modT,
+ fpDbl_addT, fpDbl_subT,
+ fpDbl_sqrPreT, fpDbl_mulPreT, fpDbl_modT,
+ fp2_sqrT, fp2_mulT,
+ };
+ for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tTbl); i++) {
+ printf(" %8.2f", tTbl[i]);
+ }
+ printf("\n");
+}
+
+int main(int argc, char *argv[])
+ try
+{
+ cybozu::Option opt;
+ size_t bitSize;
+ opt.appendOpt(&bitSize, 0, "s", ": bitSize");
+ opt.appendHelp("h", ": show this message");
+ if (!opt.parse(argc, argv)) {
+ opt.usage();
+ return 1;
+ }
+ const char *tbl[] = {
+ // N = 2
+ "0x0000000000000001000000000000000d",
+ "0x7fffffffffffffffffffffffffffffff",
+ "0x8000000000000000000000000000001d",
+ "0xffffffffffffffffffffffffffffff61",
+
+ // N = 3
+ "0x000000000000000100000000000000000000000000000033", // min prime
+ "0x70000000000000000000000000000000000000000000001f",
+ "0x800000000000000000000000000000000000000000000005",
+ "0xfffffffffffffffffffffffe26f2fc170f69466a74defd8d",
+ "0xfffffffffffffffffffffffffffffffeffffffffffffffff",
+ "0xffffffffffffffffffffffffffffffffffffffffffffff13", // max prime
+
+ // N = 4
+ "0x0000000000000001000000000000000000000000000000000000000000000085", // min prime
+ "0x2523648240000001ba344d80000000086121000000000013a700000000000013", // BN254
+ "0x30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47", // Snark
+ "0x7523648240000001ba344d80000000086121000000000013a700000000000017",
+ "0x800000000000000000000000000000000000000000000000000000000000005f",
+ "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff43", // max prime
+ // N = 5
+ "0x80000000000000000000000000000000000000000000000000000000000000000000000000000009",
+ "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff3b",
+ // N = 6
+ "0x800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000171",
+ "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffec3",
+ // N = 7
+ "0x8000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000063",
+ "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff35",
+ // N = 8
+ "0x8000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006f",
+ "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdc7",
+#if MCL_MAX_BIT_SIZE == 1024
+ "0xc70b1ddda9b96e3965e5855942aa5852d8f8e052c760ac32cdfec16a2ed3d56981e1a475e20a70144ed2f5061ba64900f69451492803f815d446ee133d0668f7a7f3276d6301c95ce231f0e4b0d0f3882f10014fca04454cff55d2e2d4cfc1aad33b8d38397e2fc8b623177e63d0b783269c40a85b8f105654783b8ed2e737df",
+ "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff97",
+#endif
+ };
+ for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) {
+ const char *p = tbl[i];
+ if (bitSize > 0 && (strlen(p) - 2) * 4 != bitSize) {
+ continue;
+ }
+ printf("prime=%s\n", p);
+ benchRaw(tbl[i], mcl::fp::FP_GMP);
+ benchRaw(tbl[i], mcl::fp::FP_GMP_MONT);
+#ifdef MCL_USE_LLVM
+ benchRaw(tbl[i], mcl::fp::FP_LLVM);
+ benchRaw(tbl[i], mcl::fp::FP_LLVM_MONT);
+#endif
+#ifdef MCL_USE_XBYAK
+ if (bitSize <= 384) {
+ benchRaw(tbl[i], mcl::fp::FP_XBYAK);
+ }
+#endif
+ }
+} catch (std::exception& e) {
+ printf("ERR %s\n", e.what());
+ return 1;
+}
diff --git a/vendor/github.com/tangerine-network/mcl/sample/she_make_dlp_table.cpp b/vendor/github.com/tangerine-network/mcl/sample/she_make_dlp_table.cpp
new file mode 100644
index 000000000..41f18e225
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/sample/she_make_dlp_table.cpp
@@ -0,0 +1,69 @@
+/*
+ make she DLP table
+*/
+#include <mcl/she.hpp>
+#include <cybozu/option.hpp>
+#include <fstream>
+
+using namespace mcl::she;
+
+struct Param {
+ int curveType;
+ int hashBitSize;
+ int group;
+ std::string path;
+};
+
+template<class HashTable, class G>
+void makeTable(const Param& param, const char *groupStr, HashTable& hashTbl, const G& P)
+{
+ char baseName[32];
+ CYBOZU_SNPRINTF(baseName, sizeof(baseName), "she-dlp-%d-%d-%s.bin", param.curveType, param.hashBitSize, groupStr);
+ const std::string fileName = param.path + baseName;
+ printf("file=%s\n", fileName.c_str());
+ std::ofstream ofs(fileName.c_str(), std::ios::binary);
+
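+ // hashTbl.init(P, hashSize) appears to precompute the discrete logs of
+ // 1*P .. hashSize*P (or the corresponding powers in GT), so that decryption can
+ // recover small exponents by table lookup instead of solving a DLP each time;
+ // the table file roughly doubles in size for every extra bit of hashBitSize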
+ const size_t hashSize = 1u << param.hashBitSize;
+ hashTbl.init(P, hashSize);
+ hashTbl.save(ofs);
+}
+
+void run(const Param& param)
+{
+ SHE::init(mcl::getCurveParam(param.curveType));
+
+ switch (param.group) {
+ case 1:
+ makeTable(param, "g1", getHashTableG1(), SHE::P_);
+ break;
+ case 2:
+ makeTable(param, "g2", getHashTableG2(), SHE::Q_);
+ break;
+ case 3:
+ makeTable(param, "gt", getHashTableGT(), SHE::ePQ_);
+ break;
+ default:
+ throw cybozu::Exception("bad group") << param.group;
+ }
+}
+
+int main(int argc, char *argv[])
+ try
+{
+ cybozu::Option opt;
+ Param param;
+ opt.appendOpt(&param.curveType, 0, "ct", ": curveType(0:BN254, 1:BN381_1, 5:BLS12_381)");
+ opt.appendOpt(&param.hashBitSize, 20, "hb", ": hash bit size");
+ opt.appendOpt(&param.group, 3, "g", ": group(1:G1, 2:G2, 3:GT)");
+ opt.appendOpt(&param.path, "./", "path", ": path to table");
+ opt.appendHelp("h");
+ if (opt.parse(argc, argv)) {
+ run(param);
+ } else {
+ opt.usage();
+ return 1;
+ }
+} catch (std::exception& e) {
+ printf("err %s\n", e.what());
+ return 1;
+}
diff --git a/vendor/github.com/tangerine-network/mcl/sample/she_smpl.cpp b/vendor/github.com/tangerine-network/mcl/sample/she_smpl.cpp
new file mode 100644
index 000000000..e01b9c130
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/sample/she_smpl.cpp
@@ -0,0 +1,125 @@
+/*
+ sample of somewhat homomorphic encryption (SHE)
+*/
+#define PUT(x) std::cout << #x << "=" << (x) << std::endl;
+#include <cybozu/benchmark.hpp>
+#include <mcl/she.hpp>
+
+using namespace mcl::she;
+
+void miniSample()
+{
+ // init library
+ SHE::init();
+
+ SecretKey sec;
+
+ // init secret key by random_device
+ sec.setByCSPRNG();
+
+ // set range to decode GT DLP
+ SHE::setRangeForDLP(1000);
+
+ PublicKey pub;
+ // get public key
+ sec.getPublicKey(pub);
+
+ const int N = 5;
+ int a[] = { 1, 5, -3, 4, 6 };
+ int b[] = { 4, 2, 1, 9, -2 };
+ // compute correct value
+ int sum = 0;
+ for (size_t i = 0; i < N; i++) {
+ sum += a[i] * b[i];
+ }
+
+ std::vector<CipherText> ca(N), cb(N);
+
+ // encrypt each a[] and b[]
+ for (size_t i = 0; i < N; i++) {
+ pub.enc(ca[i], a[i]);
+ pub.enc(cb[i], b[i]);
+ }
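+ // the scheme is "somewhat" homomorphic: ciphertexts may be added freely but
+ // multiplied only once, which is exactly what an inner product needs;
+ // sec.dec() later recovers the small integer result by searching the range
+ // set with setRangeForDLP above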
+ CipherText c;
+ c.clearAsMultiplied(); // clear as multiplied before using c.add()
+ // inner product of encrypted vector
+ for (size_t i = 0; i < N; i++) {
+ CipherText t;
+ CipherText::mul(t, ca[i], cb[i]); // t = ca[i] * cb[i]
+ c.add(t); // c += t
+ }
+ // decode it
+ int m = (int)sec.dec(c);
+ // verify the value
+ if (m == sum) {
+ puts("ok");
+ } else {
+ printf("err correct %d err %d\n", sum, m);
+ }
+}
+
+void usePrimitiveCipherText()
+{
+ // init library
+ SHE::init();
+
+ SecretKey sec;
+
+ // init secret key by random_device
+ sec.setByCSPRNG();
+
+ // set range to decode GT DLP
+ SHE::setRangeForGTDLP(100);
+
+ PublicKey pub;
+ // get public key
+ sec.getPublicKey(pub);
+
+ int a1 = 1, a2 = 2;
+ int b1 = 5, b2 = -4;
+ CipherTextG1 c1, c2; // size of CipherTextG1 = N * 2 ; N = 256-bit for CurveFp254BNb
+ CipherTextG2 d1, d2; // size of CipherTextG2 = N * 4
+ pub.enc(c1, a1);
+ pub.enc(c2, a2);
+ pub.enc(d1, b1);
+ pub.enc(d2, b2);
+ c1.add(c2); // CipherTextG1 is additive HE
+ d1.add(d2); // CipherTextG2 is additive HE
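+ // CipherTextG1 and CipherTextG2 live in the two pairing source groups; one
+ // multiplication maps them into GT via the pairing, and the resulting
+ // CipherTextGT still supports additions (cm.add(cm) below doubles the
+ // encrypted product)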
+ CipherTextGT cm; // size of CipherTextGT = N * 12 * 4
+ CipherTextGT::mul(cm, c1, d1); // cm = c1 * d1
+ cm.add(cm); // 2cm
+ int m = (int)sec.dec(cm);
+ int ok = (a1 + a2) * (b1 + b2) * 2;
+ if (m == ok) {
+ puts("ok");
+ } else {
+ printf("err m=%d ok=%d\n", m, ok);
+ }
+ std::string s;
+ s = c1.getStr(mcl::IoSerialize); // serialize
+ printf("c1 data size %d byte\n", (int)s.size());
+
+ c2.setStr(s, mcl::IoSerialize);
+ printf("deserialize %s\n", c1 == c2 ? "ok" : "ng");
+
+ s = d1.getStr(mcl::IoSerialize); // serialize
+ printf("d1 data size %d byte\n", (int)s.size());
+ d2.setStr(s, mcl::IoSerialize);
+ printf("deserialize %s\n", d1 == d2 ? "ok" : "ng");
+
+ s = cm.getStr(mcl::IoSerialize); // serialize
+ printf("cm data size %d byte\n", (int)s.size());
+ CipherTextGT cm2;
+ cm2.setStr(s, mcl::IoSerialize);
+ printf("deserialize %s\n", cm == cm2 ? "ok" : "ng");
+}
+
+int main()
+ try
+{
+ miniSample();
+ usePrimitiveCipherText();
+} catch (std::exception& e) {
+ printf("err %s\n", e.what());
+ return 1;
+}
diff --git a/vendor/github.com/tangerine-network/mcl/sample/tri-dh.cpp b/vendor/github.com/tangerine-network/mcl/sample/tri-dh.cpp
new file mode 100644
index 000000000..8b720edbf
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/sample/tri-dh.cpp
@@ -0,0 +1,97 @@
+/*
+ tripartite Diffie-Hellman
+*/
+#include <iostream>
+#include <fstream>
+#include <cybozu/random_generator.hpp>
+#include <mcl/bn256.hpp>
+#include <cybozu/option.hpp>
+
+static cybozu::RandomGenerator rg;
+
+const std::string skSuf = ".sk.txt";
+const std::string pkSuf = ".pk.txt";
+
+using namespace mcl::bn256;
+
+void keygen(const std::string& user)
+{
+ if (user.empty()) {
+ throw cybozu::Exception("keygen:user is empty");
+ }
+ const char *aa = "12723517038133731887338407189719511622662176727675373276651903807414909099441";
+ const char *ab = "4168783608814932154536427934509895782246573715297911553964171371032945126671";
+ const char *ba = "13891744915211034074451795021214165905772212241412891944830863846330766296736";
+ const char *bb = "7937318970632701341203597196594272556916396164729705624521405069090520231616";
+
+
+ initPairing();
+ G2 Q(Fp2(aa, ab), Fp2(ba, bb));
+ G1 P(-1, 1);
+
+ Fr s;
+ s.setRand(rg);
+ G1::mul(P, P, s);
+ G2::mul(Q, Q, s);
+ {
+ std::string name = user + skSuf;
+ std::ofstream ofs(name.c_str(), std::ios::binary);
+ ofs << s << std::endl;
+ }
+ {
+ std::string name = user + pkSuf;
+ std::ofstream ofs(name.c_str(), std::ios::binary);
+ ofs << P << std::endl;
+ ofs << Q << std::endl;
+ }
+}
+
+void load(G1& P, G2& Q, const std::string& fileName)
+{
+ std::ifstream ifs(fileName.c_str(), std::ios::binary);
+ ifs >> P >> Q;
+}
+
+void share(const std::string& skFile, const std::string& pk1File, const std::string& pk2File)
+{
+ initPairing();
+ Fr s;
+ G1 P1, P2;
+ G2 Q1, Q2;
+ {
+ std::ifstream ifs(skFile.c_str(), std::ios::binary);
+ ifs >> s;
+ }
+ load(P1, Q1, pk1File);
+ load(P2, Q2, pk2File);
+ Fp12 e;
+ pairing(e, P1, Q2);
+ {
+ // verify (not necessary)
+ Fp12 e2;
+ pairing(e2, P2, Q1);
+ if (e != e2) {
+ throw cybozu::Exception("share:bad public key file") << e << e2;
+ }
+ }
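+ // each public key is (s_i*P, s_i*Q), so e(P1, Q2)^s = e(P, Q)^(s1*s2*s3);
+ // all three participants obtain the same value from the other two public keys,
+ // i.e. a one-round tripartite Diffie-Hellman in the style of Joux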
+ Fp12::pow(e, e, s);
+ std::cout << "share key:\n" << e << std::endl;
+}
+
+int main(int argc, char *argv[])
+ try
+{
+ if (argc == 3 && strcmp(argv[1], "keygen") == 0) {
+ keygen(argv[2]);
+ } else if (argc == 5 && strcmp(argv[1], "share") == 0) {
+ share(argv[2], argv[3], argv[4]);
+ } else {
+ fprintf(stderr, "tri-dh.exe keygen <user name>\n");
+ fprintf(stderr, "tri-dh.exe share <secret key file> <public key1 file> <public key2 file>\n");
+ return 1;
+ }
+} catch (std::exception& e) {
+ printf("ERR %s\n", e.what());
+ return 1;
+}
+
diff --git a/vendor/github.com/tangerine-network/mcl/sample/vote.cpp b/vendor/github.com/tangerine-network/mcl/sample/vote.cpp
new file mode 100644
index 000000000..88137187c
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/sample/vote.cpp
@@ -0,0 +1,206 @@
+/*
+ vote sample tool
+ Copyright (c) 2014, National Institute of Advanced Industrial
+ Science and Technology All rights reserved.
+ This source file is subject to BSD 3-Clause license.
+
+ modified for mcl by herumi
+*/
+#include <iostream>
+#include <fstream>
+#include <cybozu/random_generator.hpp>
+#include <cybozu/option.hpp>
+#include <cybozu/itoa.hpp>
+#include <mcl/fp.hpp>
+#include <mcl/ec.hpp>
+#include <mcl/elgamal.hpp>
+#include <mcl/ecparam.hpp>
+
+typedef mcl::FpT<> Fp;
+typedef mcl::FpT<mcl::ZnTag> Zn; // use ZnTag because Zn is different class with Fp
+typedef mcl::EcT<Fp> Ec;
+typedef mcl::ElgamalT<Ec, Zn> Elgamal;
+
+cybozu::RandomGenerator rg;
+
+const std::string pubFile = "vote_pub.txt";
+const std::string prvFile = "vote_prv.txt";
+const std::string resultFile = "vote_ret.txt";
+
+std::string GetSheetName(size_t n)
+{
+ return std::string("vote_") + cybozu::itoa(n) + ".txt";
+}
+
+struct Param {
+ std::string mode;
+ std::string voteList;
+ Param(int argc, const char *const argv[])
+ {
+ cybozu::Option opt;
+ opt.appendOpt(&voteList, "11001100", "l", ": list of voters for vote mode (e.g. 11001100)");
+ opt.appendHelp("h", ": put this message");
+ opt.appendParam(&mode, "mode", ": init/vote/count/open");
+ if (!opt.parse(argc, argv)) {
+ opt.usage();
+ exit(1);
+ }
+ printf("mode=%s\n", mode.c_str());
+ if (mode == "vote") {
+ printf("voters=%s\n", voteList.c_str());
+ size_t pos = voteList.find_first_not_of("01");
+ if (pos != std::string::npos) {
+ printf("bad char %c\n", voteList[pos]);
+ exit(1);
+ }
+ }
+ }
+};
+
+void SysInit()
+{
+ const mcl::EcParam& para = mcl::ecparam::secp192k1;
+ Zn::init(para.n);
+ Fp::init(para.p);
+ Ec::init(para.a, para.b);
+}
+
+template<class T>
+bool Load(T& t, const std::string& name, bool doThrow = true)
+{
+ std::ifstream ifs(name.c_str(), std::ios::binary);
+ if (!ifs) {
+ if (doThrow) throw cybozu::Exception("Load:can't read") << name;
+ return false;
+ }
+ if (ifs >> t) return true;
+ if (doThrow) throw cybozu::Exception("Load:bad data") << name;
+ return false;
+}
+
+template<class T>
+void Save(const std::string& name, const T& t)
+{
+ std::ofstream ofs(name.c_str(), std::ios::binary);
+ ofs << t;
+}
+
+void Init()
+{
+ const mcl::EcParam& para = mcl::ecparam::secp192k1;
+ const Fp x0(para.gx);
+ const Fp y0(para.gy);
+ const Ec P(x0, y0);
+ const size_t bitSize = para.bitSize;
+
+ Elgamal::PrivateKey prv;
+ prv.init(P, bitSize, rg);
+ const Elgamal::PublicKey& pub = prv.getPublicKey();
+ printf("make privateKey=%s, publicKey=%s\n", prvFile.c_str(), pubFile.c_str());
+ Save(prvFile, prv);
+ Save(pubFile, pub);
+}
+
+struct CipherWithZkp {
+ Elgamal::CipherText c;
+ Elgamal::Zkp zkp;
+ bool verify(const Elgamal::PublicKey& pub) const
+ {
+ return pub.verify(c, zkp);
+ }
+};
+
+inline std::ostream& operator<<(std::ostream& os, const CipherWithZkp& self)
+{
+ return os << self.c << std::endl << self.zkp;
+}
+inline std::istream& operator>>(std::istream& is, CipherWithZkp& self)
+{
+ return is >> self.c >> self.zkp;
+}
+
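+// Each ballot is a lifted-ElGamal encryption of 0 or 1 plus a zero-knowledge
+// proof (encWithZkp) that the plaintext really is 0 or 1. The sheet numbers are
+// shuffled so the file order does not reveal which voter produced which ballot.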
+void Vote(const std::string& voteList)
+{
+ Elgamal::PublicKey pub;
+ Load(pub, pubFile);
+ puts("shuffle");
+ std::vector<size_t> idxTbl(voteList.size());
+ for (size_t i = 0; i < idxTbl.size(); i++) {
+ idxTbl[i] = i;
+ }
+ cybozu::shuffle(idxTbl, rg);
+ puts("each voter votes");
+ for (size_t i = 0; i < voteList.size(); i++) {
+ CipherWithZkp c;
+ pub.encWithZkp(c.c, c.zkp, voteList[i] - '0', rg);
+ const std::string sheetName = GetSheetName(idxTbl[i]);
+ printf("make %s\n", sheetName.c_str());
+ Save(sheetName, c);
+ }
+}
+
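+// Lifted ElGamal is additively homomorphic: adding ciphertexts adds the hidden
+// 0/1 plaintexts, so the tally is formed without decrypting any single ballot;
+// only the aggregate written to vote_ret.txt is decrypted in Open().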
+void Count()
+{
+ Elgamal::PublicKey pub;
+ Load(pub, pubFile);
+ Elgamal::CipherText result;
+ puts("aggregate votes");
+ for (size_t i = 0; ; i++) {
+ const std::string sheetName = GetSheetName(i);
+ CipherWithZkp c;
+ if (!Load(c, sheetName, false)) break;
+ if (!c.verify(pub)) throw cybozu::Exception("bad cipher text") << i;
+ printf("add %s\n", sheetName.c_str());
+ result.add(c.c);
+ }
+ printf("create result file : %s\n", resultFile.c_str());
+ Save(resultFile, result);
+}
+
+void Open()
+{
+ Elgamal::PrivateKey prv;
+ Load(prv, prvFile);
+ Elgamal::CipherText c;
+ Load(c, resultFile);
+ Zn n;
+ prv.dec(n, c);
+ std::cout << "result of vote count " << n << std::endl;
+#if 0
+ puts("open real value");
+ for (size_t i = 0; ; i++) {
+ Elgamal::CipherText c;
+ const std::string sheetName = GetSheetName(i);
+ if (!Load(c, sheetName, false)) break;
+ Zn n;
+ prv.dec(n, c);
+ std::cout << sheetName << " " << n << std::endl;
+ }
+#endif
+}
+
+int main(int argc, char *argv[])
+ try
+{
+ const Param p(argc, argv);
+ SysInit();
+ if (p.mode == "init") {
+ Init();
+ } else
+ if (p.mode == "vote") {
+ Vote(p.voteList);
+ } else
+ if (p.mode == "count") {
+ Count();
+ } else
+ if (p.mode == "open") {
+ Open();
+ } else
+ {
+ printf("bad mode=%s\n", p.mode.c_str());
+ return 1;
+ }
+} catch (std::exception& e) {
+ printf("ERR %s\n", e.what());
+}
+
diff --git a/vendor/github.com/tangerine-network/mcl/setvar.bat b/vendor/github.com/tangerine-network/mcl/setvar.bat
new file mode 100644
index 000000000..1d57fa69e
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/setvar.bat
@@ -0,0 +1,2 @@
+set CFLAGS=/MT /DNOMINMAX /Ox /DNDEBUG /W4 /Zi /EHsc /nologo -I./include -I../cybozulib_ext/include
+set LDFLAGS=/LIBPATH:..\cybozulib_ext\lib /LIBPATH:.\lib
diff --git a/vendor/github.com/tangerine-network/mcl/src/asm/aarch64.s b/vendor/github.com/tangerine-network/mcl/src/asm/aarch64.s
new file mode 100644
index 000000000..a49a36e3a
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/src/asm/aarch64.s
@@ -0,0 +1,13197 @@
+ .text
+ .file "<stdin>"
+ .globl makeNIST_P192L
+ .align 2
+ .type makeNIST_P192L,@function
+makeNIST_P192L: // @makeNIST_P192L
+// BB#0:
+ movn x0, #0
+ orr x1, xzr, #0xfffffffffffffffe
+ movn x2, #0
+ ret
+.Lfunc_end0:
+ .size makeNIST_P192L, .Lfunc_end0-makeNIST_P192L
+
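+// Reduces a 384-bit input modulo the NIST P-192 prime p = 2^192 - 2^64 - 1.
+// Since 2^192 = 2^64 + 1 (mod p), the upper three limbs are folded into the
+// lower three, followed by a final conditional subtraction of p.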
+ .globl mcl_fpDbl_mod_NIST_P192L
+ .align 2
+ .type mcl_fpDbl_mod_NIST_P192L,@function
+mcl_fpDbl_mod_NIST_P192L: // @mcl_fpDbl_mod_NIST_P192L
+// BB#0:
+ ldp x8, x9, [x1, #16]
+ ldp x10, x11, [x1, #32]
+ ldp x12, x13, [x1]
+ orr w14, wzr, #0x1
+ adds x13, x11, x13
+ adcs x8, x8, xzr
+ adcs x15, xzr, xzr
+ adds x12, x12, x9
+ adcs x13, x13, x10
+ adcs x8, x8, x11
+ adcs x15, x15, xzr
+ adds x11, x12, x11
+ movn x12, #0
+ adcs x9, x13, x9
+ adcs x8, x8, x10
+ adcs x10, x15, xzr
+ adds x11, x10, x11
+ adcs x9, x10, x9
+ adcs x8, x8, xzr
+ adcs x10, xzr, xzr
+ adds x13, x11, #1 // =1
+ adcs x14, x9, x14
+ adcs x15, x8, xzr
+ adcs x10, x10, x12
+ tst x10, #0x1
+ csel x10, x11, x13, ne
+ csel x9, x9, x14, ne
+ csel x8, x8, x15, ne
+ stp x10, x9, [x0]
+ str x8, [x0, #16]
+ ret
+.Lfunc_end1:
+ .size mcl_fpDbl_mod_NIST_P192L, .Lfunc_end1-mcl_fpDbl_mod_NIST_P192L
+
+ .globl mcl_fp_sqr_NIST_P192L
+ .align 2
+ .type mcl_fp_sqr_NIST_P192L,@function
+mcl_fp_sqr_NIST_P192L: // @mcl_fp_sqr_NIST_P192L
+// BB#0:
+ ldp x8, x9, [x1]
+ ldr x10, [x1, #16]
+ orr w11, wzr, #0x1
+ umulh x12, x8, x8
+ mul x13, x9, x8
+ mul x14, x10, x8
+ umulh x15, x9, x8
+ adds x12, x12, x13
+ umulh x16, x10, x8
+ adcs x17, x15, x14
+ adcs x18, x16, xzr
+ mul x1, x9, x9
+ mul x2, x10, x9
+ adds x15, x15, x1
+ umulh x1, x9, x9
+ umulh x9, x10, x9
+ adcs x1, x1, x2
+ adcs x3, x9, xzr
+ adds x12, x13, x12
+ adcs x13, x15, x17
+ adcs x15, x1, x18
+ movn x17, #0
+ umulh x18, x10, x10
+ mul x10, x10, x10
+ mul x8, x8, x8
+ adcs x1, x3, xzr
+ adds x16, x16, x2
+ adcs x9, x9, x10
+ adcs x10, x18, xzr
+ adds x13, x14, x13
+ adcs x14, x16, x15
+ adcs x9, x9, x1
+ adcs x10, x10, xzr
+ adds x12, x12, x10
+ adcs x13, x13, xzr
+ adcs x15, xzr, xzr
+ adds x8, x8, x14
+ adcs x12, x12, x9
+ adcs x13, x13, x10
+ adcs x15, x15, xzr
+ adds x8, x8, x10
+ adcs x10, x12, x14
+ adcs x9, x13, x9
+ adcs x12, x15, xzr
+ adds x8, x12, x8
+ adcs x10, x12, x10
+ adcs x9, x9, xzr
+ adcs x12, xzr, xzr
+ adds x13, x8, #1 // =1
+ adcs x11, x10, x11
+ adcs x14, x9, xzr
+ adcs x12, x12, x17
+ tst x12, #0x1
+ csel x8, x8, x13, ne
+ csel x10, x10, x11, ne
+ csel x9, x9, x14, ne
+ stp x8, x10, [x0]
+ str x9, [x0, #16]
+ ret
+.Lfunc_end2:
+ .size mcl_fp_sqr_NIST_P192L, .Lfunc_end2-mcl_fp_sqr_NIST_P192L
+
+ .globl mcl_fp_mulNIST_P192L
+ .align 2
+ .type mcl_fp_mulNIST_P192L,@function
+mcl_fp_mulNIST_P192L: // @mcl_fp_mulNIST_P192L
+// BB#0:
+ stp x20, x19, [sp, #-32]!
+ stp x29, x30, [sp, #16]
+ add x29, sp, #16 // =16
+ sub sp, sp, #48 // =48
+ mov x19, x0
+ mov x0, sp
+ bl mcl_fpDbl_mulPre3L
+ ldp x9, x8, [sp, #8]
+ ldp x11, x10, [sp, #32]
+ ldr x12, [sp, #24]
+ ldr x13, [sp]
+ orr w14, wzr, #0x1
+ adds x9, x10, x9
+ adcs x8, x8, xzr
+ adcs x15, xzr, xzr
+ adds x13, x13, x12
+ adcs x9, x9, x11
+ adcs x8, x8, x10
+ adcs x15, x15, xzr
+ adds x10, x13, x10
+ movn x13, #0
+ adcs x9, x9, x12
+ adcs x8, x8, x11
+ adcs x11, x15, xzr
+ adds x10, x11, x10
+ adcs x9, x11, x9
+ adcs x8, x8, xzr
+ adcs x11, xzr, xzr
+ adds x12, x10, #1 // =1
+ adcs x14, x9, x14
+ adcs x15, x8, xzr
+ adcs x11, x11, x13
+ tst x11, #0x1
+ csel x10, x10, x12, ne
+ csel x9, x9, x14, ne
+ csel x8, x8, x15, ne
+ stp x10, x9, [x19]
+ str x8, [x19, #16]
+ sub sp, x29, #16 // =16
+ ldp x29, x30, [sp, #16]
+ ldp x20, x19, [sp], #32
+ ret
+.Lfunc_end3:
+ .size mcl_fp_mulNIST_P192L, .Lfunc_end3-mcl_fp_mulNIST_P192L
+
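+// Reduces a double-width input modulo the Mersenne prime p = 2^521 - 1.
+// The input is split at bit 521 and the two halves are added (2^521 = 1 mod p);
+// if the sum equals p (all 521 bits set) the result is forced to zero via memset.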
+ .globl mcl_fpDbl_mod_NIST_P521L
+ .align 2
+ .type mcl_fpDbl_mod_NIST_P521L,@function
+mcl_fpDbl_mod_NIST_P521L: // @mcl_fpDbl_mod_NIST_P521L
+// BB#0:
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
+ ldp x8, x9, [x1, #112]
+ ldr x10, [x1, #128]
+ ldp x11, x12, [x1, #96]
+ ldp x13, x14, [x1, #80]
+ ldp x15, x16, [x1, #64]
+ ldp x17, x18, [x1, #48]
+ ldp x2, x3, [x1, #32]
+ ldp x4, x5, [x1, #16]
+ ldp x6, x1, [x1]
+ extr x7, x10, x9, #9
+ extr x9, x9, x8, #9
+ extr x8, x8, x12, #9
+ extr x12, x12, x11, #9
+ extr x11, x11, x14, #9
+ extr x14, x14, x13, #9
+ extr x13, x13, x16, #9
+ extr x16, x16, x15, #9
+ and x15, x15, #0x1ff
+ lsr x10, x10, #9
+ adds x16, x16, x6
+ adcs x13, x13, x1
+ adcs x14, x14, x4
+ adcs x11, x11, x5
+ adcs x12, x12, x2
+ adcs x1, x8, x3
+ adcs x17, x9, x17
+ adcs x18, x7, x18
+ adcs x2, x10, x15
+ ubfx x8, x2, #9, #1
+ adds x8, x8, x16
+ adcs x9, x13, xzr
+ and x13, x9, x8
+ adcs x10, x14, xzr
+ and x13, x13, x10
+ adcs x11, x11, xzr
+ and x13, x13, x11
+ adcs x12, x12, xzr
+ and x14, x13, x12
+ adcs x13, x1, xzr
+ and x15, x14, x13
+ adcs x14, x17, xzr
+ and x16, x15, x14
+ adcs x15, x18, xzr
+ and x17, x16, x15
+ adcs x16, x2, xzr
+ orr x18, x16, #0xfffffffffffffe00
+ and x17, x17, x18
+ cmn x17, #1 // =1
+ b.eq .LBB4_2
+// BB#1: // %nonzero
+ stp x8, x9, [x0]
+ stp x10, x11, [x0, #16]
+ stp x12, x13, [x0, #32]
+ stp x14, x15, [x0, #48]
+ and x8, x16, #0x1ff
+ str x8, [x0, #64]
+ ldp x29, x30, [sp], #16
+ ret
+.LBB4_2: // %zero
+ mov w1, wzr
+ movz w2, #0x48
+ bl memset
+ ldp x29, x30, [sp], #16
+ ret
+.Lfunc_end4:
+ .size mcl_fpDbl_mod_NIST_P521L, .Lfunc_end4-mcl_fpDbl_mod_NIST_P521L
+
+ .globl mcl_fp_mulUnitPre1L
+ .align 2
+ .type mcl_fp_mulUnitPre1L,@function
+mcl_fp_mulUnitPre1L: // @mcl_fp_mulUnitPre1L
+// BB#0:
+ ldr x8, [x1]
+ mul x9, x8, x2
+ umulh x8, x8, x2
+ stp x9, x8, [x0]
+ ret
+.Lfunc_end5:
+ .size mcl_fp_mulUnitPre1L, .Lfunc_end5-mcl_fp_mulUnitPre1L
+
+ .globl mcl_fpDbl_mulPre1L
+ .align 2
+ .type mcl_fpDbl_mulPre1L,@function
+mcl_fpDbl_mulPre1L: // @mcl_fpDbl_mulPre1L
+// BB#0:
+ ldr x8, [x1]
+ ldr x9, [x2]
+ mul x10, x9, x8
+ umulh x8, x9, x8
+ stp x10, x8, [x0]
+ ret
+.Lfunc_end6:
+ .size mcl_fpDbl_mulPre1L, .Lfunc_end6-mcl_fpDbl_mulPre1L
+
+ .globl mcl_fpDbl_sqrPre1L
+ .align 2
+ .type mcl_fpDbl_sqrPre1L,@function
+mcl_fpDbl_sqrPre1L: // @mcl_fpDbl_sqrPre1L
+// BB#0:
+ ldr x8, [x1]
+ mul x9, x8, x8
+ umulh x8, x8, x8
+ stp x9, x8, [x0]
+ ret
+.Lfunc_end7:
+ .size mcl_fpDbl_sqrPre1L, .Lfunc_end7-mcl_fpDbl_sqrPre1L
+
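+// One-limb Montgomery multiplication: t = x*y, m = t * p' mod 2^64 (p' is the
+// constant stored just below the modulus, i.e. -p^-1 mod 2^64), then
+// z = (t + m*p) / 2^64 with a final conditional subtraction of p.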
+ .globl mcl_fp_mont1L
+ .align 2
+ .type mcl_fp_mont1L,@function
+mcl_fp_mont1L: // @mcl_fp_mont1L
+// BB#0:
+ ldr x8, [x2]
+ ldr x9, [x1]
+ ldur x10, [x3, #-8]
+ ldr x11, [x3]
+ umulh x12, x9, x8
+ mul x8, x9, x8
+ mul x9, x8, x10
+ umulh x10, x9, x11
+ mul x9, x9, x11
+ cmn x9, x8
+ adcs x8, x10, x12
+ adcs x9, xzr, xzr
+ subs x10, x8, x11
+ sbcs x9, x9, xzr
+ tst x9, #0x1
+ csel x8, x8, x10, ne
+ str x8, [x0]
+ ret
+.Lfunc_end8:
+ .size mcl_fp_mont1L, .Lfunc_end8-mcl_fp_mont1L
+
+ .globl mcl_fp_montNF1L
+ .align 2
+ .type mcl_fp_montNF1L,@function
+mcl_fp_montNF1L: // @mcl_fp_montNF1L
+// BB#0:
+ ldr x8, [x2]
+ ldr x9, [x1]
+ ldur x10, [x3, #-8]
+ ldr x11, [x3]
+ umulh x12, x9, x8
+ mul x8, x9, x8
+ mul x9, x8, x10
+ umulh x10, x9, x11
+ mul x9, x9, x11
+ cmn x9, x8
+ adcs x8, x10, x12
+ sub x9, x8, x11
+ cmp x9, #0 // =0
+ csel x8, x8, x9, lt
+ str x8, [x0]
+ ret
+.Lfunc_end9:
+ .size mcl_fp_montNF1L, .Lfunc_end9-mcl_fp_montNF1L
+
+ .globl mcl_fp_montRed1L
+ .align 2
+ .type mcl_fp_montRed1L,@function
+mcl_fp_montRed1L: // @mcl_fp_montRed1L
+// BB#0:
+ ldur x8, [x2, #-8]
+ ldp x9, x11, [x1]
+ ldr x10, [x2]
+ mul x8, x9, x8
+ umulh x12, x8, x10
+ mul x8, x8, x10
+ cmn x9, x8
+ adcs x8, x11, x12
+ adcs x9, xzr, xzr
+ subs x10, x8, x10
+ sbcs x9, x9, xzr
+ tst x9, #0x1
+ csel x8, x8, x10, ne
+ str x8, [x0]
+ ret
+.Lfunc_end10:
+ .size mcl_fp_montRed1L, .Lfunc_end10-mcl_fp_montRed1L
+
+ .globl mcl_fp_addPre1L
+ .align 2
+ .type mcl_fp_addPre1L,@function
+mcl_fp_addPre1L: // @mcl_fp_addPre1L
+// BB#0:
+ ldr x8, [x1]
+ ldr x9, [x2]
+ adds x9, x9, x8
+ adcs x8, xzr, xzr
+ str x9, [x0]
+ mov x0, x8
+ ret
+.Lfunc_end11:
+ .size mcl_fp_addPre1L, .Lfunc_end11-mcl_fp_addPre1L
+
+ .globl mcl_fp_subPre1L
+ .align 2
+ .type mcl_fp_subPre1L,@function
+mcl_fp_subPre1L: // @mcl_fp_subPre1L
+// BB#0:
+ ldr x8, [x2]
+ ldr x9, [x1]
+ subs x9, x9, x8
+ ngcs x8, xzr
+ and x8, x8, #0x1
+ str x9, [x0]
+ mov x0, x8
+ ret
+.Lfunc_end12:
+ .size mcl_fp_subPre1L, .Lfunc_end12-mcl_fp_subPre1L
+
+ .globl mcl_fp_shr1_1L
+ .align 2
+ .type mcl_fp_shr1_1L,@function
+mcl_fp_shr1_1L: // @mcl_fp_shr1_1L
+// BB#0:
+ ldr x8, [x1]
+ lsr x8, x8, #1
+ str x8, [x0]
+ ret
+.Lfunc_end13:
+ .size mcl_fp_shr1_1L, .Lfunc_end13-mcl_fp_shr1_1L
+
+ .globl mcl_fp_add1L
+ .align 2
+ .type mcl_fp_add1L,@function
+mcl_fp_add1L: // @mcl_fp_add1L
+// BB#0:
+ ldr x8, [x1]
+ ldr x9, [x2]
+ ldr x10, [x3]
+ adds x8, x9, x8
+ str x8, [x0]
+ adcs x9, xzr, xzr
+ subs x8, x8, x10
+ sbcs x9, x9, xzr
+ and w9, w9, #0x1
+ tbnz w9, #0, .LBB14_2
+// BB#1: // %nocarry
+ str x8, [x0]
+.LBB14_2: // %carry
+ ret
+.Lfunc_end14:
+ .size mcl_fp_add1L, .Lfunc_end14-mcl_fp_add1L
+
+ .globl mcl_fp_addNF1L
+ .align 2
+ .type mcl_fp_addNF1L,@function
+mcl_fp_addNF1L: // @mcl_fp_addNF1L
+// BB#0:
+ ldr x8, [x1]
+ ldr x9, [x2]
+ ldr x10, [x3]
+ add x8, x9, x8
+ sub x9, x8, x10
+ cmp x9, #0 // =0
+ csel x8, x8, x9, lt
+ str x8, [x0]
+ ret
+.Lfunc_end15:
+ .size mcl_fp_addNF1L, .Lfunc_end15-mcl_fp_addNF1L
+
+ .globl mcl_fp_sub1L
+ .align 2
+ .type mcl_fp_sub1L,@function
+mcl_fp_sub1L: // @mcl_fp_sub1L
+// BB#0:
+ ldr x8, [x2]
+ ldr x9, [x1]
+ subs x8, x9, x8
+ str x8, [x0]
+ ngcs x9, xzr
+ and w9, w9, #0x1
+ tbnz w9, #0, .LBB16_2
+// BB#1: // %nocarry
+ ret
+.LBB16_2: // %carry
+ ldr x9, [x3]
+ add x8, x9, x8
+ str x8, [x0]
+ ret
+.Lfunc_end16:
+ .size mcl_fp_sub1L, .Lfunc_end16-mcl_fp_sub1L
+
+ .globl mcl_fp_subNF1L
+ .align 2
+ .type mcl_fp_subNF1L,@function
+mcl_fp_subNF1L: // @mcl_fp_subNF1L
+// BB#0:
+ ldr x8, [x2]
+ ldr x9, [x1]
+ ldr x10, [x3]
+ sub x8, x9, x8
+ and x9, x10, x8, asr #63
+ add x8, x9, x8
+ str x8, [x0]
+ ret
+.Lfunc_end17:
+ .size mcl_fp_subNF1L, .Lfunc_end17-mcl_fp_subNF1L
+
+ .globl mcl_fpDbl_add1L
+ .align 2
+ .type mcl_fpDbl_add1L,@function
+mcl_fpDbl_add1L: // @mcl_fpDbl_add1L
+// BB#0:
+ ldp x8, x11, [x1]
+ ldp x9, x10, [x2]
+ ldr x12, [x3]
+ adds x8, x9, x8
+ str x8, [x0]
+ adcs x8, x10, x11
+ adcs x9, xzr, xzr
+ subs x10, x8, x12
+ sbcs x9, x9, xzr
+ tst x9, #0x1
+ csel x8, x8, x10, ne
+ str x8, [x0, #8]
+ ret
+.Lfunc_end18:
+ .size mcl_fpDbl_add1L, .Lfunc_end18-mcl_fpDbl_add1L
+
+ .globl mcl_fpDbl_sub1L
+ .align 2
+ .type mcl_fpDbl_sub1L,@function
+mcl_fpDbl_sub1L: // @mcl_fpDbl_sub1L
+// BB#0:
+ ldp x8, x11, [x1]
+ ldp x9, x10, [x2]
+ ldr x12, [x3]
+ subs x8, x8, x9
+ str x8, [x0]
+ sbcs x8, x11, x10
+ ngcs x9, xzr
+ tst x9, #0x1
+ csel x9, x12, xzr, ne
+ add x8, x9, x8
+ str x8, [x0, #8]
+ ret
+.Lfunc_end19:
+ .size mcl_fpDbl_sub1L, .Lfunc_end19-mcl_fpDbl_sub1L
+
+ .globl mcl_fp_mulUnitPre2L
+ .align 2
+ .type mcl_fp_mulUnitPre2L,@function
+mcl_fp_mulUnitPre2L: // @mcl_fp_mulUnitPre2L
+// BB#0:
+ ldp x8, x9, [x1]
+ mul x10, x8, x2
+ mul x11, x9, x2
+ umulh x8, x8, x2
+ umulh x9, x9, x2
+ adds x8, x8, x11
+ stp x10, x8, [x0]
+ adcs x8, x9, xzr
+ str x8, [x0, #16]
+ ret
+.Lfunc_end20:
+ .size mcl_fp_mulUnitPre2L, .Lfunc_end20-mcl_fp_mulUnitPre2L
+
+ .globl mcl_fpDbl_mulPre2L
+ .align 2
+ .type mcl_fpDbl_mulPre2L,@function
+mcl_fpDbl_mulPre2L: // @mcl_fpDbl_mulPre2L
+// BB#0:
+ ldp x8, x11, [x2]
+ ldp x9, x10, [x1]
+ mul x12, x9, x8
+ umulh x13, x10, x8
+ mul x14, x10, x8
+ umulh x8, x9, x8
+ mul x15, x9, x11
+ mul x16, x10, x11
+ umulh x9, x9, x11
+ umulh x10, x10, x11
+ adds x8, x8, x14
+ adcs x11, x13, xzr
+ adds x8, x8, x15
+ stp x12, x8, [x0]
+ adcs x8, x11, x16
+ adcs x11, xzr, xzr
+ adds x8, x8, x9
+ str x8, [x0, #16]
+ adcs x8, x11, x10
+ str x8, [x0, #24]
+ ret
+.Lfunc_end21:
+ .size mcl_fpDbl_mulPre2L, .Lfunc_end21-mcl_fpDbl_mulPre2L
+
+ .globl mcl_fpDbl_sqrPre2L
+ .align 2
+ .type mcl_fpDbl_sqrPre2L,@function
+mcl_fpDbl_sqrPre2L: // @mcl_fpDbl_sqrPre2L
+// BB#0:
+ ldp x8, x9, [x1]
+ mul x10, x8, x8
+ umulh x11, x9, x8
+ mul x12, x9, x8
+ umulh x8, x8, x8
+ umulh x13, x9, x9
+ mul x9, x9, x9
+ str x10, [x0]
+ adds x8, x8, x12
+ adcs x10, x11, xzr
+ adds x9, x11, x9
+ adcs x11, x13, xzr
+ adds x8, x12, x8
+ str x8, [x0, #8]
+ adcs x8, x9, x10
+ str x8, [x0, #16]
+ adcs x8, x11, xzr
+ str x8, [x0, #24]
+ ret
+.Lfunc_end22:
+ .size mcl_fpDbl_sqrPre2L, .Lfunc_end22-mcl_fpDbl_sqrPre2L
+
+ .globl mcl_fp_mont2L
+ .align 2
+ .type mcl_fp_mont2L,@function
+mcl_fp_mont2L: // @mcl_fp_mont2L
+// BB#0:
+ ldp x8, x14, [x2]
+ ldp x9, x10, [x1]
+ ldur x11, [x3, #-8]
+ ldp x12, x13, [x3]
+ umulh x15, x10, x8
+ mul x16, x10, x8
+ umulh x17, x9, x8
+ mul x8, x9, x8
+ umulh x18, x14, x10
+ mul x10, x14, x10
+ umulh x1, x14, x9
+ mul x9, x14, x9
+ adds x14, x17, x16
+ mul x16, x8, x11
+ adcs x15, x15, xzr
+ mul x17, x16, x13
+ umulh x2, x16, x12
+ adds x17, x2, x17
+ umulh x2, x16, x13
+ mul x16, x16, x12
+ adcs x2, x2, xzr
+ cmn x16, x8
+ adcs x8, x17, x14
+ adcs x14, x2, x15
+ adcs x15, xzr, xzr
+ adds x10, x1, x10
+ adcs x16, x18, xzr
+ adds x8, x8, x9
+ adcs x9, x14, x10
+ mul x10, x8, x11
+ adcs x11, x15, x16
+ umulh x14, x10, x13
+ mul x15, x10, x13
+ umulh x16, x10, x12
+ mul x10, x10, x12
+ adcs x17, xzr, xzr
+ adds x15, x16, x15
+ adcs x14, x14, xzr
+ cmn x10, x8
+ adcs x8, x15, x9
+ adcs x9, x14, x11
+ adcs x10, x17, xzr
+ subs x11, x8, x12
+ sbcs x12, x9, x13
+ sbcs x10, x10, xzr
+ tst x10, #0x1
+ csel x8, x8, x11, ne
+ csel x9, x9, x12, ne
+ stp x8, x9, [x0]
+ ret
+.Lfunc_end23:
+ .size mcl_fp_mont2L, .Lfunc_end23-mcl_fp_mont2L
+
+ .globl mcl_fp_montNF2L
+ .align 2
+ .type mcl_fp_montNF2L,@function
+mcl_fp_montNF2L: // @mcl_fp_montNF2L
+// BB#0:
+ ldp x8, x14, [x2]
+ ldp x9, x10, [x1]
+ ldur x11, [x3, #-8]
+ ldp x12, x13, [x3]
+ umulh x15, x10, x8
+ mul x16, x10, x8
+ umulh x17, x9, x8
+ mul x8, x9, x8
+ umulh x18, x14, x10
+ mul x10, x14, x10
+ umulh x1, x14, x9
+ mul x9, x14, x9
+ adds x14, x17, x16
+ mul x16, x8, x11
+ adcs x15, x15, xzr
+ mul x17, x16, x12
+ cmn x17, x8
+ mul x8, x16, x13
+ umulh x17, x16, x13
+ umulh x16, x16, x12
+ adcs x8, x8, x14
+ adcs x14, x15, xzr
+ adds x8, x8, x16
+ adcs x14, x14, x17
+ adds x10, x1, x10
+ adcs x15, x18, xzr
+ adds x8, x9, x8
+ adcs x9, x10, x14
+ mul x10, x8, x11
+ adcs x11, x15, xzr
+ mul x14, x10, x13
+ mul x15, x10, x12
+ umulh x16, x10, x13
+ umulh x10, x10, x12
+ cmn x15, x8
+ adcs x8, x14, x9
+ adcs x9, x11, xzr
+ adds x8, x8, x10
+ adcs x9, x9, x16
+ subs x10, x8, x12
+ sbcs x11, x9, x13
+ cmp x11, #0 // =0
+ csel x8, x8, x10, lt
+ csel x9, x9, x11, lt
+ stp x8, x9, [x0]
+ ret
+.Lfunc_end24:
+ .size mcl_fp_montNF2L, .Lfunc_end24-mcl_fp_montNF2L
+
+ .globl mcl_fp_montRed2L
+ .align 2
+ .type mcl_fp_montRed2L,@function
+mcl_fp_montRed2L: // @mcl_fp_montRed2L
+// BB#0:
+ ldur x8, [x2, #-8]
+ ldp x9, x14, [x1]
+ ldp x10, x11, [x2]
+ ldp x12, x13, [x1, #16]
+ mul x15, x9, x8
+ mul x16, x15, x11
+ umulh x17, x15, x10
+ adds x16, x17, x16
+ umulh x17, x15, x11
+ mul x15, x15, x10
+ adcs x17, x17, xzr
+ cmn x9, x15
+ adcs x9, x14, x16
+ adcs x12, x12, x17
+ mul x8, x9, x8
+ adcs x13, x13, xzr
+ umulh x14, x8, x11
+ mul x15, x8, x11
+ umulh x16, x8, x10
+ mul x8, x8, x10
+ adcs x17, xzr, xzr
+ adds x15, x16, x15
+ adcs x14, x14, xzr
+ cmn x8, x9
+ adcs x8, x15, x12
+ adcs x9, x14, x13
+ adcs x12, x17, xzr
+ subs x10, x8, x10
+ sbcs x11, x9, x11
+ sbcs x12, x12, xzr
+ tst x12, #0x1
+ csel x8, x8, x10, ne
+ csel x9, x9, x11, ne
+ stp x8, x9, [x0]
+ ret
+.Lfunc_end25:
+ .size mcl_fp_montRed2L, .Lfunc_end25-mcl_fp_montRed2L
+
+ .globl mcl_fp_addPre2L
+ .align 2
+ .type mcl_fp_addPre2L,@function
+mcl_fp_addPre2L: // @mcl_fp_addPre2L
+// BB#0:
+ ldp x8, x11, [x1]
+ ldp x9, x10, [x2]
+ adds x8, x9, x8
+ str x8, [x0]
+ adcs x9, x10, x11
+ adcs x8, xzr, xzr
+ str x9, [x0, #8]
+ mov x0, x8
+ ret
+.Lfunc_end26:
+ .size mcl_fp_addPre2L, .Lfunc_end26-mcl_fp_addPre2L
+
+ .globl mcl_fp_subPre2L
+ .align 2
+ .type mcl_fp_subPre2L,@function
+mcl_fp_subPre2L: // @mcl_fp_subPre2L
+// BB#0:
+ ldp x8, x11, [x1]
+ ldp x9, x10, [x2]
+ subs x8, x8, x9
+ str x8, [x0]
+ sbcs x9, x11, x10
+ ngcs x8, xzr
+ and x8, x8, #0x1
+ str x9, [x0, #8]
+ mov x0, x8
+ ret
+.Lfunc_end27:
+ .size mcl_fp_subPre2L, .Lfunc_end27-mcl_fp_subPre2L
+
+ .globl mcl_fp_shr1_2L
+ .align 2
+ .type mcl_fp_shr1_2L,@function
+mcl_fp_shr1_2L: // @mcl_fp_shr1_2L
+// BB#0:
+ ldp x8, x9, [x1]
+ extr x8, x9, x8, #1
+ lsr x9, x9, #1
+ stp x8, x9, [x0]
+ ret
+.Lfunc_end28:
+ .size mcl_fp_shr1_2L, .Lfunc_end28-mcl_fp_shr1_2L
+
+ .globl mcl_fp_add2L
+ .align 2
+ .type mcl_fp_add2L,@function
+mcl_fp_add2L: // @mcl_fp_add2L
+// BB#0:
+ ldp x8, x11, [x1]
+ ldp x9, x10, [x2]
+ adds x8, x9, x8
+ ldp x9, x12, [x3]
+ adcs x10, x10, x11
+ stp x8, x10, [x0]
+ adcs x11, xzr, xzr
+ subs x9, x8, x9
+ sbcs x8, x10, x12
+ sbcs x10, x11, xzr
+ and w10, w10, #0x1
+ tbnz w10, #0, .LBB29_2
+// BB#1: // %nocarry
+ stp x9, x8, [x0]
+.LBB29_2: // %carry
+ ret
+.Lfunc_end29:
+ .size mcl_fp_add2L, .Lfunc_end29-mcl_fp_add2L
+
+ .globl mcl_fp_addNF2L
+ .align 2
+ .type mcl_fp_addNF2L,@function
+mcl_fp_addNF2L: // @mcl_fp_addNF2L
+// BB#0:
+ ldp x8, x9, [x1]
+ ldp x10, x11, [x2]
+ ldp x12, x13, [x3]
+ adds x8, x10, x8
+ adcs x9, x11, x9
+ subs x10, x8, x12
+ sbcs x11, x9, x13
+ cmp x11, #0 // =0
+ csel x8, x8, x10, lt
+ csel x9, x9, x11, lt
+ stp x8, x9, [x0]
+ ret
+.Lfunc_end30:
+ .size mcl_fp_addNF2L, .Lfunc_end30-mcl_fp_addNF2L
+
+ .globl mcl_fp_sub2L
+ .align 2
+ .type mcl_fp_sub2L,@function
+mcl_fp_sub2L: // @mcl_fp_sub2L
+// BB#0:
+ ldp x8, x11, [x1]
+ ldp x9, x10, [x2]
+ subs x9, x8, x9
+ sbcs x8, x11, x10
+ stp x9, x8, [x0]
+ ngcs x10, xzr
+ and w10, w10, #0x1
+ tbnz w10, #0, .LBB31_2
+// BB#1: // %nocarry
+ ret
+.LBB31_2: // %carry
+ ldp x10, x11, [x3]
+ adds x9, x10, x9
+ adcs x8, x11, x8
+ stp x9, x8, [x0]
+ ret
+.Lfunc_end31:
+ .size mcl_fp_sub2L, .Lfunc_end31-mcl_fp_sub2L
+
+ .globl mcl_fp_subNF2L
+ .align 2
+ .type mcl_fp_subNF2L,@function
+mcl_fp_subNF2L: // @mcl_fp_subNF2L
+// BB#0:
+ ldp x8, x11, [x1]
+ ldp x9, x10, [x2]
+ subs x8, x8, x9
+ ldp x9, x12, [x3]
+ sbcs x10, x11, x10
+ asr x11, x10, #63
+ and x9, x11, x9
+ and x11, x11, x12
+ adds x8, x9, x8
+ str x8, [x0]
+ adcs x8, x11, x10
+ str x8, [x0, #8]
+ ret
+.Lfunc_end32:
+ .size mcl_fp_subNF2L, .Lfunc_end32-mcl_fp_subNF2L
+
+ .globl mcl_fpDbl_add2L
+ .align 2
+ .type mcl_fpDbl_add2L,@function
+mcl_fpDbl_add2L: // @mcl_fpDbl_add2L
+// BB#0:
+ ldp x8, x9, [x2, #16]
+ ldp x10, x15, [x1]
+ ldp x11, x14, [x2]
+ ldp x12, x13, [x1, #16]
+ adds x10, x11, x10
+ ldp x11, x16, [x3]
+ str x10, [x0]
+ adcs x10, x14, x15
+ str x10, [x0, #8]
+ adcs x8, x8, x12
+ adcs x9, x9, x13
+ adcs x10, xzr, xzr
+ subs x11, x8, x11
+ sbcs x12, x9, x16
+ sbcs x10, x10, xzr
+ tst x10, #0x1
+ csel x8, x8, x11, ne
+ csel x9, x9, x12, ne
+ stp x8, x9, [x0, #16]
+ ret
+.Lfunc_end33:
+ .size mcl_fpDbl_add2L, .Lfunc_end33-mcl_fpDbl_add2L
+
+ .globl mcl_fpDbl_sub2L
+ .align 2
+ .type mcl_fpDbl_sub2L,@function
+mcl_fpDbl_sub2L: // @mcl_fpDbl_sub2L
+// BB#0:
+ ldp x8, x9, [x2, #16]
+ ldp x10, x14, [x2]
+ ldp x11, x15, [x1]
+ ldp x12, x13, [x1, #16]
+ subs x10, x11, x10
+ ldp x11, x16, [x3]
+ str x10, [x0]
+ sbcs x10, x15, x14
+ str x10, [x0, #8]
+ sbcs x8, x12, x8
+ sbcs x9, x13, x9
+ ngcs x10, xzr
+ tst x10, #0x1
+ csel x10, x16, xzr, ne
+ csel x11, x11, xzr, ne
+ adds x8, x11, x8
+ str x8, [x0, #16]
+ adcs x8, x10, x9
+ str x8, [x0, #24]
+ ret
+.Lfunc_end34:
+ .size mcl_fpDbl_sub2L, .Lfunc_end34-mcl_fpDbl_sub2L
+
+ .globl mcl_fp_mulUnitPre3L
+ .align 2
+ .type mcl_fp_mulUnitPre3L,@function
+mcl_fp_mulUnitPre3L: // @mcl_fp_mulUnitPre3L
+// BB#0:
+ ldp x8, x9, [x1]
+ ldr x10, [x1, #16]
+ mul x11, x8, x2
+ mul x12, x9, x2
+ umulh x8, x8, x2
+ mul x13, x10, x2
+ umulh x9, x9, x2
+ umulh x10, x10, x2
+ adds x8, x8, x12
+ stp x11, x8, [x0]
+ adcs x8, x9, x13
+ str x8, [x0, #16]
+ adcs x8, x10, xzr
+ str x8, [x0, #24]
+ ret
+.Lfunc_end35:
+ .size mcl_fp_mulUnitPre3L, .Lfunc_end35-mcl_fp_mulUnitPre3L
+
+ .globl mcl_fpDbl_mulPre3L
+ .align 2
+ .type mcl_fpDbl_mulPre3L,@function
+mcl_fpDbl_mulPre3L: // @mcl_fpDbl_mulPre3L
+// BB#0:
+ stp x20, x19, [sp, #-16]!
+ ldp x8, x9, [x1]
+ ldp x10, x12, [x2]
+ ldr x11, [x1, #16]
+ ldr x13, [x2, #16]
+ mul x14, x8, x10
+ umulh x15, x11, x10
+ mul x16, x11, x10
+ umulh x17, x9, x10
+ mul x18, x9, x10
+ umulh x10, x8, x10
+ mul x1, x8, x12
+ mul x2, x11, x12
+ mul x3, x9, x12
+ umulh x4, x11, x12
+ umulh x5, x9, x12
+ umulh x12, x8, x12
+ mul x6, x8, x13
+ mul x7, x11, x13
+ mul x19, x9, x13
+ umulh x8, x8, x13
+ umulh x9, x9, x13
+ umulh x11, x11, x13
+ str x14, [x0]
+ adds x10, x10, x18
+ adcs x13, x17, x16
+ adcs x14, x15, xzr
+ adds x10, x10, x1
+ str x10, [x0, #8]
+ adcs x10, x13, x3
+ adcs x13, x14, x2
+ adcs x14, xzr, xzr
+ adds x10, x10, x12
+ adcs x12, x13, x5
+ adcs x13, x14, x4
+ adds x10, x10, x6
+ str x10, [x0, #16]
+ adcs x10, x12, x19
+ adcs x12, x13, x7
+ adcs x13, xzr, xzr
+ adds x8, x10, x8
+ str x8, [x0, #24]
+ adcs x8, x12, x9
+ str x8, [x0, #32]
+ adcs x8, x13, x11
+ str x8, [x0, #40]
+ ldp x20, x19, [sp], #16
+ ret
+.Lfunc_end36:
+ .size mcl_fpDbl_mulPre3L, .Lfunc_end36-mcl_fpDbl_mulPre3L
+
+ .globl mcl_fpDbl_sqrPre3L
+ .align 2
+ .type mcl_fpDbl_sqrPre3L,@function
+mcl_fpDbl_sqrPre3L: // @mcl_fpDbl_sqrPre3L
+// BB#0:
+ ldp x8, x10, [x1]
+ ldr x9, [x1, #16]
+ mul x11, x8, x8
+ umulh x12, x9, x8
+ mul x13, x9, x8
+ umulh x14, x10, x8
+ mul x15, x10, x8
+ umulh x8, x8, x8
+ mul x16, x9, x10
+ str x11, [x0]
+ adds x8, x8, x15
+ adcs x11, x14, x13
+ adcs x17, x12, xzr
+ adds x8, x8, x15
+ mul x15, x10, x10
+ str x8, [x0, #8]
+ umulh x8, x9, x10
+ umulh x10, x10, x10
+ adcs x11, x11, x15
+ adcs x15, x17, x16
+ adcs x17, xzr, xzr
+ adds x11, x11, x14
+ umulh x14, x9, x9
+ mul x9, x9, x9
+ adcs x10, x15, x10
+ adcs x15, x17, x8
+ adds x12, x12, x16
+ adcs x8, x8, x9
+ adcs x9, x14, xzr
+ adds x11, x13, x11
+ adcs x10, x12, x10
+ stp x11, x10, [x0, #16]
+ adcs x8, x8, x15
+ str x8, [x0, #32]
+ adcs x8, x9, xzr
+ str x8, [x0, #40]
+ ret
+.Lfunc_end37:
+ .size mcl_fpDbl_sqrPre3L, .Lfunc_end37-mcl_fpDbl_sqrPre3L
+
+ .globl mcl_fp_mont3L
+ .align 2
+ .type mcl_fp_mont3L,@function
+mcl_fp_mont3L: // @mcl_fp_mont3L
+// BB#0:
+ stp x24, x23, [sp, #-48]!
+ stp x22, x21, [sp, #16]
+ stp x20, x19, [sp, #32]
+ ldp x15, x16, [x2]
+ ldp x13, x14, [x1, #8]
+ ldr x12, [x1]
+ ldur x11, [x3, #-8]
+ ldp x9, x8, [x3, #8]
+ ldr x10, [x3]
+ ldr x17, [x2, #16]
+ umulh x18, x14, x15
+ mul x1, x14, x15
+ umulh x2, x13, x15
+ mul x3, x13, x15
+ umulh x4, x12, x15
+ mul x15, x12, x15
+ umulh x5, x16, x14
+ mul x6, x16, x14
+ umulh x7, x16, x13
+ mul x19, x16, x13
+ umulh x20, x16, x12
+ mul x16, x16, x12
+ umulh x21, x17, x14
+ mul x14, x17, x14
+ adds x3, x4, x3
+ mul x4, x15, x11
+ adcs x1, x2, x1
+ mul x2, x4, x8
+ mul x22, x4, x9
+ umulh x23, x4, x10
+ adcs x18, x18, xzr
+ adds x22, x23, x22
+ umulh x23, x4, x9
+ adcs x2, x23, x2
+ umulh x23, x4, x8
+ mul x4, x4, x10
+ adcs x23, x23, xzr
+ cmn x4, x15
+ umulh x15, x17, x13
+ mul x13, x17, x13
+ umulh x4, x17, x12
+ mul x12, x17, x12
+ adcs x17, x22, x3
+ adcs x1, x2, x1
+ adcs x18, x23, x18
+ adcs x2, xzr, xzr
+ adds x3, x20, x19
+ adcs x6, x7, x6
+ adcs x5, x5, xzr
+ adds x16, x17, x16
+ adcs x17, x1, x3
+ mul x1, x16, x11
+ adcs x18, x18, x6
+ mul x3, x1, x8
+ mul x6, x1, x9
+ umulh x7, x1, x10
+ adcs x2, x2, x5
+ adcs x5, xzr, xzr
+ adds x6, x7, x6
+ umulh x7, x1, x9
+ adcs x3, x7, x3
+ umulh x7, x1, x8
+ mul x1, x1, x10
+ adcs x7, x7, xzr
+ cmn x1, x16
+ adcs x16, x6, x17
+ adcs x17, x3, x18
+ adcs x18, x7, x2
+ adcs x1, x5, xzr
+ adds x13, x4, x13
+ adcs x14, x15, x14
+ adcs x15, x21, xzr
+ adds x12, x16, x12
+ adcs x13, x17, x13
+ mul x11, x12, x11
+ adcs x14, x18, x14
+ umulh x16, x11, x8
+ mul x17, x11, x8
+ umulh x18, x11, x9
+ mul x2, x11, x9
+ umulh x3, x11, x10
+ mul x11, x11, x10
+ adcs x15, x1, x15
+ adcs x1, xzr, xzr
+ adds x2, x3, x2
+ adcs x17, x18, x17
+ adcs x16, x16, xzr
+ cmn x11, x12
+ adcs x11, x2, x13
+ adcs x12, x17, x14
+ adcs x13, x16, x15
+ adcs x14, x1, xzr
+ subs x10, x11, x10
+ sbcs x9, x12, x9
+ sbcs x8, x13, x8
+ sbcs x14, x14, xzr
+ tst x14, #0x1
+ csel x10, x11, x10, ne
+ csel x9, x12, x9, ne
+ csel x8, x13, x8, ne
+ stp x10, x9, [x0]
+ str x8, [x0, #16]
+ ldp x20, x19, [sp, #32]
+ ldp x22, x21, [sp, #16]
+ ldp x24, x23, [sp], #48
+ ret
+.Lfunc_end38:
+ .size mcl_fp_mont3L, .Lfunc_end38-mcl_fp_mont3L
+
+ .globl mcl_fp_montNF3L
+ .align 2
+ .type mcl_fp_montNF3L,@function
+mcl_fp_montNF3L: // @mcl_fp_montNF3L
+// BB#0:
+ stp x22, x21, [sp, #-32]!
+ stp x20, x19, [sp, #16]
+ ldp x14, x16, [x2]
+ ldp x15, x13, [x1, #8]
+ ldr x12, [x1]
+ ldur x11, [x3, #-8]
+ ldp x9, x8, [x3, #8]
+ ldr x10, [x3]
+ ldr x17, [x2, #16]
+ umulh x18, x13, x14
+ mul x1, x13, x14
+ umulh x2, x15, x14
+ mul x3, x15, x14
+ umulh x4, x12, x14
+ mul x14, x12, x14
+ umulh x5, x16, x13
+ mul x6, x16, x13
+ umulh x7, x16, x15
+ mul x19, x16, x15
+ umulh x20, x16, x12
+ mul x16, x16, x12
+ umulh x21, x17, x13
+ mul x13, x17, x13
+ adds x3, x4, x3
+ mul x4, x14, x11
+ adcs x1, x2, x1
+ mul x2, x4, x10
+ adcs x18, x18, xzr
+ cmn x2, x14
+ umulh x14, x17, x15
+ mul x15, x17, x15
+ umulh x2, x17, x12
+ mul x12, x17, x12
+ mul x17, x4, x9
+ adcs x17, x17, x3
+ mul x3, x4, x8
+ adcs x1, x3, x1
+ umulh x3, x4, x10
+ adcs x18, x18, xzr
+ adds x17, x17, x3
+ umulh x3, x4, x9
+ adcs x1, x1, x3
+ umulh x3, x4, x8
+ adcs x18, x18, x3
+ adds x3, x20, x19
+ adcs x4, x7, x6
+ adcs x5, x5, xzr
+ adds x16, x16, x17
+ adcs x17, x3, x1
+ mul x1, x16, x11
+ adcs x18, x4, x18
+ mul x3, x1, x8
+ mul x4, x1, x10
+ adcs x5, x5, xzr
+ cmn x4, x16
+ mul x16, x1, x9
+ umulh x4, x1, x8
+ adcs x16, x16, x17
+ umulh x17, x1, x9
+ umulh x1, x1, x10
+ adcs x18, x3, x18
+ adcs x3, x5, xzr
+ adds x16, x16, x1
+ adcs x17, x18, x17
+ adcs x18, x3, x4
+ adds x15, x2, x15
+ adcs x13, x14, x13
+ adcs x14, x21, xzr
+ adds x12, x12, x16
+ adcs x15, x15, x17
+ mul x11, x12, x11
+ adcs x13, x13, x18
+ mul x16, x11, x8
+ mul x17, x11, x9
+ mul x18, x11, x10
+ umulh x1, x11, x8
+ umulh x2, x11, x9
+ umulh x11, x11, x10
+ adcs x14, x14, xzr
+ cmn x18, x12
+ adcs x12, x17, x15
+ adcs x13, x16, x13
+ adcs x14, x14, xzr
+ adds x11, x12, x11
+ adcs x12, x13, x2
+ adcs x13, x14, x1
+ subs x10, x11, x10
+ sbcs x9, x12, x9
+ sbcs x8, x13, x8
+ asr x14, x8, #63
+ cmp x14, #0 // =0
+ csel x10, x11, x10, lt
+ csel x9, x12, x9, lt
+ csel x8, x13, x8, lt
+ stp x10, x9, [x0]
+ str x8, [x0, #16]
+ ldp x20, x19, [sp, #16]
+ ldp x22, x21, [sp], #32
+ ret
+.Lfunc_end39:
+ .size mcl_fp_montNF3L, .Lfunc_end39-mcl_fp_montNF3L
+
+ .globl mcl_fp_montRed3L
+ .align 2
+ .type mcl_fp_montRed3L,@function
+mcl_fp_montRed3L: // @mcl_fp_montRed3L
+// BB#0:
+ ldur x8, [x2, #-8]
+ ldp x9, x17, [x1]
+ ldp x12, x10, [x2, #8]
+ ldr x11, [x2]
+ ldp x13, x14, [x1, #32]
+ ldp x15, x16, [x1, #16]
+ mul x18, x9, x8
+ umulh x1, x18, x10
+ mul x2, x18, x10
+ umulh x3, x18, x12
+ mul x4, x18, x12
+ umulh x5, x18, x11
+ mul x18, x18, x11
+ adds x4, x5, x4
+ adcs x2, x3, x2
+ adcs x1, x1, xzr
+ cmn x9, x18
+ adcs x9, x17, x4
+ adcs x15, x15, x2
+ mul x17, x9, x8
+ adcs x16, x16, x1
+ umulh x18, x17, x10
+ mul x1, x17, x10
+ umulh x2, x17, x12
+ mul x3, x17, x12
+ umulh x4, x17, x11
+ mul x17, x17, x11
+ adcs x13, x13, xzr
+ adcs x14, x14, xzr
+ adcs x5, xzr, xzr
+ adds x3, x4, x3
+ adcs x1, x2, x1
+ adcs x18, x18, xzr
+ cmn x17, x9
+ adcs x9, x3, x15
+ adcs x15, x1, x16
+ mul x8, x9, x8
+ adcs x13, x18, x13
+ umulh x16, x8, x10
+ mul x17, x8, x10
+ umulh x18, x8, x12
+ mul x1, x8, x12
+ umulh x2, x8, x11
+ mul x8, x8, x11
+ adcs x14, x14, xzr
+ adcs x3, x5, xzr
+ adds x1, x2, x1
+ adcs x17, x18, x17
+ adcs x16, x16, xzr
+ cmn x8, x9
+ adcs x8, x1, x15
+ adcs x9, x17, x13
+ adcs x13, x16, x14
+ adcs x14, x3, xzr
+ subs x11, x8, x11
+ sbcs x12, x9, x12
+ sbcs x10, x13, x10
+ sbcs x14, x14, xzr
+ tst x14, #0x1
+ csel x8, x8, x11, ne
+ csel x9, x9, x12, ne
+ csel x10, x13, x10, ne
+ stp x8, x9, [x0]
+ str x10, [x0, #16]
+ ret
+.Lfunc_end40:
+ .size mcl_fp_montRed3L, .Lfunc_end40-mcl_fp_montRed3L
+
+ .globl mcl_fp_addPre3L
+ .align 2
+ .type mcl_fp_addPre3L,@function
+mcl_fp_addPre3L: // @mcl_fp_addPre3L
+// BB#0:
+ ldp x11, x8, [x2, #8]
+ ldp x9, x12, [x1]
+ ldr x10, [x2]
+ ldr x13, [x1, #16]
+ adds x9, x10, x9
+ str x9, [x0]
+ adcs x9, x11, x12
+ str x9, [x0, #8]
+ adcs x9, x8, x13
+ adcs x8, xzr, xzr
+ str x9, [x0, #16]
+ mov x0, x8
+ ret
+.Lfunc_end41:
+ .size mcl_fp_addPre3L, .Lfunc_end41-mcl_fp_addPre3L
+
+ .globl mcl_fp_subPre3L
+ .align 2
+ .type mcl_fp_subPre3L,@function
+mcl_fp_subPre3L: // @mcl_fp_subPre3L
+// BB#0:
+ ldp x11, x8, [x2, #8]
+ ldp x9, x12, [x1]
+ ldr x10, [x2]
+ ldr x13, [x1, #16]
+ subs x9, x9, x10
+ str x9, [x0]
+ sbcs x9, x12, x11
+ str x9, [x0, #8]
+ sbcs x9, x13, x8
+ ngcs x8, xzr
+ and x8, x8, #0x1
+ str x9, [x0, #16]
+ mov x0, x8
+ ret
+.Lfunc_end42:
+ .size mcl_fp_subPre3L, .Lfunc_end42-mcl_fp_subPre3L
+
+ .globl mcl_fp_shr1_3L
+ .align 2
+ .type mcl_fp_shr1_3L,@function
+mcl_fp_shr1_3L: // @mcl_fp_shr1_3L
+// BB#0:
+ ldp x8, x9, [x1]
+ ldr x10, [x1, #16]
+ extr x8, x9, x8, #1
+ extr x9, x10, x9, #1
+ lsr x10, x10, #1
+ stp x8, x9, [x0]
+ str x10, [x0, #16]
+ ret
+.Lfunc_end43:
+ .size mcl_fp_shr1_3L, .Lfunc_end43-mcl_fp_shr1_3L
+
+ .globl mcl_fp_add3L
+ .align 2
+ .type mcl_fp_add3L,@function
+mcl_fp_add3L: // @mcl_fp_add3L
+// BB#0:
+ ldp x11, x8, [x2, #8]
+ ldp x9, x12, [x1]
+ ldr x10, [x2]
+ ldr x13, [x1, #16]
+ adds x9, x10, x9
+ adcs x11, x11, x12
+ ldr x10, [x3]
+ ldp x12, x14, [x3, #8]
+ stp x9, x11, [x0]
+ adcs x8, x8, x13
+ str x8, [x0, #16]
+ adcs x13, xzr, xzr
+ subs x10, x9, x10
+ sbcs x9, x11, x12
+ sbcs x8, x8, x14
+ sbcs x11, x13, xzr
+ and w11, w11, #0x1
+ tbnz w11, #0, .LBB44_2
+// BB#1: // %nocarry
+ stp x10, x9, [x0]
+ str x8, [x0, #16]
+.LBB44_2: // %carry
+ ret
+.Lfunc_end44:
+ .size mcl_fp_add3L, .Lfunc_end44-mcl_fp_add3L
+
+ .globl mcl_fp_addNF3L
+ .align 2
+ .type mcl_fp_addNF3L,@function
+mcl_fp_addNF3L: // @mcl_fp_addNF3L
+// BB#0:
+ ldp x8, x9, [x1]
+ ldp x10, x11, [x2]
+ ldr x12, [x1, #16]
+ ldr x13, [x2, #16]
+ adds x8, x10, x8
+ adcs x9, x11, x9
+ ldp x10, x11, [x3]
+ ldr x14, [x3, #16]
+ adcs x12, x13, x12
+ subs x10, x8, x10
+ sbcs x11, x9, x11
+ sbcs x13, x12, x14
+ asr x14, x13, #63
+ cmp x14, #0 // =0
+ csel x8, x8, x10, lt
+ csel x9, x9, x11, lt
+ csel x10, x12, x13, lt
+ stp x8, x9, [x0]
+ str x10, [x0, #16]
+ ret
+.Lfunc_end45:
+ .size mcl_fp_addNF3L, .Lfunc_end45-mcl_fp_addNF3L
+
+ .globl mcl_fp_sub3L
+ .align 2
+ .type mcl_fp_sub3L,@function
+mcl_fp_sub3L: // @mcl_fp_sub3L
+// BB#0:
+ ldp x11, x10, [x2, #8]
+ ldp x8, x12, [x1]
+ ldr x9, [x2]
+ ldr x13, [x1, #16]
+ subs x8, x8, x9
+ sbcs x9, x12, x11
+ stp x8, x9, [x0]
+ sbcs x10, x13, x10
+ str x10, [x0, #16]
+ ngcs x11, xzr
+ and w11, w11, #0x1
+ tbnz w11, #0, .LBB46_2
+// BB#1: // %nocarry
+ ret
+.LBB46_2: // %carry
+ ldp x13, x11, [x3, #8]
+ ldr x12, [x3]
+ adds x8, x12, x8
+ adcs x9, x13, x9
+ adcs x10, x11, x10
+ stp x8, x9, [x0]
+ str x10, [x0, #16]
+ ret
+.Lfunc_end46:
+ .size mcl_fp_sub3L, .Lfunc_end46-mcl_fp_sub3L
+
+ .globl mcl_fp_subNF3L
+ .align 2
+ .type mcl_fp_subNF3L,@function
+mcl_fp_subNF3L: // @mcl_fp_subNF3L
+// BB#0:
+ ldp x8, x9, [x2]
+ ldp x10, x11, [x1]
+ ldr x12, [x2, #16]
+ ldr x13, [x1, #16]
+ subs x8, x10, x8
+ sbcs x9, x11, x9
+ ldp x10, x11, [x3]
+ ldr x14, [x3, #16]
+ sbcs x12, x13, x12
+ asr x13, x12, #63
+ and x11, x13, x11
+ and x14, x13, x14
+ extr x13, x13, x12, #63
+ and x10, x13, x10
+ adds x8, x10, x8
+ str x8, [x0]
+ adcs x8, x11, x9
+ str x8, [x0, #8]
+ adcs x8, x14, x12
+ str x8, [x0, #16]
+ ret
+.Lfunc_end47:
+ .size mcl_fp_subNF3L, .Lfunc_end47-mcl_fp_subNF3L
+
+ .globl mcl_fpDbl_add3L
+ .align 2
+ .type mcl_fpDbl_add3L,@function
+mcl_fpDbl_add3L: // @mcl_fpDbl_add3L
+// BB#0:
+ ldp x8, x9, [x2, #32]
+ ldp x10, x11, [x1, #32]
+ ldp x12, x13, [x2, #16]
+ ldp x15, x18, [x2]
+ ldp x16, x17, [x1, #16]
+ ldp x14, x1, [x1]
+ adds x14, x15, x14
+ ldr x15, [x3, #16]
+ str x14, [x0]
+ ldp x14, x2, [x3]
+ adcs x18, x18, x1
+ adcs x12, x12, x16
+ stp x18, x12, [x0, #8]
+ adcs x12, x13, x17
+ adcs x8, x8, x10
+ adcs x9, x9, x11
+ adcs x10, xzr, xzr
+ subs x11, x12, x14
+ sbcs x13, x8, x2
+ sbcs x14, x9, x15
+ sbcs x10, x10, xzr
+ tst x10, #0x1
+ csel x10, x12, x11, ne
+ csel x8, x8, x13, ne
+ csel x9, x9, x14, ne
+ stp x10, x8, [x0, #24]
+ str x9, [x0, #40]
+ ret
+.Lfunc_end48:
+ .size mcl_fpDbl_add3L, .Lfunc_end48-mcl_fpDbl_add3L
+
+ .globl mcl_fpDbl_sub3L
+ .align 2
+ .type mcl_fpDbl_sub3L,@function
+mcl_fpDbl_sub3L: // @mcl_fpDbl_sub3L
+// BB#0:
+ ldp x8, x9, [x2, #32]
+ ldp x10, x11, [x1, #32]
+ ldp x12, x13, [x2, #16]
+ ldp x14, x18, [x2]
+ ldp x16, x17, [x1, #16]
+ ldp x15, x1, [x1]
+ subs x14, x15, x14
+ ldr x15, [x3, #16]
+ str x14, [x0]
+ ldp x14, x2, [x3]
+ sbcs x18, x1, x18
+ sbcs x12, x16, x12
+ stp x18, x12, [x0, #8]
+ sbcs x12, x17, x13
+ sbcs x8, x10, x8
+ sbcs x9, x11, x9
+ ngcs x10, xzr
+ tst x10, #0x1
+ csel x10, x15, xzr, ne
+ csel x11, x2, xzr, ne
+ csel x13, x14, xzr, ne
+ adds x12, x13, x12
+ adcs x8, x11, x8
+ stp x12, x8, [x0, #24]
+ adcs x8, x10, x9
+ str x8, [x0, #40]
+ ret
+.Lfunc_end49:
+ .size mcl_fpDbl_sub3L, .Lfunc_end49-mcl_fpDbl_sub3L
+
+ .globl mcl_fp_mulUnitPre4L
+ .align 2
+ .type mcl_fp_mulUnitPre4L,@function
+mcl_fp_mulUnitPre4L: // @mcl_fp_mulUnitPre4L
+// BB#0:
+ ldp x8, x9, [x1]
+ ldp x10, x11, [x1, #16]
+ mul x12, x8, x2
+ mul x13, x9, x2
+ umulh x8, x8, x2
+ mul x14, x10, x2
+ umulh x9, x9, x2
+ mul x15, x11, x2
+ umulh x10, x10, x2
+ umulh x11, x11, x2
+ adds x8, x8, x13
+ stp x12, x8, [x0]
+ adcs x8, x9, x14
+ str x8, [x0, #16]
+ adcs x8, x10, x15
+ str x8, [x0, #24]
+ adcs x8, x11, xzr
+ str x8, [x0, #32]
+ ret
+.Lfunc_end50:
+ .size mcl_fp_mulUnitPre4L, .Lfunc_end50-mcl_fp_mulUnitPre4L
+
+ .globl mcl_fpDbl_mulPre4L
+ .align 2
+ .type mcl_fpDbl_mulPre4L,@function
+mcl_fpDbl_mulPre4L: // @mcl_fpDbl_mulPre4L
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ sub sp, sp, #32 // =32
+ ldp x8, x10, [x1]
+ ldp x9, x11, [x1]
+ ldp x12, x14, [x1, #16]
+ ldp x13, x1, [x1, #16]
+ ldp x15, x16, [x2]
+ ldp x17, x18, [x2, #16]
+ mul x2, x8, x15
+ umulh x3, x14, x15
+ mul x4, x14, x15
+ umulh x5, x12, x15
+ mul x6, x12, x15
+ umulh x7, x10, x15
+ mul x19, x10, x15
+ umulh x15, x8, x15
+ mul x20, x8, x16
+ mul x21, x14, x16
+ mul x22, x12, x16
+ mul x23, x10, x16
+ umulh x24, x14, x16
+ umulh x25, x12, x16
+ umulh x26, x10, x16
+ umulh x16, x8, x16
+ mul x27, x8, x17
+ mul x28, x14, x17
+ mul x29, x12, x17
+ mul x30, x10, x17
+ umulh x14, x14, x17
+ stp x3, x14, [sp, #16]
+ umulh x12, x12, x17
+ str x12, [sp, #8] // 8-byte Folded Spill
+ umulh x3, x10, x17
+ umulh x14, x8, x17
+ mul x17, x9, x18
+ umulh x12, x9, x18
+ mul x10, x11, x18
+ umulh x11, x11, x18
+ mul x9, x13, x18
+ umulh x13, x13, x18
+ mul x8, x1, x18
+ umulh x18, x1, x18
+ str x2, [x0]
+ adds x15, x15, x19
+ adcs x1, x7, x6
+ adcs x2, x5, x4
+ ldr x4, [sp, #16] // 8-byte Folded Reload
+ adcs x4, x4, xzr
+ adds x15, x20, x15
+ str x15, [x0, #8]
+ adcs x15, x23, x1
+ adcs x1, x22, x2
+ adcs x2, x21, x4
+ adcs x4, xzr, xzr
+ adds x15, x15, x16
+ adcs x16, x1, x26
+ adcs x1, x2, x25
+ adcs x2, x4, x24
+ adds x15, x15, x27
+ str x15, [x0, #16]
+ adcs x15, x16, x30
+ adcs x16, x1, x29
+ adcs x1, x2, x28
+ adcs x2, xzr, xzr
+ adds x14, x15, x14
+ adcs x15, x16, x3
+ ldr x16, [sp, #8] // 8-byte Folded Reload
+ adcs x16, x1, x16
+ ldr x1, [sp, #24] // 8-byte Folded Reload
+ adcs x1, x2, x1
+ adds x14, x14, x17
+ str x14, [x0, #24]
+ adcs x10, x15, x10
+ adcs x9, x16, x9
+ adcs x8, x1, x8
+ adcs x14, xzr, xzr
+ adds x10, x10, x12
+ adcs x9, x9, x11
+ stp x10, x9, [x0, #32]
+ adcs x8, x8, x13
+ str x8, [x0, #48]
+ adcs x8, x14, x18
+ str x8, [x0, #56]
+ add sp, sp, #32 // =32
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end51:
+ .size mcl_fpDbl_mulPre4L, .Lfunc_end51-mcl_fpDbl_mulPre4L
+
+ .globl mcl_fpDbl_sqrPre4L
+ .align 2
+ .type mcl_fpDbl_sqrPre4L,@function
+mcl_fpDbl_sqrPre4L: // @mcl_fpDbl_sqrPre4L
+// BB#0:
+ ldp x8, x9, [x1]
+ ldp x10, x13, [x1]
+ ldp x11, x12, [x1, #16]
+ ldr x14, [x1, #16]
+ mul x15, x10, x10
+ umulh x16, x12, x10
+ mul x17, x12, x10
+ umulh x18, x14, x10
+ mul x2, x14, x10
+ umulh x3, x9, x10
+ mul x4, x9, x10
+ umulh x10, x10, x10
+ str x15, [x0]
+ adds x10, x10, x4
+ adcs x15, x3, x2
+ adcs x17, x18, x17
+ adcs x16, x16, xzr
+ adds x10, x10, x4
+ mul x4, x12, x9
+ str x10, [x0, #8]
+ mul x10, x9, x9
+ adcs x10, x15, x10
+ mul x15, x14, x9
+ adcs x17, x17, x15
+ adcs x16, x16, x4
+ adcs x4, xzr, xzr
+ adds x10, x10, x3
+ umulh x3, x9, x9
+ adcs x17, x17, x3
+ umulh x3, x12, x9
+ umulh x9, x14, x9
+ adcs x16, x16, x9
+ adcs x3, x4, x3
+ ldr x1, [x1, #24]
+ adds x10, x10, x2
+ mul x2, x12, x14
+ str x10, [x0, #16]
+ mul x10, x14, x14
+ umulh x12, x12, x14
+ umulh x14, x14, x14
+ adcs x15, x17, x15
+ mul x17, x8, x1
+ adcs x10, x16, x10
+ mul x16, x11, x1
+ adcs x2, x3, x2
+ adcs x3, xzr, xzr
+ adds x15, x15, x18
+ mul x18, x13, x1
+ adcs x9, x10, x9
+ mul x10, x1, x1
+ umulh x8, x8, x1
+ umulh x13, x13, x1
+ umulh x11, x11, x1
+ umulh x1, x1, x1
+ adcs x14, x2, x14
+ adcs x12, x3, x12
+ adds x15, x15, x17
+ adcs x9, x9, x18
+ adcs x14, x14, x16
+ adcs x10, x12, x10
+ adcs x12, xzr, xzr
+ adds x8, x9, x8
+ stp x15, x8, [x0, #24]
+ adcs x8, x14, x13
+ str x8, [x0, #40]
+ adcs x8, x10, x11
+ str x8, [x0, #48]
+ adcs x8, x12, x1
+ str x8, [x0, #56]
+ ret
+.Lfunc_end52:
+ .size mcl_fpDbl_sqrPre4L, .Lfunc_end52-mcl_fpDbl_sqrPre4L
+
+ .globl mcl_fp_mont4L
+ .align 2
+ .type mcl_fp_mont4L,@function
+mcl_fp_mont4L: // @mcl_fp_mont4L
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ sub sp, sp, #16 // =16
+ str x0, [sp, #8] // 8-byte Folded Spill
+ ldp x13, x16, [x1, #16]
+ ldp x14, x15, [x1]
+ ldur x0, [x3, #-8]
+ ldp x9, x8, [x3, #16]
+ ldp x11, x10, [x3]
+ ldp x17, x18, [x2]
+ ldp x1, x2, [x2, #16]
+ umulh x3, x16, x17
+ mul x4, x16, x17
+ umulh x5, x13, x17
+ mul x6, x13, x17
+ umulh x7, x15, x17
+ mul x19, x15, x17
+ umulh x20, x14, x17
+ mul x17, x14, x17
+ umulh x21, x18, x16
+ mul x22, x18, x16
+ umulh x23, x18, x13
+ mul x24, x18, x13
+ umulh x25, x18, x15
+ mul x26, x18, x15
+ umulh x27, x18, x14
+ mul x18, x18, x14
+ umulh x28, x1, x16
+ adds x19, x20, x19
+ mul x20, x17, x0
+ adcs x6, x7, x6
+ mul x7, x20, x8
+ mul x29, x20, x9
+ mul x30, x20, x10
+ adcs x4, x5, x4
+ umulh x5, x20, x11
+ adcs x3, x3, xzr
+ adds x5, x5, x30
+ umulh x30, x20, x10
+ adcs x29, x30, x29
+ umulh x30, x20, x9
+ adcs x7, x30, x7
+ umulh x30, x20, x8
+ mul x20, x20, x11
+ adcs x30, x30, xzr
+ cmn x20, x17
+ mul x17, x1, x16
+ umulh x20, x1, x13
+ adcs x5, x5, x19
+ mul x19, x1, x13
+ adcs x6, x29, x6
+ umulh x29, x1, x15
+ adcs x4, x7, x4
+ mul x7, x1, x15
+ adcs x3, x30, x3
+ adcs x30, xzr, xzr
+ adds x26, x27, x26
+ umulh x27, x1, x14
+ mul x1, x1, x14
+ adcs x24, x25, x24
+ umulh x25, x2, x16
+ mul x16, x2, x16
+ adcs x22, x23, x22
+ adcs x21, x21, xzr
+ adds x18, x5, x18
+ adcs x5, x6, x26
+ mul x6, x18, x0
+ adcs x4, x4, x24
+ mul x23, x6, x8
+ mul x24, x6, x9
+ mul x26, x6, x10
+ adcs x3, x3, x22
+ umulh x22, x6, x11
+ adcs x21, x30, x21
+ adcs x30, xzr, xzr
+ adds x22, x22, x26
+ umulh x26, x6, x10
+ adcs x24, x26, x24
+ umulh x26, x6, x9
+ adcs x23, x26, x23
+ umulh x26, x6, x8
+ mul x6, x6, x11
+ adcs x26, x26, xzr
+ cmn x6, x18
+ umulh x18, x2, x13
+ mul x13, x2, x13
+ umulh x6, x2, x15
+ mul x15, x2, x15
+ umulh x12, x2, x14
+ mul x14, x2, x14
+ adcs x2, x22, x5
+ adcs x4, x24, x4
+ adcs x3, x23, x3
+ adcs x5, x26, x21
+ adcs x21, x30, xzr
+ adds x7, x27, x7
+ adcs x19, x29, x19
+ adcs x17, x20, x17
+ adcs x20, x28, xzr
+ adds x1, x2, x1
+ adcs x2, x4, x7
+ mul x4, x1, x0
+ adcs x3, x3, x19
+ mul x7, x4, x8
+ mul x19, x4, x9
+ mul x22, x4, x10
+ adcs x17, x5, x17
+ umulh x5, x4, x11
+ adcs x20, x21, x20
+ adcs x21, xzr, xzr
+ adds x5, x5, x22
+ umulh x22, x4, x10
+ adcs x19, x22, x19
+ umulh x22, x4, x9
+ adcs x7, x22, x7
+ umulh x22, x4, x8
+ mul x4, x4, x11
+ adcs x22, x22, xzr
+ cmn x4, x1
+ adcs x1, x5, x2
+ adcs x2, x19, x3
+ adcs x17, x7, x17
+ adcs x3, x22, x20
+ adcs x4, x21, xzr
+ adds x12, x12, x15
+ adcs x13, x6, x13
+ adcs x15, x18, x16
+ adcs x16, x25, xzr
+ adds x14, x1, x14
+ adcs x12, x2, x12
+ mul x18, x14, x0
+ adcs x13, x17, x13
+ umulh x17, x18, x8
+ mul x0, x18, x8
+ umulh x1, x18, x9
+ mul x2, x18, x9
+ umulh x5, x18, x10
+ mul x6, x18, x10
+ umulh x7, x18, x11
+ mul x18, x18, x11
+ adcs x15, x3, x15
+ adcs x16, x4, x16
+ adcs x3, xzr, xzr
+ adds x4, x7, x6
+ adcs x2, x5, x2
+ adcs x0, x1, x0
+ adcs x17, x17, xzr
+ cmn x18, x14
+ adcs x12, x4, x12
+ adcs x13, x2, x13
+ adcs x14, x0, x15
+ adcs x15, x17, x16
+ adcs x16, x3, xzr
+ subs x11, x12, x11
+ sbcs x10, x13, x10
+ sbcs x9, x14, x9
+ sbcs x8, x15, x8
+ sbcs x16, x16, xzr
+ tst x16, #0x1
+ csel x11, x12, x11, ne
+ csel x10, x13, x10, ne
+ csel x9, x14, x9, ne
+ csel x8, x15, x8, ne
+ ldr x12, [sp, #8] // 8-byte Folded Reload
+ stp x11, x10, [x12]
+ stp x9, x8, [x12, #16]
+ add sp, sp, #16 // =16
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end53:
+ .size mcl_fp_mont4L, .Lfunc_end53-mcl_fp_mont4L
+
+ .globl mcl_fp_montNF4L
+ .align 2
+ .type mcl_fp_montNF4L,@function
+mcl_fp_montNF4L: // @mcl_fp_montNF4L
+// BB#0:
+ stp x28, x27, [sp, #-80]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ ldp x14, x15, [x1, #16]
+ ldp x13, x16, [x1]
+ ldur x12, [x3, #-8]
+ ldp x9, x8, [x3, #16]
+ ldp x11, x10, [x3]
+ ldp x17, x18, [x2]
+ ldp x1, x2, [x2, #16]
+ umulh x3, x15, x17
+ mul x4, x15, x17
+ umulh x5, x14, x17
+ mul x6, x14, x17
+ umulh x7, x16, x17
+ mul x19, x16, x17
+ umulh x20, x13, x17
+ mul x17, x13, x17
+ umulh x21, x18, x15
+ mul x22, x18, x15
+ umulh x23, x18, x14
+ mul x24, x18, x14
+ umulh x25, x18, x16
+ mul x26, x18, x16
+ umulh x27, x18, x13
+ mul x18, x18, x13
+ adds x19, x20, x19
+ umulh x20, x1, x15
+ adcs x6, x7, x6
+ mul x7, x17, x12
+ adcs x4, x5, x4
+ mul x5, x7, x11
+ adcs x3, x3, xzr
+ cmn x5, x17
+ mul x17, x1, x15
+ mul x5, x7, x10
+ adcs x5, x5, x19
+ mul x19, x7, x9
+ adcs x6, x19, x6
+ mul x19, x7, x8
+ adcs x4, x19, x4
+ umulh x19, x7, x11
+ adcs x3, x3, xzr
+ adds x5, x5, x19
+ umulh x19, x7, x10
+ adcs x6, x6, x19
+ umulh x19, x7, x9
+ adcs x4, x4, x19
+ umulh x19, x1, x14
+ umulh x7, x7, x8
+ adcs x3, x3, x7
+ mul x7, x1, x14
+ adds x26, x27, x26
+ umulh x27, x1, x16
+ adcs x24, x25, x24
+ mul x25, x1, x16
+ adcs x22, x23, x22
+ umulh x23, x1, x13
+ mul x1, x1, x13
+ adcs x21, x21, xzr
+ adds x18, x18, x5
+ umulh x5, x2, x15
+ mul x15, x2, x15
+ adcs x6, x26, x6
+ umulh x26, x2, x14
+ mul x14, x2, x14
+ adcs x4, x24, x4
+ mul x24, x18, x12
+ adcs x3, x22, x3
+ mul x22, x24, x11
+ adcs x21, x21, xzr
+ cmn x22, x18
+ umulh x18, x2, x16
+ mul x16, x2, x16
+ umulh x22, x2, x13
+ mul x13, x2, x13
+ mul x2, x24, x10
+ adcs x2, x2, x6
+ mul x6, x24, x9
+ adcs x4, x6, x4
+ mul x6, x24, x8
+ adcs x3, x6, x3
+ umulh x6, x24, x11
+ adcs x21, x21, xzr
+ adds x2, x2, x6
+ umulh x6, x24, x10
+ adcs x4, x4, x6
+ umulh x6, x24, x9
+ adcs x3, x3, x6
+ umulh x6, x24, x8
+ adcs x6, x21, x6
+ adds x21, x23, x25
+ adcs x7, x27, x7
+ adcs x17, x19, x17
+ adcs x19, x20, xzr
+ adds x1, x1, x2
+ adcs x2, x21, x4
+ mul x4, x1, x12
+ adcs x3, x7, x3
+ mul x7, x4, x8
+ mul x20, x4, x9
+ adcs x17, x17, x6
+ mul x6, x4, x11
+ adcs x19, x19, xzr
+ cmn x6, x1
+ mul x1, x4, x10
+ umulh x6, x4, x8
+ adcs x1, x1, x2
+ umulh x2, x4, x9
+ adcs x3, x20, x3
+ umulh x20, x4, x10
+ umulh x4, x4, x11
+ adcs x17, x7, x17
+ adcs x7, x19, xzr
+ adds x1, x1, x4
+ adcs x3, x3, x20
+ adcs x17, x17, x2
+ adcs x2, x7, x6
+ adds x16, x22, x16
+ adcs x14, x18, x14
+ adcs x15, x26, x15
+ adcs x18, x5, xzr
+ adds x13, x13, x1
+ adcs x16, x16, x3
+ mul x12, x13, x12
+ adcs x14, x14, x17
+ mul x17, x12, x8
+ mul x1, x12, x9
+ mul x3, x12, x10
+ mul x4, x12, x11
+ umulh x5, x12, x8
+ umulh x6, x12, x9
+ umulh x7, x12, x10
+ umulh x12, x12, x11
+ adcs x15, x15, x2
+ adcs x18, x18, xzr
+ cmn x4, x13
+ adcs x13, x3, x16
+ adcs x14, x1, x14
+ adcs x15, x17, x15
+ adcs x16, x18, xzr
+ adds x12, x13, x12
+ adcs x13, x14, x7
+ adcs x14, x15, x6
+ adcs x15, x16, x5
+ subs x11, x12, x11
+ sbcs x10, x13, x10
+ sbcs x9, x14, x9
+ sbcs x8, x15, x8
+ cmp x8, #0 // =0
+ csel x11, x12, x11, lt
+ csel x10, x13, x10, lt
+ csel x9, x14, x9, lt
+ csel x8, x15, x8, lt
+ stp x11, x10, [x0]
+ stp x9, x8, [x0, #16]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #80
+ ret
+.Lfunc_end54:
+ .size mcl_fp_montNF4L, .Lfunc_end54-mcl_fp_montNF4L
+
+ .globl mcl_fp_montRed4L
+ .align 2
+ .type mcl_fp_montRed4L,@function
+mcl_fp_montRed4L: // @mcl_fp_montRed4L
+// BB#0:
+ stp x22, x21, [sp, #-32]!
+ stp x20, x19, [sp, #16]
+ ldur x12, [x2, #-8]
+ ldp x9, x8, [x2, #16]
+ ldp x11, x10, [x2]
+ ldp x14, x15, [x1, #48]
+ ldp x16, x17, [x1, #32]
+ ldp x18, x2, [x1, #16]
+ ldp x13, x1, [x1]
+ mul x3, x13, x12
+ umulh x4, x3, x8
+ mul x5, x3, x8
+ umulh x6, x3, x9
+ mul x7, x3, x9
+ umulh x19, x3, x10
+ mul x20, x3, x10
+ umulh x21, x3, x11
+ mul x3, x3, x11
+ adds x20, x21, x20
+ adcs x7, x19, x7
+ adcs x5, x6, x5
+ adcs x4, x4, xzr
+ cmn x13, x3
+ adcs x13, x1, x20
+ adcs x18, x18, x7
+ mul x1, x13, x12
+ adcs x2, x2, x5
+ umulh x3, x1, x8
+ mul x5, x1, x8
+ umulh x6, x1, x9
+ mul x7, x1, x9
+ umulh x19, x1, x10
+ mul x20, x1, x10
+ umulh x21, x1, x11
+ mul x1, x1, x11
+ adcs x16, x16, x4
+ adcs x17, x17, xzr
+ adcs x14, x14, xzr
+ adcs x15, x15, xzr
+ adcs x4, xzr, xzr
+ adds x20, x21, x20
+ adcs x7, x19, x7
+ adcs x5, x6, x5
+ adcs x3, x3, xzr
+ cmn x1, x13
+ adcs x13, x20, x18
+ adcs x18, x7, x2
+ mul x1, x13, x12
+ adcs x16, x5, x16
+ umulh x2, x1, x8
+ mul x5, x1, x8
+ umulh x6, x1, x9
+ mul x7, x1, x9
+ umulh x19, x1, x10
+ mul x20, x1, x10
+ umulh x21, x1, x11
+ mul x1, x1, x11
+ adcs x17, x3, x17
+ adcs x14, x14, xzr
+ adcs x15, x15, xzr
+ adcs x3, x4, xzr
+ adds x4, x21, x20
+ adcs x7, x19, x7
+ adcs x5, x6, x5
+ adcs x2, x2, xzr
+ cmn x1, x13
+ adcs x13, x4, x18
+ adcs x16, x7, x16
+ mul x12, x13, x12
+ adcs x17, x5, x17
+ umulh x18, x12, x8
+ mul x1, x12, x8
+ umulh x4, x12, x9
+ mul x5, x12, x9
+ umulh x6, x12, x10
+ mul x7, x12, x10
+ umulh x19, x12, x11
+ mul x12, x12, x11
+ adcs x14, x2, x14
+ adcs x15, x15, xzr
+ adcs x2, x3, xzr
+ adds x3, x19, x7
+ adcs x5, x6, x5
+ adcs x1, x4, x1
+ adcs x18, x18, xzr
+ cmn x12, x13
+ adcs x12, x3, x16
+ adcs x13, x5, x17
+ adcs x14, x1, x14
+ adcs x15, x18, x15
+ adcs x16, x2, xzr
+ subs x11, x12, x11
+ sbcs x10, x13, x10
+ sbcs x9, x14, x9
+ sbcs x8, x15, x8
+ sbcs x16, x16, xzr
+ tst x16, #0x1
+ csel x11, x12, x11, ne
+ csel x10, x13, x10, ne
+ csel x9, x14, x9, ne
+ csel x8, x15, x8, ne
+ stp x11, x10, [x0]
+ stp x9, x8, [x0, #16]
+ ldp x20, x19, [sp, #16]
+ ldp x22, x21, [sp], #32
+ ret
+.Lfunc_end55:
+ .size mcl_fp_montRed4L, .Lfunc_end55-mcl_fp_montRed4L
+
+ .globl mcl_fp_addPre4L
+ .align 2
+ .type mcl_fp_addPre4L,@function
+mcl_fp_addPre4L: // @mcl_fp_addPre4L
+// BB#0:
+ ldp x8, x9, [x2, #16]
+ ldp x10, x11, [x2]
+ ldp x12, x13, [x1]
+ ldp x14, x15, [x1, #16]
+ adds x10, x10, x12
+ str x10, [x0]
+ adcs x10, x11, x13
+ adcs x8, x8, x14
+ stp x10, x8, [x0, #8]
+ adcs x9, x9, x15
+ adcs x8, xzr, xzr
+ str x9, [x0, #24]
+ mov x0, x8
+ ret
+.Lfunc_end56:
+ .size mcl_fp_addPre4L, .Lfunc_end56-mcl_fp_addPre4L
+
+ .globl mcl_fp_subPre4L
+ .align 2
+ .type mcl_fp_subPre4L,@function
+mcl_fp_subPre4L: // @mcl_fp_subPre4L
+// BB#0:
+ ldp x8, x9, [x2, #16]
+ ldp x10, x11, [x2]
+ ldp x12, x13, [x1]
+ ldp x14, x15, [x1, #16]
+ subs x10, x12, x10
+ str x10, [x0]
+ sbcs x10, x13, x11
+ sbcs x8, x14, x8
+ stp x10, x8, [x0, #8]
+ sbcs x9, x15, x9
+ ngcs x8, xzr
+ and x8, x8, #0x1
+ str x9, [x0, #24]
+ mov x0, x8
+ ret
+.Lfunc_end57:
+ .size mcl_fp_subPre4L, .Lfunc_end57-mcl_fp_subPre4L
+
+ .globl mcl_fp_shr1_4L
+ .align 2
+ .type mcl_fp_shr1_4L,@function
+mcl_fp_shr1_4L: // @mcl_fp_shr1_4L
+// BB#0:
+ ldp x8, x9, [x1]
+ ldp x10, x11, [x1, #16]
+ extr x8, x9, x8, #1
+ extr x9, x10, x9, #1
+ extr x10, x11, x10, #1
+ lsr x11, x11, #1
+ stp x8, x9, [x0]
+ stp x10, x11, [x0, #16]
+ ret
+.Lfunc_end58:
+ .size mcl_fp_shr1_4L, .Lfunc_end58-mcl_fp_shr1_4L
+
+ .globl mcl_fp_add4L
+ .align 2
+ .type mcl_fp_add4L,@function
+mcl_fp_add4L: // @mcl_fp_add4L
+// BB#0:
+ ldp x8, x9, [x2, #16]
+ ldp x10, x11, [x2]
+ ldp x12, x13, [x1]
+ ldp x14, x15, [x1, #16]
+ adds x10, x10, x12
+ adcs x12, x11, x13
+ ldp x11, x13, [x3]
+ stp x10, x12, [x0]
+ adcs x8, x8, x14
+ adcs x14, x9, x15
+ stp x8, x14, [x0, #16]
+ adcs x15, xzr, xzr
+ ldp x9, x16, [x3, #16]
+ subs x11, x10, x11
+ sbcs x10, x12, x13
+ sbcs x9, x8, x9
+ sbcs x8, x14, x16
+ sbcs x12, x15, xzr
+ and w12, w12, #0x1
+ tbnz w12, #0, .LBB59_2
+// BB#1: // %nocarry
+ stp x11, x10, [x0]
+ stp x9, x8, [x0, #16]
+.LBB59_2: // %carry
+ ret
+.Lfunc_end59:
+ .size mcl_fp_add4L, .Lfunc_end59-mcl_fp_add4L
+
+ .globl mcl_fp_addNF4L
+ .align 2
+ .type mcl_fp_addNF4L,@function
+mcl_fp_addNF4L: // @mcl_fp_addNF4L
+// BB#0:
+ ldp x8, x9, [x1, #16]
+ ldp x10, x11, [x1]
+ ldp x12, x13, [x2]
+ ldp x14, x15, [x2, #16]
+ adds x10, x12, x10
+ adcs x11, x13, x11
+ ldp x12, x13, [x3]
+ adcs x8, x14, x8
+ ldp x14, x16, [x3, #16]
+ adcs x9, x15, x9
+ subs x12, x10, x12
+ sbcs x13, x11, x13
+ sbcs x14, x8, x14
+ sbcs x15, x9, x16
+ cmp x15, #0 // =0
+ csel x10, x10, x12, lt
+ csel x11, x11, x13, lt
+ csel x8, x8, x14, lt
+ csel x9, x9, x15, lt
+ stp x10, x11, [x0]
+ stp x8, x9, [x0, #16]
+ ret
+.Lfunc_end60:
+ .size mcl_fp_addNF4L, .Lfunc_end60-mcl_fp_addNF4L
+
+ .globl mcl_fp_sub4L
+ .align 2
+ .type mcl_fp_sub4L,@function
+mcl_fp_sub4L: // @mcl_fp_sub4L
+// BB#0:
+ ldp x10, x11, [x2, #16]
+ ldp x8, x9, [x2]
+ ldp x12, x13, [x1]
+ ldp x14, x15, [x1, #16]
+ subs x8, x12, x8
+ sbcs x9, x13, x9
+ stp x8, x9, [x0]
+ sbcs x10, x14, x10
+ sbcs x11, x15, x11
+ stp x10, x11, [x0, #16]
+ ngcs x12, xzr
+ and w12, w12, #0x1
+ tbnz w12, #0, .LBB61_2
+// BB#1: // %nocarry
+ ret
+.LBB61_2: // %carry
+ ldp x12, x13, [x3, #16]
+ ldp x14, x15, [x3]
+ adds x8, x14, x8
+ adcs x9, x15, x9
+ adcs x10, x12, x10
+ adcs x11, x13, x11
+ stp x8, x9, [x0]
+ stp x10, x11, [x0, #16]
+ ret
+.Lfunc_end61:
+ .size mcl_fp_sub4L, .Lfunc_end61-mcl_fp_sub4L
+
+ .globl mcl_fp_subNF4L
+ .align 2
+ .type mcl_fp_subNF4L,@function
+mcl_fp_subNF4L: // @mcl_fp_subNF4L
+// BB#0:
+ ldp x8, x9, [x2, #16]
+ ldp x10, x11, [x2]
+ ldp x12, x13, [x1]
+ ldp x14, x15, [x1, #16]
+ subs x10, x12, x10
+ sbcs x11, x13, x11
+ ldp x12, x13, [x3, #16]
+ sbcs x8, x14, x8
+ ldp x14, x16, [x3]
+ sbcs x9, x15, x9
+ asr x15, x9, #63
+ and x14, x15, x14
+ and x16, x15, x16
+ and x12, x15, x12
+ and x13, x15, x13
+ adds x10, x14, x10
+ str x10, [x0]
+ adcs x10, x16, x11
+ adcs x8, x12, x8
+ stp x10, x8, [x0, #8]
+ adcs x8, x13, x9
+ str x8, [x0, #24]
+ ret
+.Lfunc_end62:
+ .size mcl_fp_subNF4L, .Lfunc_end62-mcl_fp_subNF4L
+
+ .globl mcl_fpDbl_add4L
+ .align 2
+ .type mcl_fpDbl_add4L,@function
+mcl_fpDbl_add4L: // @mcl_fpDbl_add4L
+// BB#0:
+ ldp x8, x9, [x2, #48]
+ ldp x10, x11, [x1, #48]
+ ldp x12, x13, [x2, #32]
+ ldp x14, x15, [x1, #32]
+ ldp x16, x17, [x2, #16]
+ ldp x4, x2, [x2]
+ ldp x5, x6, [x1, #16]
+ ldp x18, x1, [x1]
+ adds x18, x4, x18
+ str x18, [x0]
+ ldp x18, x4, [x3, #16]
+ adcs x1, x2, x1
+ ldp x2, x3, [x3]
+ adcs x16, x16, x5
+ stp x1, x16, [x0, #8]
+ adcs x16, x17, x6
+ str x16, [x0, #24]
+ adcs x12, x12, x14
+ adcs x13, x13, x15
+ adcs x8, x8, x10
+ adcs x9, x9, x11
+ adcs x10, xzr, xzr
+ subs x11, x12, x2
+ sbcs x14, x13, x3
+ sbcs x15, x8, x18
+ sbcs x16, x9, x4
+ sbcs x10, x10, xzr
+ tst x10, #0x1
+ csel x10, x12, x11, ne
+ csel x11, x13, x14, ne
+ csel x8, x8, x15, ne
+ csel x9, x9, x16, ne
+ stp x10, x11, [x0, #32]
+ stp x8, x9, [x0, #48]
+ ret
+.Lfunc_end63:
+ .size mcl_fpDbl_add4L, .Lfunc_end63-mcl_fpDbl_add4L
+
+ .globl mcl_fpDbl_sub4L
+ .align 2
+ .type mcl_fpDbl_sub4L,@function
+mcl_fpDbl_sub4L: // @mcl_fpDbl_sub4L
+// BB#0:
+ ldp x8, x9, [x2, #48]
+ ldp x10, x11, [x1, #48]
+ ldp x12, x13, [x2, #32]
+ ldp x14, x15, [x1, #32]
+ ldp x16, x17, [x2, #16]
+ ldp x18, x2, [x2]
+ ldp x5, x6, [x1, #16]
+ ldp x4, x1, [x1]
+ subs x18, x4, x18
+ str x18, [x0]
+ ldp x18, x4, [x3, #16]
+ sbcs x1, x1, x2
+ ldp x2, x3, [x3]
+ sbcs x16, x5, x16
+ stp x1, x16, [x0, #8]
+ sbcs x16, x6, x17
+ sbcs x12, x14, x12
+ sbcs x13, x15, x13
+ sbcs x8, x10, x8
+ sbcs x9, x11, x9
+ ngcs x10, xzr
+ tst x10, #0x1
+ csel x10, x4, xzr, ne
+ csel x11, x18, xzr, ne
+ csel x14, x3, xzr, ne
+ csel x15, x2, xzr, ne
+ adds x12, x15, x12
+ stp x16, x12, [x0, #24]
+ adcs x12, x14, x13
+ adcs x8, x11, x8
+ stp x12, x8, [x0, #40]
+ adcs x8, x10, x9
+ str x8, [x0, #56]
+ ret
+.Lfunc_end64:
+ .size mcl_fpDbl_sub4L, .Lfunc_end64-mcl_fpDbl_sub4L
+
+ .globl mcl_fp_mulUnitPre5L
+ .align 2
+ .type mcl_fp_mulUnitPre5L,@function
+mcl_fp_mulUnitPre5L: // @mcl_fp_mulUnitPre5L
+// BB#0:
+ ldp x12, x8, [x1, #24]
+ ldp x9, x10, [x1]
+ ldr x11, [x1, #16]
+ mul x13, x9, x2
+ mul x14, x10, x2
+ umulh x9, x9, x2
+ mul x15, x11, x2
+ umulh x10, x10, x2
+ mul x16, x12, x2
+ umulh x11, x11, x2
+ mul x17, x8, x2
+ umulh x12, x12, x2
+ umulh x8, x8, x2
+ adds x9, x9, x14
+ stp x13, x9, [x0]
+ adcs x9, x10, x15
+ str x9, [x0, #16]
+ adcs x9, x11, x16
+ str x9, [x0, #24]
+ adcs x9, x12, x17
+ adcs x8, x8, xzr
+ stp x9, x8, [x0, #32]
+ ret
+.Lfunc_end65:
+ .size mcl_fp_mulUnitPre5L, .Lfunc_end65-mcl_fp_mulUnitPre5L
+
+ .globl mcl_fpDbl_mulPre5L
+ .align 2
+ .type mcl_fpDbl_mulPre5L,@function
+mcl_fpDbl_mulPre5L: // @mcl_fpDbl_mulPre5L
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ sub sp, sp, #176 // =176
+ ldp x8, x10, [x1]
+ ldp x9, x15, [x1]
+ ldp x11, x12, [x1, #24]
+ ldp x13, x14, [x2]
+ ldp x16, x18, [x1, #16]
+ ldr x17, [x1, #16]
+ ldr x3, [x1, #32]
+ ldp x4, x5, [x2, #16]
+ mul x6, x8, x13
+ str x6, [sp, #72] // 8-byte Folded Spill
+ umulh x6, x12, x13
+ str x6, [sp, #168] // 8-byte Folded Spill
+ mul x6, x12, x13
+ str x6, [sp, #152] // 8-byte Folded Spill
+ umulh x6, x11, x13
+ str x6, [sp, #112] // 8-byte Folded Spill
+ mul x6, x11, x13
+ str x6, [sp, #64] // 8-byte Folded Spill
+ umulh x6, x17, x13
+ mul x23, x17, x13
+ umulh x24, x10, x13
+ mul x25, x10, x13
+ umulh x7, x8, x13
+ mul x26, x8, x14
+ mul x13, x12, x14
+ str x13, [sp, #104] // 8-byte Folded Spill
+ mul x13, x11, x14
+ stp x13, x6, [sp, #40]
+ mul x29, x17, x14
+ mul x30, x10, x14
+ umulh x12, x12, x14
+ umulh x11, x11, x14
+ str x11, [sp, #96] // 8-byte Folded Spill
+ umulh x11, x17, x14
+ umulh x27, x10, x14
+ umulh x20, x8, x14
+ mul x8, x9, x4
+ stp x8, x11, [sp, #24]
+ mul x8, x3, x4
+ stp x8, x12, [sp, #136]
+ mul x8, x18, x4
+ str x8, [sp, #88] // 8-byte Folded Spill
+ mul x8, x16, x4
+ str x8, [sp, #16] // 8-byte Folded Spill
+ mul x28, x15, x4
+ umulh x8, x3, x4
+ str x8, [sp, #160] // 8-byte Folded Spill
+ umulh x8, x18, x4
+ str x8, [sp, #128] // 8-byte Folded Spill
+ umulh x8, x16, x4
+ str x8, [sp, #80] // 8-byte Folded Spill
+ umulh x8, x15, x4
+ str x8, [sp, #8] // 8-byte Folded Spill
+ umulh x22, x9, x4
+ mul x8, x3, x5
+ str x8, [sp, #120] // 8-byte Folded Spill
+ umulh x8, x3, x5
+ str x8, [sp, #56] // 8-byte Folded Spill
+ mul x6, x18, x5
+ umulh x21, x18, x5
+ mul x3, x16, x5
+ umulh x19, x16, x5
+ mul x17, x15, x5
+ umulh x4, x15, x5
+ mul x16, x9, x5
+ umulh x18, x9, x5
+ ldr x2, [x2, #32]
+ ldp x10, x5, [x1, #16]
+ ldp x8, x9, [x1]
+ ldr x1, [x1, #32]
+ mul x15, x8, x2
+ umulh x14, x8, x2
+ mul x12, x9, x2
+ umulh x13, x9, x2
+ mul x11, x10, x2
+ umulh x10, x10, x2
+ mul x9, x5, x2
+ umulh x5, x5, x2
+ mul x8, x1, x2
+ umulh x1, x1, x2
+ ldr x2, [sp, #72] // 8-byte Folded Reload
+ str x2, [x0]
+ adds x2, x7, x25
+ adcs x7, x24, x23
+ ldr x23, [sp, #64] // 8-byte Folded Reload
+ ldr x24, [sp, #48] // 8-byte Folded Reload
+ adcs x23, x24, x23
+ ldr x24, [sp, #152] // 8-byte Folded Reload
+ ldr x25, [sp, #112] // 8-byte Folded Reload
+ adcs x24, x25, x24
+ ldr x25, [sp, #168] // 8-byte Folded Reload
+ adcs x25, x25, xzr
+ adds x2, x26, x2
+ str x2, [x0, #8]
+ adcs x2, x30, x7
+ adcs x7, x29, x23
+ ldr x23, [sp, #40] // 8-byte Folded Reload
+ adcs x23, x23, x24
+ ldr x24, [sp, #104] // 8-byte Folded Reload
+ adcs x24, x24, x25
+ adcs x25, xzr, xzr
+ adds x2, x2, x20
+ adcs x7, x7, x27
+ ldr x20, [sp, #32] // 8-byte Folded Reload
+ adcs x20, x23, x20
+ ldr x23, [sp, #96] // 8-byte Folded Reload
+ adcs x23, x24, x23
+ ldr x24, [sp, #144] // 8-byte Folded Reload
+ adcs x24, x25, x24
+ ldr x25, [sp, #24] // 8-byte Folded Reload
+ adds x2, x25, x2
+ str x2, [x0, #16]
+ adcs x2, x28, x7
+ ldr x7, [sp, #16] // 8-byte Folded Reload
+ adcs x7, x7, x20
+ ldr x20, [sp, #88] // 8-byte Folded Reload
+ adcs x20, x20, x23
+ ldr x23, [sp, #136] // 8-byte Folded Reload
+ adcs x23, x23, x24
+ adcs x24, xzr, xzr
+ adds x2, x2, x22
+ ldr x22, [sp, #8] // 8-byte Folded Reload
+ adcs x7, x7, x22
+ ldr x22, [sp, #80] // 8-byte Folded Reload
+ adcs x20, x20, x22
+ ldr x22, [sp, #128] // 8-byte Folded Reload
+ adcs x22, x23, x22
+ ldr x23, [sp, #160] // 8-byte Folded Reload
+ adcs x23, x24, x23
+ adds x16, x16, x2
+ str x16, [x0, #24]
+ adcs x16, x17, x7
+ adcs x17, x3, x20
+ adcs x2, x6, x22
+ ldr x3, [sp, #120] // 8-byte Folded Reload
+ adcs x3, x3, x23
+ adcs x6, xzr, xzr
+ adds x16, x16, x18
+ adcs x17, x17, x4
+ adcs x18, x2, x19
+ adcs x2, x3, x21
+ ldr x3, [sp, #56] // 8-byte Folded Reload
+ adcs x3, x6, x3
+ adds x15, x15, x16
+ str x15, [x0, #32]
+ adcs x12, x12, x17
+ adcs x11, x11, x18
+ adcs x9, x9, x2
+ adcs x8, x8, x3
+ adcs x15, xzr, xzr
+ adds x12, x12, x14
+ adcs x11, x11, x13
+ stp x12, x11, [x0, #40]
+ adcs x9, x9, x10
+ adcs x8, x8, x5
+ stp x9, x8, [x0, #56]
+ adcs x8, x15, x1
+ str x8, [x0, #72]
+ add sp, sp, #176 // =176
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end66:
+ .size mcl_fpDbl_mulPre5L, .Lfunc_end66-mcl_fpDbl_mulPre5L
+
+ .globl mcl_fpDbl_sqrPre5L
+ .align 2
+ .type mcl_fpDbl_sqrPre5L,@function
+mcl_fpDbl_sqrPre5L: // @mcl_fpDbl_sqrPre5L
+// BB#0:
+ ldp x8, x9, [x1]
+ ldp x10, x11, [x1, #16]
+ ldp x12, x15, [x1]
+ ldp x13, x14, [x1, #24]
+ ldr x16, [x1, #16]
+ mul x17, x12, x12
+ mul x18, x14, x12
+ mul x2, x11, x12
+ umulh x3, x16, x12
+ mul x4, x16, x12
+ umulh x5, x9, x12
+ mul x6, x9, x12
+ str x17, [x0]
+ umulh x17, x12, x12
+ adds x17, x17, x6
+ adcs x4, x5, x4
+ adcs x2, x3, x2
+ umulh x3, x11, x12
+ adcs x18, x3, x18
+ umulh x12, x14, x12
+ adcs x12, x12, xzr
+ adds x17, x6, x17
+ ldr x3, [x1]
+ str x17, [x0, #8]
+ mul x17, x9, x9
+ adcs x17, x17, x4
+ mul x4, x16, x9
+ adcs x2, x4, x2
+ mul x4, x11, x9
+ adcs x18, x4, x18
+ mul x4, x14, x9
+ adcs x12, x4, x12
+ adcs x4, xzr, xzr
+ adds x17, x17, x5
+ umulh x5, x9, x9
+ adcs x2, x2, x5
+ umulh x5, x16, x9
+ adcs x18, x18, x5
+ ldr x5, [x1, #8]
+ umulh x11, x11, x9
+ adcs x11, x12, x11
+ ldr x12, [x1, #24]
+ umulh x9, x14, x9
+ adcs x9, x4, x9
+ mul x4, x3, x16
+ adds x17, x4, x17
+ mul x4, x14, x16
+ str x17, [x0, #16]
+ mul x17, x5, x16
+ adcs x17, x17, x2
+ mul x2, x16, x16
+ adcs x18, x2, x18
+ mul x2, x12, x16
+ adcs x11, x2, x11
+ umulh x2, x3, x16
+ adcs x9, x4, x9
+ adcs x4, xzr, xzr
+ adds x17, x17, x2
+ umulh x2, x5, x16
+ adcs x18, x18, x2
+ umulh x2, x16, x16
+ adcs x11, x11, x2
+ umulh x14, x14, x16
+ umulh x16, x12, x16
+ adcs x9, x9, x16
+ ldr x16, [x1, #32]
+ adcs x14, x4, x14
+ mul x1, x3, x12
+ adds x17, x1, x17
+ mul x1, x16, x12
+ str x17, [x0, #24]
+ mul x17, x5, x12
+ adcs x17, x17, x18
+ mul x18, x10, x12
+ adcs x11, x18, x11
+ mul x18, x12, x12
+ adcs x9, x18, x9
+ umulh x18, x16, x12
+ umulh x2, x3, x12
+ adcs x14, x1, x14
+ adcs x1, xzr, xzr
+ adds x17, x17, x2
+ umulh x2, x10, x12
+ umulh x3, x5, x12
+ umulh x12, x12, x12
+ adcs x11, x11, x3
+ mul x3, x8, x16
+ adcs x9, x9, x2
+ mul x2, x13, x16
+ adcs x12, x14, x12
+ mul x14, x10, x16
+ adcs x18, x1, x18
+ mul x1, x15, x16
+ adds x17, x17, x3
+ mul x3, x16, x16
+ umulh x8, x8, x16
+ umulh x15, x15, x16
+ umulh x10, x10, x16
+ umulh x13, x13, x16
+ umulh x16, x16, x16
+ str x17, [x0, #32]
+ adcs x11, x11, x1
+ adcs x9, x9, x14
+ adcs x12, x12, x2
+ adcs x14, x18, x3
+ adcs x17, xzr, xzr
+ adds x8, x11, x8
+ str x8, [x0, #40]
+ adcs x8, x9, x15
+ str x8, [x0, #48]
+ adcs x8, x12, x10
+ str x8, [x0, #56]
+ adcs x8, x14, x13
+ str x8, [x0, #64]
+ adcs x8, x17, x16
+ str x8, [x0, #72]
+ ret
+.Lfunc_end67:
+ .size mcl_fpDbl_sqrPre5L, .Lfunc_end67-mcl_fpDbl_sqrPre5L
+
+ .globl mcl_fp_mont5L
+ .align 2
+ .type mcl_fp_mont5L,@function
+mcl_fp_mont5L: // @mcl_fp_mont5L
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ sub sp, sp, #80 // =80
+ str x0, [sp, #72] // 8-byte Folded Spill
+ ldp x16, x10, [x1, #24]
+ ldp x18, x0, [x1, #8]
+ ldr x17, [x1]
+ ldur x9, [x3, #-8]
+ str x9, [sp, #16] // 8-byte Folded Spill
+ ldp x11, x8, [x3, #24]
+ ldp x14, x12, [x3, #8]
+ ldr x13, [x3]
+ ldp x3, x1, [x2]
+ ldp x4, x5, [x2, #16]
+ ldr x2, [x2, #32]
+ umulh x6, x10, x3
+ mul x7, x10, x3
+ umulh x19, x16, x3
+ mul x20, x16, x3
+ umulh x21, x0, x3
+ mul x22, x0, x3
+ umulh x23, x18, x3
+ mul x24, x18, x3
+ umulh x25, x17, x3
+ mul x3, x17, x3
+ umulh x26, x1, x10
+ mul x27, x1, x10
+ umulh x28, x1, x16
+ adds x24, x25, x24
+ mul x25, x3, x9
+ adcs x22, x23, x22
+ mul x23, x25, x8
+ mul x29, x25, x11
+ mul x30, x25, x12
+ adcs x20, x21, x20
+ mul x21, x25, x14
+ adcs x7, x19, x7
+ umulh x19, x25, x13
+ adcs x6, x6, xzr
+ adds x19, x19, x21
+ umulh x21, x25, x14
+ adcs x21, x21, x30
+ umulh x30, x25, x12
+ adcs x29, x30, x29
+ umulh x30, x25, x11
+ adcs x23, x30, x23
+ umulh x30, x25, x8
+ mul x25, x25, x13
+ adcs x30, x30, xzr
+ cmn x25, x3
+ mul x3, x1, x16
+ umulh x25, x1, x0
+ adcs x19, x19, x24
+ mul x24, x1, x0
+ adcs x21, x21, x22
+ umulh x22, x1, x18
+ adcs x20, x29, x20
+ mul x29, x1, x18
+ adcs x7, x23, x7
+ umulh x23, x1, x17
+ mul x1, x1, x17
+ adcs x6, x30, x6
+ adcs x30, xzr, xzr
+ adds x23, x23, x29
+ umulh x29, x4, x10
+ adcs x22, x22, x24
+ mul x24, x4, x10
+ adcs x3, x25, x3
+ umulh x25, x4, x16
+ adcs x27, x28, x27
+ adcs x26, x26, xzr
+ adds x1, x19, x1
+ adcs x19, x21, x23
+ mul x21, x1, x9
+ adcs x20, x20, x22
+ mul x22, x21, x8
+ mul x23, x21, x11
+ mul x28, x21, x12
+ adcs x3, x7, x3
+ mul x7, x21, x14
+ adcs x6, x6, x27
+ umulh x27, x21, x13
+ adcs x26, x30, x26
+ adcs x30, xzr, xzr
+ adds x7, x27, x7
+ umulh x27, x21, x14
+ adcs x27, x27, x28
+ umulh x28, x21, x12
+ adcs x23, x28, x23
+ umulh x28, x21, x11
+ adcs x22, x28, x22
+ umulh x28, x21, x8
+ mul x21, x21, x13
+ adcs x28, x28, xzr
+ cmn x21, x1
+ mul x1, x4, x16
+ umulh x21, x4, x0
+ adcs x7, x7, x19
+ mul x19, x4, x0
+ adcs x20, x27, x20
+ umulh x27, x4, x18
+ adcs x3, x23, x3
+ mul x23, x4, x18
+ adcs x6, x22, x6
+ umulh x22, x4, x17
+ mul x4, x4, x17
+ adcs x26, x28, x26
+ umulh x15, x5, x10
+ str x15, [sp, #64] // 8-byte Folded Spill
+ adcs x30, x30, xzr
+ adds x22, x22, x23
+ mul x15, x5, x10
+ str x15, [sp, #56] // 8-byte Folded Spill
+ adcs x19, x27, x19
+ umulh x15, x5, x16
+ str x15, [sp, #40] // 8-byte Folded Spill
+ adcs x1, x21, x1
+ mul x15, x5, x16
+ str x15, [sp, #32] // 8-byte Folded Spill
+ adcs x24, x25, x24
+ adcs x25, x29, xzr
+ adds x4, x7, x4
+ adcs x7, x20, x22
+ mul x20, x4, x9
+ adcs x3, x3, x19
+ mul x19, x20, x8
+ mul x22, x20, x11
+ mov x15, x12
+ mul x29, x20, x15
+ adcs x1, x6, x1
+ mov x21, x14
+ mul x6, x20, x21
+ adcs x24, x26, x24
+ mov x9, x13
+ umulh x26, x20, x9
+ adcs x25, x30, x25
+ adcs x30, xzr, xzr
+ adds x6, x26, x6
+ umulh x26, x20, x21
+ adcs x26, x26, x29
+ umulh x29, x20, x15
+ adcs x22, x29, x22
+ umulh x29, x20, x11
+ mov x13, x11
+ adcs x19, x29, x19
+ umulh x29, x20, x8
+ mov x12, x8
+ mul x20, x20, x9
+ mov x14, x9
+ adcs x29, x29, xzr
+ cmn x20, x4
+ umulh x4, x5, x0
+ mul x20, x5, x0
+ umulh x11, x5, x18
+ mul x9, x5, x18
+ umulh x8, x5, x17
+ mul x5, x5, x17
+ umulh x23, x2, x10
+ str x23, [sp, #48] // 8-byte Folded Spill
+ mul x10, x2, x10
+ str x10, [sp, #24] // 8-byte Folded Spill
+ umulh x10, x2, x16
+ str x10, [sp, #8] // 8-byte Folded Spill
+ mul x28, x2, x16
+ umulh x27, x2, x0
+ mul x23, x2, x0
+ umulh x16, x2, x18
+ mul x18, x2, x18
+ umulh x0, x2, x17
+ mul x17, x2, x17
+ adcs x2, x6, x7
+ adcs x3, x26, x3
+ adcs x1, x22, x1
+ adcs x6, x19, x24
+ adcs x7, x29, x25
+ adcs x19, x30, xzr
+ adds x8, x8, x9
+ adcs x9, x11, x20
+ ldr x10, [sp, #32] // 8-byte Folded Reload
+ adcs x10, x4, x10
+ ldr x11, [sp, #56] // 8-byte Folded Reload
+ ldr x4, [sp, #40] // 8-byte Folded Reload
+ adcs x4, x4, x11
+ ldr x11, [sp, #64] // 8-byte Folded Reload
+ adcs x20, x11, xzr
+ adds x2, x2, x5
+ adcs x8, x3, x8
+ ldr x24, [sp, #16] // 8-byte Folded Reload
+ mul x3, x2, x24
+ adcs x9, x1, x9
+ mul x1, x3, x12
+ mul x5, x3, x13
+ mul x22, x3, x15
+ adcs x10, x6, x10
+ mul x6, x3, x21
+ adcs x4, x7, x4
+ umulh x7, x3, x14
+ adcs x19, x19, x20
+ adcs x20, xzr, xzr
+ adds x6, x7, x6
+ umulh x7, x3, x21
+ adcs x7, x7, x22
+ umulh x22, x3, x15
+ mov x25, x15
+ adcs x5, x22, x5
+ umulh x22, x3, x13
+ adcs x1, x22, x1
+ umulh x22, x3, x12
+ mul x3, x3, x14
+ adcs x22, x22, xzr
+ cmn x3, x2
+ adcs x8, x6, x8
+ adcs x9, x7, x9
+ adcs x10, x5, x10
+ adcs x1, x1, x4
+ adcs x2, x22, x19
+ adcs x3, x20, xzr
+ adds x11, x0, x18
+ adcs x15, x16, x23
+ adcs x16, x27, x28
+ ldr x18, [sp, #24] // 8-byte Folded Reload
+ ldr x0, [sp, #8] // 8-byte Folded Reload
+ adcs x18, x0, x18
+ ldr x0, [sp, #48] // 8-byte Folded Reload
+ adcs x4, x0, xzr
+ adds x8, x8, x17
+ adcs x9, x9, x11
+ mul x11, x8, x24
+ adcs x10, x10, x15
+ umulh x15, x11, x12
+ mul x17, x11, x12
+ umulh x5, x11, x13
+ mul x6, x11, x13
+ mov x0, x13
+ mov x20, x25
+ umulh x7, x11, x20
+ mul x19, x11, x20
+ mov x23, x20
+ mov x13, x21
+ umulh x20, x11, x13
+ mul x21, x11, x13
+ umulh x22, x11, x14
+ mul x11, x11, x14
+ adcs x16, x1, x16
+ adcs x18, x2, x18
+ adcs x1, x3, x4
+ adcs x2, xzr, xzr
+ adds x3, x22, x21
+ adcs x4, x20, x19
+ adcs x6, x7, x6
+ adcs x17, x5, x17
+ adcs x15, x15, xzr
+ cmn x11, x8
+ adcs x8, x3, x9
+ adcs x9, x4, x10
+ adcs x10, x6, x16
+ adcs x11, x17, x18
+ adcs x15, x15, x1
+ adcs x16, x2, xzr
+ subs x1, x8, x14
+ sbcs x13, x9, x13
+ sbcs x14, x10, x23
+ sbcs x17, x11, x0
+ sbcs x18, x15, x12
+ sbcs x16, x16, xzr
+ tst x16, #0x1
+ csel x8, x8, x1, ne
+ csel x9, x9, x13, ne
+ csel x10, x10, x14, ne
+ csel x11, x11, x17, ne
+ csel x12, x15, x18, ne
+ ldr x13, [sp, #72] // 8-byte Folded Reload
+ stp x8, x9, [x13]
+ stp x10, x11, [x13, #16]
+ str x12, [x13, #32]
+ add sp, sp, #80 // =80
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end68:
+ .size mcl_fp_mont5L, .Lfunc_end68-mcl_fp_mont5L
+
+ .globl mcl_fp_montNF5L
+ .align 2
+ .type mcl_fp_montNF5L,@function
+mcl_fp_montNF5L: // @mcl_fp_montNF5L
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ sub sp, sp, #32 // =32
+ str x0, [sp, #24] // 8-byte Folded Spill
+ ldp x16, x14, [x1, #24]
+ ldp x18, x15, [x1, #8]
+ ldr x17, [x1]
+ ldur x13, [x3, #-8]
+ ldp x9, x8, [x3, #24]
+ ldp x11, x10, [x3, #8]
+ ldr x12, [x3]
+ ldp x1, x3, [x2]
+ ldp x4, x5, [x2, #16]
+ ldr x2, [x2, #32]
+ umulh x6, x14, x1
+ mul x7, x14, x1
+ umulh x19, x16, x1
+ mul x20, x16, x1
+ umulh x21, x15, x1
+ mul x22, x15, x1
+ umulh x23, x18, x1
+ mul x24, x18, x1
+ umulh x25, x17, x1
+ mul x1, x17, x1
+ umulh x26, x3, x14
+ mul x27, x3, x14
+ umulh x28, x3, x16
+ mul x29, x3, x16
+ umulh x30, x3, x15
+ adds x24, x25, x24
+ mul x25, x3, x15
+ adcs x22, x23, x22
+ umulh x23, x3, x18
+ adcs x20, x21, x20
+ mul x21, x1, x13
+ adcs x7, x19, x7
+ mul x19, x21, x12
+ adcs x6, x6, xzr
+ cmn x19, x1
+ mul x1, x3, x18
+ mul x19, x21, x11
+ adcs x19, x19, x24
+ mul x24, x21, x10
+ adcs x22, x24, x22
+ mul x24, x21, x9
+ adcs x20, x24, x20
+ mul x24, x21, x8
+ adcs x7, x24, x7
+ umulh x24, x21, x12
+ adcs x6, x6, xzr
+ adds x19, x19, x24
+ umulh x24, x21, x11
+ adcs x22, x22, x24
+ umulh x24, x21, x10
+ adcs x20, x20, x24
+ umulh x24, x21, x9
+ adcs x7, x7, x24
+ umulh x24, x3, x17
+ mul x3, x3, x17
+ umulh x21, x21, x8
+ adcs x6, x6, x21
+ umulh x21, x4, x14
+ adds x1, x24, x1
+ mul x24, x4, x14
+ adcs x23, x23, x25
+ umulh x25, x4, x16
+ adcs x29, x30, x29
+ mul x30, x4, x16
+ adcs x27, x28, x27
+ umulh x28, x4, x15
+ adcs x26, x26, xzr
+ adds x3, x3, x19
+ mul x19, x4, x15
+ adcs x1, x1, x22
+ umulh x22, x4, x18
+ adcs x20, x23, x20
+ mul x23, x4, x18
+ adcs x7, x29, x7
+ mul x29, x3, x13
+ adcs x6, x27, x6
+ mul x27, x29, x12
+ adcs x26, x26, xzr
+ cmn x27, x3
+ umulh x3, x4, x17
+ mul x4, x4, x17
+ mul x27, x29, x11
+ adcs x1, x27, x1
+ mul x27, x29, x10
+ adcs x20, x27, x20
+ mul x27, x29, x9
+ adcs x7, x27, x7
+ mul x27, x29, x8
+ adcs x6, x27, x6
+ umulh x27, x29, x12
+ adcs x26, x26, xzr
+ adds x1, x1, x27
+ umulh x27, x29, x11
+ adcs x20, x20, x27
+ umulh x27, x29, x10
+ adcs x7, x7, x27
+ umulh x27, x29, x9
+ adcs x6, x6, x27
+ umulh x27, x5, x14
+ umulh x29, x29, x8
+ adcs x26, x26, x29
+ mul x29, x5, x14
+ adds x3, x3, x23
+ umulh x23, x5, x16
+ adcs x19, x22, x19
+ mul x22, x5, x16
+ adcs x28, x28, x30
+ umulh x30, x5, x15
+ adcs x24, x25, x24
+ mul x25, x5, x15
+ adcs x21, x21, xzr
+ adds x1, x4, x1
+ umulh x4, x5, x18
+ adcs x3, x3, x20
+ mul x20, x5, x18
+ adcs x7, x19, x7
+ umulh x19, x5, x17
+ mul x5, x5, x17
+ adcs x6, x28, x6
+ mul x28, x1, x13
+ adcs x24, x24, x26
+ mul x26, x28, x12
+ adcs x21, x21, xzr
+ cmn x26, x1
+ umulh x0, x2, x14
+ mul x14, x2, x14
+ stp x14, x0, [sp, #8]
+ umulh x26, x2, x16
+ mul x1, x2, x16
+ umulh x0, x2, x15
+ mul x16, x2, x15
+ umulh x15, x2, x18
+ mul x18, x2, x18
+ umulh x14, x2, x17
+ mul x17, x2, x17
+ mul x2, x28, x11
+ adcs x2, x2, x3
+ mul x3, x28, x10
+ adcs x3, x3, x7
+ mul x7, x28, x9
+ adcs x6, x7, x6
+ mul x7, x28, x8
+ adcs x7, x7, x24
+ adcs x21, x21, xzr
+ umulh x24, x28, x12
+ adds x2, x2, x24
+ umulh x24, x28, x11
+ adcs x3, x3, x24
+ umulh x24, x28, x10
+ adcs x6, x6, x24
+ umulh x24, x28, x9
+ adcs x7, x7, x24
+ umulh x24, x28, x8
+ adcs x21, x21, x24
+ adds x19, x19, x20
+ adcs x4, x4, x25
+ adcs x20, x30, x22
+ adcs x22, x23, x29
+ adcs x23, x27, xzr
+ adds x2, x5, x2
+ adcs x3, x19, x3
+ mov x24, x13
+ mul x5, x2, x24
+ adcs x4, x4, x6
+ mul x6, x5, x8
+ mul x19, x5, x9
+ adcs x7, x20, x7
+ mul x20, x5, x10
+ adcs x21, x22, x21
+ mul x22, x5, x12
+ adcs x23, x23, xzr
+ cmn x22, x2
+ mul x2, x5, x11
+ umulh x22, x5, x8
+ adcs x2, x2, x3
+ umulh x3, x5, x9
+ adcs x4, x20, x4
+ umulh x20, x5, x10
+ adcs x7, x19, x7
+ umulh x19, x5, x11
+ umulh x5, x5, x12
+ adcs x6, x6, x21
+ adcs x21, x23, xzr
+ adds x2, x2, x5
+ adcs x4, x4, x19
+ adcs x5, x7, x20
+ adcs x3, x6, x3
+ adcs x6, x21, x22
+ adds x13, x14, x18
+ adcs x14, x15, x16
+ adcs x15, x0, x1
+ ldp x16, x18, [sp, #8]
+ adcs x16, x26, x16
+ adcs x18, x18, xzr
+ adds x17, x17, x2
+ adcs x13, x13, x4
+ mul x0, x17, x24
+ adcs x14, x14, x5
+ mul x1, x0, x8
+ mul x2, x0, x9
+ mul x4, x0, x10
+ mul x5, x0, x11
+ mul x7, x0, x12
+ umulh x19, x0, x8
+ umulh x20, x0, x9
+ umulh x21, x0, x10
+ umulh x22, x0, x11
+ umulh x0, x0, x12
+ adcs x15, x15, x3
+ adcs x16, x16, x6
+ adcs x18, x18, xzr
+ cmn x7, x17
+ adcs x13, x5, x13
+ adcs x14, x4, x14
+ adcs x15, x2, x15
+ adcs x16, x1, x16
+ adcs x17, x18, xzr
+ adds x13, x13, x0
+ adcs x14, x14, x22
+ adcs x15, x15, x21
+ adcs x16, x16, x20
+ adcs x17, x17, x19
+ subs x12, x13, x12
+ sbcs x11, x14, x11
+ sbcs x10, x15, x10
+ sbcs x9, x16, x9
+ sbcs x8, x17, x8
+ asr x18, x8, #63
+ cmp x18, #0 // =0
+ csel x12, x13, x12, lt
+ csel x11, x14, x11, lt
+ csel x10, x15, x10, lt
+ csel x9, x16, x9, lt
+ csel x8, x17, x8, lt
+ ldr x13, [sp, #24] // 8-byte Folded Reload
+ stp x12, x11, [x13]
+ stp x10, x9, [x13, #16]
+ str x8, [x13, #32]
+ add sp, sp, #32 // =32
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end69:
+ .size mcl_fp_montNF5L, .Lfunc_end69-mcl_fp_montNF5L
+
+ .globl mcl_fp_montRed5L
+ .align 2
+ .type mcl_fp_montRed5L,@function
+mcl_fp_montRed5L: // @mcl_fp_montRed5L
+// BB#0:
+ stp x26, x25, [sp, #-64]!
+ stp x24, x23, [sp, #16]
+ stp x22, x21, [sp, #32]
+ stp x20, x19, [sp, #48]
+ ldur x13, [x2, #-8]
+ ldp x9, x8, [x2, #24]
+ ldp x11, x10, [x2, #8]
+ ldr x12, [x2]
+ ldp x15, x16, [x1, #64]
+ ldp x17, x18, [x1, #48]
+ ldp x2, x3, [x1, #32]
+ ldp x4, x5, [x1, #16]
+ ldp x14, x1, [x1]
+ mul x6, x14, x13
+ umulh x7, x6, x8
+ mul x19, x6, x8
+ umulh x20, x6, x9
+ mul x21, x6, x9
+ umulh x22, x6, x10
+ mul x23, x6, x10
+ umulh x24, x6, x11
+ mul x25, x6, x11
+ umulh x26, x6, x12
+ mul x6, x6, x12
+ adds x25, x26, x25
+ adcs x23, x24, x23
+ adcs x21, x22, x21
+ adcs x19, x20, x19
+ adcs x7, x7, xzr
+ cmn x14, x6
+ adcs x14, x1, x25
+ adcs x1, x4, x23
+ mul x4, x14, x13
+ adcs x5, x5, x21
+ umulh x6, x4, x8
+ mul x20, x4, x8
+ umulh x21, x4, x9
+ mul x22, x4, x9
+ umulh x23, x4, x10
+ mul x24, x4, x10
+ umulh x25, x4, x11
+ mul x26, x4, x11
+ adcs x2, x2, x19
+ umulh x19, x4, x12
+ mul x4, x4, x12
+ adcs x3, x3, x7
+ adcs x17, x17, xzr
+ adcs x18, x18, xzr
+ adcs x15, x15, xzr
+ adcs x16, x16, xzr
+ adcs x7, xzr, xzr
+ adds x19, x19, x26
+ adcs x24, x25, x24
+ adcs x22, x23, x22
+ adcs x20, x21, x20
+ adcs x6, x6, xzr
+ cmn x4, x14
+ adcs x14, x19, x1
+ adcs x1, x24, x5
+ mul x4, x14, x13
+ adcs x2, x22, x2
+ umulh x5, x4, x8
+ mul x19, x4, x8
+ umulh x21, x4, x9
+ mul x22, x4, x9
+ umulh x23, x4, x10
+ mul x24, x4, x10
+ umulh x25, x4, x11
+ mul x26, x4, x11
+ adcs x3, x20, x3
+ umulh x20, x4, x12
+ mul x4, x4, x12
+ adcs x17, x6, x17
+ adcs x18, x18, xzr
+ adcs x15, x15, xzr
+ adcs x16, x16, xzr
+ adcs x6, x7, xzr
+ adds x7, x20, x26
+ adcs x20, x25, x24
+ adcs x22, x23, x22
+ adcs x19, x21, x19
+ adcs x5, x5, xzr
+ cmn x4, x14
+ adcs x14, x7, x1
+ adcs x1, x20, x2
+ mul x2, x14, x13
+ adcs x3, x22, x3
+ umulh x4, x2, x8
+ mul x7, x2, x8
+ umulh x20, x2, x9
+ mul x21, x2, x9
+ umulh x22, x2, x10
+ mul x23, x2, x10
+ umulh x24, x2, x11
+ mul x25, x2, x11
+ umulh x26, x2, x12
+ mul x2, x2, x12
+ adcs x17, x19, x17
+ adcs x18, x5, x18
+ adcs x15, x15, xzr
+ adcs x16, x16, xzr
+ adcs x5, x6, xzr
+ adds x6, x26, x25
+ adcs x19, x24, x23
+ adcs x21, x22, x21
+ adcs x7, x20, x7
+ adcs x4, x4, xzr
+ cmn x2, x14
+ adcs x14, x6, x1
+ adcs x1, x19, x3
+ mul x13, x14, x13
+ adcs x17, x21, x17
+ umulh x2, x13, x8
+ mul x3, x13, x8
+ umulh x6, x13, x9
+ mul x19, x13, x9
+ umulh x20, x13, x10
+ mul x21, x13, x10
+ umulh x22, x13, x11
+ mul x23, x13, x11
+ umulh x24, x13, x12
+ mul x13, x13, x12
+ adcs x18, x7, x18
+ adcs x15, x4, x15
+ adcs x16, x16, xzr
+ adcs x4, x5, xzr
+ adds x5, x24, x23
+ adcs x7, x22, x21
+ adcs x19, x20, x19
+ adcs x3, x6, x3
+ adcs x2, x2, xzr
+ cmn x13, x14
+ adcs x13, x5, x1
+ adcs x14, x7, x17
+ adcs x17, x19, x18
+ adcs x15, x3, x15
+ adcs x16, x2, x16
+ adcs x18, x4, xzr
+ subs x12, x13, x12
+ sbcs x11, x14, x11
+ sbcs x10, x17, x10
+ sbcs x9, x15, x9
+ sbcs x8, x16, x8
+ sbcs x18, x18, xzr
+ tst x18, #0x1
+ csel x12, x13, x12, ne
+ csel x11, x14, x11, ne
+ csel x10, x17, x10, ne
+ csel x9, x15, x9, ne
+ csel x8, x16, x8, ne
+ stp x12, x11, [x0]
+ stp x10, x9, [x0, #16]
+ str x8, [x0, #32]
+ ldp x20, x19, [sp, #48]
+ ldp x22, x21, [sp, #32]
+ ldp x24, x23, [sp, #16]
+ ldp x26, x25, [sp], #64
+ ret
+.Lfunc_end70:
+ .size mcl_fp_montRed5L, .Lfunc_end70-mcl_fp_montRed5L
+
+ .globl mcl_fp_addPre5L
+ .align 2
+ .type mcl_fp_addPre5L,@function
+mcl_fp_addPre5L: // @mcl_fp_addPre5L
+// BB#0:
+ ldp x11, x8, [x2, #24]
+ ldp x17, x9, [x1, #24]
+ ldp x13, x10, [x2, #8]
+ ldr x12, [x2]
+ ldp x14, x15, [x1]
+ ldr x16, [x1, #16]
+ adds x12, x12, x14
+ str x12, [x0]
+ adcs x12, x13, x15
+ adcs x10, x10, x16
+ stp x12, x10, [x0, #8]
+ adcs x10, x11, x17
+ adcs x9, x8, x9
+ adcs x8, xzr, xzr
+ stp x10, x9, [x0, #24]
+ mov x0, x8
+ ret
+.Lfunc_end71:
+ .size mcl_fp_addPre5L, .Lfunc_end71-mcl_fp_addPre5L
+
+ .globl mcl_fp_subPre5L
+ .align 2
+ .type mcl_fp_subPre5L,@function
+mcl_fp_subPre5L: // @mcl_fp_subPre5L
+// BB#0:
+ ldp x11, x8, [x2, #24]
+ ldp x17, x9, [x1, #24]
+ ldp x13, x10, [x2, #8]
+ ldr x12, [x2]
+ ldp x14, x15, [x1]
+ ldr x16, [x1, #16]
+ subs x12, x14, x12
+ str x12, [x0]
+ sbcs x12, x15, x13
+ sbcs x10, x16, x10
+ stp x12, x10, [x0, #8]
+ sbcs x10, x17, x11
+ sbcs x9, x9, x8
+ ngcs x8, xzr
+ and x8, x8, #0x1
+ stp x10, x9, [x0, #24]
+ mov x0, x8
+ ret
+.Lfunc_end72:
+ .size mcl_fp_subPre5L, .Lfunc_end72-mcl_fp_subPre5L
+
+ .globl mcl_fp_shr1_5L
+ .align 2
+ .type mcl_fp_shr1_5L,@function
+mcl_fp_shr1_5L: // @mcl_fp_shr1_5L
+// BB#0:
+ ldp x8, x9, [x1]
+ ldp x10, x11, [x1, #16]
+ ldr x12, [x1, #32]
+ extr x8, x9, x8, #1
+ extr x9, x10, x9, #1
+ extr x10, x11, x10, #1
+ extr x11, x12, x11, #1
+ lsr x12, x12, #1
+ stp x8, x9, [x0]
+ stp x10, x11, [x0, #16]
+ str x12, [x0, #32]
+ ret
+.Lfunc_end73:
+ .size mcl_fp_shr1_5L, .Lfunc_end73-mcl_fp_shr1_5L
+
+ .globl mcl_fp_add5L
+ .align 2
+ .type mcl_fp_add5L,@function
+mcl_fp_add5L: // @mcl_fp_add5L
+// BB#0:
+ ldp x11, x8, [x2, #24]
+ ldp x17, x9, [x1, #24]
+ ldp x13, x10, [x2, #8]
+ ldr x12, [x2]
+ ldp x14, x15, [x1]
+ ldr x16, [x1, #16]
+ adds x12, x12, x14
+ ldr x14, [x3, #32]
+ adcs x13, x13, x15
+ adcs x10, x10, x16
+ ldp x15, x16, [x3]
+ stp x12, x13, [x0]
+ adcs x17, x11, x17
+ stp x10, x17, [x0, #16]
+ adcs x8, x8, x9
+ str x8, [x0, #32]
+ adcs x18, xzr, xzr
+ ldp x9, x1, [x3, #16]
+ subs x12, x12, x15
+ sbcs x11, x13, x16
+ sbcs x10, x10, x9
+ sbcs x9, x17, x1
+ sbcs x8, x8, x14
+ sbcs x13, x18, xzr
+ and w13, w13, #0x1
+ tbnz w13, #0, .LBB74_2
+// BB#1: // %nocarry
+ stp x12, x11, [x0]
+ stp x10, x9, [x0, #16]
+ str x8, [x0, #32]
+.LBB74_2: // %carry
+ ret
+.Lfunc_end74:
+ .size mcl_fp_add5L, .Lfunc_end74-mcl_fp_add5L
+
+ .globl mcl_fp_addNF5L
+ .align 2
+ .type mcl_fp_addNF5L,@function
+mcl_fp_addNF5L: // @mcl_fp_addNF5L
+// BB#0:
+ ldp x11, x8, [x1, #24]
+ ldp x17, x9, [x2, #24]
+ ldp x13, x10, [x1, #8]
+ ldr x12, [x1]
+ ldp x14, x15, [x2]
+ ldr x16, [x2, #16]
+ adds x12, x14, x12
+ ldp x18, x14, [x3, #24]
+ adcs x13, x15, x13
+ adcs x10, x16, x10
+ ldp x15, x16, [x3]
+ adcs x11, x17, x11
+ ldr x17, [x3, #16]
+ adcs x8, x9, x8
+ subs x9, x12, x15
+ sbcs x15, x13, x16
+ sbcs x16, x10, x17
+ sbcs x17, x11, x18
+ sbcs x14, x8, x14
+ asr x18, x14, #63
+ cmp x18, #0 // =0
+ csel x9, x12, x9, lt
+ csel x12, x13, x15, lt
+ csel x10, x10, x16, lt
+ csel x11, x11, x17, lt
+ csel x8, x8, x14, lt
+ stp x9, x12, [x0]
+ stp x10, x11, [x0, #16]
+ str x8, [x0, #32]
+ ret
+.Lfunc_end75:
+ .size mcl_fp_addNF5L, .Lfunc_end75-mcl_fp_addNF5L
+
+ .globl mcl_fp_sub5L
+ .align 2
+ .type mcl_fp_sub5L,@function
+mcl_fp_sub5L: // @mcl_fp_sub5L
+// BB#0:
+ ldp x11, x12, [x2, #24]
+ ldp x17, x13, [x1, #24]
+ ldp x9, x10, [x2, #8]
+ ldr x8, [x2]
+ ldp x14, x15, [x1]
+ ldr x16, [x1, #16]
+ subs x8, x14, x8
+ sbcs x9, x15, x9
+ stp x8, x9, [x0]
+ sbcs x10, x16, x10
+ sbcs x11, x17, x11
+ stp x10, x11, [x0, #16]
+ sbcs x12, x13, x12
+ str x12, [x0, #32]
+ ngcs x13, xzr
+ and w13, w13, #0x1
+ tbnz w13, #0, .LBB76_2
+// BB#1: // %nocarry
+ ret
+.LBB76_2: // %carry
+ ldp x17, x13, [x3, #24]
+ ldp x14, x15, [x3]
+ ldr x16, [x3, #16]
+ adds x8, x14, x8
+ adcs x9, x15, x9
+ adcs x10, x16, x10
+ adcs x11, x17, x11
+ adcs x12, x13, x12
+ stp x8, x9, [x0]
+ stp x10, x11, [x0, #16]
+ str x12, [x0, #32]
+ ret
+.Lfunc_end76:
+ .size mcl_fp_sub5L, .Lfunc_end76-mcl_fp_sub5L
+
+ .globl mcl_fp_subNF5L
+ .align 2
+ .type mcl_fp_subNF5L,@function
+mcl_fp_subNF5L: // @mcl_fp_subNF5L
+// BB#0:
+ ldp x11, x8, [x2, #24]
+ ldp x17, x9, [x1, #24]
+ ldp x13, x10, [x2, #8]
+ ldr x12, [x2]
+ ldp x14, x15, [x1]
+ ldr x16, [x1, #16]
+ subs x12, x14, x12
+ sbcs x13, x15, x13
+ ldp x1, x14, [x3, #8]
+ ldp x15, x18, [x3, #24]
+ sbcs x10, x16, x10
+ ldr x16, [x3]
+ sbcs x11, x17, x11
+ sbcs x8, x9, x8
+ asr x9, x8, #63
+ extr x17, x9, x8, #63
+ and x16, x17, x16
+ and x14, x14, x9, ror #63
+ and x15, x9, x15
+ and x17, x9, x18
+ ror x9, x9, #63
+ and x9, x9, x1
+ adds x12, x16, x12
+ adcs x9, x9, x13
+ stp x12, x9, [x0]
+ adcs x9, x14, x10
+ str x9, [x0, #16]
+ adcs x9, x15, x11
+ adcs x8, x17, x8
+ stp x9, x8, [x0, #24]
+ ret
+.Lfunc_end77:
+ .size mcl_fp_subNF5L, .Lfunc_end77-mcl_fp_subNF5L
+
+ .globl mcl_fpDbl_add5L
+ .align 2
+ .type mcl_fpDbl_add5L,@function
+mcl_fpDbl_add5L: // @mcl_fpDbl_add5L
+// BB#0:
+ stp x22, x21, [sp, #-32]!
+ stp x20, x19, [sp, #16]
+ ldp x8, x9, [x2, #64]
+ ldp x10, x11, [x1, #64]
+ ldp x12, x13, [x2, #48]
+ ldp x14, x15, [x1, #48]
+ ldp x16, x17, [x2, #32]
+ ldp x18, x4, [x1, #32]
+ ldp x5, x6, [x2, #16]
+ ldp x19, x2, [x2]
+ ldp x20, x21, [x1, #16]
+ ldp x7, x1, [x1]
+ adds x7, x19, x7
+ ldr x19, [x3, #32]
+ str x7, [x0]
+ adcs x1, x2, x1
+ ldp x2, x7, [x3, #16]
+ str x1, [x0, #8]
+ ldp x1, x3, [x3]
+ adcs x5, x5, x20
+ str x5, [x0, #16]
+ adcs x5, x6, x21
+ adcs x16, x16, x18
+ stp x5, x16, [x0, #24]
+ adcs x16, x17, x4
+ adcs x12, x12, x14
+ adcs x13, x13, x15
+ adcs x8, x8, x10
+ adcs x9, x9, x11
+ adcs x10, xzr, xzr
+ subs x11, x16, x1
+ sbcs x14, x12, x3
+ sbcs x15, x13, x2
+ sbcs x17, x8, x7
+ sbcs x18, x9, x19
+ sbcs x10, x10, xzr
+ tst x10, #0x1
+ csel x10, x16, x11, ne
+ csel x11, x12, x14, ne
+ csel x12, x13, x15, ne
+ csel x8, x8, x17, ne
+ csel x9, x9, x18, ne
+ stp x10, x11, [x0, #40]
+ stp x12, x8, [x0, #56]
+ str x9, [x0, #72]
+ ldp x20, x19, [sp, #16]
+ ldp x22, x21, [sp], #32
+ ret
+.Lfunc_end78:
+ .size mcl_fpDbl_add5L, .Lfunc_end78-mcl_fpDbl_add5L
+
+ .globl mcl_fpDbl_sub5L
+ .align 2
+ .type mcl_fpDbl_sub5L,@function
+mcl_fpDbl_sub5L: // @mcl_fpDbl_sub5L
+// BB#0:
+ stp x22, x21, [sp, #-32]!
+ stp x20, x19, [sp, #16]
+ ldp x8, x9, [x2, #64]
+ ldp x10, x11, [x1, #64]
+ ldp x12, x13, [x2, #48]
+ ldp x14, x15, [x1, #48]
+ ldp x16, x17, [x2, #32]
+ ldp x18, x4, [x1, #32]
+ ldp x5, x6, [x2, #16]
+ ldp x7, x2, [x2]
+ ldp x20, x21, [x1, #16]
+ ldp x19, x1, [x1]
+ subs x7, x19, x7
+ ldr x19, [x3, #32]
+ str x7, [x0]
+ sbcs x1, x1, x2
+ ldp x2, x7, [x3, #16]
+ str x1, [x0, #8]
+ ldp x1, x3, [x3]
+ sbcs x5, x20, x5
+ str x5, [x0, #16]
+ sbcs x5, x21, x6
+ sbcs x16, x18, x16
+ stp x5, x16, [x0, #24]
+ sbcs x16, x4, x17
+ sbcs x12, x14, x12
+ sbcs x13, x15, x13
+ sbcs x8, x10, x8
+ sbcs x9, x11, x9
+ ngcs x10, xzr
+ tst x10, #0x1
+ csel x10, x19, xzr, ne
+ csel x11, x7, xzr, ne
+ csel x14, x2, xzr, ne
+ csel x15, x3, xzr, ne
+ csel x17, x1, xzr, ne
+ adds x16, x17, x16
+ adcs x12, x15, x12
+ stp x16, x12, [x0, #40]
+ adcs x12, x14, x13
+ adcs x8, x11, x8
+ stp x12, x8, [x0, #56]
+ adcs x8, x10, x9
+ str x8, [x0, #72]
+ ldp x20, x19, [sp, #16]
+ ldp x22, x21, [sp], #32
+ ret
+.Lfunc_end79:
+ .size mcl_fpDbl_sub5L, .Lfunc_end79-mcl_fpDbl_sub5L
+
+ .globl mcl_fp_mulUnitPre6L
+ .align 2
+ .type mcl_fp_mulUnitPre6L,@function
+mcl_fp_mulUnitPre6L: // @mcl_fp_mulUnitPre6L
+// BB#0:
+ ldp x8, x9, [x1, #32]
+ ldp x10, x11, [x1]
+ ldp x12, x13, [x1, #16]
+ mul x14, x10, x2
+ mul x15, x11, x2
+ umulh x10, x10, x2
+ mul x16, x12, x2
+ umulh x11, x11, x2
+ mul x17, x13, x2
+ umulh x12, x12, x2
+ mul x18, x8, x2
+ umulh x13, x13, x2
+ mul x1, x9, x2
+ umulh x8, x8, x2
+ umulh x9, x9, x2
+ adds x10, x10, x15
+ stp x14, x10, [x0]
+ adcs x10, x11, x16
+ str x10, [x0, #16]
+ adcs x10, x12, x17
+ str x10, [x0, #24]
+ adcs x10, x13, x18
+ adcs x8, x8, x1
+ stp x10, x8, [x0, #32]
+ adcs x8, x9, xzr
+ str x8, [x0, #48]
+ ret
+.Lfunc_end80:
+ .size mcl_fp_mulUnitPre6L, .Lfunc_end80-mcl_fp_mulUnitPre6L
+
+ .globl mcl_fpDbl_mulPre6L
+ .align 2
+ .type mcl_fpDbl_mulPre6L,@function
+mcl_fpDbl_mulPre6L: // @mcl_fpDbl_mulPre6L
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ sub sp, sp, #400 // =400
+ ldp x8, x9, [x1]
+ ldp x11, x13, [x1]
+ ldp x10, x17, [x1, #16]
+ ldp x12, x14, [x1, #32]
+ ldp x15, x16, [x2]
+ ldr x3, [x1, #32]
+ mul x30, x8, x15
+ umulh x18, x14, x15
+ str x18, [sp, #392] // 8-byte Folded Spill
+ mul x18, x14, x15
+ str x18, [sp, #384] // 8-byte Folded Spill
+ umulh x18, x12, x15
+ str x18, [sp, #376] // 8-byte Folded Spill
+ mul x18, x12, x15
+ str x18, [sp, #360] // 8-byte Folded Spill
+ umulh x18, x17, x15
+ str x18, [sp, #336] // 8-byte Folded Spill
+ mul x18, x17, x15
+ str x18, [sp, #312] // 8-byte Folded Spill
+ umulh x18, x10, x15
+ str x18, [sp, #304] // 8-byte Folded Spill
+ mul x18, x10, x15
+ str x18, [sp, #272] // 8-byte Folded Spill
+ umulh x18, x9, x15
+ str x18, [sp, #248] // 8-byte Folded Spill
+ mul x18, x9, x15
+ umulh x15, x8, x15
+ stp x15, x18, [sp, #216]
+ mul x15, x8, x16
+ str x15, [sp, #280] // 8-byte Folded Spill
+ mul x15, x14, x16
+ str x15, [sp, #352] // 8-byte Folded Spill
+ mul x15, x12, x16
+ str x15, [sp, #328] // 8-byte Folded Spill
+ mul x15, x17, x16
+ str x15, [sp, #296] // 8-byte Folded Spill
+ mul x15, x10, x16
+ str x15, [sp, #264] // 8-byte Folded Spill
+ mul x15, x9, x16
+ umulh x14, x14, x16
+ str x14, [sp, #368] // 8-byte Folded Spill
+ umulh x12, x12, x16
+ str x12, [sp, #344] // 8-byte Folded Spill
+ umulh x12, x17, x16
+ str x12, [sp, #320] // 8-byte Folded Spill
+ umulh x10, x10, x16
+ str x10, [sp, #288] // 8-byte Folded Spill
+ umulh x9, x9, x16
+ str x9, [sp, #256] // 8-byte Folded Spill
+ umulh x8, x8, x16
+ stp x8, x15, [sp, #232]
+ ldp x12, x8, [x2, #16]
+ ldr x9, [x1, #40]
+ ldp x15, x10, [x1, #16]
+ mul x14, x11, x12
+ str x14, [sp, #144] // 8-byte Folded Spill
+ mul x14, x9, x12
+ str x14, [sp, #200] // 8-byte Folded Spill
+ mul x14, x3, x12
+ str x14, [sp, #176] // 8-byte Folded Spill
+ mul x14, x10, x12
+ str x14, [sp, #160] // 8-byte Folded Spill
+ mul x14, x15, x12
+ str x14, [sp, #128] // 8-byte Folded Spill
+ mul x14, x13, x12
+ str x14, [sp, #112] // 8-byte Folded Spill
+ umulh x14, x9, x12
+ str x14, [sp, #208] // 8-byte Folded Spill
+ umulh x14, x3, x12
+ str x14, [sp, #192] // 8-byte Folded Spill
+ umulh x14, x10, x12
+ str x14, [sp, #168] // 8-byte Folded Spill
+ umulh x14, x15, x12
+ str x14, [sp, #152] // 8-byte Folded Spill
+ umulh x14, x13, x12
+ str x14, [sp, #120] // 8-byte Folded Spill
+ umulh x12, x11, x12
+ str x12, [sp, #104] // 8-byte Folded Spill
+ mul x12, x9, x8
+ str x12, [sp, #184] // 8-byte Folded Spill
+ umulh x9, x9, x8
+ str x9, [sp, #136] // 8-byte Folded Spill
+ mul x9, x3, x8
+ str x9, [sp, #80] // 8-byte Folded Spill
+ umulh x9, x3, x8
+ str x9, [sp, #96] // 8-byte Folded Spill
+ mul x9, x10, x8
+ str x9, [sp, #64] // 8-byte Folded Spill
+ umulh x9, x10, x8
+ str x9, [sp, #88] // 8-byte Folded Spill
+ mul x9, x15, x8
+ str x9, [sp, #48] // 8-byte Folded Spill
+ umulh x9, x15, x8
+ str x9, [sp, #72] // 8-byte Folded Spill
+ mul x9, x13, x8
+ str x9, [sp, #32] // 8-byte Folded Spill
+ umulh x9, x13, x8
+ str x9, [sp, #56] // 8-byte Folded Spill
+ mul x9, x11, x8
+ str x9, [sp, #24] // 8-byte Folded Spill
+ umulh x8, x11, x8
+ str x8, [sp, #40] // 8-byte Folded Spill
+ ldp x12, x13, [x1, #32]
+ ldp x9, x10, [x1]
+ ldp x11, x1, [x1, #16]
+ ldp x8, x2, [x2, #32]
+ mul x22, x9, x8
+ mul x28, x13, x8
+ mul x27, x12, x8
+ mul x24, x1, x8
+ mul x20, x11, x8
+ mul x19, x10, x8
+ umulh x14, x13, x8
+ str x14, [sp, #16] // 8-byte Folded Spill
+ umulh x29, x12, x8
+ umulh x26, x1, x8
+ umulh x23, x11, x8
+ umulh x21, x10, x8
+ umulh x7, x9, x8
+ mul x25, x9, x2
+ umulh x6, x9, x2
+ mul x4, x10, x2
+ umulh x5, x10, x2
+ mul x18, x11, x2
+ umulh x3, x11, x2
+ mul x16, x1, x2
+ umulh x1, x1, x2
+ mul x15, x12, x2
+ umulh x17, x12, x2
+ mul x14, x13, x2
+ umulh x13, x13, x2
+ str x30, [x0]
+ ldp x9, x8, [sp, #216]
+ adds x2, x9, x8
+ ldp x8, x30, [sp, #272]
+ ldr x9, [sp, #248] // 8-byte Folded Reload
+ adcs x8, x9, x8
+ ldp x10, x9, [sp, #304]
+ adcs x9, x10, x9
+ ldr x10, [sp, #360] // 8-byte Folded Reload
+ ldr x11, [sp, #336] // 8-byte Folded Reload
+ adcs x10, x11, x10
+ ldp x12, x11, [sp, #376]
+ adcs x11, x12, x11
+ ldr x12, [sp, #392] // 8-byte Folded Reload
+ adcs x12, x12, xzr
+ adds x2, x30, x2
+ str x2, [x0, #8]
+ ldp x30, x2, [sp, #232]
+ adcs x8, x2, x8
+ ldr x2, [sp, #264] // 8-byte Folded Reload
+ adcs x9, x2, x9
+ ldr x2, [sp, #296] // 8-byte Folded Reload
+ adcs x10, x2, x10
+ ldr x2, [sp, #328] // 8-byte Folded Reload
+ adcs x11, x2, x11
+ ldr x2, [sp, #352] // 8-byte Folded Reload
+ adcs x12, x2, x12
+ adcs x2, xzr, xzr
+ adds x8, x8, x30
+ ldr x30, [sp, #256] // 8-byte Folded Reload
+ adcs x9, x9, x30
+ ldr x30, [sp, #288] // 8-byte Folded Reload
+ adcs x10, x10, x30
+ ldr x30, [sp, #320] // 8-byte Folded Reload
+ adcs x11, x11, x30
+ ldr x30, [sp, #344] // 8-byte Folded Reload
+ adcs x12, x12, x30
+ ldr x30, [sp, #368] // 8-byte Folded Reload
+ adcs x2, x2, x30
+ ldr x30, [sp, #144] // 8-byte Folded Reload
+ adds x8, x30, x8
+ str x8, [x0, #16]
+ ldp x30, x8, [sp, #104]
+ adcs x8, x8, x9
+ ldr x9, [sp, #128] // 8-byte Folded Reload
+ adcs x9, x9, x10
+ ldr x10, [sp, #160] // 8-byte Folded Reload
+ adcs x10, x10, x11
+ ldr x11, [sp, #176] // 8-byte Folded Reload
+ adcs x11, x11, x12
+ ldr x12, [sp, #200] // 8-byte Folded Reload
+ adcs x12, x12, x2
+ adcs x2, xzr, xzr
+ adds x8, x8, x30
+ ldr x30, [sp, #120] // 8-byte Folded Reload
+ adcs x9, x9, x30
+ ldr x30, [sp, #152] // 8-byte Folded Reload
+ adcs x10, x10, x30
+ ldr x30, [sp, #168] // 8-byte Folded Reload
+ adcs x11, x11, x30
+ ldr x30, [sp, #192] // 8-byte Folded Reload
+ adcs x12, x12, x30
+ ldr x30, [sp, #208] // 8-byte Folded Reload
+ adcs x2, x2, x30
+ ldr x30, [sp, #24] // 8-byte Folded Reload
+ adds x8, x30, x8
+ str x8, [x0, #24]
+ ldp x8, x30, [sp, #32]
+ adcs x8, x8, x9
+ ldr x9, [sp, #48] // 8-byte Folded Reload
+ adcs x9, x9, x10
+ ldr x10, [sp, #64] // 8-byte Folded Reload
+ adcs x10, x10, x11
+ ldr x11, [sp, #80] // 8-byte Folded Reload
+ adcs x11, x11, x12
+ ldr x12, [sp, #184] // 8-byte Folded Reload
+ adcs x12, x12, x2
+ adcs x2, xzr, xzr
+ adds x8, x8, x30
+ ldr x30, [sp, #56] // 8-byte Folded Reload
+ adcs x9, x9, x30
+ ldr x30, [sp, #72] // 8-byte Folded Reload
+ adcs x10, x10, x30
+ ldr x30, [sp, #88] // 8-byte Folded Reload
+ adcs x11, x11, x30
+ ldr x30, [sp, #96] // 8-byte Folded Reload
+ adcs x12, x12, x30
+ ldr x30, [sp, #136] // 8-byte Folded Reload
+ adcs x2, x2, x30
+ adds x8, x22, x8
+ str x8, [x0, #32]
+ adcs x8, x19, x9
+ adcs x9, x20, x10
+ adcs x10, x24, x11
+ adcs x11, x27, x12
+ adcs x12, x28, x2
+ adcs x2, xzr, xzr
+ adds x8, x8, x7
+ adcs x9, x9, x21
+ adcs x10, x10, x23
+ adcs x11, x11, x26
+ adcs x12, x12, x29
+ ldr x7, [sp, #16] // 8-byte Folded Reload
+ adcs x2, x2, x7
+ adds x8, x25, x8
+ str x8, [x0, #40]
+ adcs x8, x4, x9
+ adcs x9, x18, x10
+ adcs x10, x16, x11
+ adcs x11, x15, x12
+ adcs x12, x14, x2
+ adcs x14, xzr, xzr
+ adds x8, x8, x6
+ str x8, [x0, #48]
+ adcs x8, x9, x5
+ str x8, [x0, #56]
+ adcs x8, x10, x3
+ str x8, [x0, #64]
+ adcs x8, x11, x1
+ str x8, [x0, #72]
+ adcs x8, x12, x17
+ str x8, [x0, #80]
+ adcs x8, x14, x13
+ str x8, [x0, #88]
+ add sp, sp, #400 // =400
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end81:
+ .size mcl_fpDbl_mulPre6L, .Lfunc_end81-mcl_fpDbl_mulPre6L
+
+ .globl mcl_fpDbl_sqrPre6L
+ .align 2
+ .type mcl_fpDbl_sqrPre6L,@function
+mcl_fpDbl_sqrPre6L: // @mcl_fpDbl_sqrPre6L
+// BB#0:
+ stp x20, x19, [sp, #-16]!
+ ldp x8, x9, [x1, #8]
+ ldp x15, x10, [x1, #32]
+ ldp x11, x13, [x1]
+ ldr x12, [x1]
+ ldp x17, x14, [x1, #32]
+ ldr x16, [x1, #24]
+ mul x18, x11, x11
+ umulh x2, x10, x11
+ mul x3, x15, x11
+ mul x4, x16, x11
+ umulh x5, x9, x11
+ mul x6, x9, x11
+ umulh x7, x8, x11
+ mul x19, x8, x11
+ str x18, [x0]
+ umulh x18, x11, x11
+ adds x18, x18, x19
+ adcs x6, x7, x6
+ adcs x4, x5, x4
+ umulh x5, x16, x11
+ adcs x3, x5, x3
+ mul x5, x10, x11
+ umulh x11, x15, x11
+ adcs x11, x11, x5
+ adcs x2, x2, xzr
+ adds x18, x19, x18
+ ldp x5, x19, [x1, #16]
+ str x18, [x0, #8]
+ mul x18, x8, x8
+ adcs x18, x18, x6
+ mul x6, x9, x8
+ adcs x4, x6, x4
+ mul x6, x16, x8
+ adcs x3, x6, x3
+ mul x6, x15, x8
+ adcs x11, x6, x11
+ mul x6, x10, x8
+ adcs x2, x6, x2
+ adcs x6, xzr, xzr
+ adds x18, x18, x7
+ ldr x7, [x1, #32]
+ umulh x10, x10, x8
+ umulh x15, x15, x8
+ umulh x16, x16, x8
+ umulh x9, x9, x8
+ umulh x8, x8, x8
+ adcs x8, x4, x8
+ adcs x9, x3, x9
+ ldp x3, x4, [x1]
+ adcs x11, x11, x16
+ mul x16, x12, x5
+ adcs x15, x2, x15
+ mul x2, x14, x5
+ adcs x10, x6, x10
+ mul x6, x7, x5
+ adds x16, x16, x18
+ mul x18, x19, x5
+ str x16, [x0, #16]
+ mul x16, x13, x5
+ adcs x8, x16, x8
+ mul x16, x5, x5
+ adcs x9, x16, x9
+ umulh x16, x7, x5
+ adcs x11, x18, x11
+ adcs x15, x6, x15
+ umulh x6, x12, x5
+ adcs x10, x2, x10
+ adcs x2, xzr, xzr
+ adds x8, x8, x6
+ umulh x6, x13, x5
+ adcs x9, x9, x6
+ umulh x6, x5, x5
+ adcs x11, x11, x6
+ umulh x6, x19, x5
+ adcs x15, x15, x6
+ adcs x10, x10, x16
+ umulh x5, x14, x5
+ adcs x2, x2, x5
+ mul x5, x12, x19
+ adds x8, x5, x8
+ ldp x16, x5, [x1, #16]
+ ldr x1, [x1, #40]
+ str x8, [x0, #24]
+ mul x8, x13, x19
+ adcs x8, x8, x9
+ mul x9, x14, x19
+ adcs x11, x18, x11
+ mul x18, x19, x19
+ adcs x15, x18, x15
+ mul x18, x7, x19
+ umulh x14, x14, x19
+ umulh x7, x7, x19
+ umulh x13, x13, x19
+ umulh x12, x12, x19
+ umulh x19, x19, x19
+ adcs x10, x18, x10
+ mul x18, x3, x17
+ adcs x9, x9, x2
+ adcs x2, xzr, xzr
+ adds x8, x8, x12
+ mul x12, x1, x17
+ adcs x11, x11, x13
+ mul x13, x5, x17
+ adcs x15, x15, x6
+ mul x6, x16, x17
+ adcs x10, x10, x19
+ mul x19, x4, x17
+ adcs x9, x9, x7
+ mul x7, x17, x17
+ adcs x14, x2, x14
+ umulh x2, x1, x17
+ adds x8, x18, x8
+ umulh x18, x5, x17
+ str x8, [x0, #32]
+ umulh x8, x16, x17
+ adcs x11, x19, x11
+ umulh x19, x4, x17
+ adcs x15, x6, x15
+ umulh x6, x3, x17
+ umulh x17, x17, x17
+ adcs x10, x13, x10
+ mul x13, x3, x1
+ adcs x9, x7, x9
+ adcs x14, x12, x14
+ adcs x7, xzr, xzr
+ adds x11, x11, x6
+ mul x6, x5, x1
+ adcs x15, x15, x19
+ mul x19, x16, x1
+ adcs x8, x10, x8
+ mul x10, x4, x1
+ adcs x9, x9, x18
+ mul x18, x1, x1
+ umulh x3, x3, x1
+ umulh x4, x4, x1
+ umulh x16, x16, x1
+ umulh x5, x5, x1
+ umulh x1, x1, x1
+ adcs x14, x14, x17
+ adcs x17, x7, x2
+ adds x11, x13, x11
+ str x11, [x0, #40]
+ adcs x10, x10, x15
+ adcs x8, x19, x8
+ adcs x9, x6, x9
+ adcs x11, x12, x14
+ adcs x12, x18, x17
+ adcs x13, xzr, xzr
+ adds x10, x10, x3
+ adcs x8, x8, x4
+ stp x10, x8, [x0, #48]
+ adcs x8, x9, x16
+ str x8, [x0, #64]
+ adcs x8, x11, x5
+ str x8, [x0, #72]
+ adcs x8, x12, x2
+ str x8, [x0, #80]
+ adcs x8, x13, x1
+ str x8, [x0, #88]
+ ldp x20, x19, [sp], #16
+ ret
+.Lfunc_end82:
+ .size mcl_fpDbl_sqrPre6L, .Lfunc_end82-mcl_fpDbl_sqrPre6L
+
+ .globl mcl_fp_mont6L
+ .align 2
+ .type mcl_fp_mont6L,@function
+mcl_fp_mont6L: // @mcl_fp_mont6L
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ sub sp, sp, #48 // =48
+ str x0, [sp, #24] // 8-byte Folded Spill
+ ldr x5, [x2]
+ ldp x0, x4, [x1, #32]
+ ldp x16, x18, [x1, #16]
+ ldp x10, x1, [x1]
+ ldur x12, [x3, #-8]
+ str x12, [sp, #40] // 8-byte Folded Spill
+ ldp x11, x8, [x3, #32]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldp x13, x17, [x3, #16]
+ ldp x14, x15, [x3]
+ ldr x3, [x2, #8]
+ umulh x6, x4, x5
+ mul x7, x4, x5
+ umulh x19, x0, x5
+ mul x20, x0, x5
+ umulh x21, x18, x5
+ mul x22, x18, x5
+ umulh x23, x16, x5
+ mul x24, x16, x5
+ umulh x25, x1, x5
+ mul x26, x1, x5
+ umulh x27, x10, x5
+ mul x5, x10, x5
+ umulh x28, x3, x4
+ adds x26, x27, x26
+ mul x27, x5, x12
+ adcs x24, x25, x24
+ mul x25, x27, x8
+ mul x29, x27, x11
+ mul x30, x27, x17
+ adcs x22, x23, x22
+ mul x23, x27, x13
+ adcs x20, x21, x20
+ mul x21, x27, x15
+ adcs x7, x19, x7
+ umulh x19, x27, x14
+ adcs x6, x6, xzr
+ adds x19, x19, x21
+ umulh x21, x27, x15
+ adcs x21, x21, x23
+ umulh x23, x27, x13
+ adcs x23, x23, x30
+ umulh x30, x27, x17
+ adcs x29, x30, x29
+ umulh x30, x27, x11
+ adcs x25, x30, x25
+ umulh x30, x27, x8
+ mul x27, x27, x14
+ adcs x30, x30, xzr
+ cmn x27, x5
+ mul x5, x3, x4
+ umulh x27, x3, x0
+ adcs x19, x19, x26
+ mul x26, x3, x0
+ adcs x21, x21, x24
+ mul x24, x3, x18
+ adcs x22, x23, x22
+ mul x23, x3, x16
+ adcs x20, x29, x20
+ mul x29, x3, x1
+ adcs x7, x25, x7
+ umulh x25, x3, x10
+ adcs x30, x30, x6
+ adcs x6, xzr, xzr
+ adds x25, x25, x29
+ umulh x29, x3, x1
+ adcs x23, x29, x23
+ umulh x29, x3, x16
+ adcs x24, x29, x24
+ umulh x29, x3, x18
+ mul x3, x3, x10
+ adcs x26, x29, x26
+ adcs x27, x27, x5
+ adcs x29, x28, xzr
+ adds x3, x19, x3
+ adcs x5, x21, x25
+ mul x21, x3, x12
+ adcs x28, x22, x23
+ umulh x22, x21, x8
+ mul x23, x21, x8
+ mul x25, x21, x11
+ mul x9, x21, x17
+ adcs x19, x20, x24
+ mul x8, x21, x13
+ adcs x20, x7, x26
+ mul x24, x21, x15
+ adcs x30, x30, x27
+ umulh x26, x21, x14
+ adcs x6, x6, x29
+ adcs x7, xzr, xzr
+ adds x24, x26, x24
+ umulh x26, x21, x15
+ adcs x29, x26, x8
+ umulh x8, x21, x13
+ adcs x26, x8, x9
+ umulh x8, x21, x17
+ adcs x27, x8, x25
+ umulh x8, x21, x11
+ mul x9, x21, x14
+ adcs x8, x8, x23
+ adcs x21, x22, xzr
+ cmn x9, x3
+ ldp x23, x3, [x2, #16]
+ umulh x9, x23, x4
+ adcs x5, x24, x5
+ mul x22, x23, x4
+ adcs x24, x29, x28
+ mul x25, x23, x0
+ adcs x19, x26, x19
+ mul x26, x23, x18
+ adcs x20, x27, x20
+ mul x27, x23, x16
+ adcs x8, x8, x30
+ mul x28, x23, x1
+ adcs x21, x21, x6
+ umulh x6, x23, x10
+ adcs x7, x7, xzr
+ adds x6, x6, x28
+ umulh x28, x23, x1
+ adcs x27, x28, x27
+ umulh x28, x23, x16
+ adcs x26, x28, x26
+ umulh x28, x23, x18
+ adcs x25, x28, x25
+ umulh x28, x23, x0
+ mul x23, x23, x10
+ adcs x22, x28, x22
+ adcs x9, x9, xzr
+ adds x23, x5, x23
+ adcs x5, x24, x6
+ mul x29, x23, x12
+ adcs x6, x19, x27
+ ldr x12, [sp, #32] // 8-byte Folded Reload
+ mul x28, x29, x12
+ mul x27, x29, x11
+ mul x30, x29, x17
+ adcs x19, x20, x26
+ mul x26, x29, x13
+ adcs x20, x8, x25
+ mul x8, x29, x15
+ adcs x21, x21, x22
+ umulh x24, x29, x14
+ adcs x22, x7, x9
+ adcs x7, xzr, xzr
+ adds x24, x24, x8
+ umulh x8, x29, x15
+ adcs x25, x8, x26
+ umulh x8, x29, x13
+ adcs x26, x8, x30
+ umulh x8, x29, x17
+ adcs x27, x8, x27
+ umulh x8, x29, x11
+ adcs x28, x8, x28
+ umulh x8, x29, x12
+ mul x9, x29, x14
+ adcs x29, x8, xzr
+ cmn x9, x23
+ ldp x23, x8, [x2, #32]
+ umulh x30, x3, x4
+ adcs x2, x24, x5
+ mul x5, x3, x4
+ adcs x6, x25, x6
+ mul x24, x3, x0
+ adcs x19, x26, x19
+ mul x25, x3, x18
+ adcs x20, x27, x20
+ mul x26, x3, x16
+ adcs x21, x28, x21
+ mul x27, x3, x1
+ adcs x22, x29, x22
+ mov x9, x10
+ umulh x28, x3, x9
+ adcs x7, x7, xzr
+ adds x27, x28, x27
+ umulh x28, x3, x1
+ adcs x26, x28, x26
+ umulh x28, x3, x16
+ adcs x25, x28, x25
+ umulh x28, x3, x18
+ adcs x24, x28, x24
+ umulh x28, x3, x0
+ mul x3, x3, x9
+ adcs x5, x28, x5
+ adcs x29, x30, xzr
+ adds x2, x2, x3
+ adcs x3, x6, x27
+ ldr x10, [sp, #40] // 8-byte Folded Reload
+ mul x6, x2, x10
+ adcs x19, x19, x26
+ mul x26, x6, x12
+ mul x27, x6, x11
+ mov x30, x17
+ mul x28, x6, x30
+ adcs x20, x20, x25
+ mul x25, x6, x13
+ adcs x21, x21, x24
+ mov x17, x15
+ mul x24, x6, x17
+ adcs x5, x22, x5
+ umulh x22, x6, x14
+ adcs x29, x7, x29
+ adcs x7, xzr, xzr
+ adds x22, x22, x24
+ umulh x24, x6, x17
+ adcs x24, x24, x25
+ umulh x25, x6, x13
+ mov x15, x13
+ adcs x25, x25, x28
+ umulh x28, x6, x30
+ mov x13, x30
+ adcs x27, x28, x27
+ umulh x28, x6, x11
+ adcs x26, x28, x26
+ umulh x28, x6, x12
+ mul x6, x6, x14
+ adcs x28, x28, xzr
+ cmn x6, x2
+ umulh x2, x23, x4
+ mul x6, x23, x4
+ adcs x3, x22, x3
+ umulh x22, x23, x0
+ adcs x19, x24, x19
+ mul x24, x23, x0
+ adcs x20, x25, x20
+ mul x25, x23, x18
+ adcs x21, x27, x21
+ mul x27, x23, x16
+ adcs x5, x26, x5
+ mul x26, x23, x1
+ adcs x29, x28, x29
+ umulh x28, x23, x9
+ adcs x7, x7, xzr
+ adds x26, x28, x26
+ umulh x28, x23, x1
+ adcs x27, x28, x27
+ umulh x28, x23, x16
+ adcs x25, x28, x25
+ umulh x28, x23, x18
+ mul x23, x23, x9
+ adcs x24, x28, x24
+ umulh x28, x8, x4
+ str x28, [sp, #16] // 8-byte Folded Spill
+ mul x28, x8, x4
+ adcs x6, x22, x6
+ adcs x2, x2, xzr
+ adds x3, x3, x23
+ adcs x19, x19, x26
+ mul x22, x3, x10
+ adcs x20, x20, x27
+ mul x23, x22, x12
+ mul x26, x22, x11
+ mul x27, x22, x13
+ adcs x21, x21, x25
+ mul x25, x22, x15
+ adcs x5, x5, x24
+ mul x24, x22, x17
+ adcs x4, x29, x6
+ umulh x6, x22, x14
+ adcs x2, x7, x2
+ adcs x7, xzr, xzr
+ adds x6, x6, x24
+ umulh x24, x22, x17
+ adcs x24, x24, x25
+ umulh x25, x22, x15
+ adcs x25, x25, x27
+ umulh x27, x22, x13
+ adcs x26, x27, x26
+ umulh x27, x22, x11
+ adcs x23, x27, x23
+ umulh x27, x22, x12
+ mul x22, x22, x14
+ adcs x27, x27, xzr
+ cmn x22, x3
+ umulh x3, x8, x0
+ mul x0, x8, x0
+ umulh x22, x8, x18
+ mul x18, x8, x18
+ umulh x29, x8, x16
+ mul x16, x8, x16
+ umulh x30, x8, x1
+ mul x1, x8, x1
+ umulh x10, x8, x9
+ mul x8, x8, x9
+ adcs x6, x6, x19
+ adcs x19, x24, x20
+ adcs x20, x25, x21
+ adcs x5, x26, x5
+ adcs x9, x23, x4
+ str x9, [sp, #8] // 8-byte Folded Spill
+ adcs x2, x27, x2
+ adcs x7, x7, xzr
+ adds x9, x10, x1
+ adcs x16, x30, x16
+ adcs x18, x29, x18
+ adcs x0, x22, x0
+ adcs x1, x3, x28
+ ldr x10, [sp, #16] // 8-byte Folded Reload
+ adcs x3, x10, xzr
+ adds x8, x6, x8
+ adcs x9, x19, x9
+ ldr x10, [sp, #40] // 8-byte Folded Reload
+ mul x4, x8, x10
+ adcs x16, x20, x16
+ umulh x6, x4, x12
+ mul x19, x4, x12
+ mov x30, x11
+ umulh x20, x4, x30
+ mul x21, x4, x30
+ umulh x22, x4, x13
+ mul x23, x4, x13
+ mov x29, x13
+ umulh x24, x4, x15
+ mul x25, x4, x15
+ umulh x26, x4, x17
+ mul x27, x4, x17
+ umulh x28, x4, x14
+ mul x4, x4, x14
+ adcs x18, x5, x18
+ ldr x10, [sp, #8] // 8-byte Folded Reload
+ adcs x10, x10, x0
+ adcs x0, x2, x1
+ adcs x1, x7, x3
+ adcs x2, xzr, xzr
+ adds x3, x28, x27
+ adcs x5, x26, x25
+ adcs x7, x24, x23
+ adcs x21, x22, x21
+ adcs x19, x20, x19
+ adcs x6, x6, xzr
+ cmn x4, x8
+ adcs x8, x3, x9
+ adcs x9, x5, x16
+ adcs x16, x7, x18
+ adcs x10, x21, x10
+ adcs x18, x19, x0
+ adcs x0, x6, x1
+ adcs x1, x2, xzr
+ subs x13, x8, x14
+ sbcs x12, x9, x17
+ sbcs x11, x16, x15
+ sbcs x14, x10, x29
+ sbcs x15, x18, x30
+ ldr x17, [sp, #32] // 8-byte Folded Reload
+ sbcs x17, x0, x17
+ sbcs x1, x1, xzr
+ tst x1, #0x1
+ csel x8, x8, x13, ne
+ csel x9, x9, x12, ne
+ csel x11, x16, x11, ne
+ csel x10, x10, x14, ne
+ csel x12, x18, x15, ne
+ csel x13, x0, x17, ne
+ ldr x14, [sp, #24] // 8-byte Folded Reload
+ stp x8, x9, [x14]
+ stp x11, x10, [x14, #16]
+ stp x12, x13, [x14, #32]
+ add sp, sp, #48 // =48
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end83:
+ .size mcl_fp_mont6L, .Lfunc_end83-mcl_fp_mont6L
+
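+// "NF" variant of the 6-limb Montgomery multiplication (same x0/x1/x2/x3 calling
+// convention); its final reduction selects between the value and value - p with a
+// sign test (asr/csel) rather than the carry test used in mcl_fp_mont6L.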
+ .globl mcl_fp_montNF6L
+ .align 2
+ .type mcl_fp_montNF6L,@function
+mcl_fp_montNF6L: // @mcl_fp_montNF6L
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ sub sp, sp, #112 // =112
+ str x0, [sp, #96] // 8-byte Folded Spill
+ ldp x16, x12, [x1, #32]
+ ldp x13, x11, [x1, #16]
+ ldp x17, x0, [x1]
+ ldur x18, [x3, #-8]
+ ldr x9, [x3, #32]
+ str x9, [sp, #104] // 8-byte Folded Spill
+ ldr x14, [x3, #40]
+ ldp x4, x10, [x3, #16]
+ ldr x15, [x3]
+ str x15, [sp, #8] // 8-byte Folded Spill
+ ldr x9, [x3, #8]
+ ldp x5, x3, [x2]
+ ldp x6, x7, [x2, #16]
+ ldp x19, x2, [x2, #32]
+ umulh x20, x12, x5
+ mul x21, x12, x5
+ umulh x22, x16, x5
+ mul x23, x16, x5
+ umulh x24, x11, x5
+ mul x25, x11, x5
+ mov x1, x13
+ umulh x26, x1, x5
+ mul x27, x1, x5
+ mov x13, x0
+ umulh x28, x13, x5
+ mul x29, x13, x5
+ mov x8, x17
+ umulh x30, x8, x5
+ mul x5, x8, x5
+ adds x29, x30, x29
+ mul x30, x3, x12
+ adcs x27, x28, x27
+ mul x28, x3, x16
+ adcs x25, x26, x25
+ mul x26, x3, x11
+ adcs x23, x24, x23
+ mul x24, x5, x18
+ adcs x21, x22, x21
+ mul x22, x24, x15
+ adcs x20, x20, xzr
+ cmn x22, x5
+ mul x5, x3, x1
+ mov x0, x9
+ mul x22, x24, x0
+ adcs x22, x22, x29
+ mul x29, x24, x4
+ adcs x17, x29, x27
+ mul x29, x24, x10
+ adcs x25, x29, x25
+ ldr x9, [sp, #104] // 8-byte Folded Reload
+ mul x29, x24, x9
+ adcs x23, x29, x23
+ mul x29, x24, x14
+ adcs x21, x29, x21
+ umulh x29, x24, x15
+ adcs x20, x20, xzr
+ adds x22, x22, x29
+ umulh x29, x24, x0
+ adcs x15, x17, x29
+ umulh x29, x24, x4
+ mov x17, x4
+ adcs x25, x25, x29
+ umulh x29, x24, x10
+ adcs x23, x23, x29
+ umulh x29, x24, x9
+ adcs x21, x21, x29
+ mul x29, x3, x13
+ umulh x24, x24, x14
+ adcs x20, x20, x24
+ umulh x24, x3, x8
+ adds x24, x24, x29
+ umulh x29, x3, x13
+ adcs x5, x29, x5
+ umulh x29, x3, x1
+ adcs x26, x29, x26
+ umulh x29, x3, x11
+ adcs x28, x29, x28
+ umulh x29, x3, x16
+ adcs x29, x29, x30
+ umulh x30, x3, x12
+ mul x3, x3, x8
+ adcs x30, x30, xzr
+ adds x3, x3, x22
+ umulh x22, x6, x12
+ adcs x24, x24, x15
+ mul x27, x6, x12
+ adcs x5, x5, x25
+ mul x25, x6, x16
+ adcs x23, x26, x23
+ mul x26, x6, x11
+ adcs x21, x28, x21
+ mul x28, x3, x18
+ mov x4, x18
+ adcs x20, x29, x20
+ ldr x18, [sp, #8] // 8-byte Folded Reload
+ mul x29, x28, x18
+ adcs x30, x30, xzr
+ cmn x29, x3
+ mul x3, x6, x1
+ mul x29, x28, x0
+ adcs x24, x29, x24
+ mul x29, x28, x17
+ adcs x5, x29, x5
+ mul x29, x28, x10
+ adcs x23, x29, x23
+ mul x29, x28, x9
+ adcs x21, x29, x21
+ mul x29, x28, x14
+ adcs x20, x29, x20
+ umulh x29, x28, x18
+ adcs x30, x30, xzr
+ adds x24, x24, x29
+ umulh x29, x28, x0
+ adcs x5, x5, x29
+ umulh x29, x28, x17
+ adcs x23, x23, x29
+ umulh x29, x28, x10
+ adcs x21, x21, x29
+ umulh x29, x28, x9
+ adcs x20, x20, x29
+ mul x29, x6, x13
+ umulh x28, x28, x14
+ adcs x28, x30, x28
+ umulh x30, x6, x8
+ adds x29, x30, x29
+ umulh x30, x6, x13
+ adcs x3, x30, x3
+ umulh x30, x6, x1
+ adcs x26, x30, x26
+ umulh x30, x6, x11
+ adcs x25, x30, x25
+ umulh x30, x6, x16
+ mul x6, x6, x8
+ adcs x27, x30, x27
+ umulh x30, x7, x12
+ adcs x22, x22, xzr
+ adds x6, x6, x24
+ mul x24, x7, x12
+ adcs x5, x29, x5
+ umulh x29, x7, x16
+ adcs x3, x3, x23
+ mul x23, x7, x16
+ adcs x21, x26, x21
+ mul x26, x7, x11
+ adcs x20, x25, x20
+ mul x25, x6, x4
+ adcs x27, x27, x28
+ mul x28, x25, x18
+ adcs x22, x22, xzr
+ cmn x28, x6
+ mul x6, x7, x1
+ mul x28, x25, x0
+ adcs x5, x28, x5
+ mul x28, x25, x17
+ adcs x3, x28, x3
+ mul x28, x25, x10
+ adcs x21, x28, x21
+ mul x28, x25, x9
+ adcs x20, x28, x20
+ mul x28, x25, x14
+ adcs x27, x28, x27
+ umulh x28, x25, x18
+ adcs x22, x22, xzr
+ adds x5, x5, x28
+ umulh x28, x25, x0
+ adcs x3, x3, x28
+ umulh x28, x25, x17
+ adcs x21, x21, x28
+ umulh x28, x25, x10
+ adcs x20, x20, x28
+ umulh x28, x25, x9
+ adcs x27, x27, x28
+ mul x28, x7, x13
+ umulh x25, x25, x14
+ adcs x22, x22, x25
+ umulh x25, x7, x8
+ adds x25, x25, x28
+ umulh x28, x7, x13
+ adcs x6, x28, x6
+ umulh x28, x7, x1
+ adcs x26, x28, x26
+ umulh x28, x7, x11
+ mul x7, x7, x8
+ adcs x23, x28, x23
+ umulh x9, x19, x12
+ str x9, [sp, #16] // 8-byte Folded Spill
+ adcs x24, x29, x24
+ mul x9, x19, x12
+ str x9, [sp, #32] // 8-byte Folded Spill
+ adcs x30, x30, xzr
+ adds x5, x7, x5
+ umulh x7, x19, x16
+ adcs x3, x25, x3
+ mul x25, x19, x16
+ adcs x6, x6, x21
+ umulh x21, x19, x11
+ adcs x20, x26, x20
+ mul x26, x19, x11
+ adcs x23, x23, x27
+ mul x27, x5, x4
+ adcs x22, x24, x22
+ mul x24, x27, x18
+ adcs x30, x30, xzr
+ cmn x24, x5
+ mov x28, x1
+ mul x5, x19, x28
+ mul x24, x19, x13
+ umulh x1, x19, x8
+ umulh x9, x19, x13
+ umulh x15, x19, x28
+ mul x19, x19, x8
+ umulh x29, x2, x12
+ str x29, [sp, #88] // 8-byte Folded Spill
+ mul x29, x2, x12
+ umulh x12, x2, x16
+ str x12, [sp, #80] // 8-byte Folded Spill
+ mul x12, x2, x16
+ str x12, [sp, #72] // 8-byte Folded Spill
+ umulh x12, x2, x11
+ mul x11, x2, x11
+ stp x11, x12, [sp, #56]
+ umulh x11, x2, x28
+ str x11, [sp, #48] // 8-byte Folded Spill
+ mul x11, x2, x28
+ str x11, [sp, #40] // 8-byte Folded Spill
+ umulh x11, x2, x13
+ str x11, [sp, #24] // 8-byte Folded Spill
+ mul x13, x2, x13
+ umulh x16, x2, x8
+ mul x28, x2, x8
+ mul x2, x27, x0
+ adcs x2, x2, x3
+ mul x3, x27, x17
+ adcs x3, x3, x6
+ mul x6, x27, x10
+ adcs x6, x6, x20
+ ldr x8, [sp, #104] // 8-byte Folded Reload
+ mul x20, x27, x8
+ adcs x20, x20, x23
+ mul x23, x27, x14
+ adcs x22, x23, x22
+ adcs x23, x30, xzr
+ umulh x30, x27, x18
+ adds x2, x2, x30
+ umulh x30, x27, x0
+ adcs x3, x3, x30
+ umulh x30, x27, x17
+ mov x12, x17
+ adcs x6, x6, x30
+ umulh x30, x27, x10
+ adcs x20, x20, x30
+ umulh x30, x27, x8
+ mov x11, x8
+ adcs x22, x22, x30
+ mov x30, x14
+ umulh x27, x27, x30
+ adcs x23, x23, x27
+ adds x8, x1, x24
+ adcs x9, x9, x5
+ adcs x14, x15, x26
+ adcs x5, x21, x25
+ ldr x15, [sp, #32] // 8-byte Folded Reload
+ adcs x7, x7, x15
+ ldr x15, [sp, #16] // 8-byte Folded Reload
+ adcs x21, x15, xzr
+ adds x2, x19, x2
+ adcs x8, x8, x3
+ adcs x9, x9, x6
+ mov x24, x4
+ mul x3, x2, x24
+ adcs x14, x14, x20
+ mul x6, x3, x30
+ adcs x5, x5, x22
+ mul x19, x3, x11
+ adcs x7, x7, x23
+ mul x20, x3, x18
+ adcs x21, x21, xzr
+ cmn x20, x2
+ mul x2, x3, x10
+ mul x20, x3, x0
+ adcs x8, x20, x8
+ mul x20, x3, x12
+ adcs x9, x20, x9
+ umulh x20, x3, x30
+ adcs x14, x2, x14
+ umulh x2, x3, x11
+ mov x27, x11
+ adcs x5, x19, x5
+ mov x11, x10
+ umulh x19, x3, x11
+ adcs x6, x6, x7
+ umulh x7, x3, x18
+ adcs x21, x21, xzr
+ adds x8, x8, x7
+ umulh x7, x3, x12
+ umulh x3, x3, x0
+ adcs x9, x9, x3
+ adcs x10, x14, x7
+ adcs x3, x5, x19
+ adcs x2, x6, x2
+ adcs x5, x21, x20
+ adds x15, x16, x13
+ ldr x13, [sp, #40] // 8-byte Folded Reload
+ ldr x14, [sp, #24] // 8-byte Folded Reload
+ adcs x16, x14, x13
+ ldp x14, x13, [sp, #48]
+ adcs x17, x14, x13
+ ldp x14, x13, [sp, #64]
+ adcs x1, x14, x13
+ ldr x13, [sp, #80] // 8-byte Folded Reload
+ adcs x4, x13, x29
+ ldr x13, [sp, #88] // 8-byte Folded Reload
+ adcs x6, x13, xzr
+ adds x8, x28, x8
+ adcs x9, x15, x9
+ mul x15, x8, x24
+ adcs x10, x16, x10
+ mul x16, x15, x30
+ mul x14, x15, x27
+ mul x7, x15, x11
+ mul x19, x15, x12
+ mul x20, x15, x0
+ mul x21, x15, x18
+ umulh x22, x15, x30
+ umulh x23, x15, x27
+ umulh x24, x15, x11
+ mov x28, x11
+ umulh x25, x15, x12
+ umulh x26, x15, x0
+ umulh x15, x15, x18
+ adcs x17, x17, x3
+ adcs x1, x1, x2
+ adcs x2, x4, x5
+ adcs x3, x6, xzr
+ cmn x21, x8
+ adcs x8, x20, x9
+ adcs x9, x19, x10
+ adcs x10, x7, x17
+ adcs x17, x14, x1
+ adcs x16, x16, x2
+ adcs x11, x3, xzr
+ adds x8, x8, x15
+ adcs x9, x9, x26
+ adcs x10, x10, x25
+ adcs x15, x17, x24
+ adcs x16, x16, x23
+ adcs x17, x11, x22
+ subs x3, x8, x18
+ sbcs x2, x9, x0
+ sbcs x11, x10, x12
+ sbcs x14, x15, x28
+ sbcs x18, x16, x27
+ sbcs x0, x17, x30
+ asr x1, x0, #63
+ cmp x1, #0 // =0
+ csel x8, x8, x3, lt
+ csel x9, x9, x2, lt
+ csel x10, x10, x11, lt
+ csel x11, x15, x14, lt
+ csel x12, x16, x18, lt
+ csel x13, x17, x0, lt
+ ldr x14, [sp, #96] // 8-byte Folded Reload
+ stp x8, x9, [x14]
+ stp x10, x11, [x14, #16]
+ stp x12, x13, [x14, #32]
+ add sp, sp, #112 // =112
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end84:
+ .size mcl_fp_montNF6L, .Lfunc_end84-mcl_fp_montNF6L
+
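+// Montgomery reduction: folds the 12-limb value at x1 down to a 6-limb result at
+// x0, using the modulus at x2 and the Montgomery constant read from [x2, #-8].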
+ .globl mcl_fp_montRed6L
+ .align 2
+ .type mcl_fp_montRed6L,@function
+mcl_fp_montRed6L: // @mcl_fp_montRed6L
+// BB#0:
+ stp x26, x25, [sp, #-64]!
+ stp x24, x23, [sp, #16]
+ stp x22, x21, [sp, #32]
+ stp x20, x19, [sp, #48]
+ ldur x14, [x2, #-8]
+ ldp x9, x8, [x2, #32]
+ ldp x11, x10, [x2, #16]
+ ldp x13, x12, [x2]
+ ldp x16, x17, [x1, #80]
+ ldp x18, x2, [x1, #64]
+ ldp x3, x4, [x1, #48]
+ ldp x5, x6, [x1, #32]
+ ldp x7, x19, [x1, #16]
+ ldp x15, x1, [x1]
+ mul x20, x15, x14
+ mul x21, x20, x8
+ mul x22, x20, x9
+ mul x23, x20, x10
+ mul x24, x20, x11
+ mul x25, x20, x12
+ umulh x26, x20, x13
+ adds x25, x26, x25
+ umulh x26, x20, x12
+ adcs x24, x26, x24
+ umulh x26, x20, x11
+ adcs x23, x26, x23
+ umulh x26, x20, x10
+ adcs x22, x26, x22
+ umulh x26, x20, x9
+ adcs x21, x26, x21
+ umulh x26, x20, x8
+ mul x20, x20, x13
+ adcs x26, x26, xzr
+ cmn x15, x20
+ adcs x15, x1, x25
+ adcs x1, x7, x24
+ mul x7, x15, x14
+ adcs x19, x19, x23
+ mul x20, x7, x8
+ mul x23, x7, x9
+ mul x24, x7, x10
+ mul x25, x7, x11
+ adcs x5, x5, x22
+ mul x22, x7, x12
+ adcs x6, x6, x21
+ umulh x21, x7, x13
+ adcs x3, x3, x26
+ adcs x4, x4, xzr
+ adcs x18, x18, xzr
+ adcs x2, x2, xzr
+ adcs x16, x16, xzr
+ adcs x17, x17, xzr
+ adcs x26, xzr, xzr
+ adds x21, x21, x22
+ umulh x22, x7, x12
+ adcs x22, x22, x25
+ umulh x25, x7, x11
+ adcs x24, x25, x24
+ umulh x25, x7, x10
+ adcs x23, x25, x23
+ umulh x25, x7, x9
+ adcs x20, x25, x20
+ umulh x25, x7, x8
+ mul x7, x7, x13
+ adcs x25, x25, xzr
+ cmn x7, x15
+ adcs x15, x21, x1
+ adcs x1, x22, x19
+ mul x7, x15, x14
+ adcs x5, x24, x5
+ mul x19, x7, x8
+ mul x21, x7, x9
+ mul x22, x7, x10
+ adcs x6, x23, x6
+ mul x23, x7, x11
+ adcs x3, x20, x3
+ mul x20, x7, x12
+ adcs x4, x25, x4
+ umulh x24, x7, x13
+ adcs x18, x18, xzr
+ adcs x2, x2, xzr
+ adcs x16, x16, xzr
+ adcs x17, x17, xzr
+ adcs x25, x26, xzr
+ adds x20, x24, x20
+ umulh x24, x7, x12
+ adcs x23, x24, x23
+ umulh x24, x7, x11
+ adcs x22, x24, x22
+ umulh x24, x7, x10
+ adcs x21, x24, x21
+ umulh x24, x7, x9
+ adcs x19, x24, x19
+ umulh x24, x7, x8
+ mul x7, x7, x13
+ adcs x24, x24, xzr
+ cmn x7, x15
+ adcs x15, x20, x1
+ adcs x1, x23, x5
+ mul x5, x15, x14
+ adcs x6, x22, x6
+ mul x7, x5, x8
+ mul x20, x5, x9
+ mul x22, x5, x10
+ adcs x3, x21, x3
+ mul x21, x5, x11
+ adcs x4, x19, x4
+ mul x19, x5, x12
+ adcs x18, x24, x18
+ umulh x23, x5, x13
+ adcs x2, x2, xzr
+ adcs x16, x16, xzr
+ adcs x17, x17, xzr
+ adcs x24, x25, xzr
+ adds x19, x23, x19
+ umulh x23, x5, x12
+ adcs x21, x23, x21
+ umulh x23, x5, x11
+ adcs x22, x23, x22
+ umulh x23, x5, x10
+ adcs x20, x23, x20
+ umulh x23, x5, x9
+ adcs x7, x23, x7
+ umulh x23, x5, x8
+ mul x5, x5, x13
+ adcs x23, x23, xzr
+ cmn x5, x15
+ adcs x15, x19, x1
+ adcs x1, x21, x6
+ mul x5, x15, x14
+ adcs x3, x22, x3
+ mul x6, x5, x8
+ mul x19, x5, x9
+ mul x21, x5, x10
+ adcs x4, x20, x4
+ mul x20, x5, x11
+ adcs x18, x7, x18
+ mul x7, x5, x12
+ adcs x2, x23, x2
+ umulh x22, x5, x13
+ adcs x16, x16, xzr
+ adcs x17, x17, xzr
+ adcs x23, x24, xzr
+ adds x7, x22, x7
+ umulh x22, x5, x12
+ adcs x20, x22, x20
+ umulh x22, x5, x11
+ adcs x21, x22, x21
+ umulh x22, x5, x10
+ adcs x19, x22, x19
+ umulh x22, x5, x9
+ adcs x6, x22, x6
+ umulh x22, x5, x8
+ mul x5, x5, x13
+ adcs x22, x22, xzr
+ cmn x5, x15
+ adcs x15, x7, x1
+ adcs x1, x20, x3
+ mul x14, x15, x14
+ adcs x3, x21, x4
+ mul x4, x14, x8
+ mul x5, x14, x9
+ mul x7, x14, x10
+ adcs x18, x19, x18
+ mul x19, x14, x11
+ adcs x2, x6, x2
+ mul x6, x14, x12
+ adcs x16, x22, x16
+ umulh x20, x14, x13
+ adcs x17, x17, xzr
+ adcs x21, x23, xzr
+ adds x6, x20, x6
+ umulh x20, x14, x12
+ adcs x19, x20, x19
+ umulh x20, x14, x11
+ adcs x7, x20, x7
+ umulh x20, x14, x10
+ adcs x5, x20, x5
+ umulh x20, x14, x9
+ adcs x4, x20, x4
+ umulh x20, x14, x8
+ mul x14, x14, x13
+ adcs x20, x20, xzr
+ cmn x14, x15
+ adcs x14, x6, x1
+ adcs x15, x19, x3
+ adcs x18, x7, x18
+ adcs x1, x5, x2
+ adcs x16, x4, x16
+ adcs x17, x20, x17
+ adcs x2, x21, xzr
+ subs x13, x14, x13
+ sbcs x12, x15, x12
+ sbcs x11, x18, x11
+ sbcs x10, x1, x10
+ sbcs x9, x16, x9
+ sbcs x8, x17, x8
+ sbcs x2, x2, xzr
+ tst x2, #0x1
+ csel x13, x14, x13, ne
+ csel x12, x15, x12, ne
+ csel x11, x18, x11, ne
+ csel x10, x1, x10, ne
+ csel x9, x16, x9, ne
+ csel x8, x17, x8, ne
+ stp x13, x12, [x0]
+ stp x11, x10, [x0, #16]
+ stp x9, x8, [x0, #32]
+ ldp x20, x19, [sp, #48]
+ ldp x22, x21, [sp, #32]
+ ldp x24, x23, [sp, #16]
+ ldp x26, x25, [sp], #64
+ ret
+.Lfunc_end85:
+ .size mcl_fp_montRed6L, .Lfunc_end85-mcl_fp_montRed6L
+
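+// 6-limb addition without reduction: x0 = result, x1/x2 = operands; the carry-out
+// is returned in x0.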
+ .globl mcl_fp_addPre6L
+ .align 2
+ .type mcl_fp_addPre6L,@function
+mcl_fp_addPre6L: // @mcl_fp_addPre6L
+// BB#0:
+ ldp x8, x9, [x2, #32]
+ ldp x10, x11, [x1, #32]
+ ldp x12, x13, [x2, #16]
+ ldp x14, x15, [x2]
+ ldp x16, x17, [x1]
+ ldp x18, x1, [x1, #16]
+ adds x14, x14, x16
+ str x14, [x0]
+ adcs x14, x15, x17
+ adcs x12, x12, x18
+ stp x14, x12, [x0, #8]
+ adcs x12, x13, x1
+ adcs x8, x8, x10
+ stp x12, x8, [x0, #24]
+ adcs x9, x9, x11
+ adcs x8, xzr, xzr
+ str x9, [x0, #40]
+ mov x0, x8
+ ret
+.Lfunc_end86:
+ .size mcl_fp_addPre6L, .Lfunc_end86-mcl_fp_addPre6L
+
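+// 6-limb subtraction without reduction: x0 = result, x1/x2 = operands; the borrow
+// is returned in x0.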
+ .globl mcl_fp_subPre6L
+ .align 2
+ .type mcl_fp_subPre6L,@function
+mcl_fp_subPre6L: // @mcl_fp_subPre6L
+// BB#0:
+ ldp x8, x9, [x2, #32]
+ ldp x10, x11, [x1, #32]
+ ldp x12, x13, [x2, #16]
+ ldp x14, x15, [x2]
+ ldp x16, x17, [x1]
+ ldp x18, x1, [x1, #16]
+ subs x14, x16, x14
+ str x14, [x0]
+ sbcs x14, x17, x15
+ sbcs x12, x18, x12
+ stp x14, x12, [x0, #8]
+ sbcs x12, x1, x13
+ sbcs x8, x10, x8
+ stp x12, x8, [x0, #24]
+ sbcs x9, x11, x9
+ ngcs x8, xzr
+ and x8, x8, #0x1
+ str x9, [x0, #40]
+ mov x0, x8
+ ret
+.Lfunc_end87:
+ .size mcl_fp_subPre6L, .Lfunc_end87-mcl_fp_subPre6L
+
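+// Logical right shift of a 6-limb value by one bit: x0 = result, x1 = input.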
+ .globl mcl_fp_shr1_6L
+ .align 2
+ .type mcl_fp_shr1_6L,@function
+mcl_fp_shr1_6L: // @mcl_fp_shr1_6L
+// BB#0:
+ ldp x8, x9, [x1]
+ ldp x10, x11, [x1, #16]
+ ldp x12, x13, [x1, #32]
+ extr x8, x9, x8, #1
+ extr x9, x10, x9, #1
+ extr x10, x11, x10, #1
+ extr x11, x12, x11, #1
+ extr x12, x13, x12, #1
+ lsr x13, x13, #1
+ stp x8, x9, [x0]
+ stp x10, x11, [x0, #16]
+ stp x12, x13, [x0, #32]
+ ret
+.Lfunc_end88:
+ .size mcl_fp_shr1_6L, .Lfunc_end88-mcl_fp_shr1_6L
+
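+// Modular addition over 6 limbs: x0 = result, x1/x2 = operands, x3 = modulus; the
+// raw sum is stored first and overwritten with sum - p on the %nocarry path.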
+ .globl mcl_fp_add6L
+ .align 2
+ .type mcl_fp_add6L,@function
+mcl_fp_add6L: // @mcl_fp_add6L
+// BB#0:
+ ldp x8, x9, [x2, #32]
+ ldp x10, x11, [x1, #32]
+ ldp x12, x13, [x2, #16]
+ ldp x14, x15, [x2]
+ ldp x16, x17, [x1]
+ ldp x18, x1, [x1, #16]
+ adds x14, x14, x16
+ adcs x15, x15, x17
+ ldp x16, x17, [x3, #32]
+ adcs x18, x12, x18
+ adcs x1, x13, x1
+ ldp x12, x2, [x3]
+ stp x14, x15, [x0]
+ stp x18, x1, [x0, #16]
+ adcs x8, x8, x10
+ adcs x4, x9, x11
+ stp x8, x4, [x0, #32]
+ adcs x5, xzr, xzr
+ ldp x9, x10, [x3, #16]
+ subs x13, x14, x12
+ sbcs x12, x15, x2
+ sbcs x11, x18, x9
+ sbcs x10, x1, x10
+ sbcs x9, x8, x16
+ sbcs x8, x4, x17
+ sbcs x14, x5, xzr
+ and w14, w14, #0x1
+ tbnz w14, #0, .LBB89_2
+// BB#1: // %nocarry
+ stp x13, x12, [x0]
+ stp x11, x10, [x0, #16]
+ stp x9, x8, [x0, #32]
+.LBB89_2: // %carry
+ ret
+.Lfunc_end89:
+ .size mcl_fp_add6L, .Lfunc_end89-mcl_fp_add6L
+
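+// Branchless modular addition over 6 limbs: both the sum and sum - p are computed
+// and csel picks the in-range value based on the sign of the trial subtraction.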
+ .globl mcl_fp_addNF6L
+ .align 2
+ .type mcl_fp_addNF6L,@function
+mcl_fp_addNF6L: // @mcl_fp_addNF6L
+// BB#0:
+ ldp x8, x9, [x1, #32]
+ ldp x10, x11, [x2, #32]
+ ldp x12, x13, [x1, #16]
+ ldp x14, x15, [x1]
+ ldp x16, x17, [x2]
+ ldp x18, x1, [x2, #16]
+ adds x14, x16, x14
+ adcs x15, x17, x15
+ ldp x16, x17, [x3, #32]
+ adcs x12, x18, x12
+ adcs x13, x1, x13
+ ldp x18, x1, [x3]
+ adcs x8, x10, x8
+ ldp x10, x2, [x3, #16]
+ adcs x9, x11, x9
+ subs x11, x14, x18
+ sbcs x18, x15, x1
+ sbcs x10, x12, x10
+ sbcs x1, x13, x2
+ sbcs x16, x8, x16
+ sbcs x17, x9, x17
+ asr x2, x17, #63
+ cmp x2, #0 // =0
+ csel x11, x14, x11, lt
+ csel x14, x15, x18, lt
+ csel x10, x12, x10, lt
+ csel x12, x13, x1, lt
+ csel x8, x8, x16, lt
+ csel x9, x9, x17, lt
+ stp x11, x14, [x0]
+ stp x10, x12, [x0, #16]
+ stp x8, x9, [x0, #32]
+ ret
+.Lfunc_end90:
+ .size mcl_fp_addNF6L, .Lfunc_end90-mcl_fp_addNF6L
+
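+// Modular subtraction over 6 limbs: x0 = result, the difference is stored, and the
+// modulus at x3 is added back on the %carry (borrow) path.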
+ .globl mcl_fp_sub6L
+ .align 2
+ .type mcl_fp_sub6L,@function
+mcl_fp_sub6L: // @mcl_fp_sub6L
+// BB#0:
+ ldp x12, x13, [x2, #32]
+ ldp x14, x15, [x1, #32]
+ ldp x10, x11, [x2, #16]
+ ldp x8, x9, [x2]
+ ldp x16, x17, [x1]
+ ldp x18, x1, [x1, #16]
+ subs x8, x16, x8
+ sbcs x9, x17, x9
+ stp x8, x9, [x0]
+ sbcs x10, x18, x10
+ sbcs x11, x1, x11
+ stp x10, x11, [x0, #16]
+ sbcs x12, x14, x12
+ sbcs x13, x15, x13
+ stp x12, x13, [x0, #32]
+ ngcs x14, xzr
+ and w14, w14, #0x1
+ tbnz w14, #0, .LBB91_2
+// BB#1: // %nocarry
+ ret
+.LBB91_2: // %carry
+ ldp x14, x15, [x3, #32]
+ ldp x16, x17, [x3]
+ ldp x18, x1, [x3, #16]
+ adds x8, x16, x8
+ adcs x9, x17, x9
+ adcs x10, x18, x10
+ adcs x11, x1, x11
+ adcs x12, x14, x12
+ adcs x13, x15, x13
+ stp x8, x9, [x0]
+ stp x10, x11, [x0, #16]
+ stp x12, x13, [x0, #32]
+ ret
+.Lfunc_end91:
+ .size mcl_fp_sub6L, .Lfunc_end91-mcl_fp_sub6L
+
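+// Branchless modular subtraction over 6 limbs: the modulus, masked by the sign of
+// the raw difference, is added back so no branch is needed.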
+ .globl mcl_fp_subNF6L
+ .align 2
+ .type mcl_fp_subNF6L,@function
+mcl_fp_subNF6L: // @mcl_fp_subNF6L
+// BB#0:
+ ldp x8, x9, [x2, #32]
+ ldp x10, x11, [x1, #32]
+ ldp x12, x13, [x2, #16]
+ ldp x14, x18, [x2]
+ ldp x16, x17, [x1, #16]
+ ldp x15, x1, [x1]
+ subs x14, x15, x14
+ ldp x15, x2, [x3, #32]
+ sbcs x18, x1, x18
+ sbcs x12, x16, x12
+ ldp x16, x1, [x3, #16]
+ sbcs x13, x17, x13
+ ldp x17, x3, [x3]
+ sbcs x8, x10, x8
+ sbcs x9, x11, x9
+ asr x10, x9, #63
+ adds x11, x10, x10
+ and x16, x10, x16
+ and x1, x10, x1
+ and x15, x10, x15
+ and x2, x10, x2
+ adcs x10, x10, x10
+ orr x11, x11, x9, lsr #63
+ and x11, x11, x17
+ and x10, x10, x3
+ adds x11, x11, x14
+ adcs x10, x10, x18
+ stp x11, x10, [x0]
+ adcs x10, x16, x12
+ str x10, [x0, #16]
+ adcs x10, x1, x13
+ adcs x8, x15, x8
+ stp x10, x8, [x0, #24]
+ adcs x8, x2, x9
+ str x8, [x0, #40]
+ ret
+.Lfunc_end92:
+ .size mcl_fp_subNF6L, .Lfunc_end92-mcl_fp_subNF6L
+
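+// Addition of two 12-limb (double-width) values: the low 6 limbs are stored as-is
+// and the high 6 limbs are conditionally reduced by the modulus at x3.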
+ .globl mcl_fpDbl_add6L
+ .align 2
+ .type mcl_fpDbl_add6L,@function
+mcl_fpDbl_add6L: // @mcl_fpDbl_add6L
+// BB#0:
+ stp x26, x25, [sp, #-64]!
+ stp x24, x23, [sp, #16]
+ stp x22, x21, [sp, #32]
+ stp x20, x19, [sp, #48]
+ ldp x8, x9, [x2, #80]
+ ldp x10, x11, [x1, #80]
+ ldp x12, x13, [x2, #64]
+ ldp x14, x15, [x1, #64]
+ ldp x16, x17, [x2, #48]
+ ldp x18, x4, [x1, #48]
+ ldp x5, x6, [x2, #32]
+ ldp x7, x19, [x1, #32]
+ ldp x20, x21, [x2, #16]
+ ldp x23, x2, [x2]
+ ldp x24, x25, [x1, #16]
+ ldp x22, x1, [x1]
+ adds x22, x23, x22
+ str x22, [x0]
+ ldp x22, x23, [x3, #32]
+ adcs x1, x2, x1
+ str x1, [x0, #8]
+ ldp x1, x2, [x3, #16]
+ adcs x20, x20, x24
+ ldp x24, x3, [x3]
+ str x20, [x0, #16]
+ adcs x20, x21, x25
+ adcs x5, x5, x7
+ stp x20, x5, [x0, #24]
+ adcs x5, x6, x19
+ str x5, [x0, #40]
+ adcs x16, x16, x18
+ adcs x17, x17, x4
+ adcs x12, x12, x14
+ adcs x13, x13, x15
+ adcs x8, x8, x10
+ adcs x9, x9, x11
+ adcs x10, xzr, xzr
+ subs x11, x16, x24
+ sbcs x14, x17, x3
+ sbcs x15, x12, x1
+ sbcs x18, x13, x2
+ sbcs x1, x8, x22
+ sbcs x2, x9, x23
+ sbcs x10, x10, xzr
+ tst x10, #0x1
+ csel x10, x16, x11, ne
+ csel x11, x17, x14, ne
+ csel x12, x12, x15, ne
+ csel x13, x13, x18, ne
+ csel x8, x8, x1, ne
+ csel x9, x9, x2, ne
+ stp x10, x11, [x0, #48]
+ stp x12, x13, [x0, #64]
+ stp x8, x9, [x0, #80]
+ ldp x20, x19, [sp, #48]
+ ldp x22, x21, [sp, #32]
+ ldp x24, x23, [sp, #16]
+ ldp x26, x25, [sp], #64
+ ret
+.Lfunc_end93:
+ .size mcl_fpDbl_add6L, .Lfunc_end93-mcl_fpDbl_add6L
+
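+// Subtraction of two 12-limb values: on borrow, the modulus at x3 is added back
+// into the high 6 limbs of the result.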
+ .globl mcl_fpDbl_sub6L
+ .align 2
+ .type mcl_fpDbl_sub6L,@function
+mcl_fpDbl_sub6L: // @mcl_fpDbl_sub6L
+// BB#0:
+ stp x26, x25, [sp, #-64]!
+ stp x24, x23, [sp, #16]
+ stp x22, x21, [sp, #32]
+ stp x20, x19, [sp, #48]
+ ldp x8, x9, [x2, #80]
+ ldp x10, x11, [x1, #80]
+ ldp x12, x13, [x2, #64]
+ ldp x14, x15, [x1, #64]
+ ldp x16, x17, [x2, #48]
+ ldp x18, x4, [x1, #48]
+ ldp x5, x6, [x2, #32]
+ ldp x7, x19, [x1, #32]
+ ldp x20, x21, [x2, #16]
+ ldp x22, x2, [x2]
+ ldp x24, x25, [x1, #16]
+ ldp x23, x1, [x1]
+ subs x22, x23, x22
+ str x22, [x0]
+ ldp x22, x23, [x3, #32]
+ sbcs x1, x1, x2
+ str x1, [x0, #8]
+ ldp x1, x2, [x3, #16]
+ sbcs x20, x24, x20
+ ldp x24, x3, [x3]
+ str x20, [x0, #16]
+ sbcs x20, x25, x21
+ sbcs x5, x7, x5
+ stp x20, x5, [x0, #24]
+ sbcs x5, x19, x6
+ sbcs x16, x18, x16
+ sbcs x17, x4, x17
+ sbcs x12, x14, x12
+ sbcs x13, x15, x13
+ sbcs x8, x10, x8
+ sbcs x9, x11, x9
+ ngcs x10, xzr
+ tst x10, #0x1
+ csel x10, x23, xzr, ne
+ csel x11, x22, xzr, ne
+ csel x14, x2, xzr, ne
+ csel x15, x1, xzr, ne
+ csel x18, x3, xzr, ne
+ csel x1, x24, xzr, ne
+ adds x16, x1, x16
+ stp x5, x16, [x0, #40]
+ adcs x16, x18, x17
+ adcs x12, x15, x12
+ stp x16, x12, [x0, #56]
+ adcs x12, x14, x13
+ adcs x8, x11, x8
+ stp x12, x8, [x0, #72]
+ adcs x8, x10, x9
+ str x8, [x0, #88]
+ ldp x20, x19, [sp, #48]
+ ldp x22, x21, [sp, #32]
+ ldp x24, x23, [sp, #16]
+ ldp x26, x25, [sp], #64
+ ret
+.Lfunc_end94:
+ .size mcl_fpDbl_sub6L, .Lfunc_end94-mcl_fpDbl_sub6L
+
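+// Multiply the 7-limb value at x1 by the single word in x2, storing the 8-limb
+// product at x0 (no reduction).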
+ .globl mcl_fp_mulUnitPre7L
+ .align 2
+ .type mcl_fp_mulUnitPre7L,@function
+mcl_fp_mulUnitPre7L: // @mcl_fp_mulUnitPre7L
+// BB#0:
+ ldp x10, x8, [x1, #40]
+ ldp x14, x9, [x1, #24]
+ ldp x11, x12, [x1]
+ ldr x13, [x1, #16]
+ mul x15, x11, x2
+ mul x16, x12, x2
+ umulh x11, x11, x2
+ mul x17, x13, x2
+ umulh x12, x12, x2
+ mul x18, x14, x2
+ umulh x13, x13, x2
+ mul x1, x9, x2
+ umulh x14, x14, x2
+ mul x3, x10, x2
+ umulh x9, x9, x2
+ mul x4, x8, x2
+ umulh x10, x10, x2
+ umulh x8, x8, x2
+ adds x11, x11, x16
+ stp x15, x11, [x0]
+ adcs x11, x12, x17
+ str x11, [x0, #16]
+ adcs x11, x13, x18
+ str x11, [x0, #24]
+ adcs x11, x14, x1
+ adcs x9, x9, x3
+ stp x11, x9, [x0, #32]
+ adcs x9, x10, x4
+ adcs x8, x8, xzr
+ stp x9, x8, [x0, #48]
+ ret
+.Lfunc_end95:
+ .size mcl_fp_mulUnitPre7L, .Lfunc_end95-mcl_fp_mulUnitPre7L
+
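+// Full (schoolbook) multiplication of the 7-limb values at x1 and x2 into a
+// 14-limb product at x0, with no reduction.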
+ .globl mcl_fpDbl_mulPre7L
+ .align 2
+ .type mcl_fpDbl_mulPre7L,@function
+mcl_fpDbl_mulPre7L: // @mcl_fpDbl_mulPre7L
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ sub sp, sp, #624 // =624
+ ldp x8, x9, [x1]
+ ldp x10, x11, [x1, #24]
+ ldp x12, x13, [x1, #40]
+ ldp x14, x15, [x2]
+ ldp x16, x18, [x1, #16]
+ mul x17, x8, x14
+ str x17, [sp, #528] // 8-byte Folded Spill
+ umulh x17, x13, x14
+ str x17, [sp, #616] // 8-byte Folded Spill
+ mul x17, x13, x14
+ str x17, [sp, #608] // 8-byte Folded Spill
+ umulh x17, x12, x14
+ str x17, [sp, #592] // 8-byte Folded Spill
+ mul x17, x12, x14
+ str x17, [sp, #568] // 8-byte Folded Spill
+ umulh x17, x11, x14
+ str x17, [sp, #552] // 8-byte Folded Spill
+ mul x17, x11, x14
+ str x17, [sp, #512] // 8-byte Folded Spill
+ umulh x17, x10, x14
+ str x17, [sp, #496] // 8-byte Folded Spill
+ mul x17, x10, x14
+ str x17, [sp, #456] // 8-byte Folded Spill
+ umulh x17, x16, x14
+ str x17, [sp, #424] // 8-byte Folded Spill
+ mul x17, x16, x14
+ str x17, [sp, #368] // 8-byte Folded Spill
+ umulh x17, x9, x14
+ str x17, [sp, #352] // 8-byte Folded Spill
+ mul x17, x9, x14
+ str x17, [sp, #304] // 8-byte Folded Spill
+ umulh x14, x8, x14
+ str x14, [sp, #272] // 8-byte Folded Spill
+ mul x14, x13, x15
+ str x14, [sp, #560] // 8-byte Folded Spill
+ mul x14, x12, x15
+ str x14, [sp, #520] // 8-byte Folded Spill
+ mul x14, x11, x15
+ str x14, [sp, #488] // 8-byte Folded Spill
+ mul x14, x10, x15
+ str x14, [sp, #448] // 8-byte Folded Spill
+ mul x14, x16, x15
+ umulh x13, x13, x15
+ str x13, [sp, #600] // 8-byte Folded Spill
+ umulh x12, x12, x15
+ str x12, [sp, #576] // 8-byte Folded Spill
+ umulh x11, x11, x15
+ str x11, [sp, #544] // 8-byte Folded Spill
+ umulh x10, x10, x15
+ str x10, [sp, #504] // 8-byte Folded Spill
+ umulh x10, x16, x15
+ str x10, [sp, #472] // 8-byte Folded Spill
+ mul x10, x9, x15
+ str x10, [sp, #208] // 8-byte Folded Spill
+ umulh x9, x9, x15
+ stp x9, x14, [sp, #400]
+ mul x9, x8, x15
+ str x9, [sp, #96] // 8-byte Folded Spill
+ umulh x8, x8, x15
+ str x8, [sp, #320] // 8-byte Folded Spill
+ ldp x9, x11, [x1]
+ ldp x10, x17, [x2, #16]
+ ldp x12, x13, [x1, #16]
+ ldp x14, x16, [x1, #32]
+ ldr x15, [x1, #48]
+ mul x8, x9, x10
+ str x8, [sp, #248] // 8-byte Folded Spill
+ mul x8, x15, x10
+ str x8, [sp, #392] // 8-byte Folded Spill
+ mul x8, x16, x10
+ str x8, [sp, #344] // 8-byte Folded Spill
+ mul x8, x14, x10
+ str x8, [sp, #296] // 8-byte Folded Spill
+ mul x8, x13, x10
+ str x8, [sp, #240] // 8-byte Folded Spill
+ mul x8, x12, x10
+ str x8, [sp, #192] // 8-byte Folded Spill
+ mul x8, x11, x10
+ str x8, [sp, #136] // 8-byte Folded Spill
+ umulh x8, x15, x10
+ str x8, [sp, #440] // 8-byte Folded Spill
+ umulh x8, x16, x10
+ str x8, [sp, #384] // 8-byte Folded Spill
+ umulh x8, x14, x10
+ str x8, [sp, #336] // 8-byte Folded Spill
+ umulh x8, x13, x10
+ str x8, [sp, #288] // 8-byte Folded Spill
+ umulh x8, x12, x10
+ str x8, [sp, #232] // 8-byte Folded Spill
+ umulh x8, x11, x10
+ str x8, [sp, #184] // 8-byte Folded Spill
+ umulh x8, x9, x10
+ str x8, [sp, #128] // 8-byte Folded Spill
+ mul x8, x15, x17
+ str x8, [sp, #464] // 8-byte Folded Spill
+ umulh x8, x15, x17
+ str x8, [sp, #584] // 8-byte Folded Spill
+ mul x8, x16, x17
+ str x8, [sp, #376] // 8-byte Folded Spill
+ umulh x8, x16, x17
+ str x8, [sp, #536] // 8-byte Folded Spill
+ mul x8, x14, x17
+ str x8, [sp, #312] // 8-byte Folded Spill
+ umulh x8, x14, x17
+ str x8, [sp, #480] // 8-byte Folded Spill
+ mul x8, x13, x17
+ str x8, [sp, #224] // 8-byte Folded Spill
+ umulh x8, x13, x17
+ str x8, [sp, #416] // 8-byte Folded Spill
+ mul x8, x12, x17
+ str x8, [sp, #144] // 8-byte Folded Spill
+ umulh x8, x12, x17
+ str x8, [sp, #328] // 8-byte Folded Spill
+ mul x8, x11, x17
+ str x8, [sp, #80] // 8-byte Folded Spill
+ umulh x8, x11, x17
+ str x8, [sp, #264] // 8-byte Folded Spill
+ mul x28, x9, x17
+ umulh x8, x9, x17
+ str x8, [sp, #176] // 8-byte Folded Spill
+ ldp x14, x12, [x1, #24]
+ ldp x10, x9, [x1]
+ ldr x7, [x1, #16]
+ ldp x30, x5, [x1, #40]
+ ldp x27, x8, [x2, #32]
+ ldr x13, [x1, #48]
+ mul x11, x10, x27
+ str x11, [sp, #48] // 8-byte Folded Spill
+ mul x11, x5, x27
+ str x11, [sp, #168] // 8-byte Folded Spill
+ mul x11, x30, x27
+ str x11, [sp, #120] // 8-byte Folded Spill
+ mul x11, x12, x27
+ str x11, [sp, #72] // 8-byte Folded Spill
+ mul x11, x14, x27
+ str x11, [sp, #40] // 8-byte Folded Spill
+ mul x11, x7, x27
+ str x11, [sp, #16] // 8-byte Folded Spill
+ mul x24, x9, x27
+ umulh x11, x5, x27
+ str x11, [sp, #216] // 8-byte Folded Spill
+ umulh x11, x30, x27
+ str x11, [sp, #160] // 8-byte Folded Spill
+ umulh x11, x12, x27
+ str x11, [sp, #112] // 8-byte Folded Spill
+ umulh x11, x14, x27
+ str x11, [sp, #64] // 8-byte Folded Spill
+ umulh x11, x7, x27
+ str x11, [sp, #32] // 8-byte Folded Spill
+ umulh x29, x9, x27
+ umulh x23, x10, x27
+ mul x11, x5, x8
+ str x11, [sp, #256] // 8-byte Folded Spill
+ umulh x11, x5, x8
+ str x11, [sp, #432] // 8-byte Folded Spill
+ mul x11, x30, x8
+ str x11, [sp, #152] // 8-byte Folded Spill
+ umulh x11, x30, x8
+ str x11, [sp, #360] // 8-byte Folded Spill
+ mul x11, x12, x8
+ str x11, [sp, #88] // 8-byte Folded Spill
+ umulh x11, x12, x8
+ str x11, [sp, #280] // 8-byte Folded Spill
+ mul x11, x14, x8
+ str x11, [sp, #24] // 8-byte Folded Spill
+ umulh x11, x14, x8
+ str x11, [sp, #200] // 8-byte Folded Spill
+ mul x25, x7, x8
+ umulh x11, x7, x8
+ str x11, [sp, #104] // 8-byte Folded Spill
+ mul x22, x9, x8
+ umulh x9, x9, x8
+ str x9, [sp, #56] // 8-byte Folded Spill
+ mul x20, x10, x8
+ umulh x26, x10, x8
+ ldr x10, [x2, #48]
+ ldp x2, x8, [x1]
+ ldr x9, [x1, #16]
+ ldp x11, x1, [x1, #32]
+ mul x27, x2, x10
+ umulh x21, x2, x10
+ mul x5, x8, x10
+ umulh x19, x8, x10
+ mul x3, x9, x10
+ umulh x7, x9, x10
+ mul x2, x18, x10
+ umulh x6, x18, x10
+ mul x17, x11, x10
+ umulh x4, x11, x10
+ mul x16, x1, x10
+ umulh x1, x1, x10
+ mul x15, x13, x10
+ umulh x18, x13, x10
+ ldr x8, [sp, #528] // 8-byte Folded Reload
+ str x8, [x0]
+ ldr x8, [sp, #304] // 8-byte Folded Reload
+ ldr x9, [sp, #272] // 8-byte Folded Reload
+ adds x13, x9, x8
+ ldr x8, [sp, #368] // 8-byte Folded Reload
+ ldr x9, [sp, #352] // 8-byte Folded Reload
+ adcs x8, x9, x8
+ ldr x9, [sp, #456] // 8-byte Folded Reload
+ ldr x10, [sp, #424] // 8-byte Folded Reload
+ adcs x9, x10, x9
+ ldr x10, [sp, #512] // 8-byte Folded Reload
+ ldr x11, [sp, #496] // 8-byte Folded Reload
+ adcs x10, x11, x10
+ ldr x11, [sp, #568] // 8-byte Folded Reload
+ ldr x12, [sp, #552] // 8-byte Folded Reload
+ adcs x11, x12, x11
+ ldr x12, [sp, #608] // 8-byte Folded Reload
+ ldr x14, [sp, #592] // 8-byte Folded Reload
+ adcs x12, x14, x12
+ ldr x14, [sp, #616] // 8-byte Folded Reload
+ adcs x14, x14, xzr
+ ldr x30, [sp, #96] // 8-byte Folded Reload
+ adds x13, x30, x13
+ str x13, [x0, #8]
+ ldr x13, [sp, #208] // 8-byte Folded Reload
+ adcs x8, x13, x8
+ ldr x13, [sp, #408] // 8-byte Folded Reload
+ adcs x9, x13, x9
+ ldr x13, [sp, #448] // 8-byte Folded Reload
+ adcs x10, x13, x10
+ ldr x13, [sp, #488] // 8-byte Folded Reload
+ adcs x11, x13, x11
+ ldr x13, [sp, #520] // 8-byte Folded Reload
+ adcs x12, x13, x12
+ ldr x13, [sp, #560] // 8-byte Folded Reload
+ adcs x13, x13, x14
+ adcs x14, xzr, xzr
+ ldr x30, [sp, #320] // 8-byte Folded Reload
+ adds x8, x8, x30
+ ldr x30, [sp, #400] // 8-byte Folded Reload
+ adcs x9, x9, x30
+ ldr x30, [sp, #472] // 8-byte Folded Reload
+ adcs x10, x10, x30
+ ldr x30, [sp, #504] // 8-byte Folded Reload
+ adcs x11, x11, x30
+ ldr x30, [sp, #544] // 8-byte Folded Reload
+ adcs x12, x12, x30
+ ldr x30, [sp, #576] // 8-byte Folded Reload
+ adcs x13, x13, x30
+ ldr x30, [sp, #600] // 8-byte Folded Reload
+ adcs x14, x14, x30
+ ldr x30, [sp, #248] // 8-byte Folded Reload
+ adds x8, x30, x8
+ str x8, [x0, #16]
+ ldp x30, x8, [sp, #128]
+ adcs x8, x8, x9
+ ldr x9, [sp, #192] // 8-byte Folded Reload
+ adcs x9, x9, x10
+ ldr x10, [sp, #240] // 8-byte Folded Reload
+ adcs x10, x10, x11
+ ldr x11, [sp, #296] // 8-byte Folded Reload
+ adcs x11, x11, x12
+ ldr x12, [sp, #344] // 8-byte Folded Reload
+ adcs x12, x12, x13
+ ldr x13, [sp, #392] // 8-byte Folded Reload
+ adcs x13, x13, x14
+ adcs x14, xzr, xzr
+ adds x8, x8, x30
+ ldr x30, [sp, #184] // 8-byte Folded Reload
+ adcs x9, x9, x30
+ ldr x30, [sp, #232] // 8-byte Folded Reload
+ adcs x10, x10, x30
+ ldr x30, [sp, #288] // 8-byte Folded Reload
+ adcs x11, x11, x30
+ ldr x30, [sp, #336] // 8-byte Folded Reload
+ adcs x12, x12, x30
+ ldr x30, [sp, #384] // 8-byte Folded Reload
+ adcs x13, x13, x30
+ ldr x30, [sp, #440] // 8-byte Folded Reload
+ adcs x14, x14, x30
+ adds x8, x28, x8
+ str x8, [x0, #24]
+ ldr x8, [sp, #80] // 8-byte Folded Reload
+ adcs x8, x8, x9
+ ldr x9, [sp, #144] // 8-byte Folded Reload
+ adcs x9, x9, x10
+ ldr x10, [sp, #224] // 8-byte Folded Reload
+ adcs x10, x10, x11
+ ldr x11, [sp, #312] // 8-byte Folded Reload
+ adcs x11, x11, x12
+ ldr x12, [sp, #376] // 8-byte Folded Reload
+ adcs x12, x12, x13
+ ldr x13, [sp, #464] // 8-byte Folded Reload
+ adcs x13, x13, x14
+ adcs x14, xzr, xzr
+ ldr x28, [sp, #176] // 8-byte Folded Reload
+ adds x8, x8, x28
+ ldr x28, [sp, #264] // 8-byte Folded Reload
+ adcs x9, x9, x28
+ ldr x28, [sp, #328] // 8-byte Folded Reload
+ adcs x10, x10, x28
+ ldr x28, [sp, #416] // 8-byte Folded Reload
+ adcs x11, x11, x28
+ ldr x28, [sp, #480] // 8-byte Folded Reload
+ adcs x12, x12, x28
+ ldr x28, [sp, #536] // 8-byte Folded Reload
+ adcs x13, x13, x28
+ ldr x28, [sp, #584] // 8-byte Folded Reload
+ adcs x14, x14, x28
+ ldr x28, [sp, #48] // 8-byte Folded Reload
+ adds x8, x28, x8
+ str x8, [x0, #32]
+ adcs x8, x24, x9
+ ldr x9, [sp, #16] // 8-byte Folded Reload
+ adcs x9, x9, x10
+ ldr x10, [sp, #40] // 8-byte Folded Reload
+ adcs x10, x10, x11
+ ldr x11, [sp, #72] // 8-byte Folded Reload
+ adcs x11, x11, x12
+ ldr x12, [sp, #120] // 8-byte Folded Reload
+ adcs x12, x12, x13
+ ldr x13, [sp, #168] // 8-byte Folded Reload
+ adcs x13, x13, x14
+ adcs x14, xzr, xzr
+ adds x8, x8, x23
+ adcs x9, x9, x29
+ ldr x23, [sp, #32] // 8-byte Folded Reload
+ adcs x10, x10, x23
+ ldr x23, [sp, #64] // 8-byte Folded Reload
+ adcs x11, x11, x23
+ ldr x23, [sp, #112] // 8-byte Folded Reload
+ adcs x12, x12, x23
+ ldr x23, [sp, #160] // 8-byte Folded Reload
+ adcs x13, x13, x23
+ ldr x23, [sp, #216] // 8-byte Folded Reload
+ adcs x14, x14, x23
+ adds x8, x20, x8
+ str x8, [x0, #40]
+ adcs x8, x22, x9
+ adcs x9, x25, x10
+ ldr x10, [sp, #24] // 8-byte Folded Reload
+ adcs x10, x10, x11
+ ldr x11, [sp, #88] // 8-byte Folded Reload
+ adcs x11, x11, x12
+ ldr x12, [sp, #152] // 8-byte Folded Reload
+ adcs x12, x12, x13
+ ldr x13, [sp, #256] // 8-byte Folded Reload
+ adcs x13, x13, x14
+ adcs x14, xzr, xzr
+ adds x8, x8, x26
+ ldr x20, [sp, #56] // 8-byte Folded Reload
+ adcs x9, x9, x20
+ ldr x20, [sp, #104] // 8-byte Folded Reload
+ adcs x10, x10, x20
+ ldr x20, [sp, #200] // 8-byte Folded Reload
+ adcs x11, x11, x20
+ ldr x20, [sp, #280] // 8-byte Folded Reload
+ adcs x12, x12, x20
+ ldr x20, [sp, #360] // 8-byte Folded Reload
+ adcs x13, x13, x20
+ ldr x20, [sp, #432] // 8-byte Folded Reload
+ adcs x14, x14, x20
+ adds x8, x27, x8
+ str x8, [x0, #48]
+ adcs x8, x5, x9
+ adcs x9, x3, x10
+ adcs x10, x2, x11
+ adcs x11, x17, x12
+ adcs x12, x16, x13
+ adcs x13, x15, x14
+ adcs x14, xzr, xzr
+ adds x8, x8, x21
+ str x8, [x0, #56]
+ adcs x8, x9, x19
+ str x8, [x0, #64]
+ adcs x8, x10, x7
+ str x8, [x0, #72]
+ adcs x8, x11, x6
+ str x8, [x0, #80]
+ adcs x8, x12, x4
+ str x8, [x0, #88]
+ adcs x8, x13, x1
+ str x8, [x0, #96]
+ adcs x8, x14, x18
+ str x8, [x0, #104]
+ add sp, sp, #624 // =624
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end96:
+ .size mcl_fpDbl_mulPre7L, .Lfunc_end96-mcl_fpDbl_mulPre7L
+
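+// Squaring of the 7-limb value at x1 into a 14-limb result at x0, with no
+// reduction.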
+ .globl mcl_fpDbl_sqrPre7L
+ .align 2
+ .type mcl_fpDbl_sqrPre7L,@function
+mcl_fpDbl_sqrPre7L: // @mcl_fpDbl_sqrPre7L
+// BB#0:
+ stp x24, x23, [sp, #-48]!
+ stp x22, x21, [sp, #16]
+ stp x20, x19, [sp, #32]
+ ldp x11, x8, [x1]
+ ldp x9, x10, [x1, #40]
+ ldp x15, x12, [x1, #16]
+ ldp x16, x3, [x1, #16]
+ ldp x13, x14, [x1, #32]
+ ldp x18, x17, [x1, #32]
+ ldr x2, [x1, #32]
+ mul x4, x11, x11
+ umulh x5, x10, x11
+ mul x6, x9, x11
+ mul x7, x18, x11
+ mul x19, x3, x11
+ umulh x20, x16, x11
+ mul x21, x16, x11
+ umulh x22, x8, x11
+ mul x23, x8, x11
+ str x4, [x0]
+ umulh x4, x11, x11
+ adds x4, x4, x23
+ adcs x21, x22, x21
+ adcs x19, x20, x19
+ umulh x20, x3, x11
+ adcs x7, x20, x7
+ umulh x20, x18, x11
+ adcs x6, x20, x6
+ mul x20, x10, x11
+ umulh x11, x9, x11
+ adcs x20, x11, x20
+ adcs x5, x5, xzr
+ adds x4, x23, x4
+ ldp x11, x23, [x1, #40]
+ str x4, [x0, #8]
+ mul x4, x8, x8
+ adcs x4, x4, x21
+ mul x21, x16, x8
+ adcs x19, x21, x19
+ mul x21, x3, x8
+ adcs x7, x21, x7
+ mul x21, x18, x8
+ adcs x6, x21, x6
+ mul x21, x9, x8
+ adcs x20, x21, x20
+ mul x21, x10, x8
+ umulh x10, x10, x8
+ umulh x9, x9, x8
+ umulh x18, x18, x8
+ umulh x3, x3, x8
+ umulh x16, x16, x8
+ umulh x8, x8, x8
+ adcs x5, x21, x5
+ adcs x21, xzr, xzr
+ adds x4, x4, x22
+ adcs x8, x19, x8
+ ldp x19, x22, [x1]
+ adcs x16, x7, x16
+ adcs x3, x6, x3
+ ldp x6, x7, [x1, #8]
+ adcs x18, x20, x18
+ mul x20, x19, x15
+ adcs x9, x5, x9
+ mul x5, x23, x15
+ adcs x10, x21, x10
+ mul x21, x14, x15
+ adds x4, x20, x4
+ mul x20, x13, x15
+ str x4, [x0, #16]
+ mul x4, x6, x15
+ adcs x8, x4, x8
+ mul x4, x15, x15
+ adcs x16, x4, x16
+ mul x4, x12, x15
+ adcs x3, x4, x3
+ adcs x18, x20, x18
+ umulh x20, x13, x15
+ adcs x9, x21, x9
+ umulh x21, x19, x15
+ adcs x10, x5, x10
+ adcs x5, xzr, xzr
+ adds x8, x8, x21
+ umulh x21, x6, x15
+ adcs x16, x16, x21
+ umulh x21, x15, x15
+ adcs x3, x3, x21
+ umulh x21, x12, x15
+ adcs x18, x18, x21
+ adcs x9, x9, x20
+ umulh x20, x14, x15
+ adcs x10, x10, x20
+ umulh x15, x23, x15
+ adcs x15, x5, x15
+ mul x5, x19, x12
+ adds x8, x5, x8
+ ldr x5, [x1, #32]
+ str x8, [x0, #24]
+ mul x8, x6, x12
+ adcs x8, x8, x16
+ ldr x16, [x1]
+ adcs x3, x4, x3
+ mul x4, x12, x12
+ adcs x18, x4, x18
+ mul x4, x13, x12
+ adcs x9, x4, x9
+ mul x4, x14, x12
+ adcs x10, x4, x10
+ mul x4, x23, x12
+ umulh x19, x19, x12
+ adcs x15, x4, x15
+ adcs x4, xzr, xzr
+ adds x8, x8, x19
+ ldr x19, [x1, #24]
+ umulh x6, x6, x12
+ adcs x3, x3, x6
+ ldr x6, [x1, #48]
+ adcs x18, x18, x21
+ ldr x20, [x1, #48]
+ umulh x21, x23, x12
+ umulh x14, x14, x12
+ umulh x13, x13, x12
+ umulh x12, x12, x12
+ adcs x9, x9, x12
+ adcs x10, x10, x13
+ ldp x12, x13, [x1]
+ adcs x14, x15, x14
+ mul x15, x16, x5
+ adcs x4, x4, x21
+ mul x21, x6, x5
+ adds x8, x15, x8
+ mul x15, x17, x5
+ str x8, [x0, #32]
+ mul x8, x22, x5
+ adcs x8, x8, x3
+ mul x3, x7, x5
+ adcs x18, x3, x18
+ mul x3, x19, x5
+ adcs x9, x3, x9
+ mul x3, x5, x5
+ adcs x10, x3, x10
+ umulh x3, x16, x5
+ adcs x14, x15, x14
+ adcs x4, x21, x4
+ adcs x21, xzr, xzr
+ adds x8, x8, x3
+ umulh x3, x22, x5
+ adcs x18, x18, x3
+ umulh x3, x7, x5
+ adcs x9, x9, x3
+ umulh x3, x19, x5
+ adcs x10, x10, x3
+ umulh x3, x5, x5
+ adcs x14, x14, x3
+ umulh x3, x6, x5
+ umulh x5, x17, x5
+ adcs x4, x4, x5
+ adcs x3, x21, x3
+ mul x21, x16, x17
+ adds x8, x21, x8
+ ldp x21, x1, [x1, #16]
+ str x8, [x0, #40]
+ mul x8, x22, x17
+ adcs x8, x8, x18
+ mul x18, x7, x17
+ adcs x9, x18, x9
+ mul x18, x19, x17
+ adcs x10, x18, x10
+ mul x18, x6, x17
+ adcs x14, x15, x14
+ mul x15, x17, x17
+ umulh x6, x6, x17
+ umulh x19, x19, x17
+ umulh x7, x7, x17
+ umulh x22, x22, x17
+ umulh x16, x16, x17
+ umulh x17, x17, x17
+ adcs x15, x15, x4
+ mul x4, x12, x20
+ adcs x18, x18, x3
+ adcs x3, xzr, xzr
+ adds x8, x8, x16
+ mul x16, x11, x20
+ adcs x9, x9, x22
+ mul x22, x2, x20
+ adcs x10, x10, x7
+ mul x7, x1, x20
+ adcs x14, x14, x19
+ mul x19, x21, x20
+ adcs x15, x15, x5
+ mul x5, x13, x20
+ adcs x17, x18, x17
+ mul x18, x20, x20
+ umulh x12, x12, x20
+ umulh x13, x13, x20
+ umulh x21, x21, x20
+ umulh x1, x1, x20
+ umulh x2, x2, x20
+ umulh x11, x11, x20
+ umulh x20, x20, x20
+ adcs x3, x3, x6
+ adds x8, x4, x8
+ str x8, [x0, #48]
+ adcs x8, x5, x9
+ adcs x9, x19, x10
+ adcs x10, x7, x14
+ adcs x14, x22, x15
+ adcs x15, x16, x17
+ adcs x16, x18, x3
+ adcs x17, xzr, xzr
+ adds x8, x8, x12
+ str x8, [x0, #56]
+ adcs x8, x9, x13
+ str x8, [x0, #64]
+ adcs x8, x10, x21
+ str x8, [x0, #72]
+ adcs x8, x14, x1
+ str x8, [x0, #80]
+ adcs x8, x15, x2
+ str x8, [x0, #88]
+ adcs x8, x16, x11
+ str x8, [x0, #96]
+ adcs x8, x17, x20
+ str x8, [x0, #104]
+ ldp x20, x19, [sp, #32]
+ ldp x22, x21, [sp, #16]
+ ldp x24, x23, [sp], #48
+ ret
+.Lfunc_end97:
+ .size mcl_fpDbl_sqrPre7L, .Lfunc_end97-mcl_fpDbl_sqrPre7L
+
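+// Montgomery multiplication over a 7-limb (448-bit) field; same x0/x1/x2/x3
+// calling convention as mcl_fp_mont6L, with the Montgomery constant at [x3, #-8].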
+ .globl mcl_fp_mont7L
+ .align 2
+ .type mcl_fp_mont7L,@function
+mcl_fp_mont7L: // @mcl_fp_mont7L
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ sub sp, sp, #144 // =144
+ str x2, [sp, #112] // 8-byte Folded Spill
+ str x0, [sp, #64] // 8-byte Folded Spill
+ ldr x6, [x2]
+ ldr x15, [x1, #48]
+ str x15, [sp, #96] // 8-byte Folded Spill
+ ldr x0, [x1, #32]
+ str x0, [sp, #56] // 8-byte Folded Spill
+ ldr x18, [x1, #40]
+ ldp x11, x13, [x1, #16]
+ ldp x17, x5, [x1]
+ str x5, [sp, #88] // 8-byte Folded Spill
+ ldur x12, [x3, #-8]
+ str x12, [sp, #128] // 8-byte Folded Spill
+ ldr x1, [x3, #32]
+ str x1, [sp, #104] // 8-byte Folded Spill
+ ldr x9, [x3, #40]
+ str x9, [sp, #80] // 8-byte Folded Spill
+ ldr x8, [x3, #16]
+ str x8, [sp, #136] // 8-byte Folded Spill
+ ldr x10, [x3, #24]
+ str x10, [sp, #120] // 8-byte Folded Spill
+ ldr x14, [x3]
+ str x14, [sp, #24] // 8-byte Folded Spill
+ ldr x4, [x3, #8]
+ str x4, [sp, #72] // 8-byte Folded Spill
+ ldr x7, [x2, #8]
+ umulh x19, x15, x6
+ mul x20, x15, x6
+ umulh x21, x18, x6
+ mul x22, x18, x6
+ mov x15, x0
+ umulh x23, x15, x6
+ mul x24, x15, x6
+ mov x16, x13
+ umulh x25, x16, x6
+ mul x26, x16, x6
+ mov x13, x11
+ umulh x27, x13, x6
+ mul x28, x13, x6
+ mul x29, x5, x6
+ mov x11, x17
+ umulh x30, x11, x6
+ adds x29, x30, x29
+ umulh x30, x5, x6
+ mul x6, x11, x6
+ adcs x28, x30, x28
+ mul x30, x6, x12
+ adcs x26, x27, x26
+ mul x27, x30, x10
+ adcs x24, x25, x24
+ mul x25, x30, x8
+ adcs x22, x23, x22
+ mul x23, x30, x4
+ adcs x20, x21, x20
+ umulh x21, x30, x14
+ adcs x19, x19, xzr
+ adds x21, x21, x23
+ umulh x23, x30, x4
+ adcs x23, x23, x25
+ umulh x25, x30, x8
+ adcs x25, x25, x27
+ mul x27, x30, x1
+ umulh x17, x30, x10
+ adcs x17, x17, x27
+ ldr x3, [x3, #48]
+ str x3, [sp, #48] // 8-byte Folded Spill
+ mul x27, x30, x9
+ umulh x0, x30, x1
+ adcs x0, x0, x27
+ mul x27, x30, x3
+ umulh x2, x30, x9
+ adcs x2, x2, x27
+ umulh x27, x30, x3
+ mul x30, x30, x14
+ adcs x27, x27, xzr
+ cmn x30, x6
+ adcs x6, x21, x29
+ adcs x21, x23, x28
+ mul x23, x7, x15
+ adcs x25, x25, x26
+ mul x26, x7, x16
+ adcs x17, x17, x24
+ mul x24, x7, x13
+ adcs x0, x0, x22
+ mul x22, x7, x5
+ adcs x2, x2, x20
+ umulh x20, x7, x11
+ adcs x19, x27, x19
+ adcs x27, xzr, xzr
+ adds x20, x20, x22
+ umulh x22, x7, x5
+ adcs x22, x22, x24
+ umulh x24, x7, x13
+ mov x5, x13
+ adcs x24, x24, x26
+ umulh x26, x7, x16
+ adcs x23, x26, x23
+ mul x26, x7, x18
+ umulh x28, x7, x15
+ adcs x26, x28, x26
+ ldr x15, [sp, #96] // 8-byte Folded Reload
+ mul x28, x7, x15
+ umulh x29, x7, x18
+ adcs x28, x29, x28
+ umulh x29, x7, x15
+ mul x7, x7, x11
+ adcs x29, x29, xzr
+ adds x30, x6, x7
+ adcs x6, x21, x20
+ adcs x25, x25, x22
+ mul x22, x30, x12
+ adcs x24, x17, x24
+ mul x17, x22, x10
+ adcs x0, x0, x23
+ mul x23, x22, x8
+ adcs x7, x2, x26
+ mul x2, x22, x4
+ adcs x20, x19, x28
+ umulh x26, x22, x14
+ adcs x21, x27, x29
+ adcs x19, xzr, xzr
+ adds x2, x26, x2
+ umulh x26, x22, x4
+ adcs x23, x26, x23
+ umulh x26, x22, x8
+ adcs x17, x26, x17
+ mul x26, x22, x1
+ umulh x27, x22, x10
+ adcs x26, x27, x26
+ mul x27, x22, x9
+ umulh x28, x22, x1
+ adcs x27, x28, x27
+ mul x28, x22, x3
+ umulh x29, x22, x9
+ adcs x28, x29, x28
+ umulh x29, x22, x3
+ mul x22, x22, x14
+ mov x10, x14
+ adcs x29, x29, xzr
+ cmn x22, x30
+ adcs x22, x2, x6
+ adcs x23, x23, x25
+ ldr x8, [sp, #112] // 8-byte Folded Reload
+ adcs x24, x17, x24
+ ldp x25, x17, [x8, #16]
+ adcs x0, x26, x0
+ mul x2, x25, x16
+ adcs x6, x27, x7
+ mul x7, x25, x5
+ adcs x20, x28, x20
+ ldp x15, x8, [sp, #88]
+ mul x26, x25, x15
+ adcs x21, x29, x21
+ mov x12, x11
+ umulh x27, x25, x12
+ adcs x19, x19, xzr
+ adds x26, x27, x26
+ umulh x27, x25, x15
+ adcs x7, x27, x7
+ umulh x27, x25, x5
+ mov x9, x5
+ adcs x2, x27, x2
+ ldr x11, [sp, #56] // 8-byte Folded Reload
+ mul x27, x25, x11
+ umulh x28, x25, x16
+ mov x13, x16
+ adcs x27, x28, x27
+ mul x28, x25, x18
+ umulh x29, x25, x11
+ adcs x28, x29, x28
+ mul x29, x25, x8
+ umulh x30, x25, x18
+ adcs x29, x30, x29
+ umulh x30, x25, x8
+ mov x14, x8
+ mul x25, x25, x12
+ mov x5, x12
+ adcs x30, x30, xzr
+ adds x22, x22, x25
+ adcs x23, x23, x26
+ adcs x7, x24, x7
+ adcs x0, x0, x2
+ ldp x8, x12, [sp, #128]
+ mul x2, x22, x8
+ adcs x6, x6, x27
+ mul x24, x2, x12
+ adcs x20, x20, x28
+ mul x25, x2, x4
+ adcs x21, x21, x29
+ mov x1, x10
+ umulh x26, x2, x1
+ adcs x19, x19, x30
+ adcs x27, xzr, xzr
+ adds x25, x26, x25
+ umulh x26, x2, x4
+ adcs x24, x26, x24
+ ldr x10, [sp, #120] // 8-byte Folded Reload
+ mul x26, x2, x10
+ umulh x28, x2, x12
+ adcs x26, x28, x26
+ ldr x12, [sp, #104] // 8-byte Folded Reload
+ mul x28, x2, x12
+ umulh x29, x2, x10
+ adcs x28, x29, x28
+ ldr x10, [sp, #80] // 8-byte Folded Reload
+ mul x29, x2, x10
+ umulh x30, x2, x12
+ adcs x29, x30, x29
+ mul x30, x2, x3
+ umulh x12, x2, x10
+ adcs x12, x12, x30
+ umulh x30, x2, x3
+ mul x2, x2, x1
+ adcs x30, x30, xzr
+ cmn x2, x22
+ adcs x2, x25, x23
+ adcs x7, x24, x7
+ adcs x0, x26, x0
+ mul x22, x17, x11
+ adcs x6, x28, x6
+ mul x23, x17, x13
+ adcs x20, x29, x20
+ mul x24, x17, x9
+ adcs x12, x12, x21
+ mul x21, x17, x15
+ adcs x19, x30, x19
+ umulh x25, x17, x5
+ adcs x26, x27, xzr
+ adds x21, x25, x21
+ umulh x25, x17, x15
+ adcs x24, x25, x24
+ umulh x25, x17, x9
+ mov x16, x9
+ adcs x23, x25, x23
+ umulh x25, x17, x13
+ adcs x22, x25, x22
+ mul x25, x17, x18
+ umulh x27, x17, x11
+ adcs x25, x27, x25
+ mov x9, x14
+ mul x27, x17, x9
+ umulh x28, x17, x18
+ adcs x27, x28, x27
+ umulh x28, x17, x9
+ mul x17, x17, x5
+ mov x15, x5
+ adcs x28, x28, xzr
+ adds x17, x2, x17
+ adcs x2, x7, x21
+ adcs x0, x0, x24
+ mul x24, x17, x8
+ adcs x29, x6, x23
+ ldr x9, [sp, #120] // 8-byte Folded Reload
+ mul x23, x24, x9
+ adcs x6, x20, x22
+ ldr x8, [sp, #136] // 8-byte Folded Reload
+ mul x22, x24, x8
+ adcs x7, x12, x25
+ mul x12, x24, x4
+ adcs x20, x19, x27
+ umulh x25, x24, x1
+ adcs x21, x26, x28
+ adcs x19, xzr, xzr
+ adds x12, x25, x12
+ umulh x25, x24, x4
+ adcs x25, x25, x22
+ umulh x22, x24, x8
+ adcs x26, x22, x23
+ ldr x5, [sp, #104] // 8-byte Folded Reload
+ mul x22, x24, x5
+ umulh x23, x24, x9
+ adcs x27, x23, x22
+ mov x9, x10
+ mul x22, x24, x9
+ umulh x23, x24, x5
+ adcs x28, x23, x22
+ mul x22, x24, x3
+ umulh x23, x24, x9
+ adcs x30, x23, x22
+ umulh x22, x24, x3
+ mul x23, x24, x1
+ mov x3, x1
+ adcs x24, x22, xzr
+ cmn x23, x17
+ adcs x22, x12, x2
+ adcs x23, x25, x0
+ ldr x10, [sp, #112] // 8-byte Folded Reload
+ ldp x12, x0, [x10, #32]
+ adcs x17, x26, x29
+ adcs x2, x27, x6
+ mul x6, x12, x13
+ adcs x7, x28, x7
+ mov x10, x16
+ mul x25, x12, x10
+ adcs x20, x30, x20
+ ldr x16, [sp, #88] // 8-byte Folded Reload
+ mul x26, x12, x16
+ adcs x21, x24, x21
+ umulh x24, x12, x15
+ adcs x1, x19, xzr
+ adds x24, x24, x26
+ umulh x26, x12, x16
+ adcs x25, x26, x25
+ umulh x26, x12, x10
+ adcs x6, x26, x6
+ mul x26, x12, x11
+ umulh x27, x12, x13
+ adcs x26, x27, x26
+ mul x27, x12, x18
+ umulh x28, x12, x11
+ adcs x27, x28, x27
+ mul x28, x12, x14
+ umulh x29, x12, x18
+ adcs x28, x29, x28
+ umulh x29, x12, x14
+ mul x12, x12, x15
+ adcs x29, x29, xzr
+ adds x12, x22, x12
+ adcs x22, x23, x24
+ adcs x17, x17, x25
+ adcs x2, x2, x6
+ ldr x19, [sp, #128] // 8-byte Folded Reload
+ mul x6, x12, x19
+ adcs x7, x7, x26
+ mov x30, x8
+ mul x23, x6, x30
+ adcs x20, x20, x27
+ mul x24, x6, x4
+ adcs x21, x21, x28
+ mov x8, x3
+ umulh x25, x6, x8
+ adcs x1, x1, x29
+ adcs x26, xzr, xzr
+ adds x24, x25, x24
+ umulh x25, x6, x4
+ adcs x23, x25, x23
+ ldr x4, [sp, #120] // 8-byte Folded Reload
+ mul x25, x6, x4
+ umulh x27, x6, x30
+ adcs x25, x27, x25
+ mul x27, x6, x5
+ umulh x28, x6, x4
+ adcs x27, x28, x27
+ mov x3, x9
+ mul x28, x6, x3
+ umulh x29, x6, x5
+ adcs x28, x29, x28
+ ldr x9, [sp, #48] // 8-byte Folded Reload
+ mul x29, x6, x9
+ umulh x30, x6, x3
+ adcs x29, x30, x29
+ umulh x30, x6, x9
+ mov x3, x9
+ mul x6, x6, x8
+ mov x5, x8
+ adcs x30, x30, xzr
+ cmn x6, x12
+ adcs x12, x24, x22
+ adcs x17, x23, x17
+ adcs x2, x25, x2
+ mul x6, x0, x11
+ adcs x7, x27, x7
+ mul x22, x0, x13
+ adcs x20, x28, x20
+ mul x23, x0, x10
+ adcs x21, x29, x21
+ mul x24, x0, x16
+ adcs x29, x30, x1
+ mov x1, x15
+ umulh x25, x0, x1
+ adcs x26, x26, xzr
+ adds x24, x25, x24
+ umulh x25, x0, x16
+ adcs x23, x25, x23
+ umulh x25, x0, x10
+ adcs x22, x25, x22
+ umulh x25, x0, x13
+ adcs x6, x25, x6
+ mul x25, x0, x18
+ umulh x27, x0, x11
+ adcs x25, x27, x25
+ mov x9, x14
+ mul x27, x0, x9
+ umulh x28, x0, x18
+ adcs x27, x28, x27
+ umulh x28, x0, x9
+ mul x0, x0, x1
+ adcs x28, x28, xzr
+ adds x12, x12, x0
+ adcs x8, x17, x24
+ str x8, [sp, #40] // 8-byte Folded Spill
+ adcs x8, x2, x23
+ str x8, [sp, #32] // 8-byte Folded Spill
+ mul x2, x12, x19
+ adcs x7, x7, x22
+ mul x22, x2, x4
+ adcs x8, x20, x6
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x8, [sp, #136] // 8-byte Folded Reload
+ mul x20, x2, x8
+ adcs x21, x21, x25
+ ldr x9, [sp, #72] // 8-byte Folded Reload
+ mul x23, x2, x9
+ adcs x19, x29, x27
+ mov x15, x5
+ umulh x24, x2, x15
+ adcs x17, x26, x28
+ str x17, [sp, #8] // 8-byte Folded Spill
+ adcs x26, xzr, xzr
+ adds x23, x24, x23
+ umulh x24, x2, x9
+ adcs x20, x24, x20
+ umulh x24, x2, x8
+ adcs x22, x24, x22
+ ldp x25, x8, [sp, #104]
+ mul x24, x2, x25
+ umulh x27, x2, x4
+ adcs x6, x27, x24
+ ldr x5, [sp, #80] // 8-byte Folded Reload
+ mul x27, x2, x5
+ umulh x28, x2, x25
+ adcs x27, x28, x27
+ mul x28, x2, x3
+ umulh x29, x2, x5
+ adcs x28, x29, x28
+ ldr x29, [x8, #48]
+ mul x30, x2, x15
+ umulh x2, x2, x3
+ adcs x2, x2, xzr
+ cmn x30, x12
+ umulh x24, x29, x14
+ mul x30, x29, x14
+ umulh x0, x29, x18
+ mul x18, x29, x18
+ umulh x17, x29, x11
+ mul x15, x29, x11
+ umulh x14, x29, x13
+ mul x13, x29, x13
+ umulh x12, x29, x10
+ mul x11, x29, x10
+ mul x10, x29, x16
+ umulh x9, x29, x16
+ umulh x8, x29, x1
+ mul x29, x29, x1
+ ldr x16, [sp, #40] // 8-byte Folded Reload
+ adcs x23, x23, x16
+ ldr x16, [sp, #32] // 8-byte Folded Reload
+ adcs x20, x20, x16
+ adcs x7, x22, x7
+ ldr x16, [sp, #16] // 8-byte Folded Reload
+ adcs x6, x6, x16
+ adcs x21, x27, x21
+ adcs x19, x28, x19
+ ldr x16, [sp, #8] // 8-byte Folded Reload
+ adcs x2, x2, x16
+ adcs x22, x26, xzr
+ adds x8, x8, x10
+ adcs x9, x9, x11
+ adcs x10, x12, x13
+ adcs x11, x14, x15
+ adcs x12, x17, x18
+ adcs x13, x0, x30
+ adcs x14, x24, xzr
+ adds x15, x23, x29
+ adcs x8, x20, x8
+ ldr x16, [sp, #128] // 8-byte Folded Reload
+ mul x16, x15, x16
+ adcs x9, x7, x9
+ mul x17, x16, x3
+ mul x18, x16, x5
+ mul x0, x16, x25
+ adcs x10, x6, x10
+ mul x6, x16, x4
+ adcs x11, x21, x11
+ ldr x21, [sp, #136] // 8-byte Folded Reload
+ mul x7, x16, x21
+ adcs x12, x19, x12
+ ldr x23, [sp, #72] // 8-byte Folded Reload
+ mul x19, x16, x23
+ adcs x13, x2, x13
+ ldr x24, [sp, #24] // 8-byte Folded Reload
+ umulh x2, x16, x24
+ adcs x14, x22, x14
+ adcs x20, xzr, xzr
+ adds x2, x2, x19
+ umulh x19, x16, x23
+ adcs x7, x19, x7
+ umulh x19, x16, x21
+ adcs x6, x19, x6
+ umulh x19, x16, x4
+ adcs x0, x19, x0
+ umulh x19, x16, x25
+ adcs x18, x19, x18
+ umulh x19, x16, x5
+ adcs x17, x19, x17
+ umulh x19, x16, x3
+ mul x16, x16, x24
+ adcs x19, x19, xzr
+ cmn x16, x15
+ adcs x8, x2, x8
+ adcs x9, x7, x9
+ adcs x10, x6, x10
+ adcs x11, x0, x11
+ adcs x12, x18, x12
+ adcs x13, x17, x13
+ adcs x14, x19, x14
+ adcs x15, x20, xzr
+ subs x16, x8, x24
+ sbcs x17, x9, x23
+ sbcs x18, x10, x21
+ sbcs x0, x11, x4
+ sbcs x1, x12, x25
+ sbcs x2, x13, x5
+ sbcs x3, x14, x3
+ sbcs x15, x15, xzr
+ tst x15, #0x1
+ csel x8, x8, x16, ne
+ csel x9, x9, x17, ne
+ csel x10, x10, x18, ne
+ csel x11, x11, x0, ne
+ csel x12, x12, x1, ne
+ csel x13, x13, x2, ne
+ csel x14, x14, x3, ne
+ ldr x15, [sp, #64] // 8-byte Folded Reload
+ stp x8, x9, [x15]
+ stp x10, x11, [x15, #16]
+ stp x12, x13, [x15, #32]
+ str x14, [x15, #48]
+ add sp, sp, #144 // =144
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end98:
+ .size mcl_fp_mont7L, .Lfunc_end98-mcl_fp_mont7L
+
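+// mcl_fp_montNF7L: 7-limb (448-bit) Montgomery product of x (x1) and y (x2) modulo p (x3), written to z (x0);
+// unlike mcl_fp_mont7L above, the reduced result is selected by a signed comparison rather than a carry test.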
+ .globl mcl_fp_montNF7L
+ .align 2
+ .type mcl_fp_montNF7L,@function
+mcl_fp_montNF7L: // @mcl_fp_montNF7L
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ sub sp, sp, #32 // =32
+ stp x0, x2, [sp, #8]
+ ldr x7, [x2]
+ ldp x5, x16, [x1, #40]
+ ldp x6, x17, [x1, #24]
+ ldr x4, [x1]
+ ldp x1, x18, [x1, #8]
+ ldur x8, [x3, #-8]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldp x15, x0, [x3, #40]
+ ldp x11, x10, [x3, #24]
+ ldp x13, x12, [x3, #8]
+ ldr x14, [x3]
+ ldr x25, [x2, #8]
+ umulh x3, x16, x7
+ mul x19, x16, x7
+ umulh x20, x5, x7
+ mul x21, x5, x7
+ umulh x22, x17, x7
+ mul x23, x17, x7
+ umulh x24, x6, x7
+ mul x26, x6, x7
+ umulh x27, x18, x7
+ mul x28, x18, x7
+ mul x29, x1, x7
+ umulh x30, x4, x7
+ adds x29, x30, x29
+ umulh x30, x1, x7
+ mul x7, x4, x7
+ adcs x28, x30, x28
+ mul x30, x25, x5
+ adcs x26, x27, x26
+ mul x27, x25, x17
+ adcs x23, x24, x23
+ mul x24, x25, x6
+ adcs x21, x22, x21
+ mul x22, x7, x8
+ adcs x19, x20, x19
+ mul x20, x22, x14
+ adcs x3, x3, xzr
+ cmn x20, x7
+ mul x9, x25, x18
+ mul x7, x22, x13
+ adcs x7, x7, x29
+ mul x20, x22, x12
+ adcs x20, x20, x28
+ mul x28, x22, x11
+ adcs x26, x28, x26
+ mul x28, x22, x10
+ adcs x23, x28, x23
+ mul x28, x22, x15
+ adcs x21, x28, x21
+ mul x28, x22, x0
+ adcs x19, x28, x19
+ umulh x28, x22, x14
+ adcs x29, x3, xzr
+ adds x28, x7, x28
+ umulh x3, x22, x13
+ adcs x8, x20, x3
+ umulh x3, x22, x12
+ adcs x26, x26, x3
+ umulh x3, x22, x11
+ adcs x3, x23, x3
+ umulh x7, x22, x10
+ adcs x7, x21, x7
+ umulh x20, x22, x15
+ adcs x19, x19, x20
+ mul x21, x25, x1
+ umulh x20, x22, x0
+ adcs x20, x29, x20
+ umulh x22, x25, x4
+ adds x29, x22, x21
+ umulh x21, x25, x1
+ adcs x23, x21, x9
+ umulh x9, x25, x18
+ adcs x21, x9, x24
+ umulh x9, x25, x6
+ adcs x22, x9, x27
+ umulh x9, x25, x17
+ adcs x30, x9, x30
+ mul x9, x25, x16
+ umulh x24, x25, x5
+ adcs x24, x24, x9
+ umulh x9, x25, x16
+ mul x25, x25, x4
+ adcs x9, x9, xzr
+ adds x27, x25, x28
+ adcs x25, x29, x8
+ ldp x28, x8, [x2, #16]
+ adcs x29, x23, x26
+ adcs x3, x21, x3
+ mul x21, x28, x17
+ adcs x7, x22, x7
+ mul x22, x28, x6
+ adcs x19, x30, x19
+ ldr x2, [sp, #24] // 8-byte Folded Reload
+ mul x23, x27, x2
+ adcs x20, x24, x20
+ mul x24, x23, x14
+ adcs x9, x9, xzr
+ cmn x24, x27
+ mul x24, x28, x18
+ mul x26, x23, x13
+ adcs x25, x26, x25
+ mul x26, x23, x12
+ adcs x26, x26, x29
+ mul x27, x23, x11
+ adcs x3, x27, x3
+ mul x27, x23, x10
+ adcs x7, x27, x7
+ mul x27, x23, x15
+ adcs x19, x27, x19
+ mul x27, x23, x0
+ adcs x20, x27, x20
+ umulh x27, x23, x14
+ adcs x9, x9, xzr
+ adds x25, x25, x27
+ umulh x27, x23, x13
+ adcs x26, x26, x27
+ umulh x27, x23, x12
+ adcs x3, x3, x27
+ umulh x27, x23, x11
+ adcs x7, x7, x27
+ umulh x27, x23, x10
+ adcs x19, x19, x27
+ umulh x27, x23, x15
+ adcs x20, x20, x27
+ mul x27, x28, x1
+ umulh x23, x23, x0
+ adcs x9, x9, x23
+ umulh x23, x28, x4
+ adds x23, x23, x27
+ umulh x27, x28, x1
+ adcs x24, x27, x24
+ umulh x27, x28, x18
+ adcs x22, x27, x22
+ umulh x27, x28, x6
+ adcs x21, x27, x21
+ mul x27, x28, x5
+ umulh x29, x28, x17
+ adcs x27, x29, x27
+ mul x29, x28, x16
+ umulh x30, x28, x5
+ adcs x29, x30, x29
+ umulh x30, x28, x16
+ mul x28, x28, x4
+ adcs x30, x30, xzr
+ adds x25, x28, x25
+ adcs x23, x23, x26
+ adcs x3, x24, x3
+ mul x26, x8, x5
+ adcs x7, x22, x7
+ mul x22, x8, x17
+ adcs x19, x21, x19
+ mul x24, x8, x6
+ adcs x20, x27, x20
+ mul x21, x25, x2
+ adcs x9, x29, x9
+ mul x27, x21, x14
+ adcs x28, x30, xzr
+ cmn x27, x25
+ mul x25, x8, x18
+ mul x27, x21, x13
+ adcs x23, x27, x23
+ mul x27, x21, x12
+ adcs x3, x27, x3
+ mul x27, x21, x11
+ adcs x7, x27, x7
+ mul x27, x21, x10
+ adcs x19, x27, x19
+ mul x27, x21, x15
+ adcs x20, x27, x20
+ mul x27, x21, x0
+ adcs x9, x27, x9
+ umulh x27, x21, x14
+ adcs x28, x28, xzr
+ adds x27, x23, x27
+ umulh x23, x21, x13
+ adcs x3, x3, x23
+ umulh x23, x21, x12
+ adcs x30, x7, x23
+ umulh x7, x21, x11
+ adcs x7, x19, x7
+ umulh x19, x21, x10
+ adcs x19, x20, x19
+ umulh x20, x21, x15
+ adcs x20, x9, x20
+ mul x9, x8, x1
+ umulh x21, x21, x0
+ adcs x21, x28, x21
+ umulh x23, x8, x4
+ adds x9, x23, x9
+ umulh x23, x8, x1
+ adcs x28, x23, x25
+ umulh x23, x8, x18
+ adcs x23, x23, x24
+ umulh x24, x8, x6
+ adcs x24, x24, x22
+ umulh x22, x8, x17
+ adcs x25, x22, x26
+ mul x22, x8, x16
+ umulh x26, x8, x5
+ adcs x26, x26, x22
+ umulh x22, x8, x16
+ mul x29, x8, x4
+ adcs x2, x22, xzr
+ adds x29, x29, x27
+ adcs x27, x9, x3
+ ldr x8, [sp, #16] // 8-byte Folded Reload
+ ldp x22, x3, [x8, #32]
+ adcs x9, x28, x30
+ adcs x7, x23, x7
+ mul x23, x22, x17
+ adcs x19, x24, x19
+ mul x24, x22, x6
+ adcs x20, x25, x20
+ ldr x8, [sp, #24] // 8-byte Folded Reload
+ mul x25, x29, x8
+ adcs x21, x26, x21
+ mul x26, x25, x14
+ adcs x2, x2, xzr
+ cmn x26, x29
+ mul x26, x22, x18
+ mul x28, x25, x13
+ adcs x27, x28, x27
+ mul x28, x25, x12
+ adcs x9, x28, x9
+ mul x28, x25, x11
+ adcs x7, x28, x7
+ mul x28, x25, x10
+ adcs x19, x28, x19
+ mul x28, x25, x15
+ adcs x20, x28, x20
+ mul x28, x25, x0
+ adcs x21, x28, x21
+ umulh x28, x25, x14
+ adcs x2, x2, xzr
+ adds x27, x27, x28
+ umulh x28, x25, x13
+ adcs x9, x9, x28
+ umulh x28, x25, x12
+ adcs x7, x7, x28
+ umulh x28, x25, x11
+ adcs x19, x19, x28
+ umulh x28, x25, x10
+ adcs x20, x20, x28
+ umulh x28, x25, x15
+ adcs x21, x21, x28
+ mul x28, x22, x1
+ umulh x25, x25, x0
+ adcs x2, x2, x25
+ umulh x25, x22, x4
+ adds x25, x25, x28
+ umulh x28, x22, x1
+ adcs x26, x28, x26
+ umulh x28, x22, x18
+ adcs x24, x28, x24
+ umulh x28, x22, x6
+ adcs x23, x28, x23
+ mul x28, x22, x5
+ umulh x29, x22, x17
+ adcs x28, x29, x28
+ mul x29, x22, x16
+ umulh x30, x22, x5
+ adcs x29, x30, x29
+ umulh x30, x22, x16
+ mul x22, x22, x4
+ adcs x30, x30, xzr
+ adds x22, x22, x27
+ adcs x9, x25, x9
+ adcs x7, x26, x7
+ mul x25, x3, x5
+ adcs x19, x24, x19
+ mul x24, x3, x17
+ adcs x20, x23, x20
+ mul x23, x3, x6
+ adcs x21, x28, x21
+ mul x26, x22, x8
+ adcs x8, x29, x2
+ mul x27, x26, x14
+ adcs x28, x30, xzr
+ cmn x27, x22
+ mul x22, x3, x18
+ mul x27, x26, x13
+ adcs x9, x27, x9
+ mul x27, x26, x12
+ adcs x7, x27, x7
+ mul x27, x26, x11
+ adcs x19, x27, x19
+ mul x27, x26, x10
+ adcs x20, x27, x20
+ mul x27, x26, x15
+ adcs x21, x27, x21
+ mul x27, x26, x0
+ adcs x8, x27, x8
+ umulh x27, x26, x14
+ adcs x28, x28, xzr
+ adds x9, x9, x27
+ umulh x27, x26, x13
+ adcs x7, x7, x27
+ umulh x27, x26, x12
+ adcs x19, x19, x27
+ umulh x27, x26, x11
+ adcs x20, x20, x27
+ umulh x27, x26, x10
+ adcs x21, x21, x27
+ umulh x27, x26, x15
+ adcs x8, x8, x27
+ mul x27, x3, x1
+ umulh x26, x26, x0
+ adcs x26, x28, x26
+ umulh x28, x3, x4
+ adds x27, x28, x27
+ umulh x28, x3, x1
+ adcs x22, x28, x22
+ umulh x28, x3, x18
+ adcs x23, x28, x23
+ umulh x28, x3, x6
+ adcs x24, x28, x24
+ umulh x28, x3, x17
+ adcs x25, x28, x25
+ mul x28, x3, x16
+ umulh x29, x3, x5
+ adcs x28, x29, x28
+ ldp x2, x30, [sp, #16]
+ ldr x2, [x2, #48]
+ umulh x29, x3, x16
+ mul x3, x3, x4
+ adcs x29, x29, xzr
+ adds x9, x3, x9
+ adcs x3, x27, x7
+ umulh x7, x2, x16
+ mul x16, x2, x16
+ adcs x19, x22, x19
+ umulh x22, x2, x5
+ mul x5, x2, x5
+ adcs x20, x23, x20
+ umulh x23, x2, x17
+ mul x17, x2, x17
+ adcs x21, x24, x21
+ umulh x24, x2, x6
+ mul x6, x2, x6
+ adcs x8, x25, x8
+ mul x25, x9, x30
+ adcs x26, x28, x26
+ mul x27, x25, x14
+ adcs x28, x29, xzr
+ cmn x27, x9
+ umulh x9, x2, x18
+ mul x18, x2, x18
+ umulh x27, x2, x1
+ mul x1, x2, x1
+ umulh x29, x2, x4
+ mul x2, x2, x4
+ mul x4, x25, x13
+ adcs x3, x4, x3
+ mul x4, x25, x12
+ adcs x4, x4, x19
+ mul x19, x25, x11
+ adcs x19, x19, x20
+ mul x20, x25, x10
+ adcs x20, x20, x21
+ mul x21, x25, x15
+ adcs x8, x21, x8
+ mul x21, x25, x0
+ adcs x21, x21, x26
+ adcs x26, x28, xzr
+ umulh x28, x25, x14
+ adds x3, x3, x28
+ umulh x28, x25, x13
+ adcs x4, x4, x28
+ umulh x28, x25, x12
+ adcs x19, x19, x28
+ umulh x28, x25, x11
+ adcs x20, x20, x28
+ umulh x28, x25, x10
+ adcs x8, x8, x28
+ umulh x28, x25, x15
+ adcs x21, x21, x28
+ umulh x25, x25, x0
+ adcs x25, x26, x25
+ adds x1, x29, x1
+ adcs x18, x27, x18
+ adcs x9, x9, x6
+ adcs x17, x24, x17
+ adcs x5, x23, x5
+ adcs x16, x22, x16
+ adcs x6, x7, xzr
+ adds x2, x2, x3
+ adcs x1, x1, x4
+ adcs x18, x18, x19
+ adcs x9, x9, x20
+ adcs x8, x17, x8
+ adcs x17, x5, x21
+ mul x3, x2, x30
+ adcs x16, x16, x25
+ mul x4, x3, x14
+ adcs x5, x6, xzr
+ cmn x4, x2
+ mul x2, x3, x13
+ adcs x1, x2, x1
+ mul x2, x3, x12
+ adcs x18, x2, x18
+ mul x2, x3, x11
+ adcs x9, x2, x9
+ mul x2, x3, x10
+ adcs x8, x2, x8
+ mul x2, x3, x15
+ adcs x17, x2, x17
+ mul x2, x3, x0
+ adcs x16, x2, x16
+ umulh x2, x3, x14
+ adcs x4, x5, xzr
+ adds x1, x1, x2
+ umulh x2, x3, x13
+ adcs x18, x18, x2
+ umulh x2, x3, x12
+ adcs x9, x9, x2
+ umulh x2, x3, x11
+ adcs x8, x8, x2
+ umulh x2, x3, x10
+ adcs x17, x17, x2
+ umulh x2, x3, x15
+ adcs x16, x16, x2
+ umulh x2, x3, x0
+ adcs x2, x4, x2
+ subs x14, x1, x14
+ sbcs x13, x18, x13
+ sbcs x12, x9, x12
+ sbcs x11, x8, x11
+ sbcs x10, x17, x10
+ sbcs x15, x16, x15
+ sbcs x0, x2, x0
+ asr x3, x0, #63
+ cmp x3, #0 // =0
+ csel x14, x1, x14, lt
+ csel x13, x18, x13, lt
+ csel x9, x9, x12, lt
+ csel x8, x8, x11, lt
+ csel x10, x17, x10, lt
+ csel x11, x16, x15, lt
+ csel x12, x2, x0, lt
+ ldr x15, [sp, #8] // 8-byte Folded Reload
+ stp x14, x13, [x15]
+ stp x9, x8, [x15, #16]
+ stp x10, x11, [x15, #32]
+ str x12, [x15, #48]
+ add sp, sp, #32 // =32
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end99:
+ .size mcl_fp_montNF7L, .Lfunc_end99-mcl_fp_montNF7L
+
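+// mcl_fp_montRed7L: Montgomery reduction of the 14-limb value at x1 modulo the 7-limb modulus at x2
+// (the constant p' is loaded from x2[-8]); the 7-limb result is stored at x0.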
+ .globl mcl_fp_montRed7L
+ .align 2
+ .type mcl_fp_montRed7L,@function
+mcl_fp_montRed7L: // @mcl_fp_montRed7L
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ ldur x15, [x2, #-8]
+ ldp x9, x8, [x2, #40]
+ ldp x11, x10, [x2, #24]
+ ldp x13, x12, [x2, #8]
+ ldr x14, [x2]
+ ldp x17, x18, [x1, #96]
+ ldp x2, x3, [x1, #80]
+ ldp x4, x5, [x1, #64]
+ ldp x6, x7, [x1, #48]
+ ldp x19, x20, [x1, #32]
+ ldp x21, x22, [x1, #16]
+ ldp x16, x1, [x1]
+ mul x23, x16, x15
+ mul x24, x23, x8
+ mul x25, x23, x9
+ mul x26, x23, x10
+ mul x27, x23, x11
+ mul x28, x23, x12
+ mul x29, x23, x13
+ umulh x30, x23, x14
+ adds x29, x30, x29
+ umulh x30, x23, x13
+ adcs x28, x30, x28
+ umulh x30, x23, x12
+ adcs x27, x30, x27
+ umulh x30, x23, x11
+ adcs x26, x30, x26
+ umulh x30, x23, x10
+ adcs x25, x30, x25
+ umulh x30, x23, x9
+ adcs x24, x30, x24
+ umulh x30, x23, x8
+ mul x23, x23, x14
+ adcs x30, x30, xzr
+ cmn x16, x23
+ adcs x16, x1, x29
+ adcs x1, x21, x28
+ mul x21, x16, x15
+ adcs x22, x22, x27
+ mul x23, x21, x8
+ mul x27, x21, x9
+ mul x28, x21, x10
+ mul x29, x21, x11
+ adcs x19, x19, x26
+ mul x26, x21, x12
+ adcs x20, x20, x25
+ mul x25, x21, x13
+ adcs x6, x6, x24
+ umulh x24, x21, x14
+ adcs x7, x7, x30
+ adcs x4, x4, xzr
+ adcs x5, x5, xzr
+ adcs x2, x2, xzr
+ adcs x3, x3, xzr
+ adcs x17, x17, xzr
+ adcs x18, x18, xzr
+ adcs x30, xzr, xzr
+ adds x24, x24, x25
+ umulh x25, x21, x13
+ adcs x25, x25, x26
+ umulh x26, x21, x12
+ adcs x26, x26, x29
+ umulh x29, x21, x11
+ adcs x28, x29, x28
+ umulh x29, x21, x10
+ adcs x27, x29, x27
+ umulh x29, x21, x9
+ adcs x23, x29, x23
+ umulh x29, x21, x8
+ mul x21, x21, x14
+ adcs x29, x29, xzr
+ cmn x21, x16
+ adcs x16, x24, x1
+ adcs x1, x25, x22
+ mul x21, x16, x15
+ adcs x19, x26, x19
+ mul x22, x21, x8
+ mul x24, x21, x9
+ mul x25, x21, x10
+ adcs x20, x28, x20
+ mul x26, x21, x11
+ adcs x6, x27, x6
+ mul x27, x21, x12
+ adcs x7, x23, x7
+ mul x23, x21, x13
+ adcs x4, x29, x4
+ umulh x28, x21, x14
+ adcs x5, x5, xzr
+ adcs x2, x2, xzr
+ adcs x3, x3, xzr
+ adcs x17, x17, xzr
+ adcs x18, x18, xzr
+ adcs x29, x30, xzr
+ adds x23, x28, x23
+ umulh x28, x21, x13
+ adcs x27, x28, x27
+ umulh x28, x21, x12
+ adcs x26, x28, x26
+ umulh x28, x21, x11
+ adcs x25, x28, x25
+ umulh x28, x21, x10
+ adcs x24, x28, x24
+ umulh x28, x21, x9
+ adcs x22, x28, x22
+ umulh x28, x21, x8
+ mul x21, x21, x14
+ adcs x28, x28, xzr
+ cmn x21, x16
+ adcs x16, x23, x1
+ adcs x1, x27, x19
+ mul x19, x16, x15
+ adcs x20, x26, x20
+ mul x21, x19, x8
+ mul x23, x19, x9
+ mul x26, x19, x10
+ adcs x6, x25, x6
+ mul x25, x19, x11
+ adcs x7, x24, x7
+ mul x24, x19, x12
+ adcs x4, x22, x4
+ mul x22, x19, x13
+ adcs x5, x28, x5
+ umulh x27, x19, x14
+ adcs x2, x2, xzr
+ adcs x3, x3, xzr
+ adcs x17, x17, xzr
+ adcs x18, x18, xzr
+ adcs x28, x29, xzr
+ adds x22, x27, x22
+ umulh x27, x19, x13
+ adcs x24, x27, x24
+ umulh x27, x19, x12
+ adcs x25, x27, x25
+ umulh x27, x19, x11
+ adcs x26, x27, x26
+ umulh x27, x19, x10
+ adcs x23, x27, x23
+ umulh x27, x19, x9
+ adcs x21, x27, x21
+ umulh x27, x19, x8
+ mul x19, x19, x14
+ adcs x27, x27, xzr
+ cmn x19, x16
+ adcs x16, x22, x1
+ adcs x1, x24, x20
+ mul x19, x16, x15
+ adcs x6, x25, x6
+ mul x20, x19, x8
+ mul x22, x19, x9
+ mul x24, x19, x10
+ adcs x7, x26, x7
+ mul x25, x19, x11
+ adcs x4, x23, x4
+ mul x23, x19, x12
+ adcs x5, x21, x5
+ mul x21, x19, x13
+ adcs x2, x27, x2
+ umulh x26, x19, x14
+ adcs x3, x3, xzr
+ adcs x17, x17, xzr
+ adcs x18, x18, xzr
+ adcs x27, x28, xzr
+ adds x21, x26, x21
+ umulh x26, x19, x13
+ adcs x23, x26, x23
+ umulh x26, x19, x12
+ adcs x25, x26, x25
+ umulh x26, x19, x11
+ adcs x24, x26, x24
+ umulh x26, x19, x10
+ adcs x22, x26, x22
+ umulh x26, x19, x9
+ adcs x20, x26, x20
+ umulh x26, x19, x8
+ mul x19, x19, x14
+ adcs x26, x26, xzr
+ cmn x19, x16
+ adcs x16, x21, x1
+ adcs x1, x23, x6
+ mul x6, x16, x15
+ adcs x7, x25, x7
+ mul x19, x6, x8
+ mul x21, x6, x9
+ mul x23, x6, x10
+ adcs x4, x24, x4
+ mul x24, x6, x11
+ adcs x5, x22, x5
+ mul x22, x6, x12
+ adcs x2, x20, x2
+ mul x20, x6, x13
+ adcs x3, x26, x3
+ umulh x25, x6, x14
+ adcs x17, x17, xzr
+ adcs x18, x18, xzr
+ adcs x26, x27, xzr
+ adds x20, x25, x20
+ umulh x25, x6, x13
+ adcs x22, x25, x22
+ umulh x25, x6, x12
+ adcs x24, x25, x24
+ umulh x25, x6, x11
+ adcs x23, x25, x23
+ umulh x25, x6, x10
+ adcs x21, x25, x21
+ umulh x25, x6, x9
+ adcs x19, x25, x19
+ umulh x25, x6, x8
+ mul x6, x6, x14
+ adcs x25, x25, xzr
+ cmn x6, x16
+ adcs x16, x20, x1
+ adcs x1, x22, x7
+ mul x15, x16, x15
+ adcs x4, x24, x4
+ mul x6, x15, x8
+ mul x7, x15, x9
+ mul x20, x15, x10
+ adcs x5, x23, x5
+ mul x22, x15, x11
+ adcs x2, x21, x2
+ mul x21, x15, x12
+ adcs x3, x19, x3
+ mul x19, x15, x13
+ adcs x17, x25, x17
+ umulh x23, x15, x14
+ adcs x18, x18, xzr
+ adcs x24, x26, xzr
+ adds x19, x23, x19
+ umulh x23, x15, x13
+ adcs x21, x23, x21
+ umulh x23, x15, x12
+ adcs x22, x23, x22
+ umulh x23, x15, x11
+ adcs x20, x23, x20
+ umulh x23, x15, x10
+ adcs x7, x23, x7
+ umulh x23, x15, x9
+ adcs x6, x23, x6
+ umulh x23, x15, x8
+ mul x15, x15, x14
+ adcs x23, x23, xzr
+ cmn x15, x16
+ adcs x15, x19, x1
+ adcs x16, x21, x4
+ adcs x1, x22, x5
+ adcs x2, x20, x2
+ adcs x3, x7, x3
+ adcs x17, x6, x17
+ adcs x18, x23, x18
+ adcs x4, x24, xzr
+ subs x14, x15, x14
+ sbcs x13, x16, x13
+ sbcs x12, x1, x12
+ sbcs x11, x2, x11
+ sbcs x10, x3, x10
+ sbcs x9, x17, x9
+ sbcs x8, x18, x8
+ sbcs x4, x4, xzr
+ tst x4, #0x1
+ csel x14, x15, x14, ne
+ csel x13, x16, x13, ne
+ csel x12, x1, x12, ne
+ csel x11, x2, x11, ne
+ csel x10, x3, x10, ne
+ csel x9, x17, x9, ne
+ csel x8, x18, x8, ne
+ stp x14, x13, [x0]
+ stp x12, x11, [x0, #16]
+ stp x10, x9, [x0, #32]
+ str x8, [x0, #48]
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end100:
+ .size mcl_fp_montRed7L, .Lfunc_end100-mcl_fp_montRed7L
+
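+// mcl_fp_addPre7L: z (x0) = x (x1) + y (x2) over 7 limbs; the carry-out (0 or 1) is returned in x0.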
+ .globl mcl_fp_addPre7L
+ .align 2
+ .type mcl_fp_addPre7L,@function
+mcl_fp_addPre7L: // @mcl_fp_addPre7L
+// BB#0:
+ ldp x11, x8, [x2, #40]
+ ldp x13, x9, [x1, #40]
+ ldp x15, x10, [x2, #24]
+ ldp x17, x14, [x2, #8]
+ ldr x16, [x2]
+ ldp x18, x2, [x1]
+ ldr x3, [x1, #16]
+ ldp x1, x12, [x1, #24]
+ adds x16, x16, x18
+ str x16, [x0]
+ adcs x16, x17, x2
+ adcs x14, x14, x3
+ stp x16, x14, [x0, #8]
+ adcs x14, x15, x1
+ adcs x10, x10, x12
+ stp x14, x10, [x0, #24]
+ adcs x10, x11, x13
+ adcs x9, x8, x9
+ adcs x8, xzr, xzr
+ stp x10, x9, [x0, #40]
+ mov x0, x8
+ ret
+.Lfunc_end101:
+ .size mcl_fp_addPre7L, .Lfunc_end101-mcl_fp_addPre7L
+
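+// mcl_fp_subPre7L: z (x0) = x (x1) - y (x2) over 7 limbs; the borrow (0 or 1) is returned in x0.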
+ .globl mcl_fp_subPre7L
+ .align 2
+ .type mcl_fp_subPre7L,@function
+mcl_fp_subPre7L: // @mcl_fp_subPre7L
+// BB#0:
+ ldp x11, x8, [x2, #40]
+ ldp x13, x9, [x1, #40]
+ ldp x15, x10, [x2, #24]
+ ldp x17, x14, [x2, #8]
+ ldr x16, [x2]
+ ldp x18, x2, [x1]
+ ldr x3, [x1, #16]
+ ldp x1, x12, [x1, #24]
+ subs x16, x18, x16
+ str x16, [x0]
+ sbcs x16, x2, x17
+ sbcs x14, x3, x14
+ stp x16, x14, [x0, #8]
+ sbcs x14, x1, x15
+ sbcs x10, x12, x10
+ stp x14, x10, [x0, #24]
+ sbcs x10, x13, x11
+ sbcs x9, x9, x8
+ ngcs x8, xzr
+ and x8, x8, #0x1
+ stp x10, x9, [x0, #40]
+ mov x0, x8
+ ret
+.Lfunc_end102:
+ .size mcl_fp_subPre7L, .Lfunc_end102-mcl_fp_subPre7L
+
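+// mcl_fp_shr1_7L: z (x0) = x (x1) >> 1, a logical right shift of the 7-limb (448-bit) value.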
+ .globl mcl_fp_shr1_7L
+ .align 2
+ .type mcl_fp_shr1_7L,@function
+mcl_fp_shr1_7L: // @mcl_fp_shr1_7L
+// BB#0:
+ ldp x8, x9, [x1]
+ ldp x14, x10, [x1, #40]
+ ldp x11, x12, [x1, #16]
+ ldr x13, [x1, #32]
+ extr x8, x9, x8, #1
+ extr x9, x11, x9, #1
+ extr x11, x12, x11, #1
+ extr x12, x13, x12, #1
+ extr x13, x14, x13, #1
+ extr x14, x10, x14, #1
+ lsr x10, x10, #1
+ stp x8, x9, [x0]
+ stp x11, x12, [x0, #16]
+ stp x13, x14, [x0, #32]
+ str x10, [x0, #48]
+ ret
+.Lfunc_end103:
+ .size mcl_fp_shr1_7L, .Lfunc_end103-mcl_fp_shr1_7L
+
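+// mcl_fp_add7L: modular addition z (x0) = x (x1) + y (x2) mod p (x3); the raw sum is stored first and
+// overwritten by sum - p when the sum is not below the modulus.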
+ .globl mcl_fp_add7L
+ .align 2
+ .type mcl_fp_add7L,@function
+mcl_fp_add7L: // @mcl_fp_add7L
+// BB#0:
+ ldp x11, x8, [x2, #40]
+ ldp x13, x9, [x1, #40]
+ ldp x15, x10, [x2, #24]
+ ldp x17, x14, [x2, #8]
+ ldr x16, [x2]
+ ldp x18, x2, [x1]
+ ldr x4, [x1, #16]
+ ldp x1, x12, [x1, #24]
+ adds x16, x16, x18
+ ldp x5, x18, [x3, #40]
+ adcs x17, x17, x2
+ adcs x2, x14, x4
+ ldr x4, [x3, #32]
+ adcs x15, x15, x1
+ adcs x10, x10, x12
+ ldp x12, x1, [x3]
+ stp x16, x17, [x0]
+ stp x2, x15, [x0, #16]
+ adcs x6, x11, x13
+ stp x10, x6, [x0, #32]
+ adcs x8, x8, x9
+ str x8, [x0, #48]
+ adcs x7, xzr, xzr
+ ldp x9, x11, [x3, #16]
+ subs x14, x16, x12
+ sbcs x13, x17, x1
+ sbcs x12, x2, x9
+ sbcs x11, x15, x11
+ sbcs x10, x10, x4
+ sbcs x9, x6, x5
+ sbcs x8, x8, x18
+ sbcs x15, x7, xzr
+ and w15, w15, #0x1
+ tbnz w15, #0, .LBB104_2
+// BB#1: // %nocarry
+ stp x14, x13, [x0]
+ stp x12, x11, [x0, #16]
+ stp x10, x9, [x0, #32]
+ str x8, [x0, #48]
+.LBB104_2: // %carry
+ ret
+.Lfunc_end104:
+ .size mcl_fp_add7L, .Lfunc_end104-mcl_fp_add7L
+
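+// mcl_fp_addNF7L: modular addition z (x0) = x (x1) + y (x2) mod p (x3); the reduced value is chosen by
+// the sign of the trial subtraction, so no intermediate store or branch is needed.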
+ .globl mcl_fp_addNF7L
+ .align 2
+ .type mcl_fp_addNF7L,@function
+mcl_fp_addNF7L: // @mcl_fp_addNF7L
+// BB#0:
+ ldp x11, x8, [x1, #40]
+ ldp x13, x9, [x2, #40]
+ ldp x15, x10, [x1, #24]
+ ldp x17, x14, [x1, #8]
+ ldr x16, [x1]
+ ldp x18, x1, [x2]
+ ldr x4, [x2, #16]
+ ldp x2, x12, [x2, #24]
+ adds x16, x18, x16
+ adcs x17, x1, x17
+ adcs x14, x4, x14
+ ldp x4, x18, [x3, #40]
+ adcs x15, x2, x15
+ adcs x10, x12, x10
+ ldp x12, x2, [x3]
+ adcs x11, x13, x11
+ ldr x13, [x3, #16]
+ ldp x3, x1, [x3, #24]
+ adcs x8, x9, x8
+ subs x9, x16, x12
+ sbcs x12, x17, x2
+ sbcs x13, x14, x13
+ sbcs x2, x15, x3
+ sbcs x1, x10, x1
+ sbcs x3, x11, x4
+ sbcs x18, x8, x18
+ asr x4, x18, #63
+ cmp x4, #0 // =0
+ csel x9, x16, x9, lt
+ csel x12, x17, x12, lt
+ csel x13, x14, x13, lt
+ csel x14, x15, x2, lt
+ csel x10, x10, x1, lt
+ csel x11, x11, x3, lt
+ csel x8, x8, x18, lt
+ stp x9, x12, [x0]
+ stp x13, x14, [x0, #16]
+ stp x10, x11, [x0, #32]
+ str x8, [x0, #48]
+ ret
+.Lfunc_end105:
+ .size mcl_fp_addNF7L, .Lfunc_end105-mcl_fp_addNF7L
+
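+// mcl_fp_sub7L: modular subtraction z (x0) = x (x1) - y (x2) mod p (x3); if the subtraction borrows,
+// p is added back into the stored difference.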
+ .globl mcl_fp_sub7L
+ .align 2
+ .type mcl_fp_sub7L,@function
+mcl_fp_sub7L: // @mcl_fp_sub7L
+// BB#0:
+ ldp x13, x14, [x2, #40]
+ ldp x17, x15, [x1, #40]
+ ldp x11, x12, [x2, #24]
+ ldp x9, x10, [x2, #8]
+ ldr x8, [x2]
+ ldp x18, x2, [x1]
+ ldr x4, [x1, #16]
+ ldp x1, x16, [x1, #24]
+ subs x8, x18, x8
+ sbcs x9, x2, x9
+ stp x8, x9, [x0]
+ sbcs x10, x4, x10
+ sbcs x11, x1, x11
+ stp x10, x11, [x0, #16]
+ sbcs x12, x16, x12
+ sbcs x13, x17, x13
+ stp x12, x13, [x0, #32]
+ sbcs x14, x15, x14
+ str x14, [x0, #48]
+ ngcs x15, xzr
+ and w15, w15, #0x1
+ tbnz w15, #0, .LBB106_2
+// BB#1: // %nocarry
+ ret
+.LBB106_2: // %carry
+ ldp x16, x17, [x3]
+ ldp x18, x1, [x3, #16]
+ ldr x2, [x3, #32]
+ ldp x3, x15, [x3, #40]
+ adds x8, x16, x8
+ adcs x9, x17, x9
+ adcs x10, x18, x10
+ adcs x11, x1, x11
+ adcs x12, x2, x12
+ adcs x13, x3, x13
+ adcs x14, x15, x14
+ stp x8, x9, [x0]
+ stp x10, x11, [x0, #16]
+ stp x12, x13, [x0, #32]
+ str x14, [x0, #48]
+ ret
+.Lfunc_end106:
+ .size mcl_fp_sub7L, .Lfunc_end106-mcl_fp_sub7L
+
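+// mcl_fp_subNF7L: branch-free modular subtraction; a mask derived from the sign of x - y decides
+// whether p (x3) is added back before the result is stored at x0.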
+ .globl mcl_fp_subNF7L
+ .align 2
+ .type mcl_fp_subNF7L,@function
+mcl_fp_subNF7L: // @mcl_fp_subNF7L
+// BB#0:
+ ldp x11, x8, [x2, #40]
+ ldp x13, x9, [x1, #40]
+ ldp x15, x10, [x2, #24]
+ ldp x17, x14, [x2, #8]
+ ldr x16, [x2]
+ ldp x18, x2, [x1]
+ ldr x4, [x1, #16]
+ ldp x1, x12, [x1, #24]
+ subs x16, x18, x16
+ sbcs x17, x2, x17
+ sbcs x14, x4, x14
+ ldp x4, x18, [x3, #40]
+ sbcs x15, x1, x15
+ sbcs x10, x12, x10
+ ldp x12, x1, [x3]
+ sbcs x11, x13, x11
+ ldr x13, [x3, #16]
+ ldp x3, x2, [x3, #24]
+ sbcs x8, x9, x8
+ asr x9, x8, #63
+ and x1, x9, x1
+ and x13, x9, x13
+ and x3, x9, x3
+ and x2, x9, x2
+ and x4, x9, x4
+ and x18, x9, x18
+ extr x9, x9, x8, #63
+ and x9, x9, x12
+ adds x9, x9, x16
+ str x9, [x0]
+ adcs x9, x1, x17
+ str x9, [x0, #8]
+ adcs x9, x13, x14
+ str x9, [x0, #16]
+ adcs x9, x3, x15
+ str x9, [x0, #24]
+ adcs x9, x2, x10
+ str x9, [x0, #32]
+ adcs x9, x4, x11
+ adcs x8, x18, x8
+ stp x9, x8, [x0, #40]
+ ret
+.Lfunc_end107:
+ .size mcl_fp_subNF7L, .Lfunc_end107-mcl_fp_subNF7L
+
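+// mcl_fpDbl_add7L: adds two 14-limb (double-width) values; the low 7 limbs are stored as-is and the
+// high 7 limbs are conditionally reduced modulo p (x3) before being stored at x0[56..104].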
+ .globl mcl_fpDbl_add7L
+ .align 2
+ .type mcl_fpDbl_add7L,@function
+mcl_fpDbl_add7L: // @mcl_fpDbl_add7L
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ ldp x8, x9, [x2, #96]
+ ldp x10, x11, [x1, #96]
+ ldp x12, x13, [x2, #80]
+ ldp x14, x15, [x1, #80]
+ ldp x16, x17, [x2, #64]
+ ldp x18, x4, [x1, #64]
+ ldp x5, x6, [x2, #48]
+ ldp x7, x19, [x1, #48]
+ ldp x20, x21, [x2, #32]
+ ldp x22, x23, [x1, #32]
+ ldp x24, x25, [x2, #16]
+ ldp x27, x2, [x2]
+ ldp x28, x29, [x1, #16]
+ ldp x26, x1, [x1]
+ adds x26, x27, x26
+ ldr x27, [x3, #48]
+ str x26, [x0]
+ adcs x1, x2, x1
+ ldp x2, x26, [x3, #32]
+ str x1, [x0, #8]
+ adcs x1, x24, x28
+ ldp x24, x28, [x3, #16]
+ str x1, [x0, #16]
+ ldp x1, x3, [x3]
+ adcs x25, x25, x29
+ adcs x20, x20, x22
+ stp x25, x20, [x0, #24]
+ adcs x20, x21, x23
+ adcs x5, x5, x7
+ stp x20, x5, [x0, #40]
+ adcs x5, x6, x19
+ adcs x16, x16, x18
+ adcs x17, x17, x4
+ adcs x12, x12, x14
+ adcs x13, x13, x15
+ adcs x8, x8, x10
+ adcs x9, x9, x11
+ adcs x10, xzr, xzr
+ subs x11, x5, x1
+ sbcs x14, x16, x3
+ sbcs x15, x17, x24
+ sbcs x18, x12, x28
+ sbcs x1, x13, x2
+ sbcs x2, x8, x26
+ sbcs x3, x9, x27
+ sbcs x10, x10, xzr
+ tst x10, #0x1
+ csel x10, x5, x11, ne
+ csel x11, x16, x14, ne
+ csel x14, x17, x15, ne
+ csel x12, x12, x18, ne
+ csel x13, x13, x1, ne
+ csel x8, x8, x2, ne
+ csel x9, x9, x3, ne
+ stp x10, x11, [x0, #56]
+ stp x14, x12, [x0, #72]
+ stp x13, x8, [x0, #88]
+ str x9, [x0, #104]
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end108:
+ .size mcl_fpDbl_add7L, .Lfunc_end108-mcl_fpDbl_add7L
+
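+// mcl_fpDbl_sub7L: subtracts two 14-limb values; the low 7 limbs are stored directly, and p (x3),
+// masked by the final borrow, is added back into the high 7 limbs.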
+ .globl mcl_fpDbl_sub7L
+ .align 2
+ .type mcl_fpDbl_sub7L,@function
+mcl_fpDbl_sub7L: // @mcl_fpDbl_sub7L
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ ldp x9, x8, [x2, #96]
+ ldp x11, x10, [x1, #96]
+ ldp x12, x13, [x2, #80]
+ ldp x14, x15, [x1, #80]
+ ldp x16, x17, [x2, #64]
+ ldp x18, x4, [x1, #64]
+ ldp x5, x6, [x2, #48]
+ ldp x7, x19, [x1, #48]
+ ldp x20, x21, [x2, #32]
+ ldp x22, x23, [x1, #32]
+ ldp x24, x25, [x2, #16]
+ ldp x26, x2, [x2]
+ ldp x28, x29, [x1, #16]
+ ldp x27, x1, [x1]
+ subs x26, x27, x26
+ ldr x27, [x3, #48]
+ str x26, [x0]
+ sbcs x1, x1, x2
+ ldp x2, x26, [x3, #32]
+ str x1, [x0, #8]
+ sbcs x1, x28, x24
+ ldp x24, x28, [x3, #16]
+ str x1, [x0, #16]
+ ldp x1, x3, [x3]
+ sbcs x25, x29, x25
+ sbcs x20, x22, x20
+ stp x25, x20, [x0, #24]
+ sbcs x20, x23, x21
+ sbcs x5, x7, x5
+ stp x20, x5, [x0, #40]
+ sbcs x5, x19, x6
+ sbcs x16, x18, x16
+ sbcs x17, x4, x17
+ sbcs x12, x14, x12
+ sbcs x13, x15, x13
+ sbcs x9, x11, x9
+ sbcs x8, x10, x8
+ ngcs x10, xzr
+ tst x10, #0x1
+ csel x10, x27, xzr, ne
+ csel x11, x26, xzr, ne
+ csel x14, x2, xzr, ne
+ csel x15, x28, xzr, ne
+ csel x18, x24, xzr, ne
+ csel x2, x3, xzr, ne
+ csel x1, x1, xzr, ne
+ adds x1, x1, x5
+ adcs x16, x2, x16
+ stp x1, x16, [x0, #56]
+ adcs x16, x18, x17
+ adcs x12, x15, x12
+ stp x16, x12, [x0, #72]
+ adcs x12, x14, x13
+ adcs x9, x11, x9
+ stp x12, x9, [x0, #88]
+ adcs x8, x10, x8
+ str x8, [x0, #104]
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end109:
+ .size mcl_fpDbl_sub7L, .Lfunc_end109-mcl_fpDbl_sub7L
+
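+// .LmulPv512x64: multiplies the 8-limb (512-bit) operand at x0 by the 64-bit scalar in x1 and stores
+// the 9-limb product in the buffer pointed to by x8.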
+ .align 2
+ .type .LmulPv512x64,@function
+.LmulPv512x64: // @mulPv512x64
+// BB#0:
+ ldr x9, [x0]
+ mul x10, x9, x1
+ str x10, [x8]
+ ldr x10, [x0, #8]
+ umulh x9, x9, x1
+ mul x11, x10, x1
+ adds x9, x9, x11
+ str x9, [x8, #8]
+ ldr x9, [x0, #16]
+ umulh x10, x10, x1
+ mul x11, x9, x1
+ adcs x10, x10, x11
+ str x10, [x8, #16]
+ ldr x10, [x0, #24]
+ umulh x9, x9, x1
+ mul x11, x10, x1
+ adcs x9, x9, x11
+ str x9, [x8, #24]
+ ldr x9, [x0, #32]
+ umulh x10, x10, x1
+ mul x11, x9, x1
+ adcs x10, x10, x11
+ str x10, [x8, #32]
+ ldr x10, [x0, #40]
+ umulh x9, x9, x1
+ mul x11, x10, x1
+ adcs x9, x9, x11
+ str x9, [x8, #40]
+ ldr x9, [x0, #48]
+ umulh x10, x10, x1
+ mul x11, x9, x1
+ adcs x10, x10, x11
+ str x10, [x8, #48]
+ ldr x10, [x0, #56]
+ umulh x9, x9, x1
+ mul x11, x10, x1
+ umulh x10, x10, x1
+ adcs x9, x9, x11
+ str x9, [x8, #56]
+ adcs x9, x10, xzr
+ str x9, [x8, #64]
+ ret
+.Lfunc_end110:
+ .size .LmulPv512x64, .Lfunc_end110-.LmulPv512x64
+
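+// mcl_fp_mulUnitPre8L: z (x0) = x (x1) * y (x2) for an 8-limb x and 64-bit y; .LmulPv512x64 writes to a
+// stack buffer whose 9 limbs are then copied to z.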
+ .globl mcl_fp_mulUnitPre8L
+ .align 2
+ .type mcl_fp_mulUnitPre8L,@function
+mcl_fp_mulUnitPre8L: // @mcl_fp_mulUnitPre8L
+// BB#0:
+ stp x20, x19, [sp, #-32]!
+ stp x29, x30, [sp, #16]
+ add x29, sp, #16 // =16
+ sub sp, sp, #80 // =80
+ mov x19, x0
+ mov x8, sp
+ mov x0, x1
+ mov x1, x2
+ bl .LmulPv512x64
+ ldp x9, x8, [sp, #56]
+ ldp x11, x10, [sp, #40]
+ ldp x16, x12, [sp, #24]
+ ldp x13, x14, [sp]
+ ldr x15, [sp, #16]
+ stp x13, x14, [x19]
+ stp x15, x16, [x19, #16]
+ stp x12, x11, [x19, #32]
+ stp x10, x9, [x19, #48]
+ str x8, [x19, #64]
+ sub sp, x29, #16 // =16
+ ldp x29, x30, [sp, #16]
+ ldp x20, x19, [sp], #32
+ ret
+.Lfunc_end111:
+ .size mcl_fp_mulUnitPre8L, .Lfunc_end111-mcl_fp_mulUnitPre8L
+
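+// mcl_fpDbl_mulPre8L: full 8x8-limb multiplication built from three 4-limb mcl_fpDbl_mulPre4L calls
+// (one level of Karatsuba) followed by carry and sign fix-up of the middle product.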
+ .globl mcl_fpDbl_mulPre8L
+ .align 2
+ .type mcl_fpDbl_mulPre8L,@function
+mcl_fpDbl_mulPre8L: // @mcl_fpDbl_mulPre8L
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ add x29, sp, #80 // =80
+ sub sp, sp, #144 // =144
+ mov x20, x2
+ mov x21, x1
+ mov x19, x0
+ bl mcl_fpDbl_mulPre4L
+ add x0, x19, #64 // =64
+ add x1, x21, #32 // =32
+ add x2, x20, #32 // =32
+ bl mcl_fpDbl_mulPre4L
+ ldp x8, x9, [x20, #48]
+ ldp x10, x11, [x20, #32]
+ ldp x12, x13, [x20]
+ ldp x14, x15, [x20, #16]
+ adds x18, x12, x10
+ str x18, [sp, #8] // 8-byte Folded Spill
+ ldp x10, x12, [x21, #16]
+ ldp x16, x17, [x21, #48]
+ adcs x22, x13, x11
+ ldp x11, x13, [x21]
+ adcs x23, x14, x8
+ ldp x8, x14, [x21, #32]
+ stp x18, x22, [sp, #16]
+ adcs x21, x15, x9
+ stp x23, x21, [sp, #32]
+ adcs x24, xzr, xzr
+ adds x25, x11, x8
+ adcs x26, x13, x14
+ stp x25, x26, [sp, #48]
+ adcs x27, x10, x16
+ adcs x28, x12, x17
+ stp x27, x28, [sp, #64]
+ adcs x20, xzr, xzr
+ add x0, sp, #80 // =80
+ add x1, sp, #48 // =48
+ add x2, sp, #16 // =16
+ bl mcl_fpDbl_mulPre4L
+ cmp x24, #0 // =0
+ csel x8, x28, xzr, ne
+ and x9, x24, x20
+ ldp x11, x10, [sp, #128]
+ ldp x13, x12, [sp, #112]
+ ldp x14, x15, [x19, #48]
+ ldp x16, x17, [x19, #32]
+ ldp x18, x0, [x19, #16]
+ csel x1, x27, xzr, ne
+ csel x2, x26, xzr, ne
+ csel x3, x25, xzr, ne
+ cmp x20, #0 // =0
+ ldp x4, x5, [x19]
+ csel x6, x21, xzr, ne
+ csel x7, x23, xzr, ne
+ csel x20, x22, xzr, ne
+ ldr x21, [sp, #8] // 8-byte Folded Reload
+ csel x21, x21, xzr, ne
+ adds x3, x21, x3
+ adcs x2, x20, x2
+ ldp x20, x21, [sp, #96]
+ adcs x1, x7, x1
+ adcs x8, x6, x8
+ adcs x6, xzr, xzr
+ adds x13, x3, x13
+ ldp x3, x7, [sp, #80]
+ adcs x12, x2, x12
+ adcs x11, x1, x11
+ ldp x1, x2, [x19, #112]
+ adcs x8, x8, x10
+ adcs x9, x6, x9
+ ldp x10, x6, [x19, #96]
+ subs x3, x3, x4
+ sbcs x4, x7, x5
+ ldp x5, x7, [x19, #80]
+ sbcs x18, x20, x18
+ sbcs x0, x21, x0
+ ldp x20, x21, [x19, #64]
+ sbcs x13, x13, x16
+ sbcs x12, x12, x17
+ sbcs x11, x11, x14
+ sbcs x8, x8, x15
+ sbcs x9, x9, xzr
+ subs x3, x3, x20
+ sbcs x4, x4, x21
+ sbcs x18, x18, x5
+ sbcs x0, x0, x7
+ sbcs x13, x13, x10
+ sbcs x12, x12, x6
+ sbcs x11, x11, x1
+ sbcs x8, x8, x2
+ sbcs x9, x9, xzr
+ adds x16, x16, x3
+ str x16, [x19, #32]
+ adcs x16, x17, x4
+ adcs x14, x14, x18
+ stp x16, x14, [x19, #40]
+ adcs x14, x15, x0
+ adcs x13, x20, x13
+ stp x14, x13, [x19, #56]
+ adcs x12, x21, x12
+ adcs x11, x5, x11
+ stp x12, x11, [x19, #72]
+ adcs x8, x7, x8
+ str x8, [x19, #88]
+ adcs x8, x10, x9
+ str x8, [x19, #96]
+ adcs x8, x6, xzr
+ str x8, [x19, #104]
+ adcs x8, x1, xzr
+ str x8, [x19, #112]
+ adcs x8, x2, xzr
+ str x8, [x19, #120]
+ sub sp, x29, #80 // =80
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end112:
+ .size mcl_fpDbl_mulPre8L, .Lfunc_end112-mcl_fpDbl_mulPre8L
+
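+// mcl_fpDbl_sqrPre8L: 8-limb squaring; reuses mcl_fpDbl_mulPre4L three times (Karatsuba with equal
+// halves for the middle term) and merges the partial products.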
+ .globl mcl_fpDbl_sqrPre8L
+ .align 2
+ .type mcl_fpDbl_sqrPre8L,@function
+mcl_fpDbl_sqrPre8L: // @mcl_fpDbl_sqrPre8L
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ add x29, sp, #80 // =80
+ sub sp, sp, #128 // =128
+ mov x20, x1
+ mov x19, x0
+ mov x2, x20
+ bl mcl_fpDbl_mulPre4L
+ add x0, x19, #64 // =64
+ add x1, x20, #32 // =32
+ mov x2, x1
+ bl mcl_fpDbl_mulPre4L
+ ldp x8, x9, [x20, #16]
+ ldp x10, x11, [x20, #32]
+ ldp x12, x13, [x20]
+ ldp x14, x15, [x20, #48]
+ adds x22, x12, x10
+ adcs x23, x13, x11
+ adcs x20, x8, x14
+ adcs x21, x9, x15
+ stp x22, x23, [sp, #32]
+ stp x22, x23, [sp]
+ stp x20, x21, [sp, #48]
+ stp x20, x21, [sp, #16]
+ adcs x24, xzr, xzr
+ add x0, sp, #64 // =64
+ add x1, sp, #32 // =32
+ mov x2, sp
+ bl mcl_fpDbl_mulPre4L
+ ldp x8, x9, [x19, #48]
+ ldp x10, x11, [x19]
+ ldp x12, x13, [sp, #64]
+ ldp x14, x15, [x19, #16]
+ ldp x16, x17, [sp, #80]
+ ldp x18, x0, [x19, #32]
+ subs x10, x12, x10
+ ldp x1, x12, [sp, #96]
+ sbcs x11, x13, x11
+ sbcs x14, x16, x14
+ ldp x13, x16, [sp, #112]
+ sbcs x15, x17, x15
+ sbcs x17, x1, x18
+ ldp x1, x2, [x19, #64]
+ ldp x3, x4, [x19, #80]
+ ldp x5, x6, [x19, #96]
+ ldp x7, x25, [x19, #112]
+ lsr x26, x21, #63
+ sbcs x12, x12, x0
+ sbcs x13, x13, x8
+ sbcs x16, x16, x9
+ sbcs x27, x24, xzr
+ subs x10, x10, x1
+ sbcs x11, x11, x2
+ sbcs x14, x14, x3
+ sbcs x15, x15, x4
+ sbcs x17, x17, x5
+ sbcs x12, x12, x6
+ sbcs x13, x13, x7
+ sbcs x16, x16, x25
+ sbcs x27, x27, xzr
+ adds x22, x22, x22
+ adcs x23, x23, x23
+ adcs x20, x20, x20
+ adcs x21, x21, x21
+ cmp x24, #0 // =0
+ csel x24, x26, xzr, ne
+ csel x21, x21, xzr, ne
+ csel x20, x20, xzr, ne
+ csel x23, x23, xzr, ne
+ csel x22, x22, xzr, ne
+ adds x17, x17, x22
+ adcs x12, x12, x23
+ adcs x13, x13, x20
+ adcs x16, x16, x21
+ adcs x20, x27, x24
+ adds x10, x10, x18
+ str x10, [x19, #32]
+ adcs x10, x11, x0
+ adcs x8, x14, x8
+ stp x10, x8, [x19, #40]
+ adcs x8, x15, x9
+ str x8, [x19, #56]
+ adcs x8, x17, x1
+ str x8, [x19, #64]
+ adcs x8, x12, x2
+ str x8, [x19, #72]
+ adcs x8, x13, x3
+ str x8, [x19, #80]
+ adcs x8, x16, x4
+ str x8, [x19, #88]
+ adcs x8, x20, x5
+ str x8, [x19, #96]
+ adcs x8, x6, xzr
+ str x8, [x19, #104]
+ adcs x8, x7, xzr
+ str x8, [x19, #112]
+ adcs x8, x25, xzr
+ str x8, [x19, #120]
+ sub sp, x29, #80 // =80
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end113:
+ .size mcl_fpDbl_sqrPre8L, .Lfunc_end113-mcl_fpDbl_sqrPre8L
+
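+// mcl_fp_mont8L: 8-limb Montgomery multiplication; each iteration calls .LmulPv512x64 on the
+// multiplicand and on the modulus, with a final conditional subtraction of p.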
+ .globl mcl_fp_mont8L
+ .align 2
+ .type mcl_fp_mont8L,@function
+mcl_fp_mont8L: // @mcl_fp_mont8L
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ add x29, sp, #80 // =80
+ sub sp, sp, #1424 // =1424
+ mov x20, x3
+ mov x26, x2
+ str x26, [sp, #120] // 8-byte Folded Spill
+ ldur x19, [x20, #-8]
+ str x19, [sp, #136] // 8-byte Folded Spill
+ ldr x9, [x26]
+ mov x27, x1
+ str x27, [sp, #128] // 8-byte Folded Spill
+ str x0, [sp, #112] // 8-byte Folded Spill
+ sub x8, x29, #160 // =160
+ mov x0, x27
+ mov x1, x9
+ bl .LmulPv512x64
+ ldur x24, [x29, #-160]
+ ldur x8, [x29, #-96]
+ str x8, [sp, #104] // 8-byte Folded Spill
+ ldur x8, [x29, #-104]
+ str x8, [sp, #96] // 8-byte Folded Spill
+ ldur x8, [x29, #-112]
+ str x8, [sp, #88] // 8-byte Folded Spill
+ ldur x8, [x29, #-120]
+ str x8, [sp, #80] // 8-byte Folded Spill
+ ldur x8, [x29, #-128]
+ str x8, [sp, #72] // 8-byte Folded Spill
+ ldur x8, [x29, #-136]
+ str x8, [sp, #64] // 8-byte Folded Spill
+ ldur x8, [x29, #-144]
+ str x8, [sp, #56] // 8-byte Folded Spill
+ ldur x8, [x29, #-152]
+ str x8, [sp, #48] // 8-byte Folded Spill
+ mul x1, x24, x19
+ sub x8, x29, #240 // =240
+ mov x0, x20
+ bl .LmulPv512x64
+ ldur x8, [x29, #-176]
+ str x8, [sp, #40] // 8-byte Folded Spill
+ ldur x8, [x29, #-184]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldur x8, [x29, #-192]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldp x19, x28, [x29, #-208]
+ ldp x21, x23, [x29, #-224]
+ ldp x25, x22, [x29, #-240]
+ ldr x1, [x26, #8]
+ add x8, sp, #1184 // =1184
+ mov x0, x27
+ bl .LmulPv512x64
+ cmn x25, x24
+ ldr x8, [sp, #1248]
+ ldr x9, [sp, #1240]
+ ldp x10, x12, [sp, #48]
+ adcs x10, x22, x10
+ ldr x11, [sp, #1232]
+ adcs x12, x21, x12
+ ldr x13, [sp, #1224]
+ ldp x14, x16, [sp, #64]
+ adcs x14, x23, x14
+ ldr x15, [sp, #1216]
+ adcs x16, x19, x16
+ ldr x17, [sp, #1208]
+ ldp x18, x1, [sp, #80]
+ adcs x18, x28, x18
+ ldr x0, [sp, #1200]
+ ldp x2, x4, [sp, #24]
+ adcs x1, x2, x1
+ ldr x2, [sp, #1184]
+ ldp x3, x5, [sp, #96]
+ adcs x3, x4, x3
+ ldr x4, [sp, #1192]
+ ldr x6, [sp, #40] // 8-byte Folded Reload
+ adcs x5, x6, x5
+ adcs x6, xzr, xzr
+ adds x19, x10, x2
+ adcs x10, x12, x4
+ str x10, [sp, #40] // 8-byte Folded Spill
+ adcs x10, x14, x0
+ str x10, [sp, #88] // 8-byte Folded Spill
+ adcs x10, x16, x17
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x18, x15
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x1, x13
+ str x10, [sp, #64] // 8-byte Folded Spill
+ adcs x10, x3, x11
+ adcs x9, x5, x9
+ adcs x8, x6, x8
+ stp x8, x9, [sp, #96]
+ adcs x8, xzr, xzr
+ stp x8, x10, [sp, #48]
+ ldr x22, [sp, #136] // 8-byte Folded Reload
+ mul x1, x19, x22
+ add x8, sp, #1104 // =1104
+ mov x0, x20
+ bl .LmulPv512x64
+ ldr x8, [sp, #1168]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #1160]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #1152]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x8, [sp, #1144]
+ str x8, [sp, #8] // 8-byte Folded Spill
+ ldr x25, [sp, #1136]
+ ldr x26, [sp, #1128]
+ ldr x27, [sp, #1120]
+ ldr x21, [sp, #1112]
+ ldr x28, [sp, #1104]
+ ldp x24, x23, [sp, #120]
+ ldr x1, [x24, #16]
+ add x8, sp, #1024 // =1024
+ mov x0, x23
+ bl .LmulPv512x64
+ cmn x19, x28
+ ldr x8, [sp, #1088]
+ ldr x9, [sp, #1080]
+ ldr x10, [sp, #40] // 8-byte Folded Reload
+ adcs x10, x10, x21
+ ldr x11, [sp, #1072]
+ ldp x14, x12, [sp, #80]
+ adcs x12, x12, x27
+ ldr x13, [sp, #1064]
+ adcs x14, x14, x26
+ ldr x15, [sp, #1056]
+ ldp x18, x16, [sp, #64]
+ adcs x16, x16, x25
+ ldr x17, [sp, #1048]
+ ldp x0, x2, [sp, #8]
+ adcs x18, x18, x0
+ ldr x0, [sp, #1040]
+ ldr x1, [sp, #56] // 8-byte Folded Reload
+ adcs x1, x1, x2
+ ldr x2, [sp, #1024]
+ ldp x5, x3, [sp, #96]
+ ldp x4, x6, [sp, #24]
+ adcs x3, x3, x4
+ ldr x4, [sp, #1032]
+ adcs x5, x5, x6
+ ldr x6, [sp, #48] // 8-byte Folded Reload
+ adcs x6, x6, xzr
+ adds x19, x10, x2
+ adcs x10, x12, x4
+ str x10, [sp, #40] // 8-byte Folded Spill
+ adcs x10, x14, x0
+ str x10, [sp, #88] // 8-byte Folded Spill
+ adcs x10, x16, x17
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x18, x15
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x1, x13
+ str x10, [sp, #64] // 8-byte Folded Spill
+ adcs x10, x3, x11
+ adcs x9, x5, x9
+ adcs x8, x6, x8
+ stp x8, x9, [sp, #96]
+ adcs x8, xzr, xzr
+ stp x8, x10, [sp, #48]
+ mul x1, x19, x22
+ add x8, sp, #944 // =944
+ mov x0, x20
+ bl .LmulPv512x64
+ ldr x8, [sp, #1008]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #1000]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #992]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x8, [sp, #984]
+ str x8, [sp, #8] // 8-byte Folded Spill
+ ldr x25, [sp, #976]
+ ldr x26, [sp, #968]
+ ldr x27, [sp, #960]
+ ldr x21, [sp, #952]
+ ldr x28, [sp, #944]
+ mov x22, x24
+ ldr x1, [x22, #24]
+ add x8, sp, #864 // =864
+ mov x0, x23
+ bl .LmulPv512x64
+ cmn x19, x28
+ ldr x8, [sp, #928]
+ ldr x9, [sp, #920]
+ ldr x10, [sp, #40] // 8-byte Folded Reload
+ adcs x10, x10, x21
+ ldr x11, [sp, #912]
+ ldp x14, x12, [sp, #80]
+ adcs x12, x12, x27
+ ldr x13, [sp, #904]
+ adcs x14, x14, x26
+ ldr x15, [sp, #896]
+ ldp x18, x16, [sp, #64]
+ adcs x16, x16, x25
+ ldr x17, [sp, #888]
+ ldp x0, x2, [sp, #8]
+ adcs x18, x18, x0
+ ldr x0, [sp, #880]
+ ldr x1, [sp, #56] // 8-byte Folded Reload
+ adcs x1, x1, x2
+ ldr x2, [sp, #864]
+ ldp x5, x3, [sp, #96]
+ ldp x4, x6, [sp, #24]
+ adcs x3, x3, x4
+ ldr x4, [sp, #872]
+ adcs x5, x5, x6
+ ldr x6, [sp, #48] // 8-byte Folded Reload
+ adcs x6, x6, xzr
+ adds x19, x10, x2
+ adcs x10, x12, x4
+ str x10, [sp, #40] // 8-byte Folded Spill
+ adcs x10, x14, x0
+ str x10, [sp, #88] // 8-byte Folded Spill
+ adcs x10, x16, x17
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x18, x15
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x1, x13
+ str x10, [sp, #64] // 8-byte Folded Spill
+ adcs x10, x3, x11
+ adcs x9, x5, x9
+ adcs x8, x6, x8
+ stp x8, x9, [sp, #96]
+ adcs x8, xzr, xzr
+ stp x8, x10, [sp, #48]
+ ldr x23, [sp, #136] // 8-byte Folded Reload
+ mul x1, x19, x23
+ add x8, sp, #784 // =784
+ mov x0, x20
+ bl .LmulPv512x64
+ ldr x8, [sp, #848]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #840]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #832]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x24, [sp, #824]
+ ldr x25, [sp, #816]
+ ldr x26, [sp, #808]
+ ldr x27, [sp, #800]
+ ldr x21, [sp, #792]
+ ldr x28, [sp, #784]
+ ldr x1, [x22, #32]
+ add x8, sp, #704 // =704
+ ldr x22, [sp, #128] // 8-byte Folded Reload
+ mov x0, x22
+ bl .LmulPv512x64
+ cmn x19, x28
+ ldr x8, [sp, #768]
+ ldr x9, [sp, #760]
+ ldr x10, [sp, #40] // 8-byte Folded Reload
+ adcs x10, x10, x21
+ ldr x11, [sp, #752]
+ ldp x14, x12, [sp, #80]
+ adcs x12, x12, x27
+ ldr x13, [sp, #744]
+ adcs x14, x14, x26
+ ldr x15, [sp, #736]
+ ldp x18, x16, [sp, #64]
+ adcs x16, x16, x25
+ ldr x17, [sp, #728]
+ adcs x18, x18, x24
+ ldr x0, [sp, #720]
+ ldr x1, [sp, #56] // 8-byte Folded Reload
+ ldp x2, x4, [sp, #16]
+ adcs x1, x1, x2
+ ldr x2, [sp, #704]
+ ldp x5, x3, [sp, #96]
+ adcs x3, x3, x4
+ ldr x4, [sp, #712]
+ ldr x6, [sp, #32] // 8-byte Folded Reload
+ adcs x5, x5, x6
+ ldr x6, [sp, #48] // 8-byte Folded Reload
+ adcs x6, x6, xzr
+ adds x19, x10, x2
+ adcs x10, x12, x4
+ str x10, [sp, #40] // 8-byte Folded Spill
+ adcs x10, x14, x0
+ str x10, [sp, #88] // 8-byte Folded Spill
+ adcs x10, x16, x17
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x18, x15
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x1, x13
+ str x10, [sp, #64] // 8-byte Folded Spill
+ adcs x10, x3, x11
+ adcs x9, x5, x9
+ adcs x8, x6, x8
+ stp x8, x9, [sp, #96]
+ adcs x8, xzr, xzr
+ stp x8, x10, [sp, #48]
+ mul x1, x19, x23
+ add x8, sp, #624 // =624
+ mov x0, x20
+ bl .LmulPv512x64
+ ldr x8, [sp, #688]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #680]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #672]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x24, [sp, #664]
+ ldr x25, [sp, #656]
+ ldr x26, [sp, #648]
+ ldr x27, [sp, #640]
+ ldr x21, [sp, #632]
+ ldr x28, [sp, #624]
+ ldr x23, [sp, #120] // 8-byte Folded Reload
+ ldr x1, [x23, #40]
+ add x8, sp, #544 // =544
+ mov x0, x22
+ bl .LmulPv512x64
+ cmn x19, x28
+ ldr x8, [sp, #608]
+ ldr x9, [sp, #600]
+ ldr x10, [sp, #40] // 8-byte Folded Reload
+ adcs x10, x10, x21
+ ldr x11, [sp, #592]
+ ldp x14, x12, [sp, #80]
+ adcs x12, x12, x27
+ ldr x13, [sp, #584]
+ adcs x14, x14, x26
+ ldr x15, [sp, #576]
+ ldp x18, x16, [sp, #64]
+ adcs x16, x16, x25
+ ldr x17, [sp, #568]
+ adcs x18, x18, x24
+ ldr x0, [sp, #560]
+ ldr x1, [sp, #56] // 8-byte Folded Reload
+ ldp x2, x4, [sp, #16]
+ adcs x1, x1, x2
+ ldr x2, [sp, #544]
+ ldp x5, x3, [sp, #96]
+ adcs x3, x3, x4
+ ldr x4, [sp, #552]
+ ldr x6, [sp, #32] // 8-byte Folded Reload
+ adcs x5, x5, x6
+ ldr x6, [sp, #48] // 8-byte Folded Reload
+ adcs x6, x6, xzr
+ adds x19, x10, x2
+ adcs x10, x12, x4
+ str x10, [sp, #40] // 8-byte Folded Spill
+ adcs x10, x14, x0
+ str x10, [sp, #88] // 8-byte Folded Spill
+ adcs x10, x16, x17
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x18, x15
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x1, x13
+ str x10, [sp, #64] // 8-byte Folded Spill
+ adcs x10, x3, x11
+ adcs x9, x5, x9
+ adcs x8, x6, x8
+ stp x8, x9, [sp, #96]
+ adcs x8, xzr, xzr
+ stp x8, x10, [sp, #48]
+ ldr x22, [sp, #136] // 8-byte Folded Reload
+ mul x1, x19, x22
+ add x8, sp, #464 // =464
+ mov x0, x20
+ bl .LmulPv512x64
+ ldr x8, [sp, #528]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #520]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #512]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldp x25, x24, [sp, #496]
+ ldp x27, x26, [sp, #480]
+ ldp x28, x21, [sp, #464]
+ ldr x1, [x23, #48]
+ add x8, sp, #384 // =384
+ ldr x23, [sp, #128] // 8-byte Folded Reload
+ mov x0, x23
+ bl .LmulPv512x64
+ cmn x19, x28
+ ldp x9, x8, [sp, #440]
+ ldr x10, [sp, #40] // 8-byte Folded Reload
+ adcs x10, x10, x21
+ ldp x13, x11, [sp, #424]
+ ldp x14, x12, [sp, #80]
+ adcs x12, x12, x27
+ adcs x14, x14, x26
+ ldp x17, x15, [sp, #408]
+ ldp x18, x16, [sp, #64]
+ adcs x16, x16, x25
+ adcs x18, x18, x24
+ ldr x1, [sp, #56] // 8-byte Folded Reload
+ ldp x2, x4, [sp, #16]
+ adcs x1, x1, x2
+ ldr x2, [sp, #384]
+ ldp x5, x3, [sp, #96]
+ adcs x3, x3, x4
+ ldp x4, x0, [sp, #392]
+ ldr x6, [sp, #32] // 8-byte Folded Reload
+ adcs x5, x5, x6
+ ldr x6, [sp, #48] // 8-byte Folded Reload
+ adcs x6, x6, xzr
+ adds x19, x10, x2
+ adcs x10, x12, x4
+ str x10, [sp, #40] // 8-byte Folded Spill
+ adcs x10, x14, x0
+ str x10, [sp, #88] // 8-byte Folded Spill
+ adcs x10, x16, x17
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x18, x15
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x1, x13
+ str x10, [sp, #64] // 8-byte Folded Spill
+ adcs x10, x3, x11
+ adcs x9, x5, x9
+ adcs x8, x6, x8
+ stp x8, x9, [sp, #96]
+ adcs x8, xzr, xzr
+ stp x8, x10, [sp, #48]
+ mul x1, x19, x22
+ add x8, sp, #304 // =304
+ mov x0, x20
+ bl .LmulPv512x64
+ ldr x8, [sp, #368]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldp x22, x8, [sp, #352]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldp x25, x24, [sp, #336]
+ ldp x27, x26, [sp, #320]
+ ldp x28, x21, [sp, #304]
+ ldr x8, [sp, #120] // 8-byte Folded Reload
+ ldr x1, [x8, #56]
+ add x8, sp, #224 // =224
+ mov x0, x23
+ bl .LmulPv512x64
+ cmn x19, x28
+ ldp x9, x8, [sp, #280]
+ ldr x10, [sp, #40] // 8-byte Folded Reload
+ adcs x10, x10, x21
+ ldp x13, x11, [sp, #264]
+ ldp x14, x12, [sp, #80]
+ adcs x12, x12, x27
+ adcs x14, x14, x26
+ ldp x17, x15, [sp, #248]
+ ldp x18, x16, [sp, #64]
+ adcs x16, x16, x25
+ adcs x18, x18, x24
+ ldr x1, [sp, #56] // 8-byte Folded Reload
+ adcs x1, x1, x22
+ ldr x2, [sp, #224]
+ ldp x5, x3, [sp, #96]
+ ldp x4, x6, [sp, #24]
+ adcs x3, x3, x4
+ ldp x4, x0, [sp, #232]
+ adcs x5, x5, x6
+ ldr x6, [sp, #48] // 8-byte Folded Reload
+ adcs x6, x6, xzr
+ adds x19, x10, x2
+ adcs x21, x12, x4
+ adcs x22, x14, x0
+ adcs x23, x16, x17
+ adcs x24, x18, x15
+ adcs x25, x1, x13
+ adcs x10, x3, x11
+ str x10, [sp, #128] // 8-byte Folded Spill
+ adcs x27, x5, x9
+ adcs x28, x6, x8
+ adcs x26, xzr, xzr
+ ldr x8, [sp, #136] // 8-byte Folded Reload
+ mul x1, x19, x8
+ add x8, sp, #144 // =144
+ mov x0, x20
+ bl .LmulPv512x64
+ ldp x15, x8, [sp, #200]
+ ldp x9, x10, [sp, #144]
+ ldp x11, x12, [sp, #160]
+ cmn x19, x9
+ ldp x13, x9, [sp, #176]
+ adcs x10, x21, x10
+ ldr x14, [sp, #192]
+ adcs x11, x22, x11
+ adcs x12, x23, x12
+ adcs x13, x24, x13
+ adcs x9, x25, x9
+ ldp x16, x17, [x20, #48]
+ ldp x18, x0, [x20, #32]
+ ldp x1, x2, [x20, #16]
+ ldp x3, x4, [x20]
+ ldr x5, [sp, #128] // 8-byte Folded Reload
+ adcs x14, x5, x14
+ adcs x15, x27, x15
+ adcs x8, x28, x8
+ adcs x5, x26, xzr
+ subs x3, x10, x3
+ sbcs x4, x11, x4
+ sbcs x1, x12, x1
+ sbcs x2, x13, x2
+ sbcs x18, x9, x18
+ sbcs x0, x14, x0
+ sbcs x16, x15, x16
+ sbcs x17, x8, x17
+ sbcs x5, x5, xzr
+ tst x5, #0x1
+ csel x10, x10, x3, ne
+ csel x11, x11, x4, ne
+ csel x12, x12, x1, ne
+ csel x13, x13, x2, ne
+ csel x9, x9, x18, ne
+ csel x14, x14, x0, ne
+ csel x15, x15, x16, ne
+ csel x8, x8, x17, ne
+ ldr x16, [sp, #112] // 8-byte Folded Reload
+ stp x10, x11, [x16]
+ stp x12, x13, [x16, #16]
+ stp x9, x14, [x16, #32]
+ stp x15, x8, [x16, #48]
+ sub sp, x29, #80 // =80
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end114:
+ .size mcl_fp_mont8L, .Lfunc_end114-mcl_fp_mont8L
+
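+// mcl_fp_montNF8L: 8-limb Montgomery multiplication variant; like mcl_fp_mont8L, but the final result
+// is selected by a signed comparison instead of a carry test.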
+ .globl mcl_fp_montNF8L
+ .align 2
+ .type mcl_fp_montNF8L,@function
+mcl_fp_montNF8L: // @mcl_fp_montNF8L
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ add x29, sp, #80 // =80
+ sub sp, sp, #1424 // =1424
+ mov x20, x3
+ mov x26, x2
+ str x26, [sp, #128] // 8-byte Folded Spill
+ ldur x19, [x20, #-8]
+ str x19, [sp, #136] // 8-byte Folded Spill
+ ldr x9, [x26]
+ mov x27, x1
+ stp x0, x27, [sp, #112]
+ sub x8, x29, #160 // =160
+ mov x0, x27
+ mov x1, x9
+ bl .LmulPv512x64
+ ldur x24, [x29, #-160]
+ ldur x8, [x29, #-96]
+ str x8, [sp, #104] // 8-byte Folded Spill
+ ldur x8, [x29, #-104]
+ str x8, [sp, #96] // 8-byte Folded Spill
+ ldur x8, [x29, #-112]
+ str x8, [sp, #88] // 8-byte Folded Spill
+ ldur x8, [x29, #-120]
+ str x8, [sp, #80] // 8-byte Folded Spill
+ ldur x8, [x29, #-128]
+ str x8, [sp, #72] // 8-byte Folded Spill
+ ldur x8, [x29, #-136]
+ str x8, [sp, #64] // 8-byte Folded Spill
+ ldur x8, [x29, #-144]
+ str x8, [sp, #56] // 8-byte Folded Spill
+ ldur x8, [x29, #-152]
+ str x8, [sp, #48] // 8-byte Folded Spill
+ mul x1, x24, x19
+ sub x8, x29, #240 // =240
+ mov x0, x20
+ bl .LmulPv512x64
+ ldur x8, [x29, #-176]
+ str x8, [sp, #40] // 8-byte Folded Spill
+ ldur x8, [x29, #-184]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldur x8, [x29, #-192]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldp x19, x28, [x29, #-208]
+ ldp x21, x23, [x29, #-224]
+ ldp x25, x22, [x29, #-240]
+ ldr x1, [x26, #8]
+ add x8, sp, #1184 // =1184
+ mov x0, x27
+ bl .LmulPv512x64
+ cmn x25, x24
+ ldr x8, [sp, #1248]
+ ldr x9, [sp, #1240]
+ ldp x10, x12, [sp, #48]
+ adcs x10, x22, x10
+ ldr x11, [sp, #1232]
+ adcs x12, x21, x12
+ ldr x13, [sp, #1224]
+ ldp x14, x16, [sp, #64]
+ adcs x14, x23, x14
+ ldr x15, [sp, #1216]
+ adcs x16, x19, x16
+ ldr x17, [sp, #1208]
+ ldp x18, x1, [sp, #80]
+ adcs x18, x28, x18
+ ldr x0, [sp, #1192]
+ ldp x2, x4, [sp, #24]
+ adcs x1, x2, x1
+ ldr x2, [sp, #1184]
+ ldp x3, x5, [sp, #96]
+ adcs x3, x4, x3
+ ldr x4, [sp, #1200]
+ ldr x6, [sp, #40] // 8-byte Folded Reload
+ adcs x5, x6, x5
+ adds x19, x10, x2
+ adcs x10, x12, x0
+ str x10, [sp, #48] // 8-byte Folded Spill
+ adcs x10, x14, x4
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x16, x17
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x18, x15
+ str x10, [sp, #64] // 8-byte Folded Spill
+ adcs x10, x1, x13
+ str x10, [sp, #56] // 8-byte Folded Spill
+ adcs x10, x3, x11
+ adcs x9, x5, x9
+ stp x9, x10, [sp, #96]
+ adcs x8, x8, xzr
+ str x8, [sp, #88] // 8-byte Folded Spill
+ ldr x27, [sp, #136] // 8-byte Folded Reload
+ mul x1, x19, x27
+ add x8, sp, #1104 // =1104
+ mov x0, x20
+ bl .LmulPv512x64
+ ldr x8, [sp, #1168]
+ str x8, [sp, #40] // 8-byte Folded Spill
+ ldr x8, [sp, #1160]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #1152]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #1144]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x23, [sp, #1136]
+ ldr x24, [sp, #1128]
+ ldr x25, [sp, #1120]
+ ldr x21, [sp, #1112]
+ ldr x26, [sp, #1104]
+ ldp x22, x28, [sp, #120]
+ ldr x1, [x28, #16]
+ add x8, sp, #1024 // =1024
+ mov x0, x22
+ bl .LmulPv512x64
+ cmn x19, x26
+ ldr x8, [sp, #1088]
+ ldr x9, [sp, #1080]
+ ldp x10, x18, [sp, #48]
+ adcs x10, x10, x21
+ ldr x11, [sp, #1072]
+ ldp x14, x12, [sp, #72]
+ adcs x12, x12, x25
+ ldr x13, [sp, #1064]
+ adcs x14, x14, x24
+ ldr x15, [sp, #1056]
+ ldr x16, [sp, #64] // 8-byte Folded Reload
+ adcs x16, x16, x23
+ ldr x17, [sp, #1048]
+ ldp x0, x2, [sp, #16]
+ adcs x18, x18, x0
+ ldr x0, [sp, #1032]
+ ldp x3, x1, [sp, #96]
+ adcs x1, x1, x2
+ ldr x2, [sp, #1024]
+ ldp x4, x6, [sp, #32]
+ adcs x3, x3, x4
+ ldr x4, [sp, #1040]
+ ldr x5, [sp, #88] // 8-byte Folded Reload
+ adcs x5, x5, x6
+ adds x19, x10, x2
+ adcs x10, x12, x0
+ str x10, [sp, #48] // 8-byte Folded Spill
+ adcs x10, x14, x4
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x16, x17
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x18, x15
+ str x10, [sp, #64] // 8-byte Folded Spill
+ adcs x10, x1, x13
+ str x10, [sp, #56] // 8-byte Folded Spill
+ adcs x10, x3, x11
+ adcs x9, x5, x9
+ stp x9, x10, [sp, #96]
+ adcs x8, x8, xzr
+ str x8, [sp, #88] // 8-byte Folded Spill
+ mul x1, x19, x27
+ add x8, sp, #944 // =944
+ mov x0, x20
+ bl .LmulPv512x64
+ ldr x8, [sp, #1008]
+ str x8, [sp, #40] // 8-byte Folded Spill
+ ldr x8, [sp, #1000]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #992]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #984]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x23, [sp, #976]
+ ldr x24, [sp, #968]
+ ldr x25, [sp, #960]
+ ldr x21, [sp, #952]
+ ldr x26, [sp, #944]
+ ldr x1, [x28, #24]
+ add x8, sp, #864 // =864
+ mov x27, x22
+ mov x0, x27
+ bl .LmulPv512x64
+ cmn x19, x26
+ ldr x8, [sp, #928]
+ ldr x9, [sp, #920]
+ ldp x10, x18, [sp, #48]
+ adcs x10, x10, x21
+ ldr x11, [sp, #912]
+ ldp x14, x12, [sp, #72]
+ adcs x12, x12, x25
+ ldr x13, [sp, #904]
+ adcs x14, x14, x24
+ ldr x15, [sp, #896]
+ ldr x16, [sp, #64] // 8-byte Folded Reload
+ adcs x16, x16, x23
+ ldr x17, [sp, #888]
+ ldp x0, x2, [sp, #16]
+ adcs x18, x18, x0
+ ldr x0, [sp, #872]
+ ldp x3, x1, [sp, #96]
+ adcs x1, x1, x2
+ ldr x2, [sp, #864]
+ ldp x4, x6, [sp, #32]
+ adcs x3, x3, x4
+ ldr x4, [sp, #880]
+ ldr x5, [sp, #88] // 8-byte Folded Reload
+ adcs x5, x5, x6
+ adds x19, x10, x2
+ adcs x10, x12, x0
+ str x10, [sp, #48] // 8-byte Folded Spill
+ adcs x10, x14, x4
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x16, x17
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x18, x15
+ str x10, [sp, #64] // 8-byte Folded Spill
+ adcs x10, x1, x13
+ str x10, [sp, #56] // 8-byte Folded Spill
+ adcs x10, x3, x11
+ adcs x9, x5, x9
+ stp x9, x10, [sp, #96]
+ adcs x8, x8, xzr
+ str x8, [sp, #88] // 8-byte Folded Spill
+ ldr x28, [sp, #136] // 8-byte Folded Reload
+ mul x1, x19, x28
+ add x8, sp, #784 // =784
+ mov x0, x20
+ bl .LmulPv512x64
+ ldr x8, [sp, #848]
+ str x8, [sp, #40] // 8-byte Folded Spill
+ ldr x8, [sp, #840]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #832]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #824]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x23, [sp, #816]
+ ldr x24, [sp, #808]
+ ldr x25, [sp, #800]
+ ldr x21, [sp, #792]
+ ldr x26, [sp, #784]
+ ldr x22, [sp, #128] // 8-byte Folded Reload
+ ldr x1, [x22, #32]
+ add x8, sp, #704 // =704
+ mov x0, x27
+ bl .LmulPv512x64
+ cmn x19, x26
+ ldr x8, [sp, #768]
+ ldr x9, [sp, #760]
+ ldp x10, x18, [sp, #48]
+ adcs x10, x10, x21
+ ldr x11, [sp, #752]
+ ldp x14, x12, [sp, #72]
+ adcs x12, x12, x25
+ ldr x13, [sp, #744]
+ adcs x14, x14, x24
+ ldr x15, [sp, #736]
+ ldr x16, [sp, #64] // 8-byte Folded Reload
+ adcs x16, x16, x23
+ ldr x17, [sp, #728]
+ ldp x0, x2, [sp, #16]
+ adcs x18, x18, x0
+ ldr x0, [sp, #712]
+ ldp x3, x1, [sp, #96]
+ adcs x1, x1, x2
+ ldr x2, [sp, #704]
+ ldp x4, x6, [sp, #32]
+ adcs x3, x3, x4
+ ldr x4, [sp, #720]
+ ldr x5, [sp, #88] // 8-byte Folded Reload
+ adcs x5, x5, x6
+ adds x19, x10, x2
+ adcs x10, x12, x0
+ str x10, [sp, #48] // 8-byte Folded Spill
+ adcs x10, x14, x4
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x16, x17
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x18, x15
+ str x10, [sp, #64] // 8-byte Folded Spill
+ adcs x10, x1, x13
+ str x10, [sp, #56] // 8-byte Folded Spill
+ adcs x10, x3, x11
+ adcs x9, x5, x9
+ stp x9, x10, [sp, #96]
+ adcs x8, x8, xzr
+ str x8, [sp, #88] // 8-byte Folded Spill
+ mul x1, x19, x28
+ add x8, sp, #624 // =624
+ mov x0, x20
+ bl .LmulPv512x64
+ ldr x8, [sp, #688]
+ str x8, [sp, #40] // 8-byte Folded Spill
+ ldr x8, [sp, #680]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #672]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #664]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x23, [sp, #656]
+ ldr x24, [sp, #648]
+ ldr x25, [sp, #640]
+ ldr x21, [sp, #632]
+ ldr x26, [sp, #624]
+ mov x27, x22
+ ldr x1, [x27, #40]
+ add x8, sp, #544 // =544
+ ldr x28, [sp, #120] // 8-byte Folded Reload
+ mov x0, x28
+ bl .LmulPv512x64
+ cmn x19, x26
+ ldr x8, [sp, #608]
+ ldr x9, [sp, #600]
+ ldp x10, x18, [sp, #48]
+ adcs x10, x10, x21
+ ldr x11, [sp, #592]
+ ldp x14, x12, [sp, #72]
+ adcs x12, x12, x25
+ ldr x13, [sp, #584]
+ adcs x14, x14, x24
+ ldr x15, [sp, #576]
+ ldr x16, [sp, #64] // 8-byte Folded Reload
+ adcs x16, x16, x23
+ ldr x17, [sp, #568]
+ ldp x0, x2, [sp, #16]
+ adcs x18, x18, x0
+ ldr x0, [sp, #552]
+ ldp x3, x1, [sp, #96]
+ adcs x1, x1, x2
+ ldr x2, [sp, #544]
+ ldp x4, x6, [sp, #32]
+ adcs x3, x3, x4
+ ldr x4, [sp, #560]
+ ldr x5, [sp, #88] // 8-byte Folded Reload
+ adcs x5, x5, x6
+ adds x19, x10, x2
+ adcs x10, x12, x0
+ str x10, [sp, #48] // 8-byte Folded Spill
+ adcs x10, x14, x4
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x16, x17
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x18, x15
+ str x10, [sp, #64] // 8-byte Folded Spill
+ adcs x10, x1, x13
+ str x10, [sp, #56] // 8-byte Folded Spill
+ adcs x10, x3, x11
+ adcs x9, x5, x9
+ stp x9, x10, [sp, #96]
+ adcs x8, x8, xzr
+ str x8, [sp, #88] // 8-byte Folded Spill
+ ldr x22, [sp, #136] // 8-byte Folded Reload
+ mul x1, x19, x22
+ add x8, sp, #464 // =464
+ mov x0, x20
+ bl .LmulPv512x64
+ ldr x8, [sp, #528]
+ str x8, [sp, #40] // 8-byte Folded Spill
+ ldr x8, [sp, #520]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #512]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldp x23, x8, [sp, #496]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldp x25, x24, [sp, #480]
+ ldp x26, x21, [sp, #464]
+ ldr x1, [x27, #48]
+ add x8, sp, #384 // =384
+ mov x0, x28
+ bl .LmulPv512x64
+ cmn x19, x26
+ ldp x9, x8, [sp, #440]
+ ldp x10, x18, [sp, #48]
+ adcs x10, x10, x21
+ ldp x13, x11, [sp, #424]
+ ldp x14, x12, [sp, #72]
+ adcs x12, x12, x25
+ adcs x14, x14, x24
+ ldp x17, x15, [sp, #408]
+ ldr x16, [sp, #64] // 8-byte Folded Reload
+ adcs x16, x16, x23
+ ldp x0, x2, [sp, #16]
+ adcs x18, x18, x0
+ ldp x3, x1, [sp, #96]
+ adcs x1, x1, x2
+ ldp x2, x0, [sp, #384]
+ ldp x4, x6, [sp, #32]
+ adcs x3, x3, x4
+ ldr x4, [sp, #400]
+ ldr x5, [sp, #88] // 8-byte Folded Reload
+ adcs x5, x5, x6
+ adds x19, x10, x2
+ adcs x10, x12, x0
+ str x10, [sp, #48] // 8-byte Folded Spill
+ adcs x10, x14, x4
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x16, x17
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x18, x15
+ str x10, [sp, #64] // 8-byte Folded Spill
+ adcs x10, x1, x13
+ str x10, [sp, #56] // 8-byte Folded Spill
+ adcs x10, x3, x11
+ adcs x9, x5, x9
+ stp x9, x10, [sp, #96]
+ adcs x8, x8, xzr
+ str x8, [sp, #88] // 8-byte Folded Spill
+ mul x1, x19, x22
+ add x8, sp, #304 // =304
+ mov x0, x20
+ bl .LmulPv512x64
+ ldp x27, x8, [sp, #360]
+ str x8, [sp, #40] // 8-byte Folded Spill
+ ldp x22, x28, [sp, #344]
+ ldp x24, x23, [sp, #328]
+ ldp x21, x25, [sp, #312]
+ ldr x26, [sp, #304]
+ ldp x0, x8, [sp, #120]
+ ldr x1, [x8, #56]
+ add x8, sp, #224 // =224
+ bl .LmulPv512x64
+ cmn x19, x26
+ ldp x9, x8, [sp, #280]
+ ldp x10, x18, [sp, #48]
+ adcs x10, x10, x21
+ ldp x13, x11, [sp, #264]
+ ldp x14, x12, [sp, #72]
+ adcs x12, x12, x25
+ adcs x14, x14, x24
+ ldp x17, x15, [sp, #248]
+ ldr x16, [sp, #64] // 8-byte Folded Reload
+ adcs x16, x16, x23
+ adcs x18, x18, x22
+ ldp x2, x0, [sp, #224]
+ ldp x3, x1, [sp, #96]
+ adcs x1, x1, x28
+ adcs x3, x3, x27
+ ldr x4, [sp, #240]
+ ldr x5, [sp, #88] // 8-byte Folded Reload
+ ldr x6, [sp, #40] // 8-byte Folded Reload
+ adcs x5, x5, x6
+ adds x19, x10, x2
+ adcs x21, x12, x0
+ adcs x22, x14, x4
+ adcs x23, x16, x17
+ adcs x24, x18, x15
+ adcs x25, x1, x13
+ adcs x26, x3, x11
+ adcs x27, x5, x9
+ adcs x28, x8, xzr
+ ldr x8, [sp, #136] // 8-byte Folded Reload
+ mul x1, x19, x8
+ add x8, sp, #144 // =144
+ mov x0, x20
+ bl .LmulPv512x64
+ ldp x15, x8, [sp, #200]
+ ldp x9, x10, [sp, #144]
+ ldp x11, x12, [sp, #160]
+ cmn x19, x9
+ ldp x13, x9, [sp, #176]
+ adcs x10, x21, x10
+ ldr x14, [sp, #192]
+ adcs x11, x22, x11
+ adcs x12, x23, x12
+ adcs x13, x24, x13
+ adcs x9, x25, x9
+ ldp x16, x17, [x20, #48]
+ ldp x18, x0, [x20, #32]
+ ldp x1, x2, [x20, #16]
+ ldp x3, x4, [x20]
+ adcs x14, x26, x14
+ adcs x15, x27, x15
+ adcs x8, x28, x8
+ subs x3, x10, x3
+ sbcs x4, x11, x4
+ sbcs x1, x12, x1
+ sbcs x2, x13, x2
+ sbcs x18, x9, x18
+ sbcs x0, x14, x0
+ sbcs x16, x15, x16
+ sbcs x17, x8, x17
+ cmp x17, #0 // =0
+ csel x10, x10, x3, lt
+ csel x11, x11, x4, lt
+ csel x12, x12, x1, lt
+ csel x13, x13, x2, lt
+ csel x9, x9, x18, lt
+ csel x14, x14, x0, lt
+ csel x15, x15, x16, lt
+ csel x8, x8, x17, lt
+ ldr x16, [sp, #112] // 8-byte Folded Reload
+ stp x10, x11, [x16]
+ stp x12, x13, [x16, #16]
+ stp x9, x14, [x16, #32]
+ stp x15, x8, [x16, #48]
+ sub sp, x29, #80 // =80
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end115:
+ .size mcl_fp_montNF8L, .Lfunc_end115-mcl_fp_montNF8L
+
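+// mcl_fp_montRed8L(z, xy, p): Montgomery reduction of the 16-limb (1024-bit)
+// value at x1 modulo the 8-limb prime at x2, writing the 8-limb result to x0.
+// The word p' = -p^{-1} mod 2^64 is read from p[-1] (ldur x9, [x20, #-8]).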
+ .globl mcl_fp_montRed8L
+ .align 2
+ .type mcl_fp_montRed8L,@function
+mcl_fp_montRed8L: // @mcl_fp_montRed8L
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ add x29, sp, #80 // =80
+ sub sp, sp, #800 // =800
+ mov x20, x2
+ ldur x9, [x20, #-8]
+ str x9, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [x20, #48]
+ str x8, [sp, #144] // 8-byte Folded Spill
+ ldr x8, [x20, #56]
+ str x8, [sp, #152] // 8-byte Folded Spill
+ ldr x8, [x20, #32]
+ str x8, [sp, #120] // 8-byte Folded Spill
+ ldr x8, [x20, #40]
+ str x8, [sp, #128] // 8-byte Folded Spill
+ ldr x8, [x20, #16]
+ str x8, [sp, #104] // 8-byte Folded Spill
+ ldr x8, [x20, #24]
+ str x8, [sp, #112] // 8-byte Folded Spill
+ ldr x8, [x20]
+ str x8, [sp, #88] // 8-byte Folded Spill
+ ldr x8, [x20, #8]
+ str x8, [sp, #96] // 8-byte Folded Spill
+ ldr x8, [x1, #112]
+ str x8, [sp, #72] // 8-byte Folded Spill
+ ldr x8, [x1, #120]
+ str x8, [sp, #80] // 8-byte Folded Spill
+ ldr x8, [x1, #96]
+ str x8, [sp, #56] // 8-byte Folded Spill
+ ldr x8, [x1, #104]
+ str x8, [sp, #64] // 8-byte Folded Spill
+ ldr x8, [x1, #80]
+ str x8, [sp, #40] // 8-byte Folded Spill
+ ldr x8, [x1, #88]
+ str x8, [sp, #48] // 8-byte Folded Spill
+ ldp x28, x8, [x1, #64]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldp x22, x25, [x1, #48]
+ ldp x24, x19, [x1, #32]
+ ldp x27, x26, [x1, #16]
+ ldp x21, x23, [x1]
+ str x0, [sp, #136] // 8-byte Folded Spill
+ mul x1, x21, x9
+ sub x8, x29, #160 // =160
+ mov x0, x20
+ bl .LmulPv512x64
+ ldp x9, x8, [x29, #-104]
+ ldp x11, x10, [x29, #-120]
+ ldp x16, x12, [x29, #-136]
+ ldp x13, x14, [x29, #-160]
+ ldur x15, [x29, #-144]
+ cmn x21, x13
+ adcs x21, x23, x14
+ adcs x13, x27, x15
+ adcs x26, x26, x16
+ adcs x24, x24, x12
+ adcs x11, x19, x11
+ stp x11, x13, [sp, #8]
+ adcs x22, x22, x10
+ adcs x25, x25, x9
+ adcs x27, x28, x8
+ ldr x8, [sp, #24] // 8-byte Folded Reload
+ adcs x28, x8, xzr
+ ldp x19, x8, [sp, #32]
+ adcs x23, x8, xzr
+ ldr x8, [sp, #48] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #48] // 8-byte Folded Spill
+ ldr x8, [sp, #56] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #56] // 8-byte Folded Spill
+ ldr x8, [sp, #64] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #64] // 8-byte Folded Spill
+ ldr x8, [sp, #72] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #72] // 8-byte Folded Spill
+ ldr x8, [sp, #80] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #80] // 8-byte Folded Spill
+ adcs x8, xzr, xzr
+ str x8, [sp, #40] // 8-byte Folded Spill
+ mul x1, x21, x19
+ sub x8, x29, #240 // =240
+ mov x0, x20
+ bl .LmulPv512x64
+ ldp x9, x8, [x29, #-184]
+ ldp x11, x10, [x29, #-200]
+ ldp x16, x12, [x29, #-216]
+ ldp x13, x14, [x29, #-240]
+ ldur x15, [x29, #-224]
+ cmn x21, x13
+ ldr x13, [sp, #16] // 8-byte Folded Reload
+ adcs x21, x13, x14
+ adcs x13, x26, x15
+ str x13, [sp, #24] // 8-byte Folded Spill
+ adcs x24, x24, x16
+ ldr x13, [sp, #8] // 8-byte Folded Reload
+ adcs x12, x13, x12
+ str x12, [sp, #16] // 8-byte Folded Spill
+ adcs x22, x22, x11
+ adcs x25, x25, x10
+ adcs x27, x27, x9
+ adcs x28, x28, x8
+ adcs x23, x23, xzr
+ ldr x8, [sp, #48] // 8-byte Folded Reload
+ adcs x26, x8, xzr
+ ldr x8, [sp, #56] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #56] // 8-byte Folded Spill
+ ldr x8, [sp, #64] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #64] // 8-byte Folded Spill
+ ldr x8, [sp, #72] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #72] // 8-byte Folded Spill
+ ldr x8, [sp, #80] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #80] // 8-byte Folded Spill
+ ldr x8, [sp, #40] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #48] // 8-byte Folded Spill
+ mul x1, x21, x19
+ add x8, sp, #560 // =560
+ mov x0, x20
+ bl .LmulPv512x64
+ ldr x8, [sp, #624]
+ ldr x9, [sp, #616]
+ ldr x10, [sp, #608]
+ ldr x11, [sp, #600]
+ ldr x12, [sp, #592]
+ ldr x13, [sp, #560]
+ ldr x14, [sp, #568]
+ ldr x15, [sp, #576]
+ ldr x16, [sp, #584]
+ cmn x21, x13
+ ldr x13, [sp, #24] // 8-byte Folded Reload
+ adcs x21, x13, x14
+ adcs x13, x24, x15
+ str x13, [sp, #40] // 8-byte Folded Spill
+ ldr x13, [sp, #16] // 8-byte Folded Reload
+ adcs x13, x13, x16
+ str x13, [sp, #24] // 8-byte Folded Spill
+ adcs x22, x22, x12
+ adcs x25, x25, x11
+ adcs x27, x27, x10
+ adcs x28, x28, x9
+ adcs x23, x23, x8
+ adcs x26, x26, xzr
+ ldr x8, [sp, #56] // 8-byte Folded Reload
+ adcs x24, x8, xzr
+ ldr x8, [sp, #64] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #64] // 8-byte Folded Spill
+ ldr x8, [sp, #72] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #72] // 8-byte Folded Spill
+ ldr x8, [sp, #80] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #80] // 8-byte Folded Spill
+ ldr x8, [sp, #48] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #56] // 8-byte Folded Spill
+ mul x1, x21, x19
+ add x8, sp, #480 // =480
+ mov x0, x20
+ bl .LmulPv512x64
+ ldr x8, [sp, #544]
+ ldr x9, [sp, #536]
+ ldr x10, [sp, #528]
+ ldr x11, [sp, #520]
+ ldr x12, [sp, #512]
+ ldp x13, x14, [sp, #480]
+ ldp x15, x16, [sp, #496]
+ cmn x21, x13
+ ldr x13, [sp, #40] // 8-byte Folded Reload
+ adcs x21, x13, x14
+ ldr x13, [sp, #24] // 8-byte Folded Reload
+ adcs x13, x13, x15
+ adcs x22, x22, x16
+ adcs x25, x25, x12
+ adcs x27, x27, x11
+ adcs x28, x28, x10
+ adcs x23, x23, x9
+ adcs x26, x26, x8
+ adcs x24, x24, xzr
+ ldr x8, [sp, #64] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #64] // 8-byte Folded Spill
+ ldr x8, [sp, #72] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #72] // 8-byte Folded Spill
+ ldr x8, [sp, #80] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #80] // 8-byte Folded Spill
+ ldr x8, [sp, #56] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ stp x13, x8, [sp, #48]
+ mul x1, x21, x19
+ add x8, sp, #400 // =400
+ mov x0, x20
+ bl .LmulPv512x64
+ ldp x9, x8, [sp, #456]
+ ldp x11, x10, [sp, #440]
+ ldp x16, x12, [sp, #424]
+ ldp x13, x14, [sp, #400]
+ ldr x15, [sp, #416]
+ cmn x21, x13
+ ldr x13, [sp, #48] // 8-byte Folded Reload
+ adcs x21, x13, x14
+ adcs x13, x22, x15
+ str x13, [sp, #48] // 8-byte Folded Spill
+ adcs x25, x25, x16
+ adcs x27, x27, x12
+ adcs x28, x28, x11
+ adcs x23, x23, x10
+ adcs x26, x26, x9
+ adcs x24, x24, x8
+ ldr x8, [sp, #64] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #64] // 8-byte Folded Spill
+ ldr x8, [sp, #72] // 8-byte Folded Reload
+ adcs x22, x8, xzr
+ ldr x8, [sp, #80] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #80] // 8-byte Folded Spill
+ ldr x8, [sp, #56] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #72] // 8-byte Folded Spill
+ mul x1, x21, x19
+ add x8, sp, #320 // =320
+ mov x0, x20
+ bl .LmulPv512x64
+ ldp x9, x8, [sp, #376]
+ ldp x11, x10, [sp, #360]
+ ldp x16, x12, [sp, #344]
+ ldp x13, x14, [sp, #320]
+ ldr x15, [sp, #336]
+ cmn x21, x13
+ ldr x13, [sp, #48] // 8-byte Folded Reload
+ adcs x21, x13, x14
+ adcs x13, x25, x15
+ adcs x27, x27, x16
+ adcs x28, x28, x12
+ adcs x23, x23, x11
+ adcs x26, x26, x10
+ adcs x24, x24, x9
+ ldr x9, [sp, #64] // 8-byte Folded Reload
+ adcs x8, x9, x8
+ stp x13, x8, [sp, #56]
+ adcs x22, x22, xzr
+ ldr x8, [sp, #80] // 8-byte Folded Reload
+ adcs x25, x8, xzr
+ ldr x8, [sp, #72] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #80] // 8-byte Folded Spill
+ mul x1, x21, x19
+ add x8, sp, #240 // =240
+ mov x0, x20
+ bl .LmulPv512x64
+ ldp x9, x8, [sp, #296]
+ ldp x11, x10, [sp, #280]
+ ldp x16, x12, [sp, #264]
+ ldp x13, x14, [sp, #240]
+ ldr x15, [sp, #256]
+ cmn x21, x13
+ ldr x13, [sp, #56] // 8-byte Folded Reload
+ adcs x21, x13, x14
+ adcs x13, x27, x15
+ adcs x28, x28, x16
+ adcs x23, x23, x12
+ adcs x26, x26, x11
+ adcs x24, x24, x10
+ ldr x10, [sp, #64] // 8-byte Folded Reload
+ adcs x9, x10, x9
+ stp x9, x13, [sp, #64]
+ adcs x22, x22, x8
+ adcs x25, x25, xzr
+ ldr x8, [sp, #80] // 8-byte Folded Reload
+ adcs x27, x8, xzr
+ mul x1, x21, x19
+ add x8, sp, #160 // =160
+ mov x0, x20
+ bl .LmulPv512x64
+ ldp x9, x8, [sp, #216]
+ ldp x11, x10, [sp, #200]
+ ldp x16, x12, [sp, #184]
+ ldp x13, x14, [sp, #160]
+ ldr x15, [sp, #176]
+ cmn x21, x13
+ ldr x13, [sp, #72] // 8-byte Folded Reload
+ adcs x13, x13, x14
+ adcs x14, x28, x15
+ adcs x15, x23, x16
+ adcs x12, x26, x12
+ adcs x11, x24, x11
+ ldr x16, [sp, #64] // 8-byte Folded Reload
+ adcs x10, x16, x10
+ adcs x9, x22, x9
+ adcs x8, x25, x8
+ adcs x16, x27, xzr
+ ldp x17, x18, [sp, #88]
+ subs x17, x13, x17
+ sbcs x18, x14, x18
+ ldp x0, x1, [sp, #104]
+ sbcs x0, x15, x0
+ sbcs x1, x12, x1
+ ldp x2, x3, [sp, #120]
+ sbcs x2, x11, x2
+ sbcs x3, x10, x3
+ ldp x4, x5, [sp, #144]
+ sbcs x4, x9, x4
+ sbcs x5, x8, x5
+ sbcs x16, x16, xzr
+ tst x16, #0x1
+ csel x13, x13, x17, ne
+ csel x14, x14, x18, ne
+ csel x15, x15, x0, ne
+ csel x12, x12, x1, ne
+ csel x11, x11, x2, ne
+ csel x10, x10, x3, ne
+ csel x9, x9, x4, ne
+ csel x8, x8, x5, ne
+ ldr x16, [sp, #136] // 8-byte Folded Reload
+ stp x13, x14, [x16]
+ stp x15, x12, [x16, #16]
+ stp x11, x10, [x16, #32]
+ stp x9, x8, [x16, #48]
+ sub sp, x29, #80 // =80
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end116:
+ .size mcl_fp_montRed8L, .Lfunc_end116-mcl_fp_montRed8L
+
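+// mcl_fp_addPre8L(z, x, y): 8-limb (512-bit) addition without reduction;
+// z = x + y, with the carry out returned in x0.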
+ .globl mcl_fp_addPre8L
+ .align 2
+ .type mcl_fp_addPre8L,@function
+mcl_fp_addPre8L: // @mcl_fp_addPre8L
+// BB#0:
+ ldp x8, x9, [x2, #48]
+ ldp x10, x11, [x1, #48]
+ ldp x12, x13, [x2, #32]
+ ldp x14, x15, [x1, #32]
+ ldp x16, x17, [x2, #16]
+ ldp x18, x2, [x2]
+ ldp x3, x4, [x1]
+ ldp x5, x1, [x1, #16]
+ adds x18, x18, x3
+ str x18, [x0]
+ adcs x18, x2, x4
+ adcs x16, x16, x5
+ stp x18, x16, [x0, #8]
+ adcs x16, x17, x1
+ adcs x12, x12, x14
+ stp x16, x12, [x0, #24]
+ adcs x12, x13, x15
+ adcs x8, x8, x10
+ stp x12, x8, [x0, #40]
+ adcs x9, x9, x11
+ adcs x8, xzr, xzr
+ str x9, [x0, #56]
+ mov x0, x8
+ ret
+.Lfunc_end117:
+ .size mcl_fp_addPre8L, .Lfunc_end117-mcl_fp_addPre8L
+
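+// mcl_fp_subPre8L(z, x, y): 8-limb subtraction without reduction;
+// z = x - y, with the borrow returned in x0.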
+ .globl mcl_fp_subPre8L
+ .align 2
+ .type mcl_fp_subPre8L,@function
+mcl_fp_subPre8L: // @mcl_fp_subPre8L
+// BB#0:
+ ldp x8, x9, [x2, #48]
+ ldp x10, x11, [x1, #48]
+ ldp x12, x13, [x2, #32]
+ ldp x14, x15, [x1, #32]
+ ldp x16, x17, [x2, #16]
+ ldp x18, x2, [x2]
+ ldp x3, x4, [x1]
+ ldp x5, x1, [x1, #16]
+ subs x18, x3, x18
+ str x18, [x0]
+ sbcs x18, x4, x2
+ sbcs x16, x5, x16
+ stp x18, x16, [x0, #8]
+ sbcs x16, x1, x17
+ sbcs x12, x14, x12
+ stp x16, x12, [x0, #24]
+ sbcs x12, x15, x13
+ sbcs x8, x10, x8
+ stp x12, x8, [x0, #40]
+ sbcs x9, x11, x9
+ ngcs x8, xzr
+ and x8, x8, #0x1
+ str x9, [x0, #56]
+ mov x0, x8
+ ret
+.Lfunc_end118:
+ .size mcl_fp_subPre8L, .Lfunc_end118-mcl_fp_subPre8L
+
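+// mcl_fp_shr1_8L(z, x): logical right shift of an 8-limb value by one bit,
+// using extr to move each bit down into the limb below.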
+ .globl mcl_fp_shr1_8L
+ .align 2
+ .type mcl_fp_shr1_8L,@function
+mcl_fp_shr1_8L: // @mcl_fp_shr1_8L
+// BB#0:
+ ldp x8, x9, [x1]
+ ldp x10, x11, [x1, #48]
+ ldp x12, x13, [x1, #16]
+ ldp x14, x15, [x1, #32]
+ extr x8, x9, x8, #1
+ extr x9, x12, x9, #1
+ extr x12, x13, x12, #1
+ extr x13, x14, x13, #1
+ extr x14, x15, x14, #1
+ extr x15, x10, x15, #1
+ extr x10, x11, x10, #1
+ lsr x11, x11, #1
+ stp x8, x9, [x0]
+ stp x12, x13, [x0, #16]
+ stp x14, x15, [x0, #32]
+ stp x10, x11, [x0, #48]
+ ret
+.Lfunc_end119:
+ .size mcl_fp_shr1_8L, .Lfunc_end119-mcl_fp_shr1_8L
+
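+// mcl_fp_add8L(z, x, y, p): modular addition; the raw sum is stored first,
+// then the modulus at x3 is subtracted and the reduced value written back
+// only when that subtraction does not borrow (the %nocarry block).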
+ .globl mcl_fp_add8L
+ .align 2
+ .type mcl_fp_add8L,@function
+mcl_fp_add8L: // @mcl_fp_add8L
+// BB#0:
+ stp x22, x21, [sp, #-32]!
+ stp x20, x19, [sp, #16]
+ ldp x8, x9, [x2, #48]
+ ldp x10, x11, [x1, #48]
+ ldp x12, x13, [x2, #32]
+ ldp x14, x15, [x1, #32]
+ ldp x16, x17, [x2, #16]
+ ldp x18, x2, [x2]
+ ldp x4, x5, [x1]
+ ldp x6, x1, [x1, #16]
+ adds x18, x18, x4
+ adcs x2, x2, x5
+ ldp x4, x5, [x3, #48]
+ adcs x16, x16, x6
+ adcs x17, x17, x1
+ ldp x1, x6, [x3, #32]
+ adcs x7, x12, x14
+ adcs x19, x13, x15
+ ldp x12, x13, [x3]
+ stp x18, x2, [x0]
+ stp x16, x17, [x0, #16]
+ stp x7, x19, [x0, #32]
+ adcs x8, x8, x10
+ adcs x20, x9, x11
+ stp x8, x20, [x0, #48]
+ adcs x21, xzr, xzr
+ ldp x9, x10, [x3, #16]
+ subs x15, x18, x12
+ sbcs x14, x2, x13
+ sbcs x13, x16, x9
+ sbcs x12, x17, x10
+ sbcs x11, x7, x1
+ sbcs x10, x19, x6
+ sbcs x9, x8, x4
+ sbcs x8, x20, x5
+ sbcs x16, x21, xzr
+ and w16, w16, #0x1
+ tbnz w16, #0, .LBB120_2
+// BB#1: // %nocarry
+ stp x15, x14, [x0]
+ stp x13, x12, [x0, #16]
+ stp x11, x10, [x0, #32]
+ stp x9, x8, [x0, #48]
+.LBB120_2: // %carry
+ ldp x20, x19, [sp, #16]
+ ldp x22, x21, [sp], #32
+ ret
+.Lfunc_end120:
+ .size mcl_fp_add8L, .Lfunc_end120-mcl_fp_add8L
+
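+// mcl_fp_addNF8L(z, x, y, p): addition for the "NF" case (modulus assumed to
+// leave the top bit free), selecting between x+y and x+y-p by the sign of the
+// top limb instead of tracking a carry word.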
+ .globl mcl_fp_addNF8L
+ .align 2
+ .type mcl_fp_addNF8L,@function
+mcl_fp_addNF8L: // @mcl_fp_addNF8L
+// BB#0:
+ ldp x8, x9, [x1, #48]
+ ldp x10, x11, [x2, #48]
+ ldp x12, x13, [x1, #32]
+ ldp x14, x15, [x2, #32]
+ ldp x16, x17, [x1, #16]
+ ldp x18, x1, [x1]
+ ldp x4, x5, [x2]
+ ldp x6, x2, [x2, #16]
+ adds x18, x4, x18
+ adcs x1, x5, x1
+ ldp x4, x5, [x3, #48]
+ adcs x16, x6, x16
+ adcs x17, x2, x17
+ ldp x2, x6, [x3, #32]
+ adcs x12, x14, x12
+ adcs x13, x15, x13
+ ldp x14, x15, [x3]
+ adcs x8, x10, x8
+ ldp x10, x3, [x3, #16]
+ adcs x9, x11, x9
+ subs x11, x18, x14
+ sbcs x14, x1, x15
+ sbcs x10, x16, x10
+ sbcs x15, x17, x3
+ sbcs x2, x12, x2
+ sbcs x3, x13, x6
+ sbcs x4, x8, x4
+ sbcs x5, x9, x5
+ cmp x5, #0 // =0
+ csel x11, x18, x11, lt
+ csel x14, x1, x14, lt
+ csel x10, x16, x10, lt
+ csel x15, x17, x15, lt
+ csel x12, x12, x2, lt
+ csel x13, x13, x3, lt
+ csel x8, x8, x4, lt
+ csel x9, x9, x5, lt
+ stp x11, x14, [x0]
+ stp x10, x15, [x0, #16]
+ stp x12, x13, [x0, #32]
+ stp x8, x9, [x0, #48]
+ ret
+.Lfunc_end121:
+ .size mcl_fp_addNF8L, .Lfunc_end121-mcl_fp_addNF8L
+
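+// mcl_fp_sub8L(z, x, y, p): modular subtraction; when the subtraction borrows,
+// the modulus at x3 is added back in the %carry block.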
+ .globl mcl_fp_sub8L
+ .align 2
+ .type mcl_fp_sub8L,@function
+mcl_fp_sub8L: // @mcl_fp_sub8L
+// BB#0:
+ ldp x14, x15, [x2, #48]
+ ldp x16, x17, [x1, #48]
+ ldp x12, x13, [x2, #32]
+ ldp x18, x4, [x1, #32]
+ ldp x10, x11, [x2, #16]
+ ldp x8, x9, [x2]
+ ldp x2, x5, [x1]
+ ldp x6, x1, [x1, #16]
+ subs x8, x2, x8
+ sbcs x9, x5, x9
+ stp x8, x9, [x0]
+ sbcs x10, x6, x10
+ sbcs x11, x1, x11
+ stp x10, x11, [x0, #16]
+ sbcs x12, x18, x12
+ sbcs x13, x4, x13
+ stp x12, x13, [x0, #32]
+ sbcs x14, x16, x14
+ sbcs x15, x17, x15
+ stp x14, x15, [x0, #48]
+ ngcs x16, xzr
+ and w16, w16, #0x1
+ tbnz w16, #0, .LBB122_2
+// BB#1: // %nocarry
+ ret
+.LBB122_2: // %carry
+ ldp x16, x17, [x3, #48]
+ ldp x18, x1, [x3]
+ ldp x2, x4, [x3, #16]
+ ldp x5, x3, [x3, #32]
+ adds x8, x18, x8
+ adcs x9, x1, x9
+ adcs x10, x2, x10
+ adcs x11, x4, x11
+ adcs x12, x5, x12
+ adcs x13, x3, x13
+ adcs x14, x16, x14
+ adcs x15, x17, x15
+ stp x8, x9, [x0]
+ stp x10, x11, [x0, #16]
+ stp x12, x13, [x0, #32]
+ stp x14, x15, [x0, #48]
+ ret
+.Lfunc_end122:
+ .size mcl_fp_sub8L, .Lfunc_end122-mcl_fp_sub8L
+
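+// mcl_fp_subNF8L(z, x, y, p): branch-free modular subtraction; the modulus is
+// masked by the sign of the result (asr #63) and added back.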
+ .globl mcl_fp_subNF8L
+ .align 2
+ .type mcl_fp_subNF8L,@function
+mcl_fp_subNF8L: // @mcl_fp_subNF8L
+// BB#0:
+ ldp x8, x9, [x2, #48]
+ ldp x10, x11, [x1, #48]
+ ldp x12, x13, [x2, #32]
+ ldp x14, x15, [x1, #32]
+ ldp x16, x17, [x2, #16]
+ ldp x18, x2, [x2]
+ ldp x4, x5, [x1]
+ ldp x6, x1, [x1, #16]
+ subs x18, x4, x18
+ sbcs x2, x5, x2
+ ldp x4, x5, [x3, #48]
+ sbcs x16, x6, x16
+ sbcs x17, x1, x17
+ ldp x1, x6, [x3, #32]
+ sbcs x12, x14, x12
+ sbcs x13, x15, x13
+ ldp x14, x15, [x3, #16]
+ sbcs x8, x10, x8
+ ldp x10, x3, [x3]
+ sbcs x9, x11, x9
+ asr x11, x9, #63
+ and x10, x11, x10
+ and x3, x11, x3
+ and x14, x11, x14
+ and x15, x11, x15
+ and x1, x11, x1
+ and x6, x11, x6
+ and x4, x11, x4
+ and x11, x11, x5
+ adds x10, x10, x18
+ str x10, [x0]
+ adcs x10, x3, x2
+ str x10, [x0, #8]
+ adcs x10, x14, x16
+ str x10, [x0, #16]
+ adcs x10, x15, x17
+ str x10, [x0, #24]
+ adcs x10, x1, x12
+ str x10, [x0, #32]
+ adcs x10, x6, x13
+ adcs x8, x4, x8
+ stp x10, x8, [x0, #40]
+ adcs x8, x11, x9
+ str x8, [x0, #56]
+ ret
+.Lfunc_end123:
+ .size mcl_fp_subNF8L, .Lfunc_end123-mcl_fp_subNF8L
+
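+// mcl_fpDbl_add8L(z, x, y, p): addition of two 16-limb double-width values;
+// the low 8 limbs are stored as-is and the high half is reduced modulo p.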
+ .globl mcl_fpDbl_add8L
+ .align 2
+ .type mcl_fpDbl_add8L,@function
+mcl_fpDbl_add8L: // @mcl_fpDbl_add8L
+// BB#0:
+ ldp x8, x9, [x2, #112]
+ ldp x10, x11, [x1, #112]
+ ldp x12, x13, [x2, #96]
+ ldp x14, x15, [x1, #96]
+ ldp x16, x5, [x2]
+ ldp x17, x6, [x1]
+ ldp x18, x4, [x2, #80]
+ adds x16, x16, x17
+ ldr x17, [x1, #16]
+ str x16, [x0]
+ adcs x16, x5, x6
+ ldp x5, x6, [x2, #16]
+ str x16, [x0, #8]
+ adcs x17, x5, x17
+ ldp x16, x5, [x1, #24]
+ str x17, [x0, #16]
+ adcs x16, x6, x16
+ ldp x17, x6, [x2, #32]
+ str x16, [x0, #24]
+ adcs x17, x17, x5
+ ldp x16, x5, [x1, #40]
+ str x17, [x0, #32]
+ adcs x16, x6, x16
+ ldp x17, x6, [x2, #48]
+ str x16, [x0, #40]
+ ldr x16, [x1, #56]
+ adcs x17, x17, x5
+ ldp x5, x2, [x2, #64]
+ str x17, [x0, #48]
+ adcs x16, x6, x16
+ ldp x17, x6, [x1, #64]
+ str x16, [x0, #56]
+ ldp x16, x1, [x1, #80]
+ adcs x17, x5, x17
+ adcs x2, x2, x6
+ ldp x5, x6, [x3, #48]
+ adcs x16, x18, x16
+ adcs x18, x4, x1
+ ldp x1, x4, [x3, #32]
+ adcs x12, x12, x14
+ adcs x13, x13, x15
+ ldp x14, x15, [x3, #16]
+ adcs x8, x8, x10
+ ldp x10, x3, [x3]
+ adcs x9, x9, x11
+ adcs x11, xzr, xzr
+ subs x10, x17, x10
+ sbcs x3, x2, x3
+ sbcs x14, x16, x14
+ sbcs x15, x18, x15
+ sbcs x1, x12, x1
+ sbcs x4, x13, x4
+ sbcs x5, x8, x5
+ sbcs x6, x9, x6
+ sbcs x11, x11, xzr
+ tst x11, #0x1
+ csel x10, x17, x10, ne
+ csel x11, x2, x3, ne
+ csel x14, x16, x14, ne
+ csel x15, x18, x15, ne
+ csel x12, x12, x1, ne
+ csel x13, x13, x4, ne
+ csel x8, x8, x5, ne
+ csel x9, x9, x6, ne
+ stp x10, x11, [x0, #64]
+ stp x14, x15, [x0, #80]
+ stp x12, x13, [x0, #96]
+ stp x8, x9, [x0, #112]
+ ret
+.Lfunc_end124:
+ .size mcl_fpDbl_add8L, .Lfunc_end124-mcl_fpDbl_add8L
+
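+// mcl_fpDbl_sub8L(z, x, y, p): double-width subtraction; a masked copy of p is
+// added back to the high half when the overall subtraction borrows.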
+ .globl mcl_fpDbl_sub8L
+ .align 2
+ .type mcl_fpDbl_sub8L,@function
+mcl_fpDbl_sub8L: // @mcl_fpDbl_sub8L
+// BB#0:
+ ldp x10, x8, [x2, #112]
+ ldp x11, x9, [x1, #112]
+ ldp x12, x13, [x2, #96]
+ ldp x14, x15, [x1, #96]
+ ldp x16, x5, [x1]
+ ldp x17, x4, [x2]
+ ldr x18, [x1, #80]
+ subs x16, x16, x17
+ ldr x17, [x1, #16]
+ str x16, [x0]
+ sbcs x16, x5, x4
+ ldp x4, x5, [x2, #16]
+ str x16, [x0, #8]
+ sbcs x17, x17, x4
+ ldp x16, x4, [x1, #24]
+ str x17, [x0, #16]
+ sbcs x16, x16, x5
+ ldp x17, x5, [x2, #32]
+ str x16, [x0, #24]
+ sbcs x17, x4, x17
+ ldp x16, x4, [x1, #40]
+ str x17, [x0, #32]
+ sbcs x16, x16, x5
+ ldp x17, x5, [x2, #48]
+ str x16, [x0, #40]
+ sbcs x17, x4, x17
+ ldp x16, x4, [x1, #56]
+ str x17, [x0, #48]
+ sbcs x16, x16, x5
+ ldp x17, x5, [x2, #64]
+ str x16, [x0, #56]
+ ldr x16, [x1, #72]
+ sbcs x17, x4, x17
+ ldp x4, x2, [x2, #80]
+ ldr x1, [x1, #88]
+ sbcs x16, x16, x5
+ sbcs x18, x18, x4
+ ldp x4, x5, [x3, #48]
+ sbcs x1, x1, x2
+ sbcs x12, x14, x12
+ ldp x14, x2, [x3, #32]
+ sbcs x13, x15, x13
+ sbcs x10, x11, x10
+ ldp x11, x15, [x3, #16]
+ sbcs x8, x9, x8
+ ngcs x9, xzr
+ tst x9, #0x1
+ ldp x9, x3, [x3]
+ csel x5, x5, xzr, ne
+ csel x4, x4, xzr, ne
+ csel x2, x2, xzr, ne
+ csel x14, x14, xzr, ne
+ csel x15, x15, xzr, ne
+ csel x11, x11, xzr, ne
+ csel x3, x3, xzr, ne
+ csel x9, x9, xzr, ne
+ adds x9, x9, x17
+ str x9, [x0, #64]
+ adcs x9, x3, x16
+ str x9, [x0, #72]
+ adcs x9, x11, x18
+ str x9, [x0, #80]
+ adcs x9, x15, x1
+ str x9, [x0, #88]
+ adcs x9, x14, x12
+ str x9, [x0, #96]
+ adcs x9, x2, x13
+ str x9, [x0, #104]
+ adcs x9, x4, x10
+ adcs x8, x5, x8
+ stp x9, x8, [x0, #112]
+ ret
+.Lfunc_end125:
+ .size mcl_fpDbl_sub8L, .Lfunc_end125-mcl_fpDbl_sub8L
+
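+// .LmulPv576x64: internal helper; multiplies the 9-limb (576-bit) value at x0
+// by the 64-bit scalar in x1 and stores the 10-limb product at x8.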
+ .align 2
+ .type .LmulPv576x64,@function
+.LmulPv576x64: // @mulPv576x64
+// BB#0:
+ ldr x9, [x0]
+ mul x10, x9, x1
+ str x10, [x8]
+ ldr x10, [x0, #8]
+ umulh x9, x9, x1
+ mul x11, x10, x1
+ adds x9, x9, x11
+ str x9, [x8, #8]
+ ldr x9, [x0, #16]
+ umulh x10, x10, x1
+ mul x11, x9, x1
+ adcs x10, x10, x11
+ str x10, [x8, #16]
+ ldr x10, [x0, #24]
+ umulh x9, x9, x1
+ mul x11, x10, x1
+ adcs x9, x9, x11
+ str x9, [x8, #24]
+ ldr x9, [x0, #32]
+ umulh x10, x10, x1
+ mul x11, x9, x1
+ adcs x10, x10, x11
+ str x10, [x8, #32]
+ ldr x10, [x0, #40]
+ umulh x9, x9, x1
+ mul x11, x10, x1
+ adcs x9, x9, x11
+ str x9, [x8, #40]
+ ldr x9, [x0, #48]
+ umulh x10, x10, x1
+ mul x11, x9, x1
+ adcs x10, x10, x11
+ str x10, [x8, #48]
+ ldr x10, [x0, #56]
+ umulh x9, x9, x1
+ mul x11, x10, x1
+ adcs x9, x9, x11
+ str x9, [x8, #56]
+ ldr x9, [x0, #64]
+ umulh x10, x10, x1
+ mul x11, x9, x1
+ umulh x9, x9, x1
+ adcs x10, x10, x11
+ adcs x9, x9, xzr
+ stp x10, x9, [x8, #64]
+ ret
+.Lfunc_end126:
+ .size .LmulPv576x64, .Lfunc_end126-.LmulPv576x64
+
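+// mcl_fp_mulUnitPre9L(z, x, y): 9-limb times 64-bit scalar multiply; the
+// 10-limb product from .LmulPv576x64 is copied out to x0.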
+ .globl mcl_fp_mulUnitPre9L
+ .align 2
+ .type mcl_fp_mulUnitPre9L,@function
+mcl_fp_mulUnitPre9L: // @mcl_fp_mulUnitPre9L
+// BB#0:
+ stp x20, x19, [sp, #-32]!
+ stp x29, x30, [sp, #16]
+ add x29, sp, #16 // =16
+ sub sp, sp, #80 // =80
+ mov x19, x0
+ mov x8, sp
+ mov x0, x1
+ mov x1, x2
+ bl .LmulPv576x64
+ ldp x9, x8, [sp, #64]
+ ldp x11, x10, [sp, #48]
+ ldp x13, x12, [sp, #32]
+ ldp x14, x15, [sp]
+ ldp x16, x17, [sp, #16]
+ stp x14, x15, [x19]
+ stp x16, x17, [x19, #16]
+ stp x13, x12, [x19, #32]
+ stp x11, x10, [x19, #48]
+ stp x9, x8, [x19, #64]
+ sub sp, x29, #16 // =16
+ ldp x29, x30, [sp, #16]
+ ldp x20, x19, [sp], #32
+ ret
+.Lfunc_end127:
+ .size mcl_fp_mulUnitPre9L, .Lfunc_end127-mcl_fp_mulUnitPre9L
+
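+// mcl_fpDbl_mulPre9L(z, x, y): schoolbook 9x9-limb multiplication, one
+// .LmulPv576x64 row per limb of y, accumulated into the 18-limb result at x0.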
+ .globl mcl_fpDbl_mulPre9L
+ .align 2
+ .type mcl_fpDbl_mulPre9L,@function
+mcl_fpDbl_mulPre9L: // @mcl_fpDbl_mulPre9L
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ add x29, sp, #80 // =80
+ sub sp, sp, #752 // =752
+ mov x21, x2
+ ldr x9, [x21]
+ mov x20, x1
+ mov x19, x0
+ sub x8, x29, #160 // =160
+ mov x0, x20
+ mov x1, x9
+ bl .LmulPv576x64
+ ldur x8, [x29, #-88]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldur x8, [x29, #-96]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldp x25, x24, [x29, #-112]
+ ldp x27, x26, [x29, #-128]
+ ldp x22, x28, [x29, #-144]
+ ldp x8, x23, [x29, #-160]
+ ldr x1, [x21, #8]
+ str x8, [x19]
+ sub x8, x29, #240 // =240
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x9, x8, [x29, #-176]
+ ldp x11, x10, [x29, #-192]
+ ldp x13, x12, [x29, #-208]
+ ldp x14, x16, [x29, #-240]
+ ldp x17, x15, [x29, #-224]
+ adds x14, x14, x23
+ str x14, [x19, #8]
+ adcs x22, x16, x22
+ adcs x23, x17, x28
+ adcs x27, x15, x27
+ adcs x26, x13, x26
+ adcs x25, x12, x25
+ adcs x24, x11, x24
+ ldr x1, [x21, #16]
+ ldr x11, [sp, #16] // 8-byte Folded Reload
+ adcs x28, x10, x11
+ ldr x10, [sp, #24] // 8-byte Folded Reload
+ adcs x9, x9, x10
+ adcs x8, x8, xzr
+ stp x8, x9, [sp, #16]
+ add x8, sp, #512 // =512
+ mov x0, x20
+ bl .LmulPv576x64
+ ldr x8, [sp, #584]
+ ldr x9, [sp, #576]
+ ldr x10, [sp, #568]
+ ldr x11, [sp, #560]
+ ldr x12, [sp, #552]
+ ldr x13, [sp, #544]
+ ldr x14, [sp, #512]
+ ldr x15, [sp, #536]
+ ldr x16, [sp, #520]
+ ldr x17, [sp, #528]
+ adds x14, x22, x14
+ str x14, [x19, #16]
+ adcs x22, x23, x16
+ adcs x23, x27, x17
+ adcs x26, x26, x15
+ adcs x25, x25, x13
+ adcs x24, x24, x12
+ adcs x27, x28, x11
+ ldr x1, [x21, #24]
+ ldr x11, [sp, #24] // 8-byte Folded Reload
+ adcs x28, x11, x10
+ ldr x10, [sp, #16] // 8-byte Folded Reload
+ adcs x9, x10, x9
+ adcs x8, x8, xzr
+ stp x8, x9, [sp, #16]
+ add x8, sp, #432 // =432
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x9, x8, [sp, #496]
+ ldp x11, x10, [sp, #480]
+ ldp x13, x12, [sp, #464]
+ ldp x14, x16, [sp, #432]
+ ldp x17, x15, [sp, #448]
+ adds x14, x22, x14
+ str x14, [x19, #24]
+ adcs x22, x23, x16
+ adcs x23, x26, x17
+ adcs x25, x25, x15
+ adcs x24, x24, x13
+ adcs x26, x27, x12
+ adcs x27, x28, x11
+ ldr x1, [x21, #32]
+ ldr x11, [sp, #24] // 8-byte Folded Reload
+ adcs x28, x11, x10
+ ldr x10, [sp, #16] // 8-byte Folded Reload
+ adcs x9, x10, x9
+ adcs x8, x8, xzr
+ stp x8, x9, [sp, #16]
+ add x8, sp, #352 // =352
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x9, x8, [sp, #416]
+ ldp x11, x10, [sp, #400]
+ ldp x13, x12, [sp, #384]
+ ldp x14, x16, [sp, #352]
+ ldp x17, x15, [sp, #368]
+ adds x14, x22, x14
+ str x14, [x19, #32]
+ adcs x22, x23, x16
+ adcs x23, x25, x17
+ adcs x24, x24, x15
+ adcs x25, x26, x13
+ adcs x26, x27, x12
+ adcs x27, x28, x11
+ ldr x1, [x21, #40]
+ ldr x11, [sp, #24] // 8-byte Folded Reload
+ adcs x28, x11, x10
+ ldr x10, [sp, #16] // 8-byte Folded Reload
+ adcs x9, x10, x9
+ adcs x8, x8, xzr
+ stp x8, x9, [sp, #16]
+ add x8, sp, #272 // =272
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x9, x8, [sp, #336]
+ ldp x11, x10, [sp, #320]
+ ldp x13, x12, [sp, #304]
+ ldp x14, x16, [sp, #272]
+ ldp x17, x15, [sp, #288]
+ adds x14, x22, x14
+ str x14, [x19, #40]
+ adcs x22, x23, x16
+ adcs x23, x24, x17
+ adcs x24, x25, x15
+ adcs x25, x26, x13
+ adcs x26, x27, x12
+ adcs x27, x28, x11
+ ldr x1, [x21, #48]
+ ldr x11, [sp, #24] // 8-byte Folded Reload
+ adcs x28, x11, x10
+ ldr x10, [sp, #16] // 8-byte Folded Reload
+ adcs x9, x10, x9
+ adcs x8, x8, xzr
+ stp x8, x9, [sp, #16]
+ add x8, sp, #192 // =192
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x9, x8, [sp, #256]
+ ldp x11, x10, [sp, #240]
+ ldp x13, x12, [sp, #224]
+ ldp x14, x16, [sp, #192]
+ ldp x17, x15, [sp, #208]
+ adds x14, x22, x14
+ str x14, [x19, #48]
+ adcs x22, x23, x16
+ adcs x23, x24, x17
+ adcs x24, x25, x15
+ adcs x25, x26, x13
+ adcs x26, x27, x12
+ adcs x27, x28, x11
+ ldr x1, [x21, #56]
+ ldr x11, [sp, #24] // 8-byte Folded Reload
+ adcs x28, x11, x10
+ ldr x10, [sp, #16] // 8-byte Folded Reload
+ adcs x9, x10, x9
+ adcs x8, x8, xzr
+ stp x8, x9, [sp, #16]
+ add x8, sp, #112 // =112
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x9, x8, [sp, #176]
+ ldp x11, x10, [sp, #160]
+ ldp x13, x12, [sp, #144]
+ ldp x14, x16, [sp, #112]
+ ldp x17, x15, [sp, #128]
+ adds x14, x22, x14
+ str x14, [x19, #56]
+ adcs x22, x23, x16
+ adcs x23, x24, x17
+ adcs x24, x25, x15
+ adcs x25, x26, x13
+ adcs x26, x27, x12
+ adcs x27, x28, x11
+ ldr x1, [x21, #64]
+ ldr x11, [sp, #24] // 8-byte Folded Reload
+ adcs x21, x11, x10
+ ldr x10, [sp, #16] // 8-byte Folded Reload
+ adcs x28, x10, x9
+ adcs x8, x8, xzr
+ str x8, [sp, #24] // 8-byte Folded Spill
+ add x8, sp, #32 // =32
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x9, x8, [sp, #96]
+ ldp x11, x10, [sp, #80]
+ ldp x13, x12, [sp, #64]
+ ldp x14, x16, [sp, #32]
+ ldp x17, x15, [sp, #48]
+ adds x14, x22, x14
+ str x14, [x19, #64]
+ adcs x14, x23, x16
+ str x14, [x19, #72]
+ adcs x14, x24, x17
+ str x14, [x19, #80]
+ adcs x14, x25, x15
+ adcs x13, x26, x13
+ stp x14, x13, [x19, #88]
+ adcs x12, x27, x12
+ adcs x11, x21, x11
+ stp x12, x11, [x19, #104]
+ adcs x10, x28, x10
+ str x10, [x19, #120]
+ ldr x10, [sp, #24] // 8-byte Folded Reload
+ adcs x9, x10, x9
+ adcs x8, x8, xzr
+ stp x9, x8, [x19, #128]
+ sub sp, x29, #80 // =80
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end128:
+ .size mcl_fpDbl_mulPre9L, .Lfunc_end128-mcl_fpDbl_mulPre9L
+
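+// mcl_fpDbl_sqrPre9L(z, x): 9-limb squaring, computed as a full 9x9 product
+// with the same row-by-row structure as mcl_fpDbl_mulPre9L.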
+ .globl mcl_fpDbl_sqrPre9L
+ .align 2
+ .type mcl_fpDbl_sqrPre9L,@function
+mcl_fpDbl_sqrPre9L: // @mcl_fpDbl_sqrPre9L
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ add x29, sp, #80 // =80
+ sub sp, sp, #736 // =736
+ mov x20, x1
+ ldr x1, [x20]
+ mov x19, x0
+ sub x8, x29, #160 // =160
+ mov x0, x20
+ bl .LmulPv576x64
+ ldur x8, [x29, #-88]
+ str x8, [sp, #8] // 8-byte Folded Spill
+ ldp x23, x22, [x29, #-104]
+ ldp x25, x24, [x29, #-120]
+ ldp x27, x26, [x29, #-136]
+ ldp x21, x28, [x29, #-152]
+ ldur x8, [x29, #-160]
+ ldr x1, [x20, #8]
+ str x8, [x19]
+ sub x8, x29, #240 // =240
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x9, x8, [x29, #-176]
+ ldp x11, x10, [x29, #-192]
+ ldp x13, x12, [x29, #-208]
+ ldp x14, x16, [x29, #-240]
+ ldp x17, x15, [x29, #-224]
+ adds x14, x14, x21
+ str x14, [x19, #8]
+ adcs x21, x16, x28
+ adcs x27, x17, x27
+ adcs x26, x15, x26
+ adcs x25, x13, x25
+ adcs x24, x12, x24
+ adcs x23, x11, x23
+ ldr x1, [x20, #16]
+ adcs x22, x10, x22
+ ldr x10, [sp, #8] // 8-byte Folded Reload
+ adcs x28, x9, x10
+ adcs x8, x8, xzr
+ str x8, [sp, #8] // 8-byte Folded Spill
+ add x8, sp, #496 // =496
+ mov x0, x20
+ bl .LmulPv576x64
+ ldr x8, [sp, #568]
+ ldr x9, [sp, #560]
+ ldr x10, [sp, #552]
+ ldr x11, [sp, #544]
+ ldr x12, [sp, #536]
+ ldr x13, [sp, #528]
+ ldp x14, x16, [sp, #496]
+ ldr x15, [sp, #520]
+ ldr x17, [sp, #512]
+ adds x14, x21, x14
+ str x14, [x19, #16]
+ adcs x21, x27, x16
+ adcs x26, x26, x17
+ adcs x25, x25, x15
+ adcs x24, x24, x13
+ adcs x23, x23, x12
+ adcs x22, x22, x11
+ ldr x1, [x20, #24]
+ adcs x27, x28, x10
+ ldr x10, [sp, #8] // 8-byte Folded Reload
+ adcs x28, x10, x9
+ adcs x8, x8, xzr
+ str x8, [sp, #8] // 8-byte Folded Spill
+ add x8, sp, #416 // =416
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x9, x8, [sp, #480]
+ ldp x11, x10, [sp, #464]
+ ldp x13, x12, [sp, #448]
+ ldp x14, x16, [sp, #416]
+ ldp x17, x15, [sp, #432]
+ adds x14, x21, x14
+ str x14, [x19, #24]
+ adcs x21, x26, x16
+ adcs x25, x25, x17
+ adcs x24, x24, x15
+ adcs x23, x23, x13
+ adcs x22, x22, x12
+ adcs x26, x27, x11
+ ldr x1, [x20, #32]
+ adcs x27, x28, x10
+ ldr x10, [sp, #8] // 8-byte Folded Reload
+ adcs x28, x10, x9
+ adcs x8, x8, xzr
+ str x8, [sp, #8] // 8-byte Folded Spill
+ add x8, sp, #336 // =336
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x9, x8, [sp, #400]
+ ldp x11, x10, [sp, #384]
+ ldp x13, x12, [sp, #368]
+ ldp x14, x16, [sp, #336]
+ ldp x17, x15, [sp, #352]
+ adds x14, x21, x14
+ str x14, [x19, #32]
+ adcs x21, x25, x16
+ adcs x24, x24, x17
+ adcs x23, x23, x15
+ adcs x22, x22, x13
+ adcs x25, x26, x12
+ adcs x26, x27, x11
+ ldr x1, [x20, #40]
+ adcs x27, x28, x10
+ ldr x10, [sp, #8] // 8-byte Folded Reload
+ adcs x28, x10, x9
+ adcs x8, x8, xzr
+ str x8, [sp, #8] // 8-byte Folded Spill
+ add x8, sp, #256 // =256
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x9, x8, [sp, #320]
+ ldp x11, x10, [sp, #304]
+ ldp x13, x12, [sp, #288]
+ ldp x14, x16, [sp, #256]
+ ldp x17, x15, [sp, #272]
+ adds x14, x21, x14
+ str x14, [x19, #40]
+ adcs x21, x24, x16
+ adcs x23, x23, x17
+ adcs x22, x22, x15
+ adcs x24, x25, x13
+ adcs x25, x26, x12
+ adcs x26, x27, x11
+ ldr x1, [x20, #48]
+ adcs x27, x28, x10
+ ldr x10, [sp, #8] // 8-byte Folded Reload
+ adcs x28, x10, x9
+ adcs x8, x8, xzr
+ str x8, [sp, #8] // 8-byte Folded Spill
+ add x8, sp, #176 // =176
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x9, x8, [sp, #240]
+ ldp x11, x10, [sp, #224]
+ ldp x13, x12, [sp, #208]
+ ldp x14, x16, [sp, #176]
+ ldp x17, x15, [sp, #192]
+ adds x14, x21, x14
+ str x14, [x19, #48]
+ adcs x21, x23, x16
+ adcs x22, x22, x17
+ adcs x23, x24, x15
+ adcs x24, x25, x13
+ adcs x25, x26, x12
+ adcs x26, x27, x11
+ ldr x1, [x20, #56]
+ adcs x27, x28, x10
+ ldr x10, [sp, #8] // 8-byte Folded Reload
+ adcs x28, x10, x9
+ adcs x8, x8, xzr
+ str x8, [sp, #8] // 8-byte Folded Spill
+ add x8, sp, #96 // =96
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x9, x8, [sp, #160]
+ ldp x11, x10, [sp, #144]
+ ldp x13, x12, [sp, #128]
+ ldp x14, x16, [sp, #96]
+ ldp x17, x15, [sp, #112]
+ adds x14, x21, x14
+ str x14, [x19, #56]
+ adcs x21, x22, x16
+ adcs x22, x23, x17
+ adcs x23, x24, x15
+ adcs x24, x25, x13
+ adcs x25, x26, x12
+ adcs x26, x27, x11
+ ldr x1, [x20, #64]
+ adcs x27, x28, x10
+ ldr x10, [sp, #8] // 8-byte Folded Reload
+ adcs x28, x10, x9
+ adcs x8, x8, xzr
+ str x8, [sp, #8] // 8-byte Folded Spill
+ add x8, sp, #16 // =16
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x9, x8, [sp, #80]
+ ldp x11, x10, [sp, #64]
+ ldp x13, x12, [sp, #48]
+ ldp x14, x16, [sp, #16]
+ ldp x17, x15, [sp, #32]
+ adds x14, x21, x14
+ str x14, [x19, #64]
+ adcs x14, x22, x16
+ str x14, [x19, #72]
+ adcs x14, x23, x17
+ str x14, [x19, #80]
+ adcs x14, x24, x15
+ adcs x13, x25, x13
+ stp x14, x13, [x19, #88]
+ adcs x12, x26, x12
+ adcs x11, x27, x11
+ stp x12, x11, [x19, #104]
+ adcs x10, x28, x10
+ str x10, [x19, #120]
+ ldr x10, [sp, #8] // 8-byte Folded Reload
+ adcs x9, x10, x9
+ adcs x8, x8, xzr
+ stp x9, x8, [x19, #128]
+ sub sp, x29, #80 // =80
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end129:
+ .size mcl_fpDbl_sqrPre9L, .Lfunc_end129-mcl_fpDbl_sqrPre9L
+
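+// mcl_fp_mont9L(z, x, y, p): word-by-word Montgomery multiplication for 9-limb
+// (576-bit) operands; p' = -p^{-1} mod 2^64 is read from p[-1], and the final
+// conditional subtraction of p is done with csel.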
+ .globl mcl_fp_mont9L
+ .align 2
+ .type mcl_fp_mont9L,@function
+mcl_fp_mont9L: // @mcl_fp_mont9L
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ add x29, sp, #80 // =80
+ sub sp, sp, #1600 // =1600
+ mov x20, x3
+ mov x28, x2
+ str x28, [sp, #136] // 8-byte Folded Spill
+ ldur x19, [x20, #-8]
+ str x19, [sp, #144] // 8-byte Folded Spill
+ ldr x9, [x28]
+ mov x23, x1
+ str x23, [sp, #152] // 8-byte Folded Spill
+ str x0, [sp, #128] // 8-byte Folded Spill
+ sub x8, x29, #160 // =160
+ mov x0, x23
+ mov x1, x9
+ bl .LmulPv576x64
+ ldur x24, [x29, #-160]
+ ldur x8, [x29, #-88]
+ str x8, [sp, #120] // 8-byte Folded Spill
+ ldur x8, [x29, #-96]
+ str x8, [sp, #112] // 8-byte Folded Spill
+ ldur x8, [x29, #-104]
+ str x8, [sp, #104] // 8-byte Folded Spill
+ ldur x8, [x29, #-112]
+ str x8, [sp, #96] // 8-byte Folded Spill
+ ldur x8, [x29, #-120]
+ str x8, [sp, #88] // 8-byte Folded Spill
+ ldur x8, [x29, #-128]
+ str x8, [sp, #80] // 8-byte Folded Spill
+ ldur x8, [x29, #-136]
+ str x8, [sp, #72] // 8-byte Folded Spill
+ ldur x8, [x29, #-144]
+ str x8, [sp, #64] // 8-byte Folded Spill
+ ldur x8, [x29, #-152]
+ str x8, [sp, #48] // 8-byte Folded Spill
+ mul x1, x24, x19
+ sub x8, x29, #240 // =240
+ mov x0, x20
+ bl .LmulPv576x64
+ ldur x8, [x29, #-168]
+ str x8, [sp, #56] // 8-byte Folded Spill
+ ldur x8, [x29, #-176]
+ str x8, [sp, #40] // 8-byte Folded Spill
+ ldur x8, [x29, #-184]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldur x8, [x29, #-192]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldp x21, x19, [x29, #-208]
+ ldp x26, x22, [x29, #-224]
+ ldp x27, x25, [x29, #-240]
+ ldr x1, [x28, #8]
+ add x8, sp, #1360 // =1360
+ mov x0, x23
+ bl .LmulPv576x64
+ cmn x27, x24
+ ldr x8, [sp, #1432]
+ ldr x9, [sp, #1424]
+ ldr x10, [sp, #48] // 8-byte Folded Reload
+ adcs x10, x25, x10
+ ldr x11, [sp, #1416]
+ ldp x12, x14, [sp, #64]
+ adcs x12, x26, x12
+ ldr x13, [sp, #1408]
+ adcs x14, x22, x14
+ ldr x15, [sp, #1400]
+ ldp x16, x18, [sp, #80]
+ adcs x16, x21, x16
+ ldr x17, [sp, #1392]
+ adcs x18, x19, x18
+ ldr x0, [sp, #1384]
+ ldp x1, x3, [sp, #96]
+ ldp x2, x4, [sp, #24]
+ adcs x1, x2, x1
+ ldr x2, [sp, #1376]
+ adcs x3, x4, x3
+ ldr x4, [sp, #1360]
+ ldp x5, x7, [sp, #112]
+ ldr x6, [sp, #40] // 8-byte Folded Reload
+ adcs x5, x6, x5
+ ldr x6, [sp, #1368]
+ ldr x19, [sp, #56] // 8-byte Folded Reload
+ adcs x7, x19, x7
+ adcs x19, xzr, xzr
+ adds x21, x10, x4
+ adcs x10, x12, x6
+ str x10, [sp, #48] // 8-byte Folded Spill
+ adcs x10, x14, x2
+ str x10, [sp, #104] // 8-byte Folded Spill
+ adcs x10, x16, x0
+ str x10, [sp, #96] // 8-byte Folded Spill
+ adcs x10, x18, x17
+ str x10, [sp, #88] // 8-byte Folded Spill
+ adcs x10, x1, x15
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x3, x13
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x5, x11
+ adcs x9, x7, x9
+ adcs x8, x19, x8
+ stp x8, x9, [sp, #112]
+ adcs x8, xzr, xzr
+ stp x8, x10, [sp, #56]
+ ldr x24, [sp, #144] // 8-byte Folded Reload
+ mul x1, x21, x24
+ add x8, sp, #1280 // =1280
+ mov x0, x20
+ bl .LmulPv576x64
+ ldr x8, [sp, #1352]
+ str x8, [sp, #40] // 8-byte Folded Spill
+ ldr x8, [sp, #1344]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #1336]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #1328]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x26, [sp, #1320]
+ ldr x27, [sp, #1312]
+ ldr x28, [sp, #1304]
+ ldr x22, [sp, #1296]
+ ldr x19, [sp, #1288]
+ ldr x23, [sp, #1280]
+ ldr x25, [sp, #136] // 8-byte Folded Reload
+ ldr x1, [x25, #16]
+ add x8, sp, #1200 // =1200
+ ldr x0, [sp, #152] // 8-byte Folded Reload
+ bl .LmulPv576x64
+ cmn x21, x23
+ ldr x8, [sp, #1272]
+ ldr x9, [sp, #1264]
+ ldr x10, [sp, #48] // 8-byte Folded Reload
+ adcs x10, x10, x19
+ ldr x11, [sp, #1256]
+ ldp x14, x12, [sp, #96]
+ adcs x12, x12, x22
+ ldr x13, [sp, #1248]
+ adcs x14, x14, x28
+ ldr x15, [sp, #1240]
+ ldp x18, x16, [sp, #80]
+ adcs x16, x16, x27
+ ldr x17, [sp, #1232]
+ adcs x18, x18, x26
+ ldr x0, [sp, #1224]
+ ldp x3, x1, [sp, #64]
+ ldp x2, x4, [sp, #16]
+ adcs x1, x1, x2
+ ldr x2, [sp, #1216]
+ adcs x3, x3, x4
+ ldr x4, [sp, #1200]
+ ldp x7, x5, [sp, #112]
+ ldp x6, x19, [sp, #32]
+ adcs x5, x5, x6
+ ldr x6, [sp, #1208]
+ adcs x7, x7, x19
+ ldr x19, [sp, #56] // 8-byte Folded Reload
+ adcs x19, x19, xzr
+ adds x21, x10, x4
+ adcs x10, x12, x6
+ str x10, [sp, #48] // 8-byte Folded Spill
+ adcs x10, x14, x2
+ str x10, [sp, #104] // 8-byte Folded Spill
+ adcs x10, x16, x0
+ str x10, [sp, #96] // 8-byte Folded Spill
+ adcs x10, x18, x17
+ str x10, [sp, #88] // 8-byte Folded Spill
+ adcs x10, x1, x15
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x3, x13
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x5, x11
+ adcs x9, x7, x9
+ adcs x8, x19, x8
+ stp x8, x9, [sp, #112]
+ adcs x8, xzr, xzr
+ stp x8, x10, [sp, #56]
+ mul x1, x21, x24
+ add x8, sp, #1120 // =1120
+ mov x0, x20
+ bl .LmulPv576x64
+ ldr x8, [sp, #1192]
+ str x8, [sp, #40] // 8-byte Folded Spill
+ ldr x8, [sp, #1184]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #1176]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #1168]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x26, [sp, #1160]
+ ldr x27, [sp, #1152]
+ ldr x28, [sp, #1144]
+ ldr x22, [sp, #1136]
+ ldr x19, [sp, #1128]
+ ldr x23, [sp, #1120]
+ ldr x1, [x25, #24]
+ add x8, sp, #1040 // =1040
+ ldr x24, [sp, #152] // 8-byte Folded Reload
+ mov x0, x24
+ bl .LmulPv576x64
+ cmn x21, x23
+ ldr x8, [sp, #1112]
+ ldr x9, [sp, #1104]
+ ldr x10, [sp, #48] // 8-byte Folded Reload
+ adcs x10, x10, x19
+ ldr x11, [sp, #1096]
+ ldp x14, x12, [sp, #96]
+ adcs x12, x12, x22
+ ldr x13, [sp, #1088]
+ adcs x14, x14, x28
+ ldr x15, [sp, #1080]
+ ldp x18, x16, [sp, #80]
+ adcs x16, x16, x27
+ ldr x17, [sp, #1072]
+ adcs x18, x18, x26
+ ldr x0, [sp, #1064]
+ ldp x3, x1, [sp, #64]
+ ldp x2, x4, [sp, #16]
+ adcs x1, x1, x2
+ ldr x2, [sp, #1056]
+ adcs x3, x3, x4
+ ldr x4, [sp, #1040]
+ ldp x7, x5, [sp, #112]
+ ldp x6, x19, [sp, #32]
+ adcs x5, x5, x6
+ ldr x6, [sp, #1048]
+ adcs x7, x7, x19
+ ldr x19, [sp, #56] // 8-byte Folded Reload
+ adcs x19, x19, xzr
+ adds x21, x10, x4
+ adcs x10, x12, x6
+ str x10, [sp, #48] // 8-byte Folded Spill
+ adcs x10, x14, x2
+ str x10, [sp, #104] // 8-byte Folded Spill
+ adcs x10, x16, x0
+ str x10, [sp, #96] // 8-byte Folded Spill
+ adcs x10, x18, x17
+ str x10, [sp, #88] // 8-byte Folded Spill
+ adcs x10, x1, x15
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x3, x13
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x5, x11
+ adcs x9, x7, x9
+ adcs x8, x19, x8
+ stp x8, x9, [sp, #112]
+ adcs x8, xzr, xzr
+ stp x8, x10, [sp, #56]
+ ldr x8, [sp, #144] // 8-byte Folded Reload
+ mul x1, x21, x8
+ add x8, sp, #960 // =960
+ mov x0, x20
+ bl .LmulPv576x64
+ ldr x8, [sp, #1032]
+ str x8, [sp, #40] // 8-byte Folded Spill
+ ldr x8, [sp, #1024]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #1016]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #1008]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x26, [sp, #1000]
+ ldr x27, [sp, #992]
+ ldr x28, [sp, #984]
+ ldr x22, [sp, #976]
+ ldr x19, [sp, #968]
+ ldr x23, [sp, #960]
+ ldr x1, [x25, #32]
+ add x8, sp, #880 // =880
+ mov x0, x24
+ bl .LmulPv576x64
+ cmn x21, x23
+ ldr x8, [sp, #952]
+ ldr x9, [sp, #944]
+ ldr x10, [sp, #48] // 8-byte Folded Reload
+ adcs x10, x10, x19
+ ldr x11, [sp, #936]
+ ldp x14, x12, [sp, #96]
+ adcs x12, x12, x22
+ ldr x13, [sp, #928]
+ adcs x14, x14, x28
+ ldr x15, [sp, #920]
+ ldp x18, x16, [sp, #80]
+ adcs x16, x16, x27
+ ldr x17, [sp, #912]
+ adcs x18, x18, x26
+ ldr x0, [sp, #904]
+ ldp x3, x1, [sp, #64]
+ ldp x2, x4, [sp, #16]
+ adcs x1, x1, x2
+ ldr x2, [sp, #896]
+ adcs x3, x3, x4
+ ldr x4, [sp, #880]
+ ldp x7, x5, [sp, #112]
+ ldp x6, x19, [sp, #32]
+ adcs x5, x5, x6
+ ldr x6, [sp, #888]
+ adcs x7, x7, x19
+ ldr x19, [sp, #56] // 8-byte Folded Reload
+ adcs x19, x19, xzr
+ adds x21, x10, x4
+ adcs x10, x12, x6
+ str x10, [sp, #48] // 8-byte Folded Spill
+ adcs x10, x14, x2
+ str x10, [sp, #104] // 8-byte Folded Spill
+ adcs x10, x16, x0
+ str x10, [sp, #96] // 8-byte Folded Spill
+ adcs x10, x18, x17
+ str x10, [sp, #88] // 8-byte Folded Spill
+ adcs x10, x1, x15
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x3, x13
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x5, x11
+ adcs x9, x7, x9
+ adcs x8, x19, x8
+ stp x8, x9, [sp, #112]
+ adcs x8, xzr, xzr
+ stp x8, x10, [sp, #56]
+ ldr x25, [sp, #144] // 8-byte Folded Reload
+ mul x1, x21, x25
+ add x8, sp, #800 // =800
+ mov x0, x20
+ bl .LmulPv576x64
+ ldr x8, [sp, #872]
+ str x8, [sp, #40] // 8-byte Folded Spill
+ ldr x8, [sp, #864]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #856]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #848]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x26, [sp, #840]
+ ldr x27, [sp, #832]
+ ldr x28, [sp, #824]
+ ldr x22, [sp, #816]
+ ldr x19, [sp, #808]
+ ldr x23, [sp, #800]
+ ldr x24, [sp, #136] // 8-byte Folded Reload
+ ldr x1, [x24, #40]
+ add x8, sp, #720 // =720
+ ldr x0, [sp, #152] // 8-byte Folded Reload
+ bl .LmulPv576x64
+ cmn x21, x23
+ ldr x8, [sp, #792]
+ ldr x9, [sp, #784]
+ ldr x10, [sp, #48] // 8-byte Folded Reload
+ adcs x10, x10, x19
+ ldr x11, [sp, #776]
+ ldp x14, x12, [sp, #96]
+ adcs x12, x12, x22
+ ldr x13, [sp, #768]
+ adcs x14, x14, x28
+ ldr x15, [sp, #760]
+ ldp x18, x16, [sp, #80]
+ adcs x16, x16, x27
+ ldr x17, [sp, #752]
+ adcs x18, x18, x26
+ ldr x0, [sp, #744]
+ ldp x3, x1, [sp, #64]
+ ldp x2, x4, [sp, #16]
+ adcs x1, x1, x2
+ ldr x2, [sp, #736]
+ adcs x3, x3, x4
+ ldr x4, [sp, #720]
+ ldp x7, x5, [sp, #112]
+ ldp x6, x19, [sp, #32]
+ adcs x5, x5, x6
+ ldr x6, [sp, #728]
+ adcs x7, x7, x19
+ ldr x19, [sp, #56] // 8-byte Folded Reload
+ adcs x19, x19, xzr
+ adds x21, x10, x4
+ adcs x10, x12, x6
+ str x10, [sp, #48] // 8-byte Folded Spill
+ adcs x10, x14, x2
+ str x10, [sp, #104] // 8-byte Folded Spill
+ adcs x10, x16, x0
+ str x10, [sp, #96] // 8-byte Folded Spill
+ adcs x10, x18, x17
+ str x10, [sp, #88] // 8-byte Folded Spill
+ adcs x10, x1, x15
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x3, x13
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x5, x11
+ adcs x9, x7, x9
+ adcs x8, x19, x8
+ stp x8, x9, [sp, #112]
+ adcs x8, xzr, xzr
+ stp x8, x10, [sp, #56]
+ mul x1, x21, x25
+ add x8, sp, #640 // =640
+ mov x0, x20
+ bl .LmulPv576x64
+ ldr x8, [sp, #712]
+ str x8, [sp, #40] // 8-byte Folded Spill
+ ldr x8, [sp, #704]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #696]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #688]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x26, [sp, #680]
+ ldr x27, [sp, #672]
+ ldr x28, [sp, #664]
+ ldr x22, [sp, #656]
+ ldr x19, [sp, #648]
+ ldr x23, [sp, #640]
+ ldr x1, [x24, #48]
+ add x8, sp, #560 // =560
+ ldr x25, [sp, #152] // 8-byte Folded Reload
+ mov x0, x25
+ bl .LmulPv576x64
+ cmn x21, x23
+ ldr x8, [sp, #632]
+ ldr x9, [sp, #624]
+ ldr x10, [sp, #48] // 8-byte Folded Reload
+ adcs x10, x10, x19
+ ldr x11, [sp, #616]
+ ldp x14, x12, [sp, #96]
+ adcs x12, x12, x22
+ ldr x13, [sp, #608]
+ adcs x14, x14, x28
+ ldr x15, [sp, #600]
+ ldp x18, x16, [sp, #80]
+ adcs x16, x16, x27
+ ldr x17, [sp, #592]
+ adcs x18, x18, x26
+ ldr x0, [sp, #584]
+ ldp x3, x1, [sp, #64]
+ ldp x2, x4, [sp, #16]
+ adcs x1, x1, x2
+ ldr x2, [sp, #576]
+ adcs x3, x3, x4
+ ldr x4, [sp, #560]
+ ldp x7, x5, [sp, #112]
+ ldp x6, x19, [sp, #32]
+ adcs x5, x5, x6
+ ldr x6, [sp, #568]
+ adcs x7, x7, x19
+ ldr x19, [sp, #56] // 8-byte Folded Reload
+ adcs x19, x19, xzr
+ adds x21, x10, x4
+ adcs x10, x12, x6
+ str x10, [sp, #48] // 8-byte Folded Spill
+ adcs x10, x14, x2
+ str x10, [sp, #104] // 8-byte Folded Spill
+ adcs x10, x16, x0
+ str x10, [sp, #96] // 8-byte Folded Spill
+ adcs x10, x18, x17
+ str x10, [sp, #88] // 8-byte Folded Spill
+ adcs x10, x1, x15
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x3, x13
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x5, x11
+ adcs x9, x7, x9
+ adcs x8, x19, x8
+ stp x8, x9, [sp, #112]
+ adcs x8, xzr, xzr
+ stp x8, x10, [sp, #56]
+ ldr x24, [sp, #144] // 8-byte Folded Reload
+ mul x1, x21, x24
+ add x8, sp, #480 // =480
+ mov x0, x20
+ bl .LmulPv576x64
+ ldr x8, [sp, #552]
+ str x8, [sp, #40] // 8-byte Folded Spill
+ ldr x8, [sp, #544]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #536]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #528]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x26, [sp, #520]
+ ldr x27, [sp, #512]
+ ldp x22, x28, [sp, #496]
+ ldp x23, x19, [sp, #480]
+ ldr x8, [sp, #136] // 8-byte Folded Reload
+ ldr x1, [x8, #56]
+ add x8, sp, #400 // =400
+ mov x0, x25
+ bl .LmulPv576x64
+ cmn x21, x23
+ ldp x9, x8, [sp, #464]
+ ldr x10, [sp, #48] // 8-byte Folded Reload
+ adcs x10, x10, x19
+ ldp x13, x11, [sp, #448]
+ ldp x14, x12, [sp, #96]
+ adcs x12, x12, x22
+ adcs x14, x14, x28
+ ldp x17, x15, [sp, #432]
+ ldp x18, x16, [sp, #80]
+ adcs x16, x16, x27
+ adcs x18, x18, x26
+ ldp x3, x1, [sp, #64]
+ ldp x2, x4, [sp, #16]
+ adcs x1, x1, x2
+ ldp x2, x0, [sp, #416]
+ adcs x3, x3, x4
+ ldp x7, x5, [sp, #112]
+ ldp x6, x19, [sp, #32]
+ adcs x5, x5, x6
+ ldp x4, x6, [sp, #400]
+ adcs x7, x7, x19
+ ldr x19, [sp, #56] // 8-byte Folded Reload
+ adcs x19, x19, xzr
+ adds x21, x10, x4
+ adcs x10, x12, x6
+ str x10, [sp, #48] // 8-byte Folded Spill
+ adcs x10, x14, x2
+ str x10, [sp, #104] // 8-byte Folded Spill
+ adcs x10, x16, x0
+ str x10, [sp, #96] // 8-byte Folded Spill
+ adcs x10, x18, x17
+ str x10, [sp, #88] // 8-byte Folded Spill
+ adcs x10, x1, x15
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x3, x13
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x5, x11
+ adcs x9, x7, x9
+ adcs x8, x19, x8
+ stp x8, x9, [sp, #112]
+ adcs x8, xzr, xzr
+ stp x8, x10, [sp, #56]
+ mul x1, x21, x24
+ add x8, sp, #320 // =320
+ mov x0, x20
+ bl .LmulPv576x64
+ ldr x8, [sp, #392]
+ str x8, [sp, #40] // 8-byte Folded Spill
+ ldp x24, x8, [sp, #376]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldp x26, x25, [sp, #360]
+ ldp x28, x27, [sp, #344]
+ ldp x19, x22, [sp, #328]
+ ldr x23, [sp, #320]
+ ldr x8, [sp, #136] // 8-byte Folded Reload
+ ldr x1, [x8, #64]
+ add x8, sp, #240 // =240
+ ldr x0, [sp, #152] // 8-byte Folded Reload
+ bl .LmulPv576x64
+ cmn x21, x23
+ ldp x9, x8, [sp, #304]
+ ldr x10, [sp, #48] // 8-byte Folded Reload
+ adcs x10, x10, x19
+ ldp x13, x11, [sp, #288]
+ ldp x14, x12, [sp, #96]
+ adcs x12, x12, x22
+ adcs x14, x14, x28
+ ldp x17, x15, [sp, #272]
+ ldp x18, x16, [sp, #80]
+ adcs x16, x16, x27
+ adcs x18, x18, x26
+ ldp x2, x0, [sp, #256]
+ ldp x3, x1, [sp, #64]
+ adcs x1, x1, x25
+ adcs x3, x3, x24
+ ldp x7, x5, [sp, #112]
+ ldp x6, x19, [sp, #32]
+ adcs x5, x5, x6
+ ldp x4, x6, [sp, #240]
+ adcs x7, x7, x19
+ ldr x19, [sp, #56] // 8-byte Folded Reload
+ adcs x19, x19, xzr
+ adds x21, x10, x4
+ adcs x22, x12, x6
+ adcs x23, x14, x2
+ adcs x24, x16, x0
+ adcs x25, x18, x17
+ adcs x26, x1, x15
+ adcs x27, x3, x13
+ adcs x10, x5, x11
+ str x10, [sp, #152] // 8-byte Folded Spill
+ adcs x9, x7, x9
+ str x9, [sp, #136] // 8-byte Folded Spill
+ adcs x19, x19, x8
+ adcs x28, xzr, xzr
+ ldr x8, [sp, #144] // 8-byte Folded Reload
+ mul x1, x21, x8
+ add x8, sp, #160 // =160
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x16, x8, [sp, #224]
+ ldp x9, x10, [sp, #160]
+ ldp x11, x12, [sp, #176]
+ cmn x21, x9
+ ldp x13, x9, [sp, #192]
+ adcs x10, x22, x10
+ ldp x14, x15, [sp, #208]
+ adcs x11, x23, x11
+ adcs x12, x24, x12
+ adcs x13, x25, x13
+ adcs x9, x26, x9
+ adcs x14, x27, x14
+ ldp x0, x17, [x20, #56]
+ ldp x2, x18, [x20, #40]
+ ldp x4, x1, [x20, #24]
+ ldp x6, x3, [x20, #8]
+ ldr x5, [x20]
+ ldr x7, [sp, #152] // 8-byte Folded Reload
+ adcs x15, x7, x15
+ ldr x7, [sp, #136] // 8-byte Folded Reload
+ adcs x16, x7, x16
+ adcs x8, x19, x8
+ adcs x7, x28, xzr
+ subs x5, x10, x5
+ sbcs x6, x11, x6
+ sbcs x3, x12, x3
+ sbcs x4, x13, x4
+ sbcs x1, x9, x1
+ sbcs x2, x14, x2
+ sbcs x18, x15, x18
+ sbcs x0, x16, x0
+ sbcs x17, x8, x17
+ sbcs x7, x7, xzr
+ tst x7, #0x1
+ csel x10, x10, x5, ne
+ csel x11, x11, x6, ne
+ csel x12, x12, x3, ne
+ csel x13, x13, x4, ne
+ csel x9, x9, x1, ne
+ csel x14, x14, x2, ne
+ csel x15, x15, x18, ne
+ csel x16, x16, x0, ne
+ csel x8, x8, x17, ne
+ ldr x17, [sp, #128] // 8-byte Folded Reload
+ stp x10, x11, [x17]
+ stp x12, x13, [x17, #16]
+ stp x9, x14, [x17, #32]
+ stp x15, x16, [x17, #48]
+ str x8, [x17, #64]
+ sub sp, x29, #80 // =80
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end130:
+ .size mcl_fp_mont9L, .Lfunc_end130-mcl_fp_mont9L
+
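+// mcl_fp_montNF9L(z, x, y, p): Montgomery multiplication for the "NF" case
+// (modulus assumed to leave the top bit free), so no extra carry word is kept
+// between rounds as in mcl_fp_mont9L.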
+ .globl mcl_fp_montNF9L
+ .align 2
+ .type mcl_fp_montNF9L,@function
+mcl_fp_montNF9L: // @mcl_fp_montNF9L
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ add x29, sp, #80 // =80
+ sub sp, sp, #1584 // =1584
+ mov x20, x3
+ mov x28, x2
+ str x28, [sp, #120] // 8-byte Folded Spill
+ ldur x19, [x20, #-8]
+ str x19, [sp, #128] // 8-byte Folded Spill
+ ldr x9, [x28]
+ mov x23, x1
+ str x23, [sp, #136] // 8-byte Folded Spill
+ str x0, [sp, #112] // 8-byte Folded Spill
+ sub x8, x29, #160 // =160
+ mov x0, x23
+ mov x1, x9
+ bl .LmulPv576x64
+ ldur x24, [x29, #-160]
+ ldur x8, [x29, #-88]
+ str x8, [sp, #104] // 8-byte Folded Spill
+ ldur x8, [x29, #-96]
+ str x8, [sp, #96] // 8-byte Folded Spill
+ ldur x8, [x29, #-104]
+ str x8, [sp, #88] // 8-byte Folded Spill
+ ldur x8, [x29, #-112]
+ str x8, [sp, #80] // 8-byte Folded Spill
+ ldur x8, [x29, #-120]
+ str x8, [sp, #72] // 8-byte Folded Spill
+ ldur x8, [x29, #-128]
+ str x8, [sp, #64] // 8-byte Folded Spill
+ ldur x8, [x29, #-136]
+ str x8, [sp, #56] // 8-byte Folded Spill
+ ldur x8, [x29, #-144]
+ str x8, [sp, #48] // 8-byte Folded Spill
+ ldur x8, [x29, #-152]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ mul x1, x24, x19
+ sub x8, x29, #240 // =240
+ mov x0, x20
+ bl .LmulPv576x64
+ ldur x8, [x29, #-168]
+ str x8, [sp, #40] // 8-byte Folded Spill
+ ldur x8, [x29, #-176]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldur x8, [x29, #-184]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldur x8, [x29, #-192]
+ str x8, [sp, #8] // 8-byte Folded Spill
+ ldp x21, x19, [x29, #-208]
+ ldp x26, x22, [x29, #-224]
+ ldp x27, x25, [x29, #-240]
+ ldr x1, [x28, #8]
+ add x8, sp, #1344 // =1344
+ mov x0, x23
+ bl .LmulPv576x64
+ cmn x27, x24
+ ldr x8, [sp, #1416]
+ ldr x9, [sp, #1408]
+ ldr x10, [sp, #32] // 8-byte Folded Reload
+ adcs x10, x25, x10
+ ldr x11, [sp, #1400]
+ ldp x12, x14, [sp, #48]
+ adcs x12, x26, x12
+ ldr x13, [sp, #1392]
+ adcs x14, x22, x14
+ ldr x15, [sp, #1384]
+ ldp x16, x18, [sp, #64]
+ adcs x16, x21, x16
+ ldr x17, [sp, #1376]
+ adcs x18, x19, x18
+ ldr x0, [sp, #1368]
+ ldp x1, x3, [sp, #80]
+ ldp x2, x4, [sp, #8]
+ adcs x1, x2, x1
+ ldr x2, [sp, #1352]
+ adcs x3, x4, x3
+ ldr x4, [sp, #1344]
+ ldp x5, x7, [sp, #96]
+ ldr x6, [sp, #24] // 8-byte Folded Reload
+ adcs x5, x6, x5
+ ldr x6, [sp, #1360]
+ ldr x19, [sp, #40] // 8-byte Folded Reload
+ adcs x7, x19, x7
+ adds x19, x10, x4
+ adcs x10, x12, x2
+ str x10, [sp, #40] // 8-byte Folded Spill
+ adcs x10, x14, x6
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x16, x0
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x18, x17
+ str x10, [sp, #64] // 8-byte Folded Spill
+ adcs x10, x1, x15
+ str x10, [sp, #56] // 8-byte Folded Spill
+ adcs x10, x3, x13
+ str x10, [sp, #48] // 8-byte Folded Spill
+ adcs x10, x5, x11
+ adcs x9, x7, x9
+ stp x9, x10, [sp, #96]
+ adcs x8, x8, xzr
+ str x8, [sp, #88] // 8-byte Folded Spill
+ ldr x22, [sp, #128] // 8-byte Folded Reload
+ mul x1, x19, x22
+ add x8, sp, #1264 // =1264
+ mov x0, x20
+ bl .LmulPv576x64
+ ldr x8, [sp, #1336]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #1328]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #1320]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x8, [sp, #1312]
+ str x8, [sp, #8] // 8-byte Folded Spill
+ ldr x24, [sp, #1304]
+ ldr x25, [sp, #1296]
+ ldr x26, [sp, #1288]
+ ldr x21, [sp, #1280]
+ ldr x27, [sp, #1272]
+ ldr x28, [sp, #1264]
+ ldr x23, [sp, #120] // 8-byte Folded Reload
+ ldr x1, [x23, #16]
+ add x8, sp, #1184 // =1184
+ ldr x0, [sp, #136] // 8-byte Folded Reload
+ bl .LmulPv576x64
+ cmn x19, x28
+ ldr x8, [sp, #1256]
+ ldr x9, [sp, #1248]
+ ldp x10, x1, [sp, #40]
+ adcs x10, x10, x27
+ ldr x11, [sp, #1240]
+ ldp x14, x12, [sp, #72]
+ adcs x12, x12, x21
+ ldr x13, [sp, #1232]
+ adcs x14, x14, x26
+ ldr x15, [sp, #1224]
+ ldp x18, x16, [sp, #56]
+ adcs x16, x16, x25
+ ldr x17, [sp, #1216]
+ adcs x18, x18, x24
+ ldr x0, [sp, #1208]
+ ldp x2, x4, [sp, #8]
+ adcs x1, x1, x2
+ ldr x2, [sp, #1192]
+ ldp x5, x3, [sp, #96]
+ adcs x3, x3, x4
+ ldr x4, [sp, #1184]
+ ldp x6, x19, [sp, #24]
+ adcs x5, x5, x6
+ ldr x6, [sp, #1200]
+ ldr x7, [sp, #88] // 8-byte Folded Reload
+ adcs x7, x7, x19
+ adds x19, x10, x4
+ adcs x10, x12, x2
+ str x10, [sp, #40] // 8-byte Folded Spill
+ adcs x10, x14, x6
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x16, x0
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x18, x17
+ str x10, [sp, #64] // 8-byte Folded Spill
+ adcs x10, x1, x15
+ str x10, [sp, #56] // 8-byte Folded Spill
+ adcs x10, x3, x13
+ str x10, [sp, #48] // 8-byte Folded Spill
+ adcs x10, x5, x11
+ adcs x9, x7, x9
+ stp x9, x10, [sp, #96]
+ adcs x8, x8, xzr
+ str x8, [sp, #88] // 8-byte Folded Spill
+ mul x1, x19, x22
+ add x8, sp, #1104 // =1104
+ mov x0, x20
+ bl .LmulPv576x64
+ ldr x8, [sp, #1176]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #1168]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #1160]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x8, [sp, #1152]
+ str x8, [sp, #8] // 8-byte Folded Spill
+ ldr x24, [sp, #1144]
+ ldr x25, [sp, #1136]
+ ldr x26, [sp, #1128]
+ ldr x21, [sp, #1120]
+ ldr x27, [sp, #1112]
+ ldr x28, [sp, #1104]
+ ldr x1, [x23, #24]
+ add x8, sp, #1024 // =1024
+ ldr x22, [sp, #136] // 8-byte Folded Reload
+ mov x0, x22
+ bl .LmulPv576x64
+ cmn x19, x28
+ ldr x8, [sp, #1096]
+ ldr x9, [sp, #1088]
+ ldp x10, x1, [sp, #40]
+ adcs x10, x10, x27
+ ldr x11, [sp, #1080]
+ ldp x14, x12, [sp, #72]
+ adcs x12, x12, x21
+ ldr x13, [sp, #1072]
+ adcs x14, x14, x26
+ ldr x15, [sp, #1064]
+ ldp x18, x16, [sp, #56]
+ adcs x16, x16, x25
+ ldr x17, [sp, #1056]
+ adcs x18, x18, x24
+ ldr x0, [sp, #1048]
+ ldp x2, x4, [sp, #8]
+ adcs x1, x1, x2
+ ldr x2, [sp, #1032]
+ ldp x5, x3, [sp, #96]
+ adcs x3, x3, x4
+ ldr x4, [sp, #1024]
+ ldp x6, x19, [sp, #24]
+ adcs x5, x5, x6
+ ldr x6, [sp, #1040]
+ ldr x7, [sp, #88] // 8-byte Folded Reload
+ adcs x7, x7, x19
+ adds x19, x10, x4
+ adcs x10, x12, x2
+ str x10, [sp, #40] // 8-byte Folded Spill
+ adcs x10, x14, x6
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x16, x0
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x18, x17
+ str x10, [sp, #64] // 8-byte Folded Spill
+ adcs x10, x1, x15
+ str x10, [sp, #56] // 8-byte Folded Spill
+ adcs x10, x3, x13
+ str x10, [sp, #48] // 8-byte Folded Spill
+ adcs x10, x5, x11
+ adcs x9, x7, x9
+ stp x9, x10, [sp, #96]
+ adcs x8, x8, xzr
+ str x8, [sp, #88] // 8-byte Folded Spill
+ ldr x8, [sp, #128] // 8-byte Folded Reload
+ mul x1, x19, x8
+ add x8, sp, #944 // =944
+ mov x0, x20
+ bl .LmulPv576x64
+ ldr x8, [sp, #1016]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #1008]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #1000]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x8, [sp, #992]
+ str x8, [sp, #8] // 8-byte Folded Spill
+ ldr x24, [sp, #984]
+ ldr x25, [sp, #976]
+ ldr x26, [sp, #968]
+ ldr x21, [sp, #960]
+ ldr x27, [sp, #952]
+ ldr x28, [sp, #944]
+ ldr x1, [x23, #32]
+ add x8, sp, #864 // =864
+ mov x0, x22
+ bl .LmulPv576x64
+ cmn x19, x28
+ ldr x8, [sp, #936]
+ ldr x9, [sp, #928]
+ ldp x10, x1, [sp, #40]
+ adcs x10, x10, x27
+ ldr x11, [sp, #920]
+ ldp x14, x12, [sp, #72]
+ adcs x12, x12, x21
+ ldr x13, [sp, #912]
+ adcs x14, x14, x26
+ ldr x15, [sp, #904]
+ ldp x18, x16, [sp, #56]
+ adcs x16, x16, x25
+ ldr x17, [sp, #896]
+ adcs x18, x18, x24
+ ldr x0, [sp, #888]
+ ldp x2, x4, [sp, #8]
+ adcs x1, x1, x2
+ ldr x2, [sp, #872]
+ ldp x5, x3, [sp, #96]
+ adcs x3, x3, x4
+ ldr x4, [sp, #864]
+ ldp x6, x19, [sp, #24]
+ adcs x5, x5, x6
+ ldr x6, [sp, #880]
+ ldr x7, [sp, #88] // 8-byte Folded Reload
+ adcs x7, x7, x19
+ adds x19, x10, x4
+ adcs x10, x12, x2
+ str x10, [sp, #40] // 8-byte Folded Spill
+ adcs x10, x14, x6
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x16, x0
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x18, x17
+ str x10, [sp, #64] // 8-byte Folded Spill
+ adcs x10, x1, x15
+ str x10, [sp, #56] // 8-byte Folded Spill
+ adcs x10, x3, x13
+ str x10, [sp, #48] // 8-byte Folded Spill
+ adcs x10, x5, x11
+ adcs x9, x7, x9
+ stp x9, x10, [sp, #96]
+ adcs x8, x8, xzr
+ str x8, [sp, #88] // 8-byte Folded Spill
+ ldr x23, [sp, #128] // 8-byte Folded Reload
+ mul x1, x19, x23
+ add x8, sp, #784 // =784
+ mov x0, x20
+ bl .LmulPv576x64
+ ldr x8, [sp, #856]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #848]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #840]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x8, [sp, #832]
+ str x8, [sp, #8] // 8-byte Folded Spill
+ ldr x24, [sp, #824]
+ ldr x25, [sp, #816]
+ ldr x26, [sp, #808]
+ ldr x21, [sp, #800]
+ ldr x27, [sp, #792]
+ ldr x28, [sp, #784]
+ ldr x22, [sp, #120] // 8-byte Folded Reload
+ ldr x1, [x22, #40]
+ add x8, sp, #704 // =704
+ ldr x0, [sp, #136] // 8-byte Folded Reload
+ bl .LmulPv576x64
+ cmn x19, x28
+ ldr x8, [sp, #776]
+ ldr x9, [sp, #768]
+ ldp x10, x1, [sp, #40]
+ adcs x10, x10, x27
+ ldr x11, [sp, #760]
+ ldp x14, x12, [sp, #72]
+ adcs x12, x12, x21
+ ldr x13, [sp, #752]
+ adcs x14, x14, x26
+ ldr x15, [sp, #744]
+ ldp x18, x16, [sp, #56]
+ adcs x16, x16, x25
+ ldr x17, [sp, #736]
+ adcs x18, x18, x24
+ ldr x0, [sp, #728]
+ ldp x2, x4, [sp, #8]
+ adcs x1, x1, x2
+ ldr x2, [sp, #712]
+ ldp x5, x3, [sp, #96]
+ adcs x3, x3, x4
+ ldr x4, [sp, #704]
+ ldp x6, x19, [sp, #24]
+ adcs x5, x5, x6
+ ldr x6, [sp, #720]
+ ldr x7, [sp, #88] // 8-byte Folded Reload
+ adcs x7, x7, x19
+ adds x19, x10, x4
+ adcs x10, x12, x2
+ str x10, [sp, #40] // 8-byte Folded Spill
+ adcs x10, x14, x6
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x16, x0
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x18, x17
+ str x10, [sp, #64] // 8-byte Folded Spill
+ adcs x10, x1, x15
+ str x10, [sp, #56] // 8-byte Folded Spill
+ adcs x10, x3, x13
+ str x10, [sp, #48] // 8-byte Folded Spill
+ adcs x10, x5, x11
+ adcs x9, x7, x9
+ stp x9, x10, [sp, #96]
+ adcs x8, x8, xzr
+ str x8, [sp, #88] // 8-byte Folded Spill
+ mul x1, x19, x23
+ add x8, sp, #624 // =624
+ mov x0, x20
+ bl .LmulPv576x64
+ ldr x8, [sp, #696]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #688]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #680]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x8, [sp, #672]
+ str x8, [sp, #8] // 8-byte Folded Spill
+ ldr x24, [sp, #664]
+ ldr x25, [sp, #656]
+ ldr x26, [sp, #648]
+ ldr x21, [sp, #640]
+ ldr x27, [sp, #632]
+ ldr x28, [sp, #624]
+ ldr x1, [x22, #48]
+ add x8, sp, #544 // =544
+ ldr x23, [sp, #136] // 8-byte Folded Reload
+ mov x0, x23
+ bl .LmulPv576x64
+ cmn x19, x28
+ ldr x8, [sp, #616]
+ ldr x9, [sp, #608]
+ ldp x10, x1, [sp, #40]
+ adcs x10, x10, x27
+ ldr x11, [sp, #600]
+ ldp x14, x12, [sp, #72]
+ adcs x12, x12, x21
+ ldr x13, [sp, #592]
+ adcs x14, x14, x26
+ ldr x15, [sp, #584]
+ ldp x18, x16, [sp, #56]
+ adcs x16, x16, x25
+ ldr x17, [sp, #576]
+ adcs x18, x18, x24
+ ldr x0, [sp, #568]
+ ldp x2, x4, [sp, #8]
+ adcs x1, x1, x2
+ ldr x2, [sp, #552]
+ ldp x5, x3, [sp, #96]
+ adcs x3, x3, x4
+ ldr x4, [sp, #544]
+ ldp x6, x19, [sp, #24]
+ adcs x5, x5, x6
+ ldr x6, [sp, #560]
+ ldr x7, [sp, #88] // 8-byte Folded Reload
+ adcs x7, x7, x19
+ adds x19, x10, x4
+ adcs x10, x12, x2
+ str x10, [sp, #40] // 8-byte Folded Spill
+ adcs x10, x14, x6
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x16, x0
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x18, x17
+ str x10, [sp, #64] // 8-byte Folded Spill
+ adcs x10, x1, x15
+ str x10, [sp, #56] // 8-byte Folded Spill
+ adcs x10, x3, x13
+ str x10, [sp, #48] // 8-byte Folded Spill
+ adcs x10, x5, x11
+ adcs x9, x7, x9
+ stp x9, x10, [sp, #96]
+ adcs x8, x8, xzr
+ str x8, [sp, #88] // 8-byte Folded Spill
+ ldr x22, [sp, #128] // 8-byte Folded Reload
+ mul x1, x19, x22
+ add x8, sp, #464 // =464
+ mov x0, x20
+ bl .LmulPv576x64
+ ldr x8, [sp, #536]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #528]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #520]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x8, [sp, #512]
+ str x8, [sp, #8] // 8-byte Folded Spill
+ ldp x25, x24, [sp, #496]
+ ldp x21, x26, [sp, #480]
+ ldp x28, x27, [sp, #464]
+ ldr x8, [sp, #120] // 8-byte Folded Reload
+ ldr x1, [x8, #56]
+ add x8, sp, #384 // =384
+ mov x0, x23
+ bl .LmulPv576x64
+ cmn x19, x28
+ ldp x9, x8, [sp, #448]
+ ldp x10, x1, [sp, #40]
+ adcs x10, x10, x27
+ ldp x13, x11, [sp, #432]
+ ldp x14, x12, [sp, #72]
+ adcs x12, x12, x21
+ adcs x14, x14, x26
+ ldp x17, x15, [sp, #416]
+ ldp x18, x16, [sp, #56]
+ adcs x16, x16, x25
+ adcs x18, x18, x24
+ ldp x2, x4, [sp, #8]
+ adcs x1, x1, x2
+ ldp x5, x3, [sp, #96]
+ adcs x3, x3, x4
+ ldp x4, x2, [sp, #384]
+ ldp x6, x19, [sp, #24]
+ adcs x5, x5, x6
+ ldp x6, x0, [sp, #400]
+ ldr x7, [sp, #88] // 8-byte Folded Reload
+ adcs x7, x7, x19
+ adds x19, x10, x4
+ adcs x10, x12, x2
+ str x10, [sp, #40] // 8-byte Folded Spill
+ adcs x10, x14, x6
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x16, x0
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x18, x17
+ str x10, [sp, #64] // 8-byte Folded Spill
+ adcs x10, x1, x15
+ str x10, [sp, #56] // 8-byte Folded Spill
+ adcs x10, x3, x13
+ str x10, [sp, #48] // 8-byte Folded Spill
+ adcs x10, x5, x11
+ adcs x9, x7, x9
+ stp x9, x10, [sp, #96]
+ adcs x8, x8, xzr
+ str x8, [sp, #88] // 8-byte Folded Spill
+ mul x1, x19, x22
+ add x8, sp, #304 // =304
+ mov x0, x20
+ bl .LmulPv576x64
+ ldr x8, [sp, #376]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldp x22, x8, [sp, #360]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldp x24, x23, [sp, #344]
+ ldp x26, x25, [sp, #328]
+ ldp x27, x21, [sp, #312]
+ ldr x28, [sp, #304]
+ ldr x8, [sp, #120] // 8-byte Folded Reload
+ ldr x1, [x8, #64]
+ add x8, sp, #224 // =224
+ ldr x0, [sp, #136] // 8-byte Folded Reload
+ bl .LmulPv576x64
+ cmn x19, x28
+ ldp x9, x8, [sp, #288]
+ ldp x10, x1, [sp, #40]
+ adcs x10, x10, x27
+ ldp x13, x11, [sp, #272]
+ ldp x14, x12, [sp, #72]
+ adcs x12, x12, x21
+ adcs x14, x14, x26
+ ldp x17, x15, [sp, #256]
+ ldp x18, x16, [sp, #56]
+ adcs x16, x16, x25
+ adcs x18, x18, x24
+ adcs x1, x1, x23
+ ldp x4, x2, [sp, #224]
+ ldp x5, x3, [sp, #96]
+ adcs x3, x3, x22
+ ldp x6, x19, [sp, #24]
+ adcs x5, x5, x6
+ ldp x6, x0, [sp, #240]
+ ldr x7, [sp, #88] // 8-byte Folded Reload
+ adcs x7, x7, x19
+ adds x19, x10, x4
+ adcs x21, x12, x2
+ adcs x22, x14, x6
+ adcs x23, x16, x0
+ adcs x24, x18, x17
+ adcs x25, x1, x15
+ adcs x26, x3, x13
+ adcs x10, x5, x11
+ str x10, [sp, #136] // 8-byte Folded Spill
+ adcs x28, x7, x9
+ adcs x27, x8, xzr
+ ldr x8, [sp, #128] // 8-byte Folded Reload
+ mul x1, x19, x8
+ add x8, sp, #144 // =144
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x16, x8, [sp, #208]
+ ldp x9, x10, [sp, #144]
+ ldp x11, x12, [sp, #160]
+ cmn x19, x9
+ ldp x13, x9, [sp, #176]
+ adcs x10, x21, x10
+ ldp x14, x15, [sp, #192]
+ adcs x11, x22, x11
+ adcs x12, x23, x12
+ adcs x13, x24, x13
+ adcs x9, x25, x9
+ adcs x14, x26, x14
+ ldp x0, x17, [x20, #56]
+ ldp x2, x18, [x20, #40]
+ ldp x4, x1, [x20, #24]
+ ldp x6, x3, [x20, #8]
+ ldr x5, [x20]
+ ldr x7, [sp, #136] // 8-byte Folded Reload
+ adcs x15, x7, x15
+ adcs x16, x28, x16
+ adcs x8, x27, x8
+ subs x5, x10, x5
+ sbcs x6, x11, x6
+ sbcs x3, x12, x3
+ sbcs x4, x13, x4
+ sbcs x1, x9, x1
+ sbcs x2, x14, x2
+ sbcs x18, x15, x18
+ sbcs x0, x16, x0
+ sbcs x17, x8, x17
+ asr x7, x17, #63
+ cmp x7, #0 // =0
+ csel x10, x10, x5, lt
+ csel x11, x11, x6, lt
+ csel x12, x12, x3, lt
+ csel x13, x13, x4, lt
+ csel x9, x9, x1, lt
+ csel x14, x14, x2, lt
+ csel x15, x15, x18, lt
+ csel x16, x16, x0, lt
+ csel x8, x8, x17, lt
+ ldr x17, [sp, #112] // 8-byte Folded Reload
+ stp x10, x11, [x17]
+ stp x12, x13, [x17, #16]
+ stp x9, x14, [x17, #32]
+ stp x15, x16, [x17, #48]
+ str x8, [x17, #64]
+ sub sp, x29, #80 // =80
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end131:
+ .size mcl_fp_montNF9L, .Lfunc_end131-mcl_fp_montNF9L
+
+ .globl mcl_fp_montRed9L
+ .align 2
+ .type mcl_fp_montRed9L,@function
+mcl_fp_montRed9L: // @mcl_fp_montRed9L
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ add x29, sp, #80 // =80
+ sub sp, sp, #912 // =912
+ mov x20, x2
+ ldur x9, [x20, #-8]
+ str x9, [sp, #40] // 8-byte Folded Spill
+ ldr x8, [x20, #64]
+ str x8, [sp, #184] // 8-byte Folded Spill
+ ldr x8, [x20, #48]
+ str x8, [sp, #168] // 8-byte Folded Spill
+ ldr x8, [x20, #56]
+ str x8, [sp, #176] // 8-byte Folded Spill
+ ldr x8, [x20, #32]
+ str x8, [sp, #144] // 8-byte Folded Spill
+ ldr x8, [x20, #40]
+ str x8, [sp, #152] // 8-byte Folded Spill
+ ldr x8, [x20, #16]
+ str x8, [sp, #128] // 8-byte Folded Spill
+ ldr x8, [x20, #24]
+ str x8, [sp, #136] // 8-byte Folded Spill
+ ldr x8, [x20]
+ str x8, [sp, #112] // 8-byte Folded Spill
+ ldr x8, [x20, #8]
+ str x8, [sp, #120] // 8-byte Folded Spill
+ ldr x8, [x1, #128]
+ str x8, [sp, #96] // 8-byte Folded Spill
+ ldr x8, [x1, #136]
+ str x8, [sp, #104] // 8-byte Folded Spill
+ ldr x8, [x1, #112]
+ str x8, [sp, #80] // 8-byte Folded Spill
+ ldr x8, [x1, #120]
+ str x8, [sp, #88] // 8-byte Folded Spill
+ ldr x8, [x1, #96]
+ str x8, [sp, #64] // 8-byte Folded Spill
+ ldr x8, [x1, #104]
+ str x8, [sp, #72] // 8-byte Folded Spill
+ ldr x8, [x1, #80]
+ str x8, [sp, #48] // 8-byte Folded Spill
+ ldr x8, [x1, #88]
+ str x8, [sp, #56] // 8-byte Folded Spill
+ ldp x23, x8, [x1, #64]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldp x25, x19, [x1, #48]
+ ldp x28, x27, [x1, #32]
+ ldp x22, x24, [x1, #16]
+ ldp x21, x26, [x1]
+ str x0, [sp, #160] // 8-byte Folded Spill
+ mul x1, x21, x9
+ sub x8, x29, #160 // =160
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x9, x8, [x29, #-96]
+ ldp x11, x10, [x29, #-112]
+ ldp x13, x12, [x29, #-128]
+ ldp x14, x15, [x29, #-160]
+ ldp x16, x17, [x29, #-144]
+ cmn x21, x14
+ adcs x21, x26, x15
+ adcs x14, x22, x16
+ adcs x24, x24, x17
+ adcs x26, x28, x13
+ adcs x27, x27, x12
+ adcs x25, x25, x11
+ adcs x10, x19, x10
+ stp x10, x14, [sp, #24]
+ adcs x23, x23, x9
+ ldr x9, [sp, #16] // 8-byte Folded Reload
+ adcs x28, x9, x8
+ ldr x8, [sp, #48] // 8-byte Folded Reload
+ adcs x22, x8, xzr
+ ldr x8, [sp, #56] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #56] // 8-byte Folded Spill
+ ldr x8, [sp, #64] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #64] // 8-byte Folded Spill
+ ldr x8, [sp, #72] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #72] // 8-byte Folded Spill
+ ldr x8, [sp, #80] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #80] // 8-byte Folded Spill
+ ldr x8, [sp, #88] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #88] // 8-byte Folded Spill
+ ldr x8, [sp, #96] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #96] // 8-byte Folded Spill
+ ldr x8, [sp, #104] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #104] // 8-byte Folded Spill
+ adcs x8, xzr, xzr
+ str x8, [sp, #48] // 8-byte Folded Spill
+ ldr x19, [sp, #40] // 8-byte Folded Reload
+ mul x1, x21, x19
+ sub x8, x29, #240 // =240
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x9, x8, [x29, #-176]
+ ldp x11, x10, [x29, #-192]
+ ldp x13, x12, [x29, #-208]
+ ldp x14, x15, [x29, #-240]
+ ldp x16, x17, [x29, #-224]
+ cmn x21, x14
+ ldr x14, [sp, #32] // 8-byte Folded Reload
+ adcs x21, x14, x15
+ adcs x14, x24, x16
+ adcs x26, x26, x17
+ adcs x27, x27, x13
+ adcs x25, x25, x12
+ ldr x12, [sp, #24] // 8-byte Folded Reload
+ adcs x11, x12, x11
+ stp x11, x14, [sp, #24]
+ adcs x23, x23, x10
+ adcs x28, x28, x9
+ adcs x22, x22, x8
+ ldr x8, [sp, #56] // 8-byte Folded Reload
+ adcs x24, x8, xzr
+ ldr x8, [sp, #64] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #64] // 8-byte Folded Spill
+ ldr x8, [sp, #72] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #72] // 8-byte Folded Spill
+ ldr x8, [sp, #80] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #80] // 8-byte Folded Spill
+ ldr x8, [sp, #88] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #88] // 8-byte Folded Spill
+ ldr x8, [sp, #96] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #96] // 8-byte Folded Spill
+ ldr x8, [sp, #104] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #104] // 8-byte Folded Spill
+ ldr x8, [sp, #48] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #56] // 8-byte Folded Spill
+ mul x1, x21, x19
+ add x8, sp, #672 // =672
+ mov x0, x20
+ bl .LmulPv576x64
+ ldr x8, [sp, #744]
+ ldr x9, [sp, #736]
+ ldr x10, [sp, #728]
+ ldr x11, [sp, #720]
+ ldr x12, [sp, #712]
+ ldr x13, [sp, #704]
+ ldr x14, [sp, #672]
+ ldr x15, [sp, #680]
+ ldr x16, [sp, #688]
+ ldr x17, [sp, #696]
+ cmn x21, x14
+ ldr x14, [sp, #32] // 8-byte Folded Reload
+ adcs x21, x14, x15
+ adcs x14, x26, x16
+ str x14, [sp, #48] // 8-byte Folded Spill
+ adcs x27, x27, x17
+ adcs x25, x25, x13
+ ldr x13, [sp, #24] // 8-byte Folded Reload
+ adcs x12, x13, x12
+ str x12, [sp, #32] // 8-byte Folded Spill
+ adcs x23, x23, x11
+ adcs x28, x28, x10
+ adcs x22, x22, x9
+ adcs x24, x24, x8
+ ldr x8, [sp, #64] // 8-byte Folded Reload
+ adcs x26, x8, xzr
+ ldr x8, [sp, #72] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #72] // 8-byte Folded Spill
+ ldr x8, [sp, #80] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #80] // 8-byte Folded Spill
+ ldr x8, [sp, #88] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #88] // 8-byte Folded Spill
+ ldr x8, [sp, #96] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #96] // 8-byte Folded Spill
+ ldr x8, [sp, #104] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #104] // 8-byte Folded Spill
+ ldr x8, [sp, #56] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #64] // 8-byte Folded Spill
+ mul x1, x21, x19
+ add x8, sp, #592 // =592
+ mov x0, x20
+ bl .LmulPv576x64
+ ldr x8, [sp, #664]
+ ldr x9, [sp, #656]
+ ldr x10, [sp, #648]
+ ldr x11, [sp, #640]
+ ldr x12, [sp, #632]
+ ldr x13, [sp, #624]
+ ldr x14, [sp, #592]
+ ldr x15, [sp, #600]
+ ldr x16, [sp, #608]
+ ldr x17, [sp, #616]
+ cmn x21, x14
+ ldr x14, [sp, #48] // 8-byte Folded Reload
+ adcs x21, x14, x15
+ adcs x14, x27, x16
+ str x14, [sp, #56] // 8-byte Folded Spill
+ adcs x25, x25, x17
+ ldr x14, [sp, #32] // 8-byte Folded Reload
+ adcs x13, x14, x13
+ str x13, [sp, #48] // 8-byte Folded Spill
+ adcs x23, x23, x12
+ adcs x28, x28, x11
+ adcs x22, x22, x10
+ adcs x24, x24, x9
+ adcs x26, x26, x8
+ ldr x8, [sp, #72] // 8-byte Folded Reload
+ adcs x27, x8, xzr
+ ldr x8, [sp, #80] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #80] // 8-byte Folded Spill
+ ldr x8, [sp, #88] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #88] // 8-byte Folded Spill
+ ldr x8, [sp, #96] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #96] // 8-byte Folded Spill
+ ldr x8, [sp, #104] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #104] // 8-byte Folded Spill
+ ldr x8, [sp, #64] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #72] // 8-byte Folded Spill
+ mul x1, x21, x19
+ add x8, sp, #512 // =512
+ mov x0, x20
+ bl .LmulPv576x64
+ ldr x8, [sp, #584]
+ ldr x9, [sp, #576]
+ ldr x10, [sp, #568]
+ ldr x11, [sp, #560]
+ ldr x12, [sp, #552]
+ ldr x13, [sp, #544]
+ ldr x14, [sp, #512]
+ ldr x15, [sp, #520]
+ ldr x16, [sp, #528]
+ ldr x17, [sp, #536]
+ cmn x21, x14
+ ldr x14, [sp, #56] // 8-byte Folded Reload
+ adcs x21, x14, x15
+ adcs x14, x25, x16
+ str x14, [sp, #64] // 8-byte Folded Spill
+ ldr x14, [sp, #48] // 8-byte Folded Reload
+ adcs x14, x14, x17
+ str x14, [sp, #56] // 8-byte Folded Spill
+ adcs x23, x23, x13
+ adcs x28, x28, x12
+ adcs x22, x22, x11
+ adcs x24, x24, x10
+ adcs x26, x26, x9
+ adcs x27, x27, x8
+ ldr x8, [sp, #80] // 8-byte Folded Reload
+ adcs x25, x8, xzr
+ ldr x8, [sp, #88] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #88] // 8-byte Folded Spill
+ ldr x8, [sp, #96] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #96] // 8-byte Folded Spill
+ ldr x8, [sp, #104] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #104] // 8-byte Folded Spill
+ ldr x8, [sp, #72] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #80] // 8-byte Folded Spill
+ mul x1, x21, x19
+ add x8, sp, #432 // =432
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x9, x8, [sp, #496]
+ ldp x11, x10, [sp, #480]
+ ldp x13, x12, [sp, #464]
+ ldp x14, x15, [sp, #432]
+ ldp x16, x17, [sp, #448]
+ cmn x21, x14
+ ldr x14, [sp, #64] // 8-byte Folded Reload
+ adcs x21, x14, x15
+ ldr x14, [sp, #56] // 8-byte Folded Reload
+ adcs x14, x14, x16
+ adcs x23, x23, x17
+ adcs x28, x28, x13
+ adcs x22, x22, x12
+ adcs x24, x24, x11
+ adcs x26, x26, x10
+ adcs x27, x27, x9
+ adcs x25, x25, x8
+ ldr x8, [sp, #88] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #88] // 8-byte Folded Spill
+ ldr x8, [sp, #96] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #96] // 8-byte Folded Spill
+ ldr x8, [sp, #104] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #104] // 8-byte Folded Spill
+ ldr x8, [sp, #80] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ stp x14, x8, [sp, #72]
+ mul x1, x21, x19
+ add x8, sp, #352 // =352
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x9, x8, [sp, #416]
+ ldp x11, x10, [sp, #400]
+ ldp x13, x12, [sp, #384]
+ ldp x14, x15, [sp, #352]
+ ldp x16, x17, [sp, #368]
+ cmn x21, x14
+ ldr x14, [sp, #72] // 8-byte Folded Reload
+ adcs x21, x14, x15
+ adcs x14, x23, x16
+ str x14, [sp, #72] // 8-byte Folded Spill
+ adcs x28, x28, x17
+ adcs x22, x22, x13
+ adcs x24, x24, x12
+ adcs x26, x26, x11
+ adcs x27, x27, x10
+ adcs x25, x25, x9
+ ldr x9, [sp, #88] // 8-byte Folded Reload
+ adcs x8, x9, x8
+ str x8, [sp, #88] // 8-byte Folded Spill
+ ldr x8, [sp, #96] // 8-byte Folded Reload
+ adcs x23, x8, xzr
+ ldr x8, [sp, #104] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #104] // 8-byte Folded Spill
+ ldr x8, [sp, #80] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #96] // 8-byte Folded Spill
+ mul x1, x21, x19
+ add x8, sp, #272 // =272
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x9, x8, [sp, #336]
+ ldp x11, x10, [sp, #320]
+ ldp x13, x12, [sp, #304]
+ ldp x14, x15, [sp, #272]
+ ldp x16, x17, [sp, #288]
+ cmn x21, x14
+ ldr x14, [sp, #72] // 8-byte Folded Reload
+ adcs x21, x14, x15
+ adcs x14, x28, x16
+ adcs x22, x22, x17
+ adcs x24, x24, x13
+ adcs x26, x26, x12
+ adcs x27, x27, x11
+ adcs x25, x25, x10
+ ldr x10, [sp, #88] // 8-byte Folded Reload
+ adcs x9, x10, x9
+ stp x14, x9, [sp, #80]
+ adcs x23, x23, x8
+ ldr x8, [sp, #104] // 8-byte Folded Reload
+ adcs x28, x8, xzr
+ ldr x8, [sp, #96] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #104] // 8-byte Folded Spill
+ mul x1, x21, x19
+ add x8, sp, #192 // =192
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x9, x8, [sp, #256]
+ ldp x11, x10, [sp, #240]
+ ldp x13, x12, [sp, #224]
+ ldp x14, x15, [sp, #192]
+ ldp x16, x17, [sp, #208]
+ cmn x21, x14
+ ldr x14, [sp, #80] // 8-byte Folded Reload
+ adcs x14, x14, x15
+ adcs x15, x22, x16
+ adcs x16, x24, x17
+ adcs x13, x26, x13
+ adcs x12, x27, x12
+ adcs x11, x25, x11
+ ldr x17, [sp, #88] // 8-byte Folded Reload
+ adcs x10, x17, x10
+ adcs x9, x23, x9
+ adcs x8, x28, x8
+ ldp x17, x18, [sp, #104]
+ adcs x17, x17, xzr
+ subs x18, x14, x18
+ ldp x0, x1, [sp, #120]
+ sbcs x0, x15, x0
+ sbcs x1, x16, x1
+ ldp x2, x3, [sp, #136]
+ sbcs x2, x13, x2
+ sbcs x3, x12, x3
+ ldr x4, [sp, #152] // 8-byte Folded Reload
+ sbcs x4, x11, x4
+ ldp x5, x6, [sp, #168]
+ sbcs x5, x10, x5
+ sbcs x6, x9, x6
+ ldr x7, [sp, #184] // 8-byte Folded Reload
+ sbcs x7, x8, x7
+ sbcs x17, x17, xzr
+ tst x17, #0x1
+ csel x14, x14, x18, ne
+ csel x15, x15, x0, ne
+ csel x16, x16, x1, ne
+ csel x13, x13, x2, ne
+ csel x12, x12, x3, ne
+ csel x11, x11, x4, ne
+ csel x10, x10, x5, ne
+ csel x9, x9, x6, ne
+ csel x8, x8, x7, ne
+ ldr x17, [sp, #160] // 8-byte Folded Reload
+ stp x14, x15, [x17]
+ stp x16, x13, [x17, #16]
+ stp x12, x11, [x17, #32]
+ stp x10, x9, [x17, #48]
+ str x8, [x17, #64]
+ sub sp, x29, #80 // =80
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end132:
+ .size mcl_fp_montRed9L, .Lfunc_end132-mcl_fp_montRed9L
+
+ .globl mcl_fp_addPre9L
+ .align 2
+ .type mcl_fp_addPre9L,@function
+mcl_fp_addPre9L: // @mcl_fp_addPre9L
+// BB#0:
+ ldp x11, x8, [x2, #56]
+ ldp x13, x9, [x1, #56]
+ ldp x15, x10, [x2, #40]
+ ldp x17, x12, [x1, #40]
+ ldp x3, x14, [x2, #24]
+ ldr x4, [x2]
+ ldp x2, x18, [x2, #8]
+ ldp x5, x6, [x1]
+ ldr x7, [x1, #16]
+ ldp x1, x16, [x1, #24]
+ adds x4, x4, x5
+ adcs x2, x2, x6
+ stp x4, x2, [x0]
+ adcs x18, x18, x7
+ str x18, [x0, #16]
+ adcs x18, x3, x1
+ adcs x14, x14, x16
+ stp x18, x14, [x0, #24]
+ adcs x14, x15, x17
+ adcs x10, x10, x12
+ stp x14, x10, [x0, #40]
+ adcs x10, x11, x13
+ adcs x9, x8, x9
+ adcs x8, xzr, xzr
+ stp x10, x9, [x0, #56]
+ mov x0, x8
+ ret
+.Lfunc_end133:
+ .size mcl_fp_addPre9L, .Lfunc_end133-mcl_fp_addPre9L
+
+ .globl mcl_fp_subPre9L
+ .align 2
+ .type mcl_fp_subPre9L,@function
+mcl_fp_subPre9L: // @mcl_fp_subPre9L
+// BB#0:
+ ldp x11, x8, [x2, #56]
+ ldp x13, x9, [x1, #56]
+ ldp x15, x10, [x2, #40]
+ ldp x17, x12, [x1, #40]
+ ldp x3, x14, [x2, #24]
+ ldr x4, [x2]
+ ldp x2, x18, [x2, #8]
+ ldp x5, x6, [x1]
+ ldr x7, [x1, #16]
+ ldp x1, x16, [x1, #24]
+ subs x4, x5, x4
+ sbcs x2, x6, x2
+ stp x4, x2, [x0]
+ sbcs x18, x7, x18
+ str x18, [x0, #16]
+ sbcs x18, x1, x3
+ sbcs x14, x16, x14
+ stp x18, x14, [x0, #24]
+ sbcs x14, x17, x15
+ sbcs x10, x12, x10
+ stp x14, x10, [x0, #40]
+ sbcs x10, x13, x11
+ sbcs x9, x9, x8
+ ngcs x8, xzr
+ and x8, x8, #0x1
+ stp x10, x9, [x0, #56]
+ mov x0, x8
+ ret
+.Lfunc_end134:
+ .size mcl_fp_subPre9L, .Lfunc_end134-mcl_fp_subPre9L
+
+ .globl mcl_fp_shr1_9L
+ .align 2
+ .type mcl_fp_shr1_9L,@function
+mcl_fp_shr1_9L: // @mcl_fp_shr1_9L
+// BB#0:
+ ldp x8, x9, [x1]
+ ldp x12, x10, [x1, #56]
+ ldp x16, x11, [x1, #40]
+ ldp x13, x14, [x1, #16]
+ ldr x15, [x1, #32]
+ extr x8, x9, x8, #1
+ extr x9, x13, x9, #1
+ extr x13, x14, x13, #1
+ extr x14, x15, x14, #1
+ extr x15, x16, x15, #1
+ extr x16, x11, x16, #1
+ extr x11, x12, x11, #1
+ extr x12, x10, x12, #1
+ lsr x10, x10, #1
+ stp x8, x9, [x0]
+ stp x13, x14, [x0, #16]
+ stp x15, x16, [x0, #32]
+ stp x11, x12, [x0, #48]
+ str x10, [x0, #64]
+ ret
+.Lfunc_end135:
+ .size mcl_fp_shr1_9L, .Lfunc_end135-mcl_fp_shr1_9L
+
+ .globl mcl_fp_add9L
+ .align 2
+ .type mcl_fp_add9L,@function
+mcl_fp_add9L: // @mcl_fp_add9L
+// BB#0:
+ stp x24, x23, [sp, #-48]!
+ stp x22, x21, [sp, #16]
+ stp x20, x19, [sp, #32]
+ ldp x11, x8, [x2, #56]
+ ldp x13, x9, [x1, #56]
+ ldp x15, x10, [x2, #40]
+ ldp x17, x12, [x1, #40]
+ ldp x4, x14, [x2, #24]
+ ldr x5, [x2]
+ ldp x2, x18, [x2, #8]
+ ldp x6, x7, [x1]
+ ldr x19, [x1, #16]
+ ldp x1, x16, [x1, #24]
+ adds x5, x5, x6
+ adcs x2, x2, x7
+ adcs x18, x18, x19
+ ldp x21, x7, [x3, #40]
+ ldp x19, x6, [x3, #56]
+ adcs x1, x4, x1
+ adcs x4, x14, x16
+ ldr x20, [x3, #32]
+ adcs x17, x15, x17
+ adcs x10, x10, x12
+ ldp x12, x14, [x3]
+ stp x5, x2, [x0]
+ stp x18, x1, [x0, #16]
+ stp x4, x17, [x0, #32]
+ adcs x22, x11, x13
+ stp x10, x22, [x0, #48]
+ adcs x8, x8, x9
+ str x8, [x0, #64]
+ adcs x23, xzr, xzr
+ ldp x9, x11, [x3, #16]
+ subs x16, x5, x12
+ sbcs x15, x2, x14
+ sbcs x14, x18, x9
+ sbcs x13, x1, x11
+ sbcs x12, x4, x20
+ sbcs x11, x17, x21
+ sbcs x10, x10, x7
+ sbcs x9, x22, x19
+ sbcs x8, x8, x6
+ sbcs x17, x23, xzr
+ and w17, w17, #0x1
+ tbnz w17, #0, .LBB136_2
+// BB#1: // %nocarry
+ stp x16, x15, [x0]
+ stp x14, x13, [x0, #16]
+ stp x12, x11, [x0, #32]
+ stp x10, x9, [x0, #48]
+ str x8, [x0, #64]
+.LBB136_2: // %carry
+ ldp x20, x19, [sp, #32]
+ ldp x22, x21, [sp, #16]
+ ldp x24, x23, [sp], #48
+ ret
+.Lfunc_end136:
+ .size mcl_fp_add9L, .Lfunc_end136-mcl_fp_add9L
+
+ .globl mcl_fp_addNF9L
+ .align 2
+ .type mcl_fp_addNF9L,@function
+mcl_fp_addNF9L: // @mcl_fp_addNF9L
+// BB#0:
+ stp x20, x19, [sp, #-16]!
+ ldp x11, x8, [x1, #56]
+ ldp x13, x9, [x2, #56]
+ ldp x15, x10, [x1, #40]
+ ldp x17, x12, [x2, #40]
+ ldp x4, x14, [x1, #24]
+ ldr x5, [x1]
+ ldp x1, x18, [x1, #8]
+ ldp x6, x7, [x2]
+ ldr x19, [x2, #16]
+ ldp x2, x16, [x2, #24]
+ adds x5, x6, x5
+ adcs x1, x7, x1
+ adcs x18, x19, x18
+ ldp x19, x6, [x3, #56]
+ adcs x2, x2, x4
+ adcs x14, x16, x14
+ ldp x4, x7, [x3, #40]
+ adcs x15, x17, x15
+ adcs x10, x12, x10
+ ldp x12, x17, [x3]
+ adcs x11, x13, x11
+ ldr x13, [x3, #16]
+ ldp x3, x16, [x3, #24]
+ adcs x8, x9, x8
+ subs x9, x5, x12
+ sbcs x12, x1, x17
+ sbcs x13, x18, x13
+ sbcs x17, x2, x3
+ sbcs x16, x14, x16
+ sbcs x3, x15, x4
+ sbcs x4, x10, x7
+ sbcs x7, x11, x19
+ sbcs x6, x8, x6
+ asr x19, x6, #63
+ cmp x19, #0 // =0
+ csel x9, x5, x9, lt
+ csel x12, x1, x12, lt
+ csel x13, x18, x13, lt
+ csel x17, x2, x17, lt
+ csel x14, x14, x16, lt
+ csel x15, x15, x3, lt
+ csel x10, x10, x4, lt
+ csel x11, x11, x7, lt
+ csel x8, x8, x6, lt
+ stp x9, x12, [x0]
+ stp x13, x17, [x0, #16]
+ stp x14, x15, [x0, #32]
+ stp x10, x11, [x0, #48]
+ str x8, [x0, #64]
+ ldp x20, x19, [sp], #16
+ ret
+.Lfunc_end137:
+ .size mcl_fp_addNF9L, .Lfunc_end137-mcl_fp_addNF9L
+
+ .globl mcl_fp_sub9L
+ .align 2
+ .type mcl_fp_sub9L,@function
+mcl_fp_sub9L: // @mcl_fp_sub9L
+// BB#0:
+ stp x20, x19, [sp, #-16]!
+ ldp x15, x16, [x2, #56]
+ ldp x4, x17, [x1, #56]
+ ldp x13, x14, [x2, #40]
+ ldp x6, x18, [x1, #40]
+ ldp x11, x12, [x2, #24]
+ ldp x9, x10, [x2, #8]
+ ldr x8, [x2]
+ ldp x2, x7, [x1]
+ ldr x19, [x1, #16]
+ ldp x1, x5, [x1, #24]
+ subs x8, x2, x8
+ sbcs x9, x7, x9
+ stp x8, x9, [x0]
+ sbcs x10, x19, x10
+ sbcs x11, x1, x11
+ stp x10, x11, [x0, #16]
+ sbcs x12, x5, x12
+ sbcs x13, x6, x13
+ stp x12, x13, [x0, #32]
+ sbcs x14, x18, x14
+ sbcs x15, x4, x15
+ stp x14, x15, [x0, #48]
+ sbcs x16, x17, x16
+ str x16, [x0, #64]
+ ngcs x17, xzr
+ and w17, w17, #0x1
+ tbnz w17, #0, .LBB138_2
+// BB#1: // %nocarry
+ ldp x20, x19, [sp], #16
+ ret
+.LBB138_2: // %carry
+ ldp x18, x1, [x3]
+ ldp x2, x4, [x3, #16]
+ ldp x5, x6, [x3, #32]
+ adds x8, x18, x8
+ adcs x9, x1, x9
+ ldr x18, [x3, #48]
+ ldp x1, x17, [x3, #56]
+ adcs x10, x2, x10
+ adcs x11, x4, x11
+ adcs x12, x5, x12
+ adcs x13, x6, x13
+ adcs x14, x18, x14
+ adcs x15, x1, x15
+ adcs x16, x17, x16
+ stp x8, x9, [x0]
+ stp x10, x11, [x0, #16]
+ stp x12, x13, [x0, #32]
+ stp x14, x15, [x0, #48]
+ str x16, [x0, #64]
+ ldp x20, x19, [sp], #16
+ ret
+.Lfunc_end138:
+ .size mcl_fp_sub9L, .Lfunc_end138-mcl_fp_sub9L
+
+ .globl mcl_fp_subNF9L
+ .align 2
+ .type mcl_fp_subNF9L,@function
+mcl_fp_subNF9L: // @mcl_fp_subNF9L
+// BB#0:
+ stp x20, x19, [sp, #-16]!
+ ldp x11, x8, [x2, #56]
+ ldp x13, x9, [x1, #56]
+ ldp x15, x10, [x2, #40]
+ ldp x17, x12, [x1, #40]
+ ldp x4, x14, [x2, #24]
+ ldr x5, [x2]
+ ldp x2, x18, [x2, #8]
+ ldp x6, x7, [x1]
+ ldr x19, [x1, #16]
+ ldp x1, x16, [x1, #24]
+ subs x5, x6, x5
+ sbcs x2, x7, x2
+ sbcs x18, x19, x18
+ ldp x19, x6, [x3, #56]
+ sbcs x1, x1, x4
+ sbcs x14, x16, x14
+ ldp x4, x7, [x3, #40]
+ sbcs x15, x17, x15
+ sbcs x10, x12, x10
+ ldp x12, x17, [x3]
+ sbcs x11, x13, x11
+ sbcs x8, x9, x8
+ asr x9, x8, #63
+ extr x13, x9, x8, #63
+ and x12, x13, x12
+ ldr x13, [x3, #16]
+ ldp x3, x16, [x3, #24]
+ and x19, x9, x19
+ and x6, x9, x6
+ ror x9, x9, #63
+ and x17, x9, x17
+ and x13, x9, x13
+ and x3, x9, x3
+ and x16, x9, x16
+ and x4, x9, x4
+ and x9, x9, x7
+ adds x12, x12, x5
+ str x12, [x0]
+ adcs x12, x17, x2
+ str x12, [x0, #8]
+ adcs x12, x13, x18
+ str x12, [x0, #16]
+ adcs x12, x3, x1
+ str x12, [x0, #24]
+ adcs x12, x16, x14
+ str x12, [x0, #32]
+ adcs x12, x4, x15
+ adcs x9, x9, x10
+ stp x12, x9, [x0, #40]
+ adcs x9, x19, x11
+ adcs x8, x6, x8
+ stp x9, x8, [x0, #56]
+ ldp x20, x19, [sp], #16
+ ret
+.Lfunc_end139:
+ .size mcl_fp_subNF9L, .Lfunc_end139-mcl_fp_subNF9L
+
+ .globl mcl_fpDbl_add9L
+ .align 2
+ .type mcl_fpDbl_add9L,@function
+mcl_fpDbl_add9L: // @mcl_fpDbl_add9L
+// BB#0:
+ stp x20, x19, [sp, #-16]!
+ ldp x10, x8, [x2, #128]
+ ldp x11, x9, [x1, #128]
+ ldp x12, x13, [x2, #112]
+ ldp x14, x15, [x1, #112]
+ ldp x16, x17, [x2, #96]
+ ldp x18, x4, [x2]
+ ldp x5, x6, [x1]
+ ldp x7, x19, [x2, #16]
+ adds x18, x18, x5
+ adcs x4, x4, x6
+ ldp x5, x6, [x1, #16]
+ str x18, [x0]
+ adcs x18, x7, x5
+ ldp x5, x7, [x1, #96]
+ str x4, [x0, #8]
+ ldr x4, [x1, #32]
+ str x18, [x0, #16]
+ adcs x18, x19, x6
+ ldp x6, x19, [x2, #32]
+ str x18, [x0, #24]
+ adcs x4, x6, x4
+ ldp x18, x6, [x1, #40]
+ str x4, [x0, #32]
+ adcs x18, x19, x18
+ ldp x4, x19, [x2, #48]
+ str x18, [x0, #40]
+ adcs x4, x4, x6
+ ldp x18, x6, [x1, #56]
+ str x4, [x0, #48]
+ adcs x18, x19, x18
+ ldp x4, x19, [x2, #64]
+ str x18, [x0, #56]
+ ldr x18, [x1, #72]
+ adcs x4, x4, x6
+ ldp x6, x2, [x2, #80]
+ str x4, [x0, #64]
+ ldp x4, x1, [x1, #80]
+ adcs x18, x19, x18
+ adcs x4, x6, x4
+ adcs x1, x2, x1
+ ldp x6, x19, [x3, #56]
+ adcs x16, x16, x5
+ adcs x17, x17, x7
+ ldp x7, x2, [x3, #40]
+ adcs x12, x12, x14
+ adcs x13, x13, x15
+ ldp x15, x5, [x3, #24]
+ adcs x10, x10, x11
+ ldr x11, [x3]
+ ldp x3, x14, [x3, #8]
+ adcs x8, x8, x9
+ adcs x9, xzr, xzr
+ subs x11, x18, x11
+ sbcs x3, x4, x3
+ sbcs x14, x1, x14
+ sbcs x15, x16, x15
+ sbcs x5, x17, x5
+ sbcs x7, x12, x7
+ sbcs x2, x13, x2
+ sbcs x6, x10, x6
+ sbcs x19, x8, x19
+ sbcs x9, x9, xzr
+ tst x9, #0x1
+ csel x9, x18, x11, ne
+ csel x11, x4, x3, ne
+ csel x14, x1, x14, ne
+ csel x15, x16, x15, ne
+ csel x16, x17, x5, ne
+ csel x12, x12, x7, ne
+ csel x13, x13, x2, ne
+ csel x10, x10, x6, ne
+ csel x8, x8, x19, ne
+ stp x9, x11, [x0, #72]
+ stp x14, x15, [x0, #88]
+ stp x16, x12, [x0, #104]
+ stp x13, x10, [x0, #120]
+ str x8, [x0, #136]
+ ldp x20, x19, [sp], #16
+ ret
+.Lfunc_end140:
+ .size mcl_fpDbl_add9L, .Lfunc_end140-mcl_fpDbl_add9L
+
+ .globl mcl_fpDbl_sub9L
+ .align 2
+ .type mcl_fpDbl_sub9L,@function
+mcl_fpDbl_sub9L: // @mcl_fpDbl_sub9L
+// BB#0:
+ ldp x10, x8, [x2, #128]
+ ldp x11, x9, [x1, #128]
+ ldp x14, x12, [x2, #112]
+ ldp x15, x13, [x1, #112]
+ ldp x16, x17, [x2]
+ ldp x18, x4, [x1]
+ ldp x5, x6, [x2, #96]
+ ldr x7, [x1, #16]
+ subs x16, x18, x16
+ sbcs x17, x4, x17
+ ldp x18, x4, [x2, #16]
+ str x16, [x0]
+ ldr x16, [x1, #24]
+ sbcs x18, x7, x18
+ str x17, [x0, #8]
+ ldp x17, x7, [x2, #32]
+ str x18, [x0, #16]
+ sbcs x16, x16, x4
+ ldp x18, x4, [x1, #32]
+ str x16, [x0, #24]
+ sbcs x16, x18, x17
+ ldp x17, x18, [x2, #48]
+ str x16, [x0, #32]
+ sbcs x4, x4, x7
+ ldp x16, x7, [x1, #48]
+ str x4, [x0, #40]
+ sbcs x16, x16, x17
+ ldp x17, x4, [x2, #80]
+ str x16, [x0, #48]
+ ldr x16, [x1, #64]
+ sbcs x18, x7, x18
+ ldp x7, x2, [x2, #64]
+ str x18, [x0, #56]
+ ldr x18, [x1, #72]
+ sbcs x16, x16, x7
+ str x16, [x0, #64]
+ ldp x16, x7, [x1, #80]
+ sbcs x18, x18, x2
+ ldp x2, x1, [x1, #96]
+ sbcs x16, x16, x17
+ sbcs x4, x7, x4
+ sbcs x2, x2, x5
+ ldp x7, x17, [x3, #56]
+ sbcs x1, x1, x6
+ sbcs x14, x15, x14
+ ldp x6, x5, [x3, #40]
+ sbcs x12, x13, x12
+ sbcs x10, x11, x10
+ ldp x13, x15, [x3, #24]
+ sbcs x8, x9, x8
+ ngcs x9, xzr
+ tst x9, #0x1
+ ldr x9, [x3]
+ ldp x3, x11, [x3, #8]
+ csel x17, x17, xzr, ne
+ csel x7, x7, xzr, ne
+ csel x5, x5, xzr, ne
+ csel x6, x6, xzr, ne
+ csel x15, x15, xzr, ne
+ csel x13, x13, xzr, ne
+ csel x11, x11, xzr, ne
+ csel x3, x3, xzr, ne
+ csel x9, x9, xzr, ne
+ adds x9, x9, x18
+ str x9, [x0, #72]
+ adcs x9, x3, x16
+ str x9, [x0, #80]
+ adcs x9, x11, x4
+ str x9, [x0, #88]
+ adcs x9, x13, x2
+ str x9, [x0, #96]
+ adcs x9, x15, x1
+ str x9, [x0, #104]
+ adcs x9, x6, x14
+ str x9, [x0, #112]
+ adcs x9, x5, x12
+ str x9, [x0, #120]
+ adcs x9, x7, x10
+ adcs x8, x17, x8
+ stp x9, x8, [x0, #128]
+ ret
+.Lfunc_end141:
+ .size mcl_fpDbl_sub9L, .Lfunc_end141-mcl_fpDbl_sub9L
+
+
+ .section ".note.GNU-stack","",@progbits
diff --git a/vendor/github.com/tangerine-network/mcl/src/asm/arm.s b/vendor/github.com/tangerine-network/mcl/src/asm/arm.s
new file mode 100644
index 000000000..2df9bfb92
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/src/asm/arm.s
@@ -0,0 +1,84189 @@
+ .text
+ .syntax unified
+ .eabi_attribute 67, "2.09" @ Tag_conformance
+ .eabi_attribute 6, 1 @ Tag_CPU_arch
+ .eabi_attribute 8, 1 @ Tag_ARM_ISA_use
+ .eabi_attribute 15, 1 @ Tag_ABI_PCS_RW_data
+ .eabi_attribute 16, 1 @ Tag_ABI_PCS_RO_data
+ .eabi_attribute 17, 2 @ Tag_ABI_PCS_GOT_use
+ .eabi_attribute 20, 1 @ Tag_ABI_FP_denormal
+ .eabi_attribute 21, 1 @ Tag_ABI_FP_exceptions
+ .eabi_attribute 23, 3 @ Tag_ABI_FP_number_model
+ .eabi_attribute 34, 1 @ Tag_CPU_unaligned_access
+ .eabi_attribute 24, 1 @ Tag_ABI_align_needed
+ .eabi_attribute 25, 1 @ Tag_ABI_align_preserved
+ .eabi_attribute 28, 1 @ Tag_ABI_VFP_args
+ .eabi_attribute 38, 1 @ Tag_ABI_FP_16bit_format
+ .eabi_attribute 14, 0 @ Tag_ABI_PCS_R9_use
+ .file "<stdin>"
+ .globl makeNIST_P192L
+ .align 2
+ .type makeNIST_P192L,%function
+makeNIST_P192L: @ @makeNIST_P192L
+ .fnstart
+@ BB#0:
+ mvn r1, #0
+ mvn r2, #1
+ str r1, [r0]
+ stmib r0, {r1, r2}
+ str r1, [r0, #12]
+ str r1, [r0, #16]
+ str r1, [r0, #20]
+ mov pc, lr
+.Lfunc_end0:
+ .size makeNIST_P192L, .Lfunc_end0-makeNIST_P192L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_mod_NIST_P192L
+ .align 2
+ .type mcl_fpDbl_mod_NIST_P192L,%function
+mcl_fpDbl_mod_NIST_P192L: @ @mcl_fpDbl_mod_NIST_P192L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #8
+ sub sp, sp, #8
+ add lr, r1, #24
+ ldr r2, [r1, #40]
+ ldr r3, [r1, #44]
+ ldr r7, [r1, #16]
+ ldr r8, [r1, #20]
+ ldm lr, {r4, r5, r6, lr}
+ ldm r1, {r1, r9, r10, r12}
+ adds r11, r4, r1
+ adcs r9, r5, r9
+ adcs r10, r6, r10
+ adcs r1, lr, r12
+ str r1, [sp, #4] @ 4-byte Spill
+ adcs r1, r2, r7
+ mov r7, #0
+ str r1, [sp] @ 4-byte Spill
+ adcs r8, r3, r8
+ mov r1, #0
+ adcs r1, r1, #0
+ adc r12, r7, #0
+ ldr r7, [sp, #4] @ 4-byte Reload
+ adds r11, r11, r2
+ adcs r9, r9, r3
+ adcs r4, r10, r4
+ adcs r5, r7, r5
+ ldr r7, [sp] @ 4-byte Reload
+ adcs r6, r7, r6
+ adcs r7, r8, lr
+ adcs r1, r1, #0
+ adc r12, r12, #0
+ adds lr, r4, r2
+ adcs r3, r5, r3
+ adcs r6, r6, #0
+ adcs r7, r7, #0
+ adcs r1, r1, #0
+ adc r5, r12, #0
+ adds r12, r1, r11
+ adcs r11, r5, r9
+ adcs r10, r1, lr
+ mov r1, #0
+ adcs r8, r5, r3
+ adcs lr, r6, #0
+ adcs r2, r7, #0
+ adc r9, r1, #0
+ adds r7, r12, #1
+ str r2, [sp, #4] @ 4-byte Spill
+ adcs r6, r11, #0
+ adcs r3, r10, #1
+ adcs r5, r8, #0
+ adcs r1, lr, #0
+ adcs r2, r2, #0
+ sbc r4, r9, #0
+ ands r4, r4, #1
+ movne r7, r12
+ movne r6, r11
+ movne r3, r10
+ cmp r4, #0
+ movne r5, r8
+ movne r1, lr
+ str r7, [r0]
+ str r6, [r0, #4]
+ str r3, [r0, #8]
+ str r5, [r0, #12]
+ str r1, [r0, #16]
+ ldr r1, [sp, #4] @ 4-byte Reload
+ movne r2, r1
+ str r2, [r0, #20]
+ add sp, sp, #8
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end1:
+ .size mcl_fpDbl_mod_NIST_P192L, .Lfunc_end1-mcl_fpDbl_mod_NIST_P192L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_sqr_NIST_P192L
+ .align 2
+ .type mcl_fp_sqr_NIST_P192L,%function
+mcl_fp_sqr_NIST_P192L: @ @mcl_fp_sqr_NIST_P192L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #60
+ sub sp, sp, #60
+ mov r8, r0
+ add r0, sp, #12
+ bl mcl_fpDbl_sqrPre6L(PLT)
+ add r12, sp, #12
+ ldr lr, [sp, #48]
+ ldr r2, [sp, #44]
+ ldr r3, [sp, #40]
+ mov r4, #0
+ ldm r12, {r0, r1, r5, r6, r12}
+ ldr r7, [sp, #36]
+ adds r0, r7, r0
+ str r0, [sp, #8] @ 4-byte Spill
+ adcs r0, r3, r1
+ mov r1, #0
+ adcs r10, r2, r5
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #52]
+ ldr r5, [sp, #32]
+ adcs r11, lr, r6
+ ldr r6, [sp, #56]
+ adcs r9, r0, r12
+ adcs r5, r6, r5
+ adcs r1, r1, #0
+ adc r12, r4, #0
+ ldr r4, [sp, #8] @ 4-byte Reload
+ adds r4, r4, r0
+ str r4, [sp, #8] @ 4-byte Spill
+ ldr r4, [sp, #4] @ 4-byte Reload
+ adcs r4, r4, r6
+ adcs r7, r10, r7
+ adcs r3, r11, r3
+ adcs r2, r9, r2
+ adcs r5, r5, lr
+ adcs r1, r1, #0
+ adc r12, r12, #0
+ adds lr, r7, r0
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r3, r3, r6
+ adcs r2, r2, #0
+ adcs r7, r5, #0
+ adcs r1, r1, #0
+ adc r6, r12, #0
+ adds r5, r1, r0
+ mov r0, #0
+ adcs r11, r6, r4
+ adcs r10, r1, lr
+ adcs r12, r6, r3
+ adcs lr, r2, #0
+ adcs r4, r7, #0
+ adc r9, r0, #0
+ adds r7, r5, #1
+ str r4, [sp, #8] @ 4-byte Spill
+ adcs r2, r11, #0
+ adcs r3, r10, #1
+ adcs r6, r12, #0
+ adcs r1, lr, #0
+ adcs r0, r4, #0
+ sbc r4, r9, #0
+ ands r4, r4, #1
+ movne r7, r5
+ movne r2, r11
+ movne r3, r10
+ cmp r4, #0
+ movne r6, r12
+ movne r1, lr
+ str r7, [r8]
+ str r2, [r8, #4]
+ str r3, [r8, #8]
+ str r6, [r8, #12]
+ str r1, [r8, #16]
+ ldr r1, [sp, #8] @ 4-byte Reload
+ movne r0, r1
+ str r0, [r8, #20]
+ add sp, sp, #60
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end2:
+ .size mcl_fp_sqr_NIST_P192L, .Lfunc_end2-mcl_fp_sqr_NIST_P192L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mulNIST_P192L
+ .align 2
+ .type mcl_fp_mulNIST_P192L,%function
+mcl_fp_mulNIST_P192L: @ @mcl_fp_mulNIST_P192L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #60
+ sub sp, sp, #60
+ mov r8, r0
+ add r0, sp, #12
+ bl mcl_fpDbl_mulPre6L(PLT)
+ add r12, sp, #12
+ ldr lr, [sp, #48]
+ ldr r2, [sp, #44]
+ ldr r3, [sp, #40]
+ mov r4, #0
+ ldm r12, {r0, r1, r5, r6, r12}
+ ldr r7, [sp, #36]
+ adds r0, r7, r0
+ str r0, [sp, #8] @ 4-byte Spill
+ adcs r0, r3, r1
+ mov r1, #0
+ adcs r10, r2, r5
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #52]
+ ldr r5, [sp, #32]
+ adcs r11, lr, r6
+ ldr r6, [sp, #56]
+ adcs r9, r0, r12
+ adcs r5, r6, r5
+ adcs r1, r1, #0
+ adc r12, r4, #0
+ ldr r4, [sp, #8] @ 4-byte Reload
+ adds r4, r4, r0
+ str r4, [sp, #8] @ 4-byte Spill
+ ldr r4, [sp, #4] @ 4-byte Reload
+ adcs r4, r4, r6
+ adcs r7, r10, r7
+ adcs r3, r11, r3
+ adcs r2, r9, r2
+ adcs r5, r5, lr
+ adcs r1, r1, #0
+ adc r12, r12, #0
+ adds lr, r7, r0
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r3, r3, r6
+ adcs r2, r2, #0
+ adcs r7, r5, #0
+ adcs r1, r1, #0
+ adc r6, r12, #0
+ adds r5, r1, r0
+ mov r0, #0
+ adcs r11, r6, r4
+ adcs r10, r1, lr
+ adcs r12, r6, r3
+ adcs lr, r2, #0
+ adcs r4, r7, #0
+ adc r9, r0, #0
+ adds r7, r5, #1
+ str r4, [sp, #8] @ 4-byte Spill
+ adcs r2, r11, #0
+ adcs r3, r10, #1
+ adcs r6, r12, #0
+ adcs r1, lr, #0
+ adcs r0, r4, #0
+ sbc r4, r9, #0
+ ands r4, r4, #1
+ movne r7, r5
+ movne r2, r11
+ movne r3, r10
+ cmp r4, #0
+ movne r6, r12
+ movne r1, lr
+ str r7, [r8]
+ str r2, [r8, #4]
+ str r3, [r8, #8]
+ str r6, [r8, #12]
+ str r1, [r8, #16]
+ ldr r1, [sp, #8] @ 4-byte Reload
+ movne r0, r1
+ str r0, [r8, #20]
+ add sp, sp, #60
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end3:
+ .size mcl_fp_mulNIST_P192L, .Lfunc_end3-mcl_fp_mulNIST_P192L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_mod_NIST_P521L
+ .align 2
+ .type mcl_fpDbl_mod_NIST_P521L,%function
+mcl_fpDbl_mod_NIST_P521L: @ @mcl_fpDbl_mod_NIST_P521L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #44
+ sub sp, sp, #44
+ ldr r6, [r1, #64]
+ mov r5, #255
+ ldr r3, [r1, #72]
+ ldr r2, [r1, #76]
+ mov r9, r0
+ orr r5, r5, #256
+ and r5, r6, r5
+ lsr r6, r6, #9
+ lsr r7, r3, #9
+ str r5, [sp, #40] @ 4-byte Spill
+ ldr r5, [r1, #68]
+ orr r12, r7, r2, lsl #23
+ lsr r2, r2, #9
+ lsr r4, r5, #9
+ orr r6, r6, r5, lsl #23
+ ldr r5, [r1]
+ orr r3, r4, r3, lsl #23
+ ldmib r1, {r4, r7, lr}
+ adds r5, r6, r5
+ ldr r6, [r1, #36]
+ str r5, [sp, #36] @ 4-byte Spill
+ ldr r5, [r1, #80]
+ adcs r3, r3, r4
+ str r3, [sp, #32] @ 4-byte Spill
+ adcs r7, r12, r7
+ ldr r3, [r1, #84]
+ str r7, [sp, #28] @ 4-byte Spill
+ ldr r7, [r1, #88]
+ orr r2, r2, r5, lsl #23
+ lsr r5, r5, #9
+ adcs r12, r2, lr
+ ldr r2, [r1, #16]
+ orr r4, r5, r3, lsl #23
+ lsr r3, r3, #9
+ orr r3, r3, r7, lsl #23
+ lsr r5, r7, #9
+ ldr r7, [r1, #40]
+ adcs r2, r4, r2
+ ldr r4, [r1, #24]
+ str r2, [sp, #24] @ 4-byte Spill
+ ldr r2, [r1, #20]
+ adcs r2, r3, r2
+ str r2, [sp, #20] @ 4-byte Spill
+ ldr r2, [r1, #92]
+ orr r3, r5, r2, lsl #23
+ ldr r5, [r1, #28]
+ lsr r2, r2, #9
+ adcs lr, r3, r4
+ ldr r3, [r1, #96]
+ ldr r4, [r1, #44]
+ orr r2, r2, r3, lsl #23
+ adcs r2, r2, r5
+ ldr r5, [r1, #32]
+ str r2, [sp, #16] @ 4-byte Spill
+ lsr r2, r3, #9
+ ldr r3, [r1, #100]
+ orr r2, r2, r3, lsl #23
+ adcs r2, r2, r5
+ ldr r5, [r1, #48]
+ str r2, [sp, #12] @ 4-byte Spill
+ lsr r2, r3, #9
+ ldr r3, [r1, #104]
+ orr r2, r2, r3, lsl #23
+ adcs r0, r2, r6
+ lsr r2, r3, #9
+ ldr r3, [r1, #108]
+ ldr r6, [r1, #52]
+ str r0, [sp, #8] @ 4-byte Spill
+ orr r2, r2, r3, lsl #23
+ adcs r7, r2, r7
+ lsr r2, r3, #9
+ ldr r3, [r1, #112]
+ orr r2, r2, r3, lsl #23
+ lsr r3, r3, #9
+ adcs r2, r2, r4
+ ldr r4, [r1, #116]
+ orr r3, r3, r4, lsl #23
+ lsr r4, r4, #9
+ adcs r3, r3, r5
+ ldr r5, [r1, #120]
+ orr r4, r4, r5, lsl #23
+ adcs r11, r4, r6
+ lsr r4, r5, #9
+ ldr r5, [r1, #124]
+ ldr r6, [r1, #56]
+ orr r4, r4, r5, lsl #23
+ adcs r10, r4, r6
+ lsr r4, r5, #9
+ ldr r5, [r1, #128]
+ ldr r1, [r1, #60]
+ orr r4, r4, r5, lsl #23
+ adcs r8, r4, r1
+ ldr r4, [sp, #40] @ 4-byte Reload
+ lsr r1, r5, #9
+ ldr r5, [sp, #36] @ 4-byte Reload
+ adc r1, r1, r4
+ mov r4, #1
+ and r4, r4, r1, lsr #9
+ adds r5, r4, r5
+ ldr r4, [sp, #32] @ 4-byte Reload
+ str r5, [sp, #40] @ 4-byte Spill
+ adcs r6, r4, #0
+ ldr r4, [sp, #28] @ 4-byte Reload
+ str r6, [sp, #36] @ 4-byte Spill
+ adcs r0, r4, #0
+ and r4, r6, r5
+ ldr r5, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ and r4, r4, r0
+ adcs r0, r12, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ and r6, r4, r0
+ adcs r0, r5, #0
+ and r4, r6, r0
+ ldr r6, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ adcs r0, r6, #0
+ ldr r6, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #20] @ 4-byte Spill
+ and r5, r4, r0
+ adcs r0, lr, #0
+ and r5, r5, r0
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs lr, r6, #0
+ and r6, r5, lr
+ ldr r5, [sp, #12] @ 4-byte Reload
+ adcs r5, r5, #0
+ and r12, r6, r5
+ adcs r6, r0, #0
+ adcs r7, r7, #0
+ and r4, r12, r6
+ adcs r2, r2, #0
+ and r4, r4, r7
+ adcs r3, r3, #0
+ and r4, r4, r2
+ adcs r0, r11, #0
+ and r4, r4, r3
+ adcs r10, r10, #0
+ and r4, r4, r0
+ adcs r11, r8, #0
+ and r4, r4, r10
+ adc r8, r1, #0
+ ldr r1, .LCPI4_0
+ and r4, r4, r11
+ orr r1, r8, r1
+ and r1, r4, r1
+ cmn r1, #1
+ beq .LBB4_2
+@ BB#1: @ %nonzero
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r1, [r9]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r1, [r9, #4]
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r1, [r9, #8]
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r1, [r9, #12]
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r1, [r9, #16]
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r1, [r9, #20]
+ ldr r1, [sp, #4] @ 4-byte Reload
+ str r1, [r9, #24]
+ add r1, r9, #32
+ str lr, [r9, #28]
+ stm r1, {r5, r6, r7}
+ add r1, r9, #52
+ str r2, [r9, #44]
+ str r3, [r9, #48]
+ stm r1, {r0, r10, r11}
+ mov r1, #255
+ orr r1, r1, #256
+ and r1, r8, r1
+ str r1, [r9, #64]
+ b .LBB4_3
+.LBB4_2: @ %zero
+ mov r0, r9
+ mov r1, #0
+ mov r2, #68
+ bl memset(PLT)
+.LBB4_3: @ %zero
+ add sp, sp, #44
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+ .align 2
+@ BB#4:
+.LCPI4_0:
+ .long 4294966784 @ 0xfffffe00
+.Lfunc_end4:
+ .size mcl_fpDbl_mod_NIST_P521L, .Lfunc_end4-mcl_fpDbl_mod_NIST_P521L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mulUnitPre1L
+ .align 2
+ .type mcl_fp_mulUnitPre1L,%function
+mcl_fp_mulUnitPre1L: @ @mcl_fp_mulUnitPre1L
+ .fnstart
+@ BB#0:
+ ldr r1, [r1]
+ umull r3, r12, r1, r2
+ stm r0, {r3, r12}
+ mov pc, lr
+.Lfunc_end5:
+ .size mcl_fp_mulUnitPre1L, .Lfunc_end5-mcl_fp_mulUnitPre1L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_mulPre1L
+ .align 2
+ .type mcl_fpDbl_mulPre1L,%function
+mcl_fpDbl_mulPre1L: @ @mcl_fpDbl_mulPre1L
+ .fnstart
+@ BB#0:
+ ldr r1, [r1]
+ ldr r2, [r2]
+ umull r3, r12, r2, r1
+ stm r0, {r3, r12}
+ mov pc, lr
+.Lfunc_end6:
+ .size mcl_fpDbl_mulPre1L, .Lfunc_end6-mcl_fpDbl_mulPre1L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_sqrPre1L
+ .align 2
+ .type mcl_fpDbl_sqrPre1L,%function
+mcl_fpDbl_sqrPre1L: @ @mcl_fpDbl_sqrPre1L
+ .fnstart
+@ BB#0:
+ ldr r1, [r1]
+ umull r2, r3, r1, r1
+ stm r0, {r2, r3}
+ mov pc, lr
+.Lfunc_end7:
+ .size mcl_fpDbl_sqrPre1L, .Lfunc_end7-mcl_fpDbl_sqrPre1L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mont1L
+ .align 2
+ .type mcl_fp_mont1L,%function
+mcl_fp_mont1L: @ @mcl_fp_mont1L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, lr}
+ push {r4, r5, r6, lr}
+ ldr r12, [r2]
+ ldr r1, [r1]
+ mov r6, #0
+ umull lr, r2, r1, r12
+ ldr r12, [r3, #-4]
+ ldr r3, [r3]
+ mul r1, lr, r12
+ umull r12, r4, r1, r3
+ adds r5, r12, lr
+ adcs r5, r4, r2
+ umlal lr, r2, r1, r3
+ adc r6, r6, #0
+ subs r1, r2, r3
+ sbc r3, r6, #0
+ tst r3, #1
+ movne r1, r2
+ str r1, [r0]
+ pop {r4, r5, r6, lr}
+ mov pc, lr
+.Lfunc_end8:
+ .size mcl_fp_mont1L, .Lfunc_end8-mcl_fp_mont1L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montNF1L
+ .align 2
+ .type mcl_fp_montNF1L,%function
+mcl_fp_montNF1L: @ @mcl_fp_montNF1L
+ .fnstart
+@ BB#0:
+ .save {r11, lr}
+ push {r11, lr}
+ ldr r12, [r2]
+ ldr r1, [r1]
+ umull lr, r2, r1, r12
+ ldr r12, [r3, #-4]
+ ldr r3, [r3]
+ mul r1, lr, r12
+ umlal lr, r2, r1, r3
+ sub r1, r2, r3
+ cmp r1, #0
+ movge r2, r1
+ str r2, [r0]
+ pop {r11, lr}
+ mov pc, lr
+.Lfunc_end9:
+ .size mcl_fp_montNF1L, .Lfunc_end9-mcl_fp_montNF1L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montRed1L
+ .align 2
+ .type mcl_fp_montRed1L,%function
+mcl_fp_montRed1L: @ @mcl_fp_montRed1L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, lr}
+ push {r4, r5, r6, lr}
+ ldr r12, [r2, #-4]
+ ldr r3, [r1]
+ ldr r2, [r2]
+ ldr r1, [r1, #4]
+ mov r6, #0
+ mul lr, r3, r12
+ umull r12, r4, lr, r2
+ adds r5, r3, r12
+ adcs r5, r1, r4
+ umlal r3, r1, lr, r2
+ adc r6, r6, #0
+ subs r2, r1, r2
+ sbc r3, r6, #0
+ tst r3, #1
+ movne r2, r1
+ str r2, [r0]
+ pop {r4, r5, r6, lr}
+ mov pc, lr
+.Lfunc_end10:
+ .size mcl_fp_montRed1L, .Lfunc_end10-mcl_fp_montRed1L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_addPre1L
+ .align 2
+ .type mcl_fp_addPre1L,%function
+mcl_fp_addPre1L: @ @mcl_fp_addPre1L
+ .fnstart
+@ BB#0:
+ ldr r1, [r1]
+ ldr r2, [r2]
+ adds r1, r2, r1
+ str r1, [r0]
+ mov r0, #0
+ adc r0, r0, #0
+ mov pc, lr
+.Lfunc_end11:
+ .size mcl_fp_addPre1L, .Lfunc_end11-mcl_fp_addPre1L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_subPre1L
+ .align 2
+ .type mcl_fp_subPre1L,%function
+mcl_fp_subPre1L: @ @mcl_fp_subPre1L
+ .fnstart
+@ BB#0:
+ ldr r2, [r2]
+ ldr r1, [r1]
+ subs r1, r1, r2
+ str r1, [r0]
+ mov r0, #0
+ sbc r0, r0, #0
+ and r0, r0, #1
+ mov pc, lr
+.Lfunc_end12:
+ .size mcl_fp_subPre1L, .Lfunc_end12-mcl_fp_subPre1L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_shr1_1L
+ .align 2
+ .type mcl_fp_shr1_1L,%function
+mcl_fp_shr1_1L: @ @mcl_fp_shr1_1L
+ .fnstart
+@ BB#0:
+ ldr r1, [r1]
+ lsr r1, r1, #1
+ str r1, [r0]
+ mov pc, lr
+.Lfunc_end13:
+ .size mcl_fp_shr1_1L, .Lfunc_end13-mcl_fp_shr1_1L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_add1L
+ .align 2
+ .type mcl_fp_add1L,%function
+mcl_fp_add1L: @ @mcl_fp_add1L
+ .fnstart
+@ BB#0:
+ ldr r1, [r1]
+ ldr r2, [r2]
+ ldr r3, [r3]
+ adds r1, r2, r1
+ mov r2, #0
+ str r1, [r0]
+ adc r2, r2, #0
+ subs r1, r1, r3
+ sbc r2, r2, #0
+ tst r2, #1
+ streq r1, [r0]
+ mov pc, lr
+.Lfunc_end14:
+ .size mcl_fp_add1L, .Lfunc_end14-mcl_fp_add1L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_addNF1L
+ .align 2
+ .type mcl_fp_addNF1L,%function
+mcl_fp_addNF1L: @ @mcl_fp_addNF1L
+ .fnstart
+@ BB#0:
+ ldr r1, [r1]
+ ldr r2, [r2]
+ add r1, r2, r1
+ ldr r2, [r3]
+ sub r2, r1, r2
+ cmp r2, #0
+ movlt r2, r1
+ str r2, [r0]
+ mov pc, lr
+.Lfunc_end15:
+ .size mcl_fp_addNF1L, .Lfunc_end15-mcl_fp_addNF1L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_sub1L
+ .align 2
+ .type mcl_fp_sub1L,%function
+mcl_fp_sub1L: @ @mcl_fp_sub1L
+ .fnstart
+@ BB#0:
+ ldr r2, [r2]
+ ldr r1, [r1]
+ subs r1, r1, r2
+ mov r2, #0
+ sbc r2, r2, #0
+ str r1, [r0]
+ tst r2, #1
+ ldrne r2, [r3]
+ addne r1, r2, r1
+ strne r1, [r0]
+ movne pc, lr
+ mov pc, lr
+.Lfunc_end16:
+ .size mcl_fp_sub1L, .Lfunc_end16-mcl_fp_sub1L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_subNF1L
+ .align 2
+ .type mcl_fp_subNF1L,%function
+mcl_fp_subNF1L: @ @mcl_fp_subNF1L
+ .fnstart
+@ BB#0:
+ ldr r2, [r2]
+ ldr r1, [r1]
+ sub r1, r1, r2
+ ldr r2, [r3]
+ cmp r1, #0
+ addlt r1, r1, r2
+ str r1, [r0]
+ mov pc, lr
+.Lfunc_end17:
+ .size mcl_fp_subNF1L, .Lfunc_end17-mcl_fp_subNF1L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_add1L
+ .align 2
+ .type mcl_fpDbl_add1L,%function
+mcl_fpDbl_add1L: @ @mcl_fpDbl_add1L
+ .fnstart
+@ BB#0:
+ .save {r11, lr}
+ push {r11, lr}
+ ldm r1, {r12, lr}
+ ldm r2, {r1, r2}
+ ldr r3, [r3]
+ adds r1, r1, r12
+ str r1, [r0]
+ mov r1, #0
+ adcs r2, r2, lr
+ adc r1, r1, #0
+ subs r3, r2, r3
+ sbc r1, r1, #0
+ tst r1, #1
+ movne r3, r2
+ str r3, [r0, #4]
+ pop {r11, lr}
+ mov pc, lr
+.Lfunc_end18:
+ .size mcl_fpDbl_add1L, .Lfunc_end18-mcl_fpDbl_add1L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_sub1L
+ .align 2
+ .type mcl_fpDbl_sub1L,%function
+mcl_fpDbl_sub1L: @ @mcl_fpDbl_sub1L
+ .fnstart
+@ BB#0:
+ .save {r11, lr}
+ push {r11, lr}
+ ldm r2, {r12, lr}
+ ldr r2, [r1]
+ ldr r1, [r1, #4]
+ ldr r3, [r3]
+ subs r2, r2, r12
+ str r2, [r0]
+ mov r2, #0
+ sbcs r1, r1, lr
+ sbc r2, r2, #0
+ tst r2, #1
+ addne r1, r1, r3
+ str r1, [r0, #4]
+ pop {r11, lr}
+ mov pc, lr
+.Lfunc_end19:
+ .size mcl_fpDbl_sub1L, .Lfunc_end19-mcl_fpDbl_sub1L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mulUnitPre2L
+ .align 2
+ .type mcl_fp_mulUnitPre2L,%function
+mcl_fp_mulUnitPre2L: @ @mcl_fp_mulUnitPre2L
+ .fnstart
+@ BB#0:
+ .save {r11, lr}
+ push {r11, lr}
+ ldm r1, {r3, lr}
+ umull r12, r1, r3, r2
+ mov r3, #0
+ umlal r1, r3, lr, r2
+ str r12, [r0]
+ stmib r0, {r1, r3}
+ pop {r11, lr}
+ mov pc, lr
+.Lfunc_end20:
+ .size mcl_fp_mulUnitPre2L, .Lfunc_end20-mcl_fp_mulUnitPre2L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_mulPre2L
+ .align 2
+ .type mcl_fpDbl_mulPre2L,%function
+mcl_fpDbl_mulPre2L: @ @mcl_fpDbl_mulPre2L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, lr}
+ push {r4, r5, r6, lr}
+ ldr r3, [r2]
+ ldm r1, {r12, lr}
+ ldr r2, [r2, #4]
+ mov r5, #0
+ umull r1, r4, r12, r3
+ umlal r4, r5, lr, r3
+ umull r3, r6, r12, r2
+ str r1, [r0]
+ mov r1, #0
+ adds r3, r3, r4
+ str r3, [r0, #4]
+ umull r3, r4, lr, r2
+ adcs r2, r3, r5
+ adc r1, r1, #0
+ adds r2, r2, r6
+ adc r1, r1, r4
+ str r2, [r0, #8]
+ str r1, [r0, #12]
+ pop {r4, r5, r6, lr}
+ mov pc, lr
+.Lfunc_end21:
+ .size mcl_fpDbl_mulPre2L, .Lfunc_end21-mcl_fpDbl_mulPre2L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_sqrPre2L
+ .align 2
+ .type mcl_fpDbl_sqrPre2L,%function
+mcl_fpDbl_sqrPre2L: @ @mcl_fpDbl_sqrPre2L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, lr}
+ push {r4, r5, r6, lr}
+ ldr r2, [r1]
+ ldr r1, [r1, #4]
+ mov r4, #0
+ mov lr, #0
+ umull r12, r3, r2, r2
+ umull r5, r6, r1, r2
+ umlal r3, r4, r1, r2
+ str r12, [r0]
+ adds r2, r3, r5
+ umull r3, r5, r1, r1
+ adcs r1, r4, r3
+ str r2, [r0, #4]
+ adc r3, lr, #0
+ adds r1, r1, r6
+ adc r3, r3, r5
+ str r1, [r0, #8]
+ str r3, [r0, #12]
+ pop {r4, r5, r6, lr}
+ mov pc, lr
+.Lfunc_end22:
+ .size mcl_fpDbl_sqrPre2L, .Lfunc_end22-mcl_fpDbl_sqrPre2L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mont2L
+ .align 2
+ .type mcl_fp_mont2L,%function
+mcl_fp_mont2L: @ @mcl_fp_mont2L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ ldm r1, {r12, lr}
+ ldm r2, {r1, r2}
+ mov r7, #0
+ mov r5, #0
+ mov r6, #0
+ umull r8, r9, r2, r12
+ umull r11, r4, r12, r1
+ umlal r9, r7, r2, lr
+ umlal r4, r5, lr, r1
+ ldmda r3, {r12, lr}
+ ldr r10, [r3, #4]
+ mul r1, r11, r12
+ umull r3, r2, r1, lr
+ adds r3, r3, r11
+ mov r3, #0
+ umlal r2, r3, r1, r10
+ adcs r1, r2, r4
+ adcs r2, r3, r5
+ adc r3, r6, #0
+ adds r1, r1, r8
+ adcs r8, r2, r9
+ mul r5, r1, r12
+ adcs r3, r3, r7
+ umull r7, r2, r5, lr
+ adc r4, r6, #0
+ umlal r2, r6, r5, r10
+ adds r1, r7, r1
+ adcs r1, r2, r8
+ adcs r2, r6, r3
+ adc r3, r4, #0
+ subs r7, r1, lr
+ sbcs r6, r2, r10
+ sbc r3, r3, #0
+ ands r3, r3, #1
+ movne r7, r1
+ movne r6, r2
+ str r7, [r0]
+ str r6, [r0, #4]
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end23:
+ .size mcl_fp_mont2L, .Lfunc_end23-mcl_fp_mont2L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montNF2L
+ .align 2
+ .type mcl_fp_montNF2L,%function
+mcl_fp_montNF2L: @ @mcl_fp_montNF2L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ ldm r2, {r12, lr}
+ ldr r11, [r1]
+ ldr r8, [r3, #-4]
+ ldr r7, [r3]
+ ldr r9, [r1, #4]
+ ldr r3, [r3, #4]
+ umull r4, r5, r11, r12
+ mul r6, r4, r8
+ umull r1, r10, r6, r7
+ adds r1, r1, r4
+ mov r4, #0
+ umlal r5, r4, r9, r12
+ umull r2, r12, r6, r3
+ mov r1, #0
+ adcs r2, r2, r5
+ adc r4, r4, #0
+ adds r2, r2, r10
+ adc r6, r4, r12
+ umull r5, r4, lr, r11
+ adds r2, r5, r2
+ umlal r4, r1, lr, r9
+ adcs r9, r4, r6
+ mul r5, r2, r8
+ adc lr, r1, #0
+ umull r1, r6, r5, r7
+ umull r4, r12, r5, r3
+ adds r1, r1, r2
+ adcs r1, r4, r9
+ adc r2, lr, #0
+ adds r1, r1, r6
+ adc r2, r2, r12
+ subs r7, r1, r7
+ sbc r3, r2, r3
+ cmp r3, #0
+ movlt r7, r1
+ movlt r3, r2
+ str r7, [r0]
+ str r3, [r0, #4]
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end24:
+ .size mcl_fp_montNF2L, .Lfunc_end24-mcl_fp_montNF2L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montRed2L
+ .align 2
+ .type mcl_fp_montRed2L,%function
+mcl_fp_montRed2L: @ @mcl_fp_montRed2L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, lr}
+ ldr r12, [r2, #-4]
+ ldm r2, {r3, lr}
+ ldm r1, {r2, r9, r10}
+ ldr r8, [r1, #12]
+ mov r5, #0
+ mov r7, #0
+ mul r6, r2, r12
+ umull r1, r4, r6, r3
+ umlal r4, r5, r6, lr
+ adds r1, r2, r1
+ adcs r1, r9, r4
+ adcs r9, r10, r5
+ mul r6, r1, r12
+ adcs r8, r8, #0
+ umull r2, r4, r6, r3
+ adc r5, r7, #0
+ umlal r4, r7, r6, lr
+ adds r1, r2, r1
+ adcs r1, r4, r9
+ adcs r2, r7, r8
+ adc r7, r5, #0
+ subs r3, r1, r3
+ sbcs r6, r2, lr
+ sbc r7, r7, #0
+ ands r7, r7, #1
+ movne r3, r1
+ movne r6, r2
+ stm r0, {r3, r6}
+ pop {r4, r5, r6, r7, r8, r9, r10, lr}
+ mov pc, lr
+.Lfunc_end25:
+ .size mcl_fp_montRed2L, .Lfunc_end25-mcl_fp_montRed2L
+ .cantunwind
+ .fnend
+
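+@ mcl_fp_addPre2L(r0=z, r1=x, r2=y): 2-limb add with no reduction; the carry out is returned in r0.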
+ .globl mcl_fp_addPre2L
+ .align 2
+ .type mcl_fp_addPre2L,%function
+mcl_fp_addPre2L: @ @mcl_fp_addPre2L
+ .fnstart
+@ BB#0:
+ ldm r1, {r3, r12}
+ ldm r2, {r1, r2}
+ adds r1, r1, r3
+ adcs r2, r2, r12
+ stm r0, {r1, r2}
+ mov r0, #0
+ adc r0, r0, #0
+ mov pc, lr
+.Lfunc_end26:
+ .size mcl_fp_addPre2L, .Lfunc_end26-mcl_fp_addPre2L
+ .cantunwind
+ .fnend
+
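+@ mcl_fp_subPre2L(r0=z, r1=x, r2=y): 2-limb subtract with no reduction; the borrow (0 or 1) is returned in r0.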
+ .globl mcl_fp_subPre2L
+ .align 2
+ .type mcl_fp_subPre2L,%function
+mcl_fp_subPre2L: @ @mcl_fp_subPre2L
+ .fnstart
+@ BB#0:
+ ldm r2, {r3, r12}
+ ldr r2, [r1]
+ ldr r1, [r1, #4]
+ subs r2, r2, r3
+ sbcs r1, r1, r12
+ str r2, [r0]
+ str r1, [r0, #4]
+ mov r0, #0
+ sbc r0, r0, #0
+ and r0, r0, #1
+ mov pc, lr
+.Lfunc_end27:
+ .size mcl_fp_subPre2L, .Lfunc_end27-mcl_fp_subPre2L
+ .cantunwind
+ .fnend
+
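+@ mcl_fp_shr1_2L(r0=z, r1=x): logical right shift of the 2-limb x by one bit.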
+ .globl mcl_fp_shr1_2L
+ .align 2
+ .type mcl_fp_shr1_2L,%function
+mcl_fp_shr1_2L: @ @mcl_fp_shr1_2L
+ .fnstart
+@ BB#0:
+ ldr r2, [r1]
+ ldr r1, [r1, #4]
+ lsrs r3, r1, #1
+ lsr r1, r1, #1
+ rrx r2, r2
+ str r2, [r0]
+ str r1, [r0, #4]
+ mov pc, lr
+.Lfunc_end28:
+ .size mcl_fp_shr1_2L, .Lfunc_end28-mcl_fp_shr1_2L
+ .cantunwind
+ .fnend
+
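+@ mcl_fp_add2L(r0=z, r1=x, r2=y, r3=p): modular add; the raw sum is stored first, then
+@ conditionally overwritten with sum-p when sum >= p.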
+ .globl mcl_fp_add2L
+ .align 2
+ .type mcl_fp_add2L,%function
+mcl_fp_add2L: @ @mcl_fp_add2L
+ .fnstart
+@ BB#0:
+ .save {r4, lr}
+ push {r4, lr}
+ ldm r1, {r12, lr}
+ ldm r2, {r1, r2}
+ adds r12, r1, r12
+ mov r1, #0
+ adcs r2, r2, lr
+ str r12, [r0]
+ str r2, [r0, #4]
+ adc lr, r1, #0
+ ldm r3, {r1, r4}
+ subs r3, r12, r1
+ sbcs r2, r2, r4
+ sbc r1, lr, #0
+ tst r1, #1
+ streq r3, [r0]
+ streq r2, [r0, #4]
+ pop {r4, lr}
+ mov pc, lr
+.Lfunc_end29:
+ .size mcl_fp_add2L, .Lfunc_end29-mcl_fp_add2L
+ .cantunwind
+ .fnend
+
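+@ mcl_fp_addNF2L: 'NF' variant of mcl_fp_add2L; the reduction by p[r3] is selected with a signed compare.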
+ .globl mcl_fp_addNF2L
+ .align 2
+ .type mcl_fp_addNF2L,%function
+mcl_fp_addNF2L: @ @mcl_fp_addNF2L
+ .fnstart
+@ BB#0:
+ .save {r4, lr}
+ push {r4, lr}
+ ldm r1, {r12, lr}
+ ldm r2, {r1, r2}
+ adds r1, r1, r12
+ adc r4, r2, lr
+ ldm r3, {r12, lr}
+ subs r3, r1, r12
+ sbc r2, r4, lr
+ cmp r2, #0
+ movlt r3, r1
+ movlt r2, r4
+ str r3, [r0]
+ str r2, [r0, #4]
+ pop {r4, lr}
+ mov pc, lr
+.Lfunc_end30:
+ .size mcl_fp_addNF2L, .Lfunc_end30-mcl_fp_addNF2L
+ .cantunwind
+ .fnend
+
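+@ mcl_fp_sub2L(r0=z, r1=x, r2=y, r3=p): modular subtract; the raw difference is stored and the
+@ function returns early when no borrow occurred, otherwise p is added back and the result re-stored.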
+ .globl mcl_fp_sub2L
+ .align 2
+ .type mcl_fp_sub2L,%function
+mcl_fp_sub2L: @ @mcl_fp_sub2L
+ .fnstart
+@ BB#0:
+ .save {r4, lr}
+ push {r4, lr}
+ ldm r2, {r12, lr}
+ ldm r1, {r2, r4}
+ subs r1, r2, r12
+ sbcs r2, r4, lr
+ mov r4, #0
+ sbc r4, r4, #0
+ stm r0, {r1, r2}
+ tst r4, #1
+ popeq {r4, lr}
+ moveq pc, lr
+ ldr r4, [r3]
+ ldr r3, [r3, #4]
+ adds r1, r4, r1
+ adc r2, r3, r2
+ stm r0, {r1, r2}
+ pop {r4, lr}
+ mov pc, lr
+.Lfunc_end31:
+ .size mcl_fp_sub2L, .Lfunc_end31-mcl_fp_sub2L
+ .cantunwind
+ .fnend
+
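+@ mcl_fp_subNF2L: 'NF' variant of mcl_fp_sub2L; p[r3] is added back or not depending on the sign of x-y.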
+ .globl mcl_fp_subNF2L
+ .align 2
+ .type mcl_fp_subNF2L,%function
+mcl_fp_subNF2L: @ @mcl_fp_subNF2L
+ .fnstart
+@ BB#0:
+ .save {r4, lr}
+ push {r4, lr}
+ ldm r2, {r12, lr}
+ ldr r2, [r1]
+ ldr r1, [r1, #4]
+ subs r4, r2, r12
+ sbc r1, r1, lr
+ ldm r3, {r12, lr}
+ adds r3, r4, r12
+ adc r2, r1, lr
+ cmp r1, #0
+ movge r3, r4
+ movge r2, r1
+ str r3, [r0]
+ str r2, [r0, #4]
+ pop {r4, lr}
+ mov pc, lr
+.Lfunc_end32:
+ .size mcl_fp_subNF2L, .Lfunc_end32-mcl_fp_subNF2L
+ .cantunwind
+ .fnend
+
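+@ mcl_fpDbl_add2L(r0=z, r1=x, r2=y, r3=p): 4-word (double-width) add; the low two words are
+@ stored as-is and the high two words are conditionally reduced by p.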
+ .globl mcl_fpDbl_add2L
+ .align 2
+ .type mcl_fpDbl_add2L,%function
+mcl_fpDbl_add2L: @ @mcl_fpDbl_add2L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r11, lr}
+ push {r4, r5, r6, r7, r11, lr}
+ ldm r1, {r12, lr}
+ ldr r4, [r1, #8]
+ ldr r1, [r1, #12]
+ ldm r2, {r5, r6, r7}
+ ldr r2, [r2, #12]
+ adds r5, r5, r12
+ adcs r6, r6, lr
+ str r5, [r0]
+ adcs r7, r7, r4
+ str r6, [r0, #4]
+ mov r6, #0
+ adcs r1, r2, r1
+ adc r2, r6, #0
+ ldr r6, [r3]
+ ldr r3, [r3, #4]
+ subs r6, r7, r6
+ sbcs r3, r1, r3
+ sbc r2, r2, #0
+ ands r2, r2, #1
+ movne r6, r7
+ movne r3, r1
+ str r6, [r0, #8]
+ str r3, [r0, #12]
+ pop {r4, r5, r6, r7, r11, lr}
+ mov pc, lr
+.Lfunc_end33:
+ .size mcl_fpDbl_add2L, .Lfunc_end33-mcl_fpDbl_add2L
+ .cantunwind
+ .fnend
+
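+@ mcl_fpDbl_sub2L(r0=z, r1=x, r2=y, r3=p): 4-word (double-width) subtract; the low two words are
+@ stored as-is and p is conditionally added back into the high two words.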
+ .globl mcl_fpDbl_sub2L
+ .align 2
+ .type mcl_fpDbl_sub2L,%function
+mcl_fpDbl_sub2L: @ @mcl_fpDbl_sub2L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r11, lr}
+ push {r4, r5, r6, r7, r11, lr}
+ ldm r2, {r12, lr}
+ ldr r4, [r2, #8]
+ ldr r2, [r2, #12]
+ ldm r1, {r5, r6, r7}
+ ldr r1, [r1, #12]
+ subs r5, r5, r12
+ sbcs r6, r6, lr
+ str r5, [r0]
+ sbcs r7, r7, r4
+ str r6, [r0, #4]
+ mov r6, #0
+ sbcs r1, r1, r2
+ sbc r2, r6, #0
+ ldr r6, [r3]
+ ldr r3, [r3, #4]
+ adds r6, r7, r6
+ adc r3, r1, r3
+ ands r2, r2, #1
+ moveq r6, r7
+ moveq r3, r1
+ str r6, [r0, #8]
+ str r3, [r0, #12]
+ pop {r4, r5, r6, r7, r11, lr}
+ mov pc, lr
+.Lfunc_end34:
+ .size mcl_fpDbl_sub2L, .Lfunc_end34-mcl_fpDbl_sub2L
+ .cantunwind
+ .fnend
+
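+@ mcl_fp_mulUnitPre3L(r0=z, r1=x, r2=y): 4-word product of the 3-limb x and the single word y passed in r2.
+@ From here on the same set of operations (mulUnitPre, mulPre, sqrPre, mont, montNF, montRed,
+@ addPre, subPre, shr1, add, addNF, sub, subNF, fpDbl_add, fpDbl_sub) is repeated for 3-, 4-,
+@ and 5-limb operands with the argument conventions noted above.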
+ .globl mcl_fp_mulUnitPre3L
+ .align 2
+ .type mcl_fp_mulUnitPre3L,%function
+mcl_fp_mulUnitPre3L: @ @mcl_fp_mulUnitPre3L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, lr}
+ push {r4, r5, r6, r7, r8, lr}
+ ldr r12, [r1]
+ ldmib r1, {r3, r5}
+ umull lr, r4, r12, r2
+ umull r1, r12, r5, r2
+ umull r7, r8, r3, r2
+ mov r5, r1
+ mov r6, r4
+ str lr, [r0]
+ umlal r6, r5, r3, r2
+ adds r2, r4, r7
+ adcs r1, r8, r1
+ str r6, [r0, #4]
+ str r5, [r0, #8]
+ adc r1, r12, #0
+ str r1, [r0, #12]
+ pop {r4, r5, r6, r7, r8, lr}
+ mov pc, lr
+.Lfunc_end35:
+ .size mcl_fp_mulUnitPre3L, .Lfunc_end35-mcl_fp_mulUnitPre3L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_mulPre3L
+ .align 2
+ .type mcl_fpDbl_mulPre3L,%function
+mcl_fpDbl_mulPre3L: @ @mcl_fpDbl_mulPre3L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ ldr r3, [r2]
+ ldm r1, {r12, lr}
+ ldr r1, [r1, #8]
+ umull r4, r5, r12, r3
+ str r4, [r0]
+ umull r4, r6, lr, r3
+ adds r4, r5, r4
+ umull r7, r4, r1, r3
+ adcs r6, r6, r7
+ umlal r5, r7, lr, r3
+ ldr r3, [r2, #4]
+ ldr r2, [r2, #8]
+ adc r8, r4, #0
+ umull r6, r10, r12, r3
+ adds r9, r6, r5
+ umull r6, r5, lr, r3
+ adcs r6, r6, r7
+ umull r7, r4, r1, r3
+ str r9, [r0, #4]
+ adcs r3, r7, r8
+ mov r8, #0
+ adc r7, r8, #0
+ adds r6, r6, r10
+ adcs r11, r3, r5
+ umull r5, r9, r1, r2
+ umull r1, r10, lr, r2
+ adc r4, r7, r4
+ umull r7, r3, r12, r2
+ adds r2, r6, r7
+ adcs r1, r11, r1
+ str r2, [r0, #8]
+ adcs r2, r4, r5
+ adc r7, r8, #0
+ adds r1, r1, r3
+ str r1, [r0, #12]
+ adcs r1, r2, r10
+ str r1, [r0, #16]
+ adc r1, r7, r9
+ str r1, [r0, #20]
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end36:
+ .size mcl_fpDbl_mulPre3L, .Lfunc_end36-mcl_fpDbl_mulPre3L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_sqrPre3L
+ .align 2
+ .type mcl_fpDbl_sqrPre3L,%function
+mcl_fpDbl_sqrPre3L: @ @mcl_fpDbl_sqrPre3L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, lr}
+ ldm r1, {r2, r3, r12}
+ mov r10, #0
+ umull r1, lr, r2, r2
+ umull r7, r4, r3, r2
+ str r1, [r0]
+ umull r1, r8, r12, r2
+ mov r5, lr
+ mov r6, r1
+ umlal r5, r6, r3, r2
+ adds r2, lr, r7
+ adcs r2, r4, r1
+ adc r2, r8, #0
+ adds lr, r5, r7
+ umull r5, r9, r3, r3
+ adcs r5, r6, r5
+ umull r6, r7, r12, r3
+ str lr, [r0, #4]
+ adcs r2, r2, r6
+ adc r3, r10, #0
+ adds r4, r5, r4
+ adcs r2, r2, r9
+ adc r3, r3, r7
+ adds r1, r4, r1
+ umull r5, r4, r12, r12
+ str r1, [r0, #8]
+ adcs r1, r2, r6
+ adcs r2, r3, r5
+ adc r3, r10, #0
+ adds r1, r1, r8
+ str r1, [r0, #12]
+ adcs r1, r2, r7
+ str r1, [r0, #16]
+ adc r1, r3, r4
+ str r1, [r0, #20]
+ pop {r4, r5, r6, r7, r8, r9, r10, lr}
+ mov pc, lr
+.Lfunc_end37:
+ .size mcl_fpDbl_sqrPre3L, .Lfunc_end37-mcl_fpDbl_sqrPre3L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mont3L
+ .align 2
+ .type mcl_fp_mont3L,%function
+mcl_fp_mont3L: @ @mcl_fp_mont3L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #44
+ sub sp, sp, #44
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r2, {r8, lr}
+ ldr r0, [r2, #8]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [r1]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldmib r1, {r4, r9}
+ ldr r2, [r3, #-4]
+ umull r7, r6, r0, r8
+ ldr r0, [r3]
+ ldr r1, [r3, #8]
+ ldr r10, [r3, #4]
+ str r7, [sp, #12] @ 4-byte Spill
+ mul r5, r7, r2
+ str r2, [sp, #16] @ 4-byte Spill
+ str r9, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #40] @ 4-byte Spill
+ str r1, [sp, #28] @ 4-byte Spill
+ umull r12, r2, r5, r1
+ umull r1, r3, r5, r0
+ umull r0, r7, r9, r8
+ umull r11, r9, r4, r8
+ str r7, [sp] @ 4-byte Spill
+ adds r7, r6, r11
+ str r1, [sp, #8] @ 4-byte Spill
+ mov r1, r3
+ str r2, [sp, #4] @ 4-byte Spill
+ mov r2, r12
+ adcs r7, r9, r0
+ umlal r1, r2, r5, r10
+ umlal r6, r0, r4, r8
+ mov r8, #0
+ ldr r7, [sp] @ 4-byte Reload
+ adc r9, r7, #0
+ umull r7, r11, r5, r10
+ ldr r5, [sp, #8] @ 4-byte Reload
+ adds r3, r3, r7
+ ldr r7, [sp, #12] @ 4-byte Reload
+ adcs r3, r11, r12
+ ldr r3, [sp, #4] @ 4-byte Reload
+ adc r3, r3, #0
+ adds r7, r5, r7
+ adcs r11, r1, r6
+ adcs r12, r2, r0
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r9, r3, r9
+ ldr r3, [sp, #36] @ 4-byte Reload
+ adc r8, r8, #0
+ umull r6, r7, lr, r0
+ umull r5, r0, lr, r4
+ umull r1, r2, lr, r3
+ adds r5, r2, r5
+ adcs r0, r0, r6
+ umlal r2, r6, lr, r4
+ adc r0, r7, #0
+ adds r1, r11, r1
+ ldr r11, [sp, #16] @ 4-byte Reload
+ adcs r2, r12, r2
+ ldr r12, [sp, #28] @ 4-byte Reload
+ str r2, [sp, #12] @ 4-byte Spill
+ adcs r2, r9, r6
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [sp, #40] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #4] @ 4-byte Spill
+ mov r0, #0
+ mul r6, r1, r11
+ adc r0, r0, #0
+ umull r7, r9, r6, r12
+ str r0, [sp] @ 4-byte Spill
+ mov r5, r7
+ umull r8, r0, r6, r2
+ umull lr, r2, r6, r10
+ mov r3, r0
+ adds r0, r0, lr
+ ldr lr, [sp, #36] @ 4-byte Reload
+ adcs r0, r2, r7
+ umlal r3, r5, r6, r10
+ adc r0, r9, #0
+ adds r1, r8, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r1, r3, r1
+ ldr r3, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r8, r5, r1
+ ldr r1, [sp, #4] @ 4-byte Reload
+ adcs r9, r0, r1
+ ldr r0, [sp] @ 4-byte Reload
+ umull r1, r2, r3, lr
+ adc r0, r0, #0
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ umull r6, r7, r3, r0
+ umull r5, r0, r3, r4
+ adds r5, r2, r5
+ adcs r0, r0, r6
+ umlal r2, r6, r3, r4
+ ldr r3, [sp, #12] @ 4-byte Reload
+ adc r0, r7, #0
+ adds r1, r3, r1
+ adcs r2, r8, r2
+ str r2, [sp, #36] @ 4-byte Spill
+ ldr r2, [sp, #8] @ 4-byte Reload
+ adcs r9, r9, r6
+ mul r6, r1, r11
+ umull r7, r4, r6, r12
+ ldr r12, [sp, #40] @ 4-byte Reload
+ mov r5, r7
+ adcs r0, r2, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ mov r0, #0
+ umull r11, r3, r6, r12
+ adc r8, r0, #0
+ umull r0, lr, r6, r10
+ mov r2, r3
+ adds r0, r3, r0
+ ldr r3, [sp, #32] @ 4-byte Reload
+ umlal r2, r5, r6, r10
+ adcs r0, lr, r7
+ adc r0, r4, #0
+ adds r1, r11, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r1, r2, r1
+ adcs r2, r5, r9
+ ldr r5, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r3
+ adc r3, r8, #0
+ subs r7, r1, r12
+ sbcs r6, r2, r10
+ sbcs r5, r0, r5
+ sbc r3, r3, #0
+ ands r3, r3, #1
+ movne r5, r0
+ ldr r0, [sp, #24] @ 4-byte Reload
+ movne r7, r1
+ movne r6, r2
+ str r7, [r0]
+ str r6, [r0, #4]
+ str r5, [r0, #8]
+ add sp, sp, #44
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end38:
+ .size mcl_fp_mont3L, .Lfunc_end38-mcl_fp_mont3L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montNF3L
+ .align 2
+ .type mcl_fp_montNF3L,%function
+mcl_fp_montNF3L: @ @mcl_fp_montNF3L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #68
+ sub sp, sp, #68
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r8, [r1]
+ ldmib r1, {r6, r9}
+ ldm r2, {r4, r7}
+ ldr r0, [r2, #8]
+ mov r10, r3
+ umull r3, r1, r0, r9
+ str r1, [sp, #52] @ 4-byte Spill
+ umull r1, r2, r0, r8
+ str r3, [sp, #44] @ 4-byte Spill
+ str r1, [sp, #48] @ 4-byte Spill
+ str r2, [sp, #40] @ 4-byte Spill
+ mov r1, r2
+ mov r2, r3
+ umull r3, r5, r0, r6
+ umlal r1, r2, r0, r6
+ str r3, [sp, #32] @ 4-byte Spill
+ umull r3, r0, r7, r6
+ str r5, [sp, #36] @ 4-byte Spill
+ str r1, [sp, #56] @ 4-byte Spill
+ str r2, [sp, #60] @ 4-byte Spill
+ umull r2, r1, r7, r9
+ str r0, [sp, #8] @ 4-byte Spill
+ str r3, [sp, #4] @ 4-byte Spill
+ str r1, [sp, #28] @ 4-byte Spill
+ umull r1, r11, r7, r8
+ str r2, [sp, #16] @ 4-byte Spill
+ str r1, [sp, #24] @ 4-byte Spill
+ mov r1, r2
+ str r11, [sp, #12] @ 4-byte Spill
+ umlal r11, r1, r7, r6
+ umull r0, r7, r6, r4
+ str r1, [sp, #20] @ 4-byte Spill
+ umull lr, r1, r9, r4
+ umull r9, r2, r8, r4
+ ldr r8, [r10, #-4]
+ adds r0, r2, r0
+ str r1, [sp] @ 4-byte Spill
+ mov r1, r2
+ mov r12, lr
+ adcs r0, r7, lr
+ umlal r1, r12, r6, r4
+ ldr r0, [sp] @ 4-byte Reload
+ ldm r10, {r6, r7}
+ mul r2, r9, r8
+ adc r3, r0, #0
+ ldr r0, [r10, #8]
+ umull r4, lr, r2, r6
+ adds r4, r4, r9
+ umull r4, r9, r2, r7
+ adcs r1, r4, r1
+ umull r4, r5, r2, r0
+ adcs r2, r4, r12
+ ldr r4, [sp, #4] @ 4-byte Reload
+ adc r3, r3, #0
+ adds r1, r1, lr
+ adcs r2, r2, r9
+ adc r3, r3, r5
+ ldr r5, [sp, #12] @ 4-byte Reload
+ adds r5, r5, r4
+ ldr r4, [sp, #8] @ 4-byte Reload
+ ldr r5, [sp, #16] @ 4-byte Reload
+ adcs r5, r4, r5
+ ldr r4, [sp, #24] @ 4-byte Reload
+ ldr r5, [sp, #28] @ 4-byte Reload
+ adc r5, r5, #0
+ adds r1, r4, r1
+ ldr r4, [sp, #20] @ 4-byte Reload
+ adcs r2, r11, r2
+ adcs r12, r4, r3
+ mul r4, r1, r8
+ umull r3, r9, r4, r6
+ adc lr, r5, #0
+ adds r1, r3, r1
+ umull r1, r3, r4, r7
+ adcs r1, r1, r2
+ umull r2, r5, r4, r0
+ adcs r2, r2, r12
+ adc r4, lr, #0
+ adds r1, r1, r9
+ adcs r12, r2, r3
+ ldr r2, [sp, #40] @ 4-byte Reload
+ ldr r3, [sp, #32] @ 4-byte Reload
+ adc r9, r4, r5
+ adds r5, r2, r3
+ ldr r2, [sp, #44] @ 4-byte Reload
+ ldr r3, [sp, #36] @ 4-byte Reload
+ adcs r5, r3, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ ldr r5, [sp, #60] @ 4-byte Reload
+ adc lr, r2, #0
+ ldr r2, [sp, #48] @ 4-byte Reload
+ adds r1, r2, r1
+ mul r4, r1, r8
+ umull r10, r2, r4, r0
+ umull r3, r8, r4, r7
+ str r2, [sp, #52] @ 4-byte Spill
+ umull r2, r11, r4, r6
+ ldr r4, [sp, #56] @ 4-byte Reload
+ adcs r4, r4, r12
+ adcs r12, r5, r9
+ adc r5, lr, #0
+ adds r1, r2, r1
+ adcs r1, r3, r4
+ adcs r2, r10, r12
+ adc r3, r5, #0
+ ldr r5, [sp, #52] @ 4-byte Reload
+ adds r1, r1, r11
+ adcs r2, r2, r8
+ adc r3, r3, r5
+ subs r6, r1, r6
+ sbcs r7, r2, r7
+ sbc r0, r3, r0
+ asr r5, r0, #31
+ cmp r5, #0
+ movlt r6, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ movlt r7, r2
+ movlt r0, r3
+ stm r1, {r6, r7}
+ str r0, [r1, #8]
+ add sp, sp, #68
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end39:
+ .size mcl_fp_montNF3L, .Lfunc_end39-mcl_fp_montNF3L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montRed3L
+ .align 2
+ .type mcl_fp_montRed3L,%function
+mcl_fp_montRed3L: @ @mcl_fp_montRed3L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #28
+ sub sp, sp, #28
+ ldr r5, [r2]
+ ldr lr, [r2, #-4]
+ ldr r3, [r2, #4]
+ ldr r2, [r2, #8]
+ str r0, [sp, #24] @ 4-byte Spill
+ str r5, [sp, #20] @ 4-byte Spill
+ str r2, [sp] @ 4-byte Spill
+ ldm r1, {r4, r7}
+ str r7, [sp, #8] @ 4-byte Spill
+ ldr r7, [r1, #8]
+ mul r6, r4, lr
+ umull r10, r8, r6, r3
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r1, #12]
+ str r7, [sp, #12] @ 4-byte Spill
+ umull r7, r9, r6, r2
+ umull r11, r2, r6, r5
+ mov r0, r2
+ adds r2, r2, r10
+ mov r12, r7
+ adcs r2, r8, r7
+ umlal r0, r12, r6, r3
+ ldr r8, [r1, #20]
+ ldr r1, [r1, #16]
+ ldr r2, [sp, #8] @ 4-byte Reload
+ adc r10, r9, #0
+ adds r7, r4, r11
+ mov r11, lr
+ adcs r9, r2, r0
+ ldr r2, [sp] @ 4-byte Reload
+ mul r7, r9, lr
+ umull lr, r0, r7, r2
+ str r0, [sp, #8] @ 4-byte Spill
+ umull r4, r0, r7, r5
+ ldr r5, [sp, #16] @ 4-byte Reload
+ mov r6, lr
+ str r4, [sp, #4] @ 4-byte Spill
+ mov r4, r0
+ umlal r4, r6, r7, r3
+ adcs r12, r5, r12
+ ldr r5, [sp, #12] @ 4-byte Reload
+ adcs r10, r5, r10
+ adcs r1, r1, #0
+ str r1, [sp, #16] @ 4-byte Spill
+ adcs r1, r8, #0
+ str r1, [sp, #12] @ 4-byte Spill
+ mov r1, #0
+ adc r8, r1, #0
+ umull r1, r5, r7, r3
+ ldr r7, [sp, #16] @ 4-byte Reload
+ adds r1, r0, r1
+ adcs r0, r5, lr
+ ldr r1, [sp, #4] @ 4-byte Reload
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r1, r1, r9
+ adcs r1, r4, r12
+ adcs lr, r6, r10
+ ldr r6, [sp, #20] @ 4-byte Reload
+ mul r5, r1, r11
+ mov r11, r2
+ adcs r0, r0, r7
+ umull r4, r12, r5, r2
+ umull r2, r7, r5, r3
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r10, r0, #0
+ umull r9, r0, r5, r6
+ adc r8, r8, #0
+ adds r2, r0, r2
+ mov r2, r4
+ adcs r4, r7, r4
+ adc r7, r12, #0
+ adds r1, r9, r1
+ umlal r0, r2, r5, r3
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, lr
+ adcs r1, r2, r1
+ adcs r2, r7, r10
+ adc r7, r8, #0
+ subs r6, r0, r6
+ sbcs r3, r1, r3
+ sbcs r5, r2, r11
+ sbc r7, r7, #0
+ ands r7, r7, #1
+ movne r6, r0
+ ldr r0, [sp, #24] @ 4-byte Reload
+ movne r3, r1
+ movne r5, r2
+ str r6, [r0]
+ stmib r0, {r3, r5}
+ add sp, sp, #28
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end40:
+ .size mcl_fp_montRed3L, .Lfunc_end40-mcl_fp_montRed3L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_addPre3L
+ .align 2
+ .type mcl_fp_addPre3L,%function
+mcl_fp_addPre3L: @ @mcl_fp_addPre3L
+ .fnstart
+@ BB#0:
+ .save {r4, lr}
+ push {r4, lr}
+ ldm r1, {r3, r12, lr}
+ ldm r2, {r1, r4}
+ ldr r2, [r2, #8]
+ adds r1, r1, r3
+ adcs r3, r4, r12
+ adcs r2, r2, lr
+ stm r0, {r1, r3}
+ str r2, [r0, #8]
+ mov r0, #0
+ adc r0, r0, #0
+ pop {r4, lr}
+ mov pc, lr
+.Lfunc_end41:
+ .size mcl_fp_addPre3L, .Lfunc_end41-mcl_fp_addPre3L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_subPre3L
+ .align 2
+ .type mcl_fp_subPre3L,%function
+mcl_fp_subPre3L: @ @mcl_fp_subPre3L
+ .fnstart
+@ BB#0:
+ .save {r4, lr}
+ push {r4, lr}
+ ldm r2, {r3, r12, lr}
+ ldm r1, {r2, r4}
+ ldr r1, [r1, #8]
+ subs r2, r2, r3
+ sbcs r3, r4, r12
+ sbcs r1, r1, lr
+ stm r0, {r2, r3}
+ str r1, [r0, #8]
+ mov r0, #0
+ sbc r0, r0, #0
+ and r0, r0, #1
+ pop {r4, lr}
+ mov pc, lr
+.Lfunc_end42:
+ .size mcl_fp_subPre3L, .Lfunc_end42-mcl_fp_subPre3L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_shr1_3L
+ .align 2
+ .type mcl_fp_shr1_3L,%function
+mcl_fp_shr1_3L: @ @mcl_fp_shr1_3L
+ .fnstart
+@ BB#0:
+ ldr r3, [r1, #4]
+ ldr r12, [r1]
+ ldr r1, [r1, #8]
+ lsrs r2, r3, #1
+ lsr r3, r3, #1
+ orr r3, r3, r1, lsl #31
+ rrx r2, r12
+ lsr r1, r1, #1
+ stm r0, {r2, r3}
+ str r1, [r0, #8]
+ mov pc, lr
+.Lfunc_end43:
+ .size mcl_fp_shr1_3L, .Lfunc_end43-mcl_fp_shr1_3L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_add3L
+ .align 2
+ .type mcl_fp_add3L,%function
+mcl_fp_add3L: @ @mcl_fp_add3L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r11, lr}
+ push {r4, r5, r11, lr}
+ ldm r1, {r12, lr}
+ ldr r1, [r1, #8]
+ ldm r2, {r4, r5}
+ ldr r2, [r2, #8]
+ adds r4, r4, r12
+ adcs r5, r5, lr
+ adcs r1, r2, r1
+ stm r0, {r4, r5}
+ mov r2, #0
+ str r1, [r0, #8]
+ adc r12, r2, #0
+ ldm r3, {r2, lr}
+ ldr r3, [r3, #8]
+ subs r4, r4, r2
+ sbcs r5, r5, lr
+ sbcs r3, r1, r3
+ sbc r1, r12, #0
+ tst r1, #1
+ stmeq r0, {r4, r5}
+ streq r3, [r0, #8]
+ pop {r4, r5, r11, lr}
+ mov pc, lr
+.Lfunc_end44:
+ .size mcl_fp_add3L, .Lfunc_end44-mcl_fp_add3L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_addNF3L
+ .align 2
+ .type mcl_fp_addNF3L,%function
+mcl_fp_addNF3L: @ @mcl_fp_addNF3L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r11, lr}
+ push {r4, r5, r6, r7, r11, lr}
+ ldm r1, {r12, lr}
+ ldr r1, [r1, #8]
+ ldm r2, {r4, r5}
+ ldr r2, [r2, #8]
+ adds r4, r4, r12
+ adcs r5, r5, lr
+ adc r7, r2, r1
+ ldm r3, {r2, r12, lr}
+ subs r2, r4, r2
+ sbcs r3, r5, r12
+ sbc r1, r7, lr
+ asr r6, r1, #31
+ cmp r6, #0
+ movlt r2, r4
+ movlt r3, r5
+ movlt r1, r7
+ stm r0, {r2, r3}
+ str r1, [r0, #8]
+ pop {r4, r5, r6, r7, r11, lr}
+ mov pc, lr
+.Lfunc_end45:
+ .size mcl_fp_addNF3L, .Lfunc_end45-mcl_fp_addNF3L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_sub3L
+ .align 2
+ .type mcl_fp_sub3L,%function
+mcl_fp_sub3L: @ @mcl_fp_sub3L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, lr}
+ push {r4, r5, r6, lr}
+ ldm r2, {r12, lr}
+ ldr r4, [r2, #8]
+ ldm r1, {r2, r5, r6}
+ subs r1, r2, r12
+ sbcs r2, r5, lr
+ sbcs r12, r6, r4
+ mov r6, #0
+ sbc r6, r6, #0
+ stm r0, {r1, r2, r12}
+ tst r6, #1
+ popeq {r4, r5, r6, lr}
+ moveq pc, lr
+ ldr r6, [r3]
+ ldr r5, [r3, #4]
+ ldr r3, [r3, #8]
+ adds r1, r6, r1
+ adcs r2, r5, r2
+ adc r3, r3, r12
+ stm r0, {r1, r2, r3}
+ pop {r4, r5, r6, lr}
+ mov pc, lr
+.Lfunc_end46:
+ .size mcl_fp_sub3L, .Lfunc_end46-mcl_fp_sub3L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_subNF3L
+ .align 2
+ .type mcl_fp_subNF3L,%function
+mcl_fp_subNF3L: @ @mcl_fp_subNF3L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r11, lr}
+ push {r4, r5, r6, r7, r11, lr}
+ ldm r2, {r12, lr}
+ ldr r2, [r2, #8]
+ ldm r1, {r4, r5}
+ ldr r1, [r1, #8]
+ subs r4, r4, r12
+ sbcs r7, r5, lr
+ sbc r1, r1, r2
+ ldm r3, {r2, r12, lr}
+ asr r6, r1, #31
+ adds r2, r4, r2
+ adcs r3, r7, r12
+ adc r5, r1, lr
+ cmp r6, #0
+ movge r2, r4
+ movge r3, r7
+ movge r5, r1
+ stm r0, {r2, r3, r5}
+ pop {r4, r5, r6, r7, r11, lr}
+ mov pc, lr
+.Lfunc_end47:
+ .size mcl_fp_subNF3L, .Lfunc_end47-mcl_fp_subNF3L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_add3L
+ .align 2
+ .type mcl_fpDbl_add3L,%function
+mcl_fpDbl_add3L: @ @mcl_fpDbl_add3L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ ldm r1, {r12, lr}
+ ldr r7, [r2]
+ ldr r11, [r1, #8]
+ ldr r9, [r1, #12]
+ ldr r10, [r1, #16]
+ ldr r8, [r1, #20]
+ ldmib r2, {r1, r5, r6}
+ ldr r4, [r2, #16]
+ ldr r2, [r2, #20]
+ adds r7, r7, r12
+ adcs r1, r1, lr
+ str r7, [r0]
+ str r1, [r0, #4]
+ adcs r1, r5, r11
+ ldr r5, [r3]
+ adcs r7, r6, r9
+ str r1, [r0, #8]
+ mov r1, #0
+ adcs r6, r4, r10
+ ldr r4, [r3, #4]
+ ldr r3, [r3, #8]
+ adcs r2, r2, r8
+ adc r1, r1, #0
+ subs r5, r7, r5
+ sbcs r4, r6, r4
+ sbcs r3, r2, r3
+ sbc r1, r1, #0
+ ands r1, r1, #1
+ movne r5, r7
+ movne r4, r6
+ movne r3, r2
+ str r5, [r0, #12]
+ str r4, [r0, #16]
+ str r3, [r0, #20]
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end48:
+ .size mcl_fpDbl_add3L, .Lfunc_end48-mcl_fpDbl_add3L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_sub3L
+ .align 2
+ .type mcl_fpDbl_sub3L,%function
+mcl_fpDbl_sub3L: @ @mcl_fpDbl_sub3L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ ldm r2, {r12, lr}
+ ldr r7, [r1]
+ ldr r11, [r2, #8]
+ ldr r9, [r2, #12]
+ ldr r10, [r2, #16]
+ ldr r8, [r2, #20]
+ ldmib r1, {r2, r5, r6}
+ ldr r4, [r1, #16]
+ ldr r1, [r1, #20]
+ subs r7, r7, r12
+ sbcs r2, r2, lr
+ str r7, [r0]
+ str r2, [r0, #4]
+ sbcs r2, r5, r11
+ ldr r5, [r3]
+ sbcs r7, r6, r9
+ str r2, [r0, #8]
+ mov r2, #0
+ sbcs r6, r4, r10
+ ldr r4, [r3, #4]
+ ldr r3, [r3, #8]
+ sbcs r1, r1, r8
+ sbc r2, r2, #0
+ adds r5, r7, r5
+ adcs r4, r6, r4
+ adc r3, r1, r3
+ ands r2, r2, #1
+ moveq r5, r7
+ moveq r4, r6
+ moveq r3, r1
+ str r5, [r0, #12]
+ str r4, [r0, #16]
+ str r3, [r0, #20]
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end49:
+ .size mcl_fpDbl_sub3L, .Lfunc_end49-mcl_fpDbl_sub3L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mulUnitPre4L
+ .align 2
+ .type mcl_fp_mulUnitPre4L,%function
+mcl_fp_mulUnitPre4L: @ @mcl_fp_mulUnitPre4L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r11, lr}
+ push {r4, r5, r6, r7, r11, lr}
+ ldr r12, [r1]
+ ldmib r1, {r3, lr}
+ ldr r1, [r1, #12]
+ umull r4, r6, r12, r2
+ umull r7, r12, lr, r2
+ str r4, [r0]
+ mov r5, r6
+ mov r4, r7
+ umlal r5, r4, r3, r2
+ str r5, [r0, #4]
+ str r4, [r0, #8]
+ umull r5, lr, r1, r2
+ umull r1, r4, r3, r2
+ adds r1, r6, r1
+ adcs r1, r4, r7
+ adcs r1, r12, r5
+ str r1, [r0, #12]
+ adc r1, lr, #0
+ str r1, [r0, #16]
+ pop {r4, r5, r6, r7, r11, lr}
+ mov pc, lr
+.Lfunc_end50:
+ .size mcl_fp_mulUnitPre4L, .Lfunc_end50-mcl_fp_mulUnitPre4L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_mulPre4L
+ .align 2
+ .type mcl_fpDbl_mulPre4L,%function
+mcl_fpDbl_mulPre4L: @ @mcl_fpDbl_mulPre4L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #40
+ sub sp, sp, #40
+ mov lr, r2
+ ldr r11, [r1]
+ ldr r4, [lr]
+ ldmib r1, {r8, r12}
+ ldr r3, [r1, #12]
+ umull r2, r7, r11, r4
+ umull r6, r9, r8, r4
+ str r12, [sp] @ 4-byte Spill
+ adds r6, r7, r6
+ str r2, [sp, #36] @ 4-byte Spill
+ mov r2, r3
+ umull r6, r10, r12, r4
+ adcs r5, r9, r6
+ umlal r7, r6, r8, r4
+ umull r5, r9, r3, r4
+ ldr r3, [sp, #36] @ 4-byte Reload
+ ldr r4, [lr, #4]
+ adcs r10, r10, r5
+ str r3, [r0]
+ adc r3, r9, #0
+ str r3, [sp, #24] @ 4-byte Spill
+ umull r5, r3, r11, r4
+ adds r7, r5, r7
+ str r3, [sp, #32] @ 4-byte Spill
+ str r7, [sp, #36] @ 4-byte Spill
+ umull r7, r3, r8, r4
+ str r3, [sp, #28] @ 4-byte Spill
+ adcs r3, r7, r6
+ umull r7, r9, r12, r4
+ mov r12, r2
+ ldr r6, [sp, #32] @ 4-byte Reload
+ adcs r7, r7, r10
+ umull r5, r10, r2, r4
+ ldr r2, [sp, #24] @ 4-byte Reload
+ mov r4, #0
+ adcs r5, r5, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ adc r4, r4, #0
+ adds r6, r3, r6
+ adcs r7, r7, r2
+ ldr r2, [lr, #12]
+ str r7, [sp, #24] @ 4-byte Spill
+ adcs r7, r5, r9
+ str r7, [sp, #20] @ 4-byte Spill
+ adc r7, r4, r10
+ ldr r4, [lr, #8]
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [sp, #36] @ 4-byte Reload
+ str r7, [r0, #4]
+ umull r5, r7, r11, r4
+ adds r5, r5, r6
+ str r7, [sp, #12] @ 4-byte Spill
+ str r5, [r0, #8]
+ ldm r1, {r11, lr}
+ ldr r5, [r1, #8]
+ ldr r1, [r1, #12]
+ ldr r3, [sp, #24] @ 4-byte Reload
+ umull r6, r7, r1, r2
+ umull r10, r1, r5, r2
+ str r1, [sp, #32] @ 4-byte Spill
+ umull r5, r1, lr, r2
+ str r6, [sp, #8] @ 4-byte Spill
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #28] @ 4-byte Spill
+ umull r6, r1, r11, r2
+ umull r2, r11, r12, r4
+ str r1, [sp, #4] @ 4-byte Spill
+ ldr r1, [sp] @ 4-byte Reload
+ umull lr, r12, r1, r4
+ umull r9, r1, r8, r4
+ ldr r4, [sp, #20] @ 4-byte Reload
+ mov r8, #0
+ adcs r3, r9, r3
+ adcs r4, lr, r4
+ adcs r2, r2, r7
+ ldr r7, [sp, #12] @ 4-byte Reload
+ adc lr, r8, #0
+ adds r3, r3, r7
+ adcs r1, r4, r1
+ adcs r2, r2, r12
+ adc r4, lr, r11
+ adds r3, r6, r3
+ ldr r6, [sp, #4] @ 4-byte Reload
+ str r3, [r0, #12]
+ ldr r3, [sp, #8] @ 4-byte Reload
+ adcs r1, r5, r1
+ adcs r2, r10, r2
+ adcs r3, r3, r4
+ adc r7, r8, #0
+ adds r1, r1, r6
+ str r1, [r0, #16]
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r1, r2, r1
+ str r1, [r0, #20]
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r1, r3, r1
+ str r1, [r0, #24]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adc r1, r7, r1
+ str r1, [r0, #28]
+ add sp, sp, #40
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end51:
+ .size mcl_fpDbl_mulPre4L, .Lfunc_end51-mcl_fpDbl_mulPre4L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_sqrPre4L
+ .align 2
+ .type mcl_fpDbl_sqrPre4L,%function
+mcl_fpDbl_sqrPre4L: @ @mcl_fpDbl_sqrPre4L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #16
+ sub sp, sp, #16
+ ldm r1, {r2, r3, r12}
+ ldr r8, [r1, #12]
+ umull r4, r6, r2, r2
+ umull r11, lr, r12, r2
+ str r4, [r0]
+ umull r10, r4, r8, r2
+ mov r7, r11
+ mov r5, r6
+ str lr, [sp, #12] @ 4-byte Spill
+ str r4, [sp, #8] @ 4-byte Spill
+ umull r4, r9, r3, r2
+ umlal r5, r7, r3, r2
+ adds r2, r6, r4
+ adcs r2, r9, r11
+ ldr r2, [sp, #8] @ 4-byte Reload
+ adcs r10, lr, r10
+ adc r2, r2, #0
+ adds r4, r4, r5
+ str r2, [sp] @ 4-byte Spill
+ umull r6, r2, r3, r3
+ str r4, [sp, #8] @ 4-byte Spill
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [sp] @ 4-byte Reload
+ adcs r5, r6, r7
+ umull r6, r7, r12, r3
+ adcs lr, r6, r10
+ umull r4, r10, r8, r3
+ adcs r3, r4, r2
+ ldr r2, [sp, #4] @ 4-byte Reload
+ mov r4, #0
+ adc r4, r4, #0
+ adds r5, r5, r9
+ adcs r9, lr, r2
+ adcs r2, r3, r7
+ ldr r3, [sp, #8] @ 4-byte Reload
+ adc r4, r4, r10
+ adds r5, r11, r5
+ str r2, [sp, #4] @ 4-byte Spill
+ umull r2, r10, r8, r12
+ umull lr, r8, r12, r12
+ adcs r6, r6, r9
+ stmib r0, {r3, r5}
+ mov r5, #0
+ ldr r3, [sp, #4] @ 4-byte Reload
+ adcs r3, lr, r3
+ adcs r2, r2, r4
+ ldr r4, [sp, #12] @ 4-byte Reload
+ adc r5, r5, #0
+ adds r6, r6, r4
+ adcs r11, r3, r7
+ adcs lr, r2, r8
+ adc r8, r5, r10
+ ldr r5, [r1]
+ ldmib r1, {r4, r7}
+ ldr r1, [r1, #12]
+ umull r12, r2, r1, r1
+ umull r3, r9, r7, r1
+ umull r7, r10, r4, r1
+ str r2, [sp, #12] @ 4-byte Spill
+ umull r4, r2, r5, r1
+ adds r1, r4, r6
+ adcs r4, r7, r11
+ str r1, [r0, #12]
+ mov r7, #0
+ adcs r3, r3, lr
+ adcs r1, r12, r8
+ adc r7, r7, #0
+ adds r2, r4, r2
+ str r2, [r0, #16]
+ adcs r2, r3, r10
+ adcs r1, r1, r9
+ str r2, [r0, #20]
+ str r1, [r0, #24]
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adc r1, r7, r1
+ str r1, [r0, #28]
+ add sp, sp, #16
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end52:
+ .size mcl_fpDbl_sqrPre4L, .Lfunc_end52-mcl_fpDbl_sqrPre4L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mont4L
+ .align 2
+ .type mcl_fp_mont4L,%function
+mcl_fp_mont4L: @ @mcl_fp_mont4L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #76
+ sub sp, sp, #76
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r2, #8]
+ ldr r9, [r2]
+ ldr r8, [r2, #4]
+ ldr r6, [r3, #-4]
+ ldr r11, [r1, #8]
+ ldr r10, [r1, #12]
+ ldr r7, [r3, #8]
+ ldr r5, [r3, #4]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r2, #12]
+ ldr r2, [r1, #4]
+ str r6, [sp, #44] @ 4-byte Spill
+ str r7, [sp, #40] @ 4-byte Spill
+ str r5, [sp, #52] @ 4-byte Spill
+ str r11, [sp, #60] @ 4-byte Spill
+ str r10, [sp, #56] @ 4-byte Spill
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r1]
+ ldr r1, [r3]
+ str r2, [sp, #72] @ 4-byte Spill
+ ldr r3, [r3, #12]
+ umull r4, r2, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ str r1, [sp, #48] @ 4-byte Spill
+ mul r0, r4, r6
+ str r4, [sp, #24] @ 4-byte Spill
+ mov r4, r5
+ umull lr, r6, r0, r7
+ umull r7, r12, r0, r1
+ str r7, [sp, #20] @ 4-byte Spill
+ ldr r7, [sp, #72] @ 4-byte Reload
+ str r6, [sp, #16] @ 4-byte Spill
+ mov r6, r12
+ str lr, [sp, #8] @ 4-byte Spill
+ umlal r6, lr, r0, r5
+ umull r5, r1, r10, r9
+ str r1, [sp, #68] @ 4-byte Spill
+ str r5, [sp, #12] @ 4-byte Spill
+ umull r1, r10, r11, r9
+ umull r11, r5, r7, r9
+ adds r7, r2, r11
+ adcs r5, r5, r1
+ ldr r5, [sp, #12] @ 4-byte Reload
+ adcs r11, r10, r5
+ ldr r5, [sp, #68] @ 4-byte Reload
+ str r3, [sp, #68] @ 4-byte Spill
+ adc r5, r5, #0
+ str r5, [sp, #12] @ 4-byte Spill
+ umull r5, r7, r0, r3
+ umull r10, r3, r0, r4
+ ldr r4, [sp, #24] @ 4-byte Reload
+ adds r0, r12, r10
+ mov r12, #0
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r0, r3, r0
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #20] @ 4-byte Reload
+ adc r3, r7, #0
+ ldr r7, [sp, #72] @ 4-byte Reload
+ adds r4, r5, r4
+ umlal r2, r1, r7, r9
+ adcs r2, r6, r2
+ adcs r1, lr, r1
+ str r2, [sp, #24] @ 4-byte Spill
+ adcs r9, r0, r11
+ ldr r0, [sp, #12] @ 4-byte Reload
+ str r1, [sp, #20] @ 4-byte Spill
+ adcs r6, r3, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ mov r3, r7
+ adc r10, r12, #0
+ umull r2, r12, r8, r7
+ ldr r7, [sp, #64] @ 4-byte Reload
+ umull r5, r4, r8, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ umull r1, lr, r8, r0
+ umull r11, r0, r8, r7
+ adds r2, r0, r2
+ adcs r2, r12, r1
+ umlal r0, r1, r8, r3
+ ldr r3, [sp, #24] @ 4-byte Reload
+ ldr r8, [sp, #48] @ 4-byte Reload
+ adcs r2, lr, r5
+ adc r5, r4, #0
+ adds r7, r3, r11
+ ldr r3, [sp, #20] @ 4-byte Reload
+ ldr r11, [sp, #40] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ adcs r0, r9, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #20] @ 4-byte Spill
+ adcs r0, r6, r2
+ str r0, [sp, #16] @ 4-byte Spill
+ adcs r0, r10, r5
+ ldr r10, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #12] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #8] @ 4-byte Spill
+ mul r5, r7, r10
+ umull r6, r0, r5, r11
+ str r0, [sp] @ 4-byte Spill
+ umull r0, r3, r5, r8
+ mov r4, r6
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ mov r2, r3
+ umlal r2, r4, r5, r1
+ umull r9, r12, r5, r0
+ umull lr, r0, r5, r1
+ adds r3, r3, lr
+ adcs r0, r0, r6
+ ldr r3, [sp, #4] @ 4-byte Reload
+ ldr r0, [sp] @ 4-byte Reload
+ adcs r0, r0, r9
+ adc r1, r12, #0
+ adds r3, r3, r7
+ ldr r12, [sp, #64] @ 4-byte Reload
+ ldr r3, [sp, #24] @ 4-byte Reload
+ adcs r2, r2, r3
+ ldr r3, [sp, #28] @ 4-byte Reload
+ str r2, [sp, #24] @ 4-byte Spill
+ ldr r2, [sp, #20] @ 4-byte Reload
+ umull r9, r7, r3, r12
+ adcs r2, r4, r2
+ str r2, [sp, #20] @ 4-byte Spill
+ ldr r2, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #72] @ 4-byte Reload
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ umull r6, r5, r3, r0
+ umull r0, r4, r3, r1
+ umull r1, lr, r3, r2
+ adds r1, r7, r1
+ adcs r1, lr, r0
+ umlal r7, r0, r3, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ adcs r1, r4, r6
+ adc r6, r5, #0
+ adds r3, r2, r9
+ ldr r2, [sp, #20] @ 4-byte Reload
+ adcs r2, r2, r7
+ str r2, [sp, #28] @ 4-byte Spill
+ ldr r2, [sp, #16] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r0, r0, r6
+ mul r6, r3, r10
+ str r0, [sp, #16] @ 4-byte Spill
+ mov r0, #0
+ umull r7, r9, r6, r11
+ umull r10, r4, r6, r8
+ adc r0, r0, #0
+ mov r2, r4
+ mov r5, r7
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ umlal r2, r5, r6, r1
+ umull r8, r12, r6, r0
+ umull lr, r0, r6, r1
+ adds r6, r4, lr
+ adcs r0, r0, r7
+ adcs r0, r9, r8
+ adc r1, r12, #0
+ adds r3, r10, r3
+ ldr r3, [sp, #28] @ 4-byte Reload
+ adcs r2, r2, r3
+ ldr r3, [sp, #32] @ 4-byte Reload
+ str r2, [sp, #28] @ 4-byte Spill
+ ldr r2, [sp, #24] @ 4-byte Reload
+ adcs r8, r5, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ ldr r5, [sp, #64] @ 4-byte Reload
+ adcs r9, r0, r2
+ ldr r0, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #72] @ 4-byte Reload
+ umull lr, r7, r3, r5
+ ldr r5, [sp, #52] @ 4-byte Reload
+ adcs r11, r1, r0
+ ldr r0, [sp, #12] @ 4-byte Reload
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ umull r6, r10, r3, r0
+ umull r0, r4, r3, r1
+ umull r1, r12, r3, r2
+ adds r1, r7, r1
+ adcs r1, r12, r0
+ umlal r7, r0, r3, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ ldr r12, [sp, #68] @ 4-byte Reload
+ adcs r1, r4, r6
+ ldr r4, [sp, #40] @ 4-byte Reload
+ adc r6, r10, #0
+ adds lr, r2, lr
+ ldr r2, [sp, #48] @ 4-byte Reload
+ adcs r10, r8, r7
+ adcs r0, r9, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ adcs r0, r11, r1
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r8, r0, r6
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ mul r6, lr, r0
+ umull r1, r3, r6, r5
+ umull r11, r7, r6, r2
+ umull r0, r9, r6, r4
+ adds r1, r7, r1
+ adcs r1, r3, r0
+ umlal r7, r0, r6, r5
+ umull r1, r3, r6, r12
+ adcs r1, r9, r1
+ mov r9, r5
+ adc r5, r3, #0
+ adds r3, r11, lr
+ adcs r3, r7, r10
+ ldr r7, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r7
+ adcs lr, r5, r8
+ ldr r5, [sp, #60] @ 4-byte Reload
+ adc r8, r5, #0
+ subs r6, r3, r2
+ sbcs r5, r0, r9
+ sbcs r4, r1, r4
+ sbcs r7, lr, r12
+ sbc r2, r8, #0
+ ands r2, r2, #1
+ movne r5, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ movne r6, r3
+ movne r4, r1
+ cmp r2, #0
+ movne r7, lr
+ str r6, [r0]
+ str r5, [r0, #4]
+ str r4, [r0, #8]
+ str r7, [r0, #12]
+ add sp, sp, #76
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end53:
+ .size mcl_fp_mont4L, .Lfunc_end53-mcl_fp_mont4L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montNF4L
+ .align 2
+ .type mcl_fp_montNF4L,%function
+mcl_fp_montNF4L: @ @mcl_fp_montNF4L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #140
+ sub sp, sp, #140
+ mov r10, r3
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr lr, [r1]
+ ldmib r1, {r4, r8, r12}
+ ldr r3, [r2]
+ ldr r1, [r2, #4]
+ ldr r0, [r2, #8]
+ ldr r2, [r2, #12]
+ umull r6, r5, r2, r8
+ str r5, [sp, #124] @ 4-byte Spill
+ umull r5, r7, r2, lr
+ str r6, [sp, #112] @ 4-byte Spill
+ str r5, [sp, #128] @ 4-byte Spill
+ mov r5, r6
+ mov r6, r7
+ str r7, [sp, #108] @ 4-byte Spill
+ umlal r6, r5, r2, r4
+ str r5, [sp, #120] @ 4-byte Spill
+ umull r7, r5, r0, r8
+ str r6, [sp, #116] @ 4-byte Spill
+ str r5, [sp, #84] @ 4-byte Spill
+ umull r5, r6, r0, lr
+ str r7, [sp, #72] @ 4-byte Spill
+ str r5, [sp, #88] @ 4-byte Spill
+ str r6, [sp, #68] @ 4-byte Spill
+ mov r5, r6
+ mov r6, r7
+ umlal r5, r6, r0, r4
+ str r5, [sp, #76] @ 4-byte Spill
+ str r6, [sp, #80] @ 4-byte Spill
+ umull r6, r5, r1, r8
+ str r5, [sp, #44] @ 4-byte Spill
+ umull r5, r7, r1, lr
+ str r6, [sp, #32] @ 4-byte Spill
+ str r5, [sp, #48] @ 4-byte Spill
+ mov r5, r6
+ mov r6, r7
+ str r7, [sp, #28] @ 4-byte Spill
+ umlal r6, r5, r1, r4
+ str r5, [sp, #40] @ 4-byte Spill
+ umull r9, r5, r8, r3
+ str r6, [sp, #36] @ 4-byte Spill
+ str r5, [sp, #136] @ 4-byte Spill
+ umull r6, r5, lr, r3
+ mov r8, r9
+ str r6, [sp, #4] @ 4-byte Spill
+ umull r11, r6, r2, r12
+ mov lr, r5
+ str r6, [sp, #104] @ 4-byte Spill
+ umull r7, r6, r2, r4
+ umlal lr, r8, r4, r3
+ str r11, [sp, #100] @ 4-byte Spill
+ str r6, [sp, #96] @ 4-byte Spill
+ umull r6, r2, r0, r12
+ str r7, [sp, #92] @ 4-byte Spill
+ str r6, [sp, #60] @ 4-byte Spill
+ str r2, [sp, #64] @ 4-byte Spill
+ umull r6, r2, r0, r4
+ str r2, [sp, #56] @ 4-byte Spill
+ umull r2, r0, r1, r12
+ str r6, [sp, #52] @ 4-byte Spill
+ str r2, [sp, #20] @ 4-byte Spill
+ str r0, [sp, #24] @ 4-byte Spill
+ umull r2, r0, r1, r4
+ str r2, [sp, #12] @ 4-byte Spill
+ umull r2, r6, r4, r3
+ str r0, [sp, #16] @ 4-byte Spill
+ umull r0, r1, r12, r3
+ ldr r4, [r10, #4]
+ adds r2, r5, r2
+ ldr r5, [sp, #4] @ 4-byte Reload
+ adcs r2, r6, r9
+ ldr r9, [r10, #8]
+ ldr r2, [sp, #136] @ 4-byte Reload
+ str r4, [sp, #136] @ 4-byte Spill
+ adcs r12, r2, r0
+ ldr r2, [r10, #-4]
+ adc r0, r1, #0
+ str r0, [sp] @ 4-byte Spill
+ ldr r0, [r10]
+ mul r1, r5, r2
+ mov r7, r2
+ umull r3, r11, r1, r0
+ str r0, [sp, #8] @ 4-byte Spill
+ mov r6, r0
+ umull r2, r0, r1, r9
+ adds r3, r3, r5
+ umull r3, r5, r1, r4
+ adcs r3, r3, lr
+ ldr lr, [r10, #12]
+ adcs r2, r2, r8
+ umull r4, r8, r1, lr
+ adcs r1, r4, r12
+ ldr r4, [sp] @ 4-byte Reload
+ adc r4, r4, #0
+ adds r3, r3, r11
+ adcs r2, r2, r5
+ adcs r12, r1, r0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r1, r4, r8
+ ldr r4, [sp, #12] @ 4-byte Reload
+ adds r4, r0, r4
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r4, [sp, #16] @ 4-byte Reload
+ adcs r4, r4, r0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ ldr r4, [sp, #20] @ 4-byte Reload
+ adcs r4, r0, r4
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r5, r0, #0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adds r3, r0, r3
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r2, r0, r2
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r12
+ mov r12, r7
+ adcs r8, r4, r1
+ ldr r1, [sp, #136] @ 4-byte Reload
+ adc r10, r5, #0
+ mul r5, r3, r7
+ umull r7, r11, r5, r6
+ adds r3, r7, r3
+ umull r3, r7, r5, r1
+ adcs r2, r3, r2
+ umull r3, r4, r5, r9
+ adcs r0, r3, r0
+ umull r3, r6, r5, lr
+ adcs r3, r3, r8
+ ldr r8, [sp, #8] @ 4-byte Reload
+ adc r5, r10, #0
+ adds r2, r2, r11
+ adcs r0, r0, r7
+ adcs r3, r3, r4
+ ldr r4, [sp, #68] @ 4-byte Reload
+ adc r7, r5, r6
+ ldr r5, [sp, #52] @ 4-byte Reload
+ ldr r6, [sp, #88] @ 4-byte Reload
+ adds r4, r4, r5
+ ldr r5, [sp, #56] @ 4-byte Reload
+ ldr r4, [sp, #72] @ 4-byte Reload
+ adcs r4, r5, r4
+ ldr r5, [sp, #60] @ 4-byte Reload
+ ldr r4, [sp, #84] @ 4-byte Reload
+ adcs r4, r4, r5
+ ldr r5, [sp, #64] @ 4-byte Reload
+ adc r5, r5, #0
+ adds r2, r6, r2
+ ldr r6, [sp, #76] @ 4-byte Reload
+ adcs r0, r6, r0
+ ldr r6, [sp, #80] @ 4-byte Reload
+ adcs r3, r6, r3
+ adcs r6, r4, r7
+ adc r10, r5, #0
+ mul r5, r2, r12
+ umull r7, r11, r5, r8
+ adds r2, r7, r2
+ umull r2, r7, r5, r1
+ adcs r0, r2, r0
+ umull r2, r4, r5, r9
+ adcs r2, r2, r3
+ umull r3, r1, r5, lr
+ adcs r3, r3, r6
+ ldr r6, [sp, #128] @ 4-byte Reload
+ adc r5, r10, #0
+ adds r0, r0, r11
+ adcs r2, r2, r7
+ adcs r3, r3, r4
+ ldr r4, [sp, #108] @ 4-byte Reload
+ adc r1, r5, r1
+ ldr r5, [sp, #92] @ 4-byte Reload
+ adds r4, r4, r5
+ ldr r5, [sp, #96] @ 4-byte Reload
+ ldr r4, [sp, #112] @ 4-byte Reload
+ adcs r4, r5, r4
+ ldr r5, [sp, #100] @ 4-byte Reload
+ ldr r4, [sp, #124] @ 4-byte Reload
+ adcs r4, r4, r5
+ ldr r5, [sp, #104] @ 4-byte Reload
+ adc r5, r5, #0
+ adds r0, r6, r0
+ ldr r6, [sp, #116] @ 4-byte Reload
+ adcs r2, r6, r2
+ ldr r6, [sp, #120] @ 4-byte Reload
+ adcs r3, r6, r3
+ adcs r11, r4, r1
+ adc r10, r5, #0
+ mul r5, r0, r12
+ umull r7, r1, r5, r8
+ adds r0, r7, r0
+ ldr r7, [sp, #136] @ 4-byte Reload
+ umull r0, r12, r5, r9
+ umull r6, r4, r5, r7
+ adcs r2, r6, r2
+ adcs r0, r0, r3
+ umull r3, r6, r5, lr
+ adcs r3, r3, r11
+ adc r5, r10, #0
+ adds r1, r2, r1
+ adcs r0, r0, r4
+ adcs r2, r3, r12
+ adc r3, r5, r6
+ subs r4, r1, r8
+ sbcs r7, r0, r7
+ sbcs r6, r2, r9
+ sbc r5, r3, lr
+ cmp r5, #0
+ movlt r7, r0
+ ldr r0, [sp, #132] @ 4-byte Reload
+ movlt r4, r1
+ movlt r6, r2
+ cmp r5, #0
+ movlt r5, r3
+ stm r0, {r4, r7}
+ str r6, [r0, #8]
+ str r5, [r0, #12]
+ add sp, sp, #140
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end54:
+ .size mcl_fp_montNF4L, .Lfunc_end54-mcl_fp_montNF4L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montRed4L
+ .align 2
+ .type mcl_fp_montRed4L,%function
+mcl_fp_montRed4L: @ @mcl_fp_montRed4L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #60
+ sub sp, sp, #60
+ ldr r7, [r1, #4]
+ ldr r6, [r2, #-4]
+ ldr r10, [r1]
+ ldr r3, [r2, #8]
+ ldr r8, [r2]
+ ldr r12, [r2, #4]
+ ldr r2, [r2, #12]
+ str r0, [sp, #52] @ 4-byte Spill
+ str r7, [sp, #28] @ 4-byte Spill
+ ldr r7, [r1, #8]
+ str r6, [sp, #56] @ 4-byte Spill
+ str r3, [sp, #40] @ 4-byte Spill
+ str r2, [sp, #36] @ 4-byte Spill
+ str r8, [sp, #32] @ 4-byte Spill
+ str r7, [sp, #48] @ 4-byte Spill
+ ldr r7, [r1, #12]
+ str r7, [sp, #44] @ 4-byte Spill
+ mul r7, r10, r6
+ umull r6, r5, r7, r3
+ str r5, [sp, #20] @ 4-byte Spill
+ mov r5, r3
+ umull r4, r3, r7, r8
+ mov lr, r6
+ str r4, [sp, #24] @ 4-byte Spill
+ umull r9, r4, r7, r2
+ umull r11, r2, r7, r12
+ mov r0, r3
+ adds r3, r3, r11
+ umlal r0, lr, r7, r12
+ adcs r2, r2, r6
+ ldr r6, [sp, #56] @ 4-byte Reload
+ ldr r2, [sp, #20] @ 4-byte Reload
+ adcs r2, r2, r9
+ str r2, [sp, #20] @ 4-byte Spill
+ adc r2, r4, #0
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [sp, #24] @ 4-byte Reload
+ adds r4, r10, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ add r10, r1, #16
+ adcs r11, r2, r0
+ mul r4, r11, r6
+ umull r9, r0, r4, r5
+ str r0, [sp, #24] @ 4-byte Spill
+ umull r0, r2, r4, r8
+ mov r5, r9
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r1, #28]
+ mov r7, r2
+ umlal r7, r5, r4, r12
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r1, r8, r10}
+ ldr r3, [sp, #48] @ 4-byte Reload
+ adcs r0, r3, lr
+ ldr r3, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r3, r3, r0
+ ldr r0, [sp, #16] @ 4-byte Reload
+ str r3, [sp, #48] @ 4-byte Spill
+ adcs r1, r1, r0
+ adcs r0, r8, #0
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r8, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #20] @ 4-byte Spill
+ adcs r0, r10, #0
+ ldr r10, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ umull r1, lr, r4, r10
+ adcs r0, r0, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #8] @ 4-byte Spill
+ umull r3, r0, r4, r12
+ adds r3, r2, r3
+ ldr r2, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #40] @ 4-byte Reload
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp] @ 4-byte Spill
+ ldr r0, [sp, #4] @ 4-byte Reload
+ adc r1, lr, #0
+ adds r2, r2, r11
+ adcs r11, r7, r0
+ mul r3, r11, r6
+ umull r2, r0, r3, r9
+ str r0, [sp, #24] @ 4-byte Spill
+ umull r0, r6, r3, r8
+ mov r7, r2
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ mov r4, r6
+ umlal r4, r7, r3, r12
+ adcs r0, r5, r0
+ ldr r5, [sp] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r1, r0
+ umull r1, r5, r3, r10
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #8] @ 4-byte Spill
+ umull lr, r0, r3, r12
+ adds r3, r6, lr
+ mov lr, r8
+ adcs r0, r0, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ ldr r3, [sp, #44] @ 4-byte Reload
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ adc r1, r5, #0
+ adds r2, r2, r11
+ ldr r2, [sp, #48] @ 4-byte Reload
+ adcs r2, r4, r2
+ adcs r3, r7, r3
+ str r3, [sp, #48] @ 4-byte Spill
+ ldr r3, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r3
+ mov r3, r9
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r11, r1, r0
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ mul r5, r2, r0
+ umull r4, r0, r5, r12
+ umull r8, r6, r5, lr
+ adds r4, r6, r4
+ umull r1, r4, r5, r3
+ adcs r0, r0, r1
+ umlal r6, r1, r5, r12
+ umull r0, r7, r5, r10
+ adcs r0, r4, r0
+ ldr r4, [sp, #44] @ 4-byte Reload
+ adc r5, r7, #0
+ adds r2, r8, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ adcs r2, r6, r2
+ adcs r1, r1, r4
+ ldr r4, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r11
+ adcs r9, r5, r4
+ ldr r4, [sp, #24] @ 4-byte Reload
+ adc r8, r4, #0
+ subs r6, r2, lr
+ sbcs r5, r1, r12
+ sbcs r4, r0, r3
+ sbcs r7, r9, r10
+ sbc r3, r8, #0
+ ands r3, r3, #1
+ movne r4, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ movne r6, r2
+ movne r5, r1
+ cmp r3, #0
+ movne r7, r9
+ str r6, [r0]
+ str r5, [r0, #4]
+ str r4, [r0, #8]
+ str r7, [r0, #12]
+ add sp, sp, #60
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end55:
+ .size mcl_fp_montRed4L, .Lfunc_end55-mcl_fp_montRed4L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_addPre4L
+ .align 2
+ .type mcl_fp_addPre4L,%function
+mcl_fp_addPre4L: @ @mcl_fp_addPre4L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, lr}
+ push {r4, r5, r6, lr}
+ ldm r1, {r3, r12, lr}
+ ldr r1, [r1, #12]
+ ldm r2, {r4, r5, r6}
+ ldr r2, [r2, #12]
+ adds r3, r4, r3
+ adcs r5, r5, r12
+ adcs r6, r6, lr
+ adcs r1, r2, r1
+ stm r0, {r3, r5, r6}
+ str r1, [r0, #12]
+ mov r0, #0
+ adc r0, r0, #0
+ pop {r4, r5, r6, lr}
+ mov pc, lr
+.Lfunc_end56:
+ .size mcl_fp_addPre4L, .Lfunc_end56-mcl_fp_addPre4L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_subPre4L
+ .align 2
+ .type mcl_fp_subPre4L,%function
+mcl_fp_subPre4L: @ @mcl_fp_subPre4L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, lr}
+ push {r4, r5, r6, lr}
+ ldm r2, {r3, r12, lr}
+ ldr r2, [r2, #12]
+ ldm r1, {r4, r5, r6}
+ ldr r1, [r1, #12]
+ subs r3, r4, r3
+ sbcs r5, r5, r12
+ sbcs r6, r6, lr
+ sbcs r1, r1, r2
+ stm r0, {r3, r5, r6}
+ str r1, [r0, #12]
+ mov r0, #0
+ sbc r0, r0, #0
+ and r0, r0, #1
+ pop {r4, r5, r6, lr}
+ mov pc, lr
+.Lfunc_end57:
+ .size mcl_fp_subPre4L, .Lfunc_end57-mcl_fp_subPre4L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_shr1_4L
+ .align 2
+ .type mcl_fp_shr1_4L,%function
+mcl_fp_shr1_4L: @ @mcl_fp_shr1_4L
+ .fnstart
+@ BB#0:
+ .save {r11, lr}
+ push {r11, lr}
+ ldr r3, [r1, #4]
+ ldr r12, [r1]
+ ldr lr, [r1, #12]
+ ldr r2, [r1, #8]
+ lsrs r1, r3, #1
+ lsr r3, r3, #1
+ rrx r12, r12
+ lsrs r1, lr, #1
+ orr r3, r3, r2, lsl #31
+ rrx r1, r2
+ lsr r2, lr, #1
+ str r12, [r0]
+ str r3, [r0, #4]
+ str r1, [r0, #8]
+ str r2, [r0, #12]
+ pop {r11, lr}
+ mov pc, lr
+.Lfunc_end58:
+ .size mcl_fp_shr1_4L, .Lfunc_end58-mcl_fp_shr1_4L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_add4L
+ .align 2
+ .type mcl_fp_add4L,%function
+mcl_fp_add4L: @ @mcl_fp_add4L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r11, lr}
+ push {r4, r5, r6, r7, r11, lr}
+ ldm r1, {r12, lr}
+ ldr r4, [r1, #8]
+ ldr r1, [r1, #12]
+ ldm r2, {r5, r6, r7}
+ ldr r2, [r2, #12]
+ adds r5, r5, r12
+ adcs r6, r6, lr
+ adcs r7, r7, r4
+ stm r0, {r5, r6, r7}
+ adcs r4, r2, r1
+ mov r1, #0
+ ldr r2, [r3]
+ adc lr, r1, #0
+ str r4, [r0, #12]
+ ldmib r3, {r1, r12}
+ ldr r3, [r3, #12]
+ subs r5, r5, r2
+ sbcs r2, r6, r1
+ sbcs r1, r7, r12
+ sbcs r12, r4, r3
+ sbc r3, lr, #0
+ tst r3, #1
+ streq r5, [r0]
+ streq r2, [r0, #4]
+ streq r1, [r0, #8]
+ streq r12, [r0, #12]
+ pop {r4, r5, r6, r7, r11, lr}
+ mov pc, lr
+.Lfunc_end59:
+ .size mcl_fp_add4L, .Lfunc_end59-mcl_fp_add4L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_addNF4L
+ .align 2
+ .type mcl_fp_addNF4L,%function
+mcl_fp_addNF4L: @ @mcl_fp_addNF4L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, lr}
+ push {r4, r5, r6, r7, r8, lr}
+ ldm r1, {r12, lr}
+ ldr r4, [r1, #8]
+ ldr r1, [r1, #12]
+ ldm r2, {r5, r6, r7}
+ ldr r2, [r2, #12]
+ adds r5, r5, r12
+ adcs r6, r6, lr
+ adcs r7, r7, r4
+ adc r8, r2, r1
+ ldm r3, {r2, r4, r12, lr}
+ subs r2, r5, r2
+ sbcs r4, r6, r4
+ sbcs r3, r7, r12
+ sbc r1, r8, lr
+ cmp r1, #0
+ movlt r2, r5
+ movlt r4, r6
+ movlt r3, r7
+ cmp r1, #0
+ movlt r1, r8
+ stm r0, {r2, r4}
+ str r3, [r0, #8]
+ str r1, [r0, #12]
+ pop {r4, r5, r6, r7, r8, lr}
+ mov pc, lr
+.Lfunc_end60:
+ .size mcl_fp_addNF4L, .Lfunc_end60-mcl_fp_addNF4L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_sub4L
+ .align 2
+ .type mcl_fp_sub4L,%function
+mcl_fp_sub4L: @ @mcl_fp_sub4L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, lr}
+ push {r4, r5, r6, r7, r8, lr}
+ ldm r2, {r12, lr}
+ ldr r4, [r2, #8]
+ ldr r5, [r2, #12]
+ ldm r1, {r2, r6, r7}
+ ldr r1, [r1, #12]
+ subs r8, r2, r12
+ sbcs r2, r6, lr
+ str r8, [r0]
+ sbcs r12, r7, r4
+ sbcs lr, r1, r5
+ mov r1, #0
+ sbc r1, r1, #0
+ stmib r0, {r2, r12, lr}
+ tst r1, #1
+ popeq {r4, r5, r6, r7, r8, lr}
+ moveq pc, lr
+ ldm r3, {r1, r4, r5}
+ ldr r3, [r3, #12]
+ adds r1, r1, r8
+ adcs r2, r4, r2
+ adcs r7, r5, r12
+ adc r3, r3, lr
+ stm r0, {r1, r2, r7}
+ str r3, [r0, #12]
+ pop {r4, r5, r6, r7, r8, lr}
+ mov pc, lr
+.Lfunc_end61:
+ .size mcl_fp_sub4L, .Lfunc_end61-mcl_fp_sub4L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_subNF4L
+ .align 2
+ .type mcl_fp_subNF4L,%function
+mcl_fp_subNF4L: @ @mcl_fp_subNF4L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, lr}
+ push {r4, r5, r6, r7, r8, lr}
+ ldm r2, {r12, lr}
+ ldr r4, [r2, #8]
+ ldr r2, [r2, #12]
+ ldm r1, {r5, r6, r7}
+ ldr r1, [r1, #12]
+ subs r5, r5, r12
+ sbcs r6, r6, lr
+ sbcs r8, r7, r4
+ sbc r1, r1, r2
+ ldm r3, {r2, r4, r12, lr}
+ adds r2, r5, r2
+ adcs r4, r6, r4
+ adcs r3, r8, r12
+ adc r7, r1, lr
+ cmp r1, #0
+ movge r2, r5
+ movge r4, r6
+ movge r3, r8
+ cmp r1, #0
+ movge r7, r1
+ stm r0, {r2, r4}
+ str r3, [r0, #8]
+ str r7, [r0, #12]
+ pop {r4, r5, r6, r7, r8, lr}
+ mov pc, lr
+.Lfunc_end62:
+ .size mcl_fp_subNF4L, .Lfunc_end62-mcl_fp_subNF4L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_add4L
+ .align 2
+ .type mcl_fpDbl_add4L,%function
+mcl_fpDbl_add4L: @ @mcl_fpDbl_add4L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #16
+ sub sp, sp, #16
+ ldm r1, {r8, r9, r10, r11}
+ ldr r7, [r1, #16]
+ str r7, [sp] @ 4-byte Spill
+ ldr r7, [r1, #20]
+ str r7, [sp, #4] @ 4-byte Spill
+ ldr r7, [r1, #24]
+ ldr r1, [r1, #28]
+ str r7, [sp, #8] @ 4-byte Spill
+ str r1, [sp, #12] @ 4-byte Spill
+ ldm r2, {r1, r6, r7, r12, lr}
+ ldr r4, [r2, #20]
+ ldr r5, [r2, #24]
+ ldr r2, [r2, #28]
+ adds r1, r1, r8
+ adcs r6, r6, r9
+ adcs r7, r7, r10
+ adcs r12, r12, r11
+ stm r0, {r1, r6, r7, r12}
+ mov r1, #0
+ ldr r7, [sp] @ 4-byte Reload
+ ldr r6, [sp, #4] @ 4-byte Reload
+ adcs r7, lr, r7
+ adcs r6, r4, r6
+ ldr r4, [sp, #8] @ 4-byte Reload
+ adcs r8, r5, r4
+ ldr r5, [sp, #12] @ 4-byte Reload
+ ldr r4, [r3]
+ adcs lr, r2, r5
+ adc r12, r1, #0
+ ldmib r3, {r1, r2, r3}
+ subs r4, r7, r4
+ sbcs r1, r6, r1
+ sbcs r2, r8, r2
+ sbcs r3, lr, r3
+ sbc r5, r12, #0
+ ands r5, r5, #1
+ movne r4, r7
+ movne r1, r6
+ movne r2, r8
+ cmp r5, #0
+ movne r3, lr
+ str r4, [r0, #16]
+ str r1, [r0, #20]
+ str r2, [r0, #24]
+ str r3, [r0, #28]
+ add sp, sp, #16
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end63:
+ .size mcl_fpDbl_add4L, .Lfunc_end63-mcl_fpDbl_add4L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_sub4L
+ .align 2
+ .type mcl_fpDbl_sub4L,%function
+mcl_fpDbl_sub4L: @ @mcl_fpDbl_sub4L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #16
+ sub sp, sp, #16
+ ldm r2, {r8, r9, r10, r11}
+ ldr r7, [r2, #16]
+ str r7, [sp] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ str r7, [sp, #4] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ ldr r2, [r2, #28]
+ str r7, [sp, #8] @ 4-byte Spill
+ str r2, [sp, #12] @ 4-byte Spill
+ ldm r1, {r2, r6, r7, r12, lr}
+ ldr r4, [r1, #20]
+ ldr r5, [r1, #24]
+ ldr r1, [r1, #28]
+ subs r2, r2, r8
+ str r2, [r0]
+ sbcs r2, r6, r9
+ ldr r6, [sp, #4] @ 4-byte Reload
+ str r2, [r0, #4]
+ sbcs r2, r7, r10
+ ldr r7, [sp] @ 4-byte Reload
+ str r2, [r0, #8]
+ sbcs r2, r12, r11
+ str r2, [r0, #12]
+ mov r2, #0
+ sbcs r7, lr, r7
+ sbcs r6, r4, r6
+ ldr r4, [sp, #8] @ 4-byte Reload
+ sbcs r5, r5, r4
+ ldr r4, [sp, #12] @ 4-byte Reload
+ sbcs lr, r1, r4
+ ldr r4, [r3]
+ ldr r1, [r3, #8]
+ sbc r12, r2, #0
+ ldr r2, [r3, #4]
+ ldr r3, [r3, #12]
+ adds r4, r7, r4
+ adcs r2, r6, r2
+ adcs r1, r5, r1
+ adc r3, lr, r3
+ ands r12, r12, #1
+ moveq r4, r7
+ moveq r2, r6
+ moveq r1, r5
+ cmp r12, #0
+ moveq r3, lr
+ str r4, [r0, #16]
+ str r2, [r0, #20]
+ str r1, [r0, #24]
+ str r3, [r0, #28]
+ add sp, sp, #16
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end64:
+ .size mcl_fpDbl_sub4L, .Lfunc_end64-mcl_fpDbl_sub4L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mulUnitPre5L
+ .align 2
+ .type mcl_fp_mulUnitPre5L,%function
+mcl_fp_mulUnitPre5L: @ @mcl_fp_mulUnitPre5L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, lr}
+ ldr r12, [r1]
+ ldmib r1, {r3, lr}
+ ldr r10, [r1, #12]
+ ldr r8, [r1, #16]
+ umull r4, r9, lr, r2
+ umull r1, r6, r12, r2
+ mov r7, r6
+ mov r5, r4
+ umlal r7, r5, r3, r2
+ stm r0, {r1, r7}
+ str r5, [r0, #8]
+ umull r5, r7, r3, r2
+ umull r1, r12, r10, r2
+ adds r3, r6, r5
+ adcs r3, r7, r4
+ adcs r1, r9, r1
+ str r1, [r0, #12]
+ umull r1, r3, r8, r2
+ adcs r1, r12, r1
+ str r1, [r0, #16]
+ adc r1, r3, #0
+ str r1, [r0, #20]
+ pop {r4, r5, r6, r7, r8, r9, r10, lr}
+ mov pc, lr
+.Lfunc_end65:
+ .size mcl_fp_mulUnitPre5L, .Lfunc_end65-mcl_fp_mulUnitPre5L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_mulPre5L
+ .align 2
+ .type mcl_fpDbl_mulPre5L,%function
+mcl_fpDbl_mulPre5L: @ @mcl_fpDbl_mulPre5L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #36
+ sub sp, sp, #36
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r3, [r2]
+ ldm r1, {r12, lr}
+ ldr r9, [r1, #8]
+ ldr r10, [r1, #12]
+ umull r5, r4, r12, r3
+ umull r6, r7, lr, r3
+ adds r6, r4, r6
+ str r5, [sp, #24] @ 4-byte Spill
+ umull r5, r6, r9, r3
+ adcs r7, r7, r5
+ umlal r4, r5, lr, r3
+ umull r7, r11, r10, r3
+ adcs r6, r6, r7
+ ldr r7, [r1, #16]
+ str r6, [sp, #28] @ 4-byte Spill
+ umull r6, r8, r7, r3
+ ldr r3, [sp, #24] @ 4-byte Reload
+ adcs r11, r11, r6
+ ldr r6, [r2, #4]
+ str r3, [r0]
+ umull r3, r2, r12, r6
+ adc r12, r8, #0
+ adds r8, r3, r4
+ str r2, [sp, #24] @ 4-byte Spill
+ umull r3, r2, lr, r6
+ str r2, [sp, #20] @ 4-byte Spill
+ ldr r2, [sp, #28] @ 4-byte Reload
+ adcs r5, r3, r5
+ umull r3, lr, r10, r6
+ umull r4, r10, r9, r6
+ str r8, [r0, #4]
+ adcs r4, r4, r2
+ umull r2, r9, r7, r6
+ adcs r3, r3, r11
+ adcs r7, r2, r12
+ mov r2, #0
+ adc r6, r2, #0
+ ldr r2, [sp, #24] @ 4-byte Reload
+ adds r5, r5, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ adcs r11, r4, r2
+ adcs r2, r3, r10
+ ldr r3, [sp, #32] @ 4-byte Reload
+ str r2, [sp, #16] @ 4-byte Spill
+ adcs r2, r7, lr
+ ldr r7, [r1]
+ str r2, [sp, #8] @ 4-byte Spill
+ adc r2, r6, r9
+ str r2, [sp, #24] @ 4-byte Spill
+ ldr r2, [r3, #8]
+ str r7, [sp, #28] @ 4-byte Spill
+ ldmib r1, {r8, lr}
+ ldr r6, [r1, #12]
+ umull r12, r4, r7, r2
+ adds r7, r12, r5
+ str r4, [sp, #12] @ 4-byte Spill
+ ldr r12, [r1, #16]
+ str r7, [sp, #20] @ 4-byte Spill
+ umull r5, r7, r8, r2
+ str r7, [sp, #4] @ 4-byte Spill
+ adcs r10, r5, r11
+ umull r5, r7, lr, r2
+ str r7, [sp] @ 4-byte Spill
+ ldr r7, [sp, #16] @ 4-byte Reload
+ adcs r9, r5, r7
+ umull r4, r7, r6, r2
+ mov r5, #0
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [sp, #8] @ 4-byte Reload
+ adcs r4, r4, r7
+ umull r11, r7, r12, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ adcs r2, r11, r2
+ adc r11, r5, #0
+ ldr r5, [sp, #12] @ 4-byte Reload
+ adds r5, r10, r5
+ str r5, [sp, #12] @ 4-byte Spill
+ ldr r5, [sp, #4] @ 4-byte Reload
+ adcs r5, r9, r5
+ str r5, [sp, #8] @ 4-byte Spill
+ ldr r5, [sp] @ 4-byte Reload
+ adcs r4, r4, r5
+ ldr r5, [sp, #16] @ 4-byte Reload
+ adcs r10, r2, r5
+ adc r2, r11, r7
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r2, [r0, #8]
+ ldr r2, [r3, #12]
+ umull r11, r3, r6, r2
+ str r3, [sp, #20] @ 4-byte Spill
+ umull r6, r3, lr, r2
+ umull lr, r9, r8, r2
+ str r3, [sp, #24] @ 4-byte Spill
+ ldr r3, [sp, #28] @ 4-byte Reload
+ umull r7, r8, r3, r2
+ ldr r3, [sp, #12] @ 4-byte Reload
+ adds r3, r7, r3
+ str r3, [sp, #28] @ 4-byte Spill
+ ldr r3, [sp, #8] @ 4-byte Reload
+ adcs r5, lr, r3
+ mov r3, #0
+ adcs r6, r6, r4
+ umull r4, lr, r12, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ adcs r7, r11, r10
+ adcs r2, r4, r2
+ adc r3, r3, #0
+ adds r10, r5, r8
+ adcs r11, r6, r9
+ ldr r6, [sp, #24] @ 4-byte Reload
+ adcs r7, r7, r6
+ str r7, [sp, #24] @ 4-byte Spill
+ ldr r7, [sp, #20] @ 4-byte Reload
+ adcs r2, r2, r7
+ str r2, [sp, #20] @ 4-byte Spill
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r2, [r0, #12]
+ adc r2, r3, lr
+ ldr r3, [r1]
+ str r2, [sp, #28] @ 4-byte Spill
+ ldr r2, [sp, #32] @ 4-byte Reload
+ ldr r4, [r2, #16]
+ ldmib r1, {r2, r5, r6}
+ ldr r1, [r1, #16]
+ umull lr, r9, r6, r4
+ umull r6, r8, r5, r4
+ umull r5, r7, r2, r4
+ umull r2, r12, r3, r4
+ adds r10, r2, r10
+ ldr r2, [sp, #24] @ 4-byte Reload
+ adcs r3, r5, r11
+ str r10, [r0, #16]
+ adcs r5, r6, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ adcs r6, lr, r2
+ umull r2, lr, r1, r4
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r1, r2, r1
+ mov r2, #0
+ adc r2, r2, #0
+ adds r3, r3, r12
+ adcs r7, r5, r7
+ str r3, [r0, #20]
+ adcs r6, r6, r8
+ str r7, [r0, #24]
+ adcs r1, r1, r9
+ str r6, [r0, #28]
+ adc r2, r2, lr
+ str r1, [r0, #32]
+ str r2, [r0, #36]
+ add sp, sp, #36
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end66:
+ .size mcl_fpDbl_mulPre5L, .Lfunc_end66-mcl_fpDbl_mulPre5L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_sqrPre5L
+ .align 2
+ .type mcl_fpDbl_sqrPre5L,%function
+mcl_fpDbl_sqrPre5L: @ @mcl_fpDbl_sqrPre5L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #32
+ sub sp, sp, #32
+ ldm r1, {r2, r3, r12}
+ ldr lr, [r1, #16]
+ ldr r9, [r1, #12]
+ umull r5, r6, r2, r2
+ umull r7, r11, r3, r2
+ str r5, [r0]
+ umull r5, r4, lr, r2
+ adds r8, r6, r7
+ str r5, [sp, #24] @ 4-byte Spill
+ umull r5, r10, r12, r2
+ str r4, [sp, #28] @ 4-byte Spill
+ adcs r4, r11, r5
+ umlal r6, r5, r3, r2
+ umull r4, r8, r9, r2
+ adcs r10, r10, r4
+ ldr r4, [sp, #24] @ 4-byte Reload
+ adcs r8, r8, r4
+ ldr r4, [sp, #28] @ 4-byte Reload
+ adc r4, r4, #0
+ str r4, [sp, #24] @ 4-byte Spill
+ umull r2, r4, r3, r3
+ str r4, [sp, #28] @ 4-byte Spill
+ adds r4, r7, r6
+ str r4, [sp, #16] @ 4-byte Spill
+ adcs r5, r2, r5
+ umull r2, r4, r12, r3
+ str r4, [sp, #12] @ 4-byte Spill
+ adcs r4, r2, r10
+ umull r2, r6, r9, r3
+ adcs r2, r2, r8
+ umull r7, r8, lr, r3
+ ldr r3, [sp, #24] @ 4-byte Reload
+ adcs r7, r7, r3
+ mov r3, #0
+ adc r3, r3, #0
+ adds r5, r5, r11
+ str r5, [sp, #24] @ 4-byte Spill
+ ldr r5, [sp, #28] @ 4-byte Reload
+ adcs r4, r4, r5
+ str r4, [sp, #20] @ 4-byte Spill
+ ldr r4, [sp, #16] @ 4-byte Reload
+ str r4, [r0, #4]
+ ldr r4, [sp, #12] @ 4-byte Reload
+ adcs r2, r2, r4
+ str r2, [sp, #12] @ 4-byte Spill
+ adcs r2, r7, r6
+ str r2, [sp, #8] @ 4-byte Spill
+ adc r2, r3, r8
+ str r2, [sp, #4] @ 4-byte Spill
+ umull r11, r2, lr, r12
+ umull lr, r10, r12, r12
+ str r2, [sp, #28] @ 4-byte Spill
+ ldm r1, {r4, r6}
+ ldr r2, [r1, #12]
+ ldr r7, [sp, #24] @ 4-byte Reload
+ umull r8, r3, r2, r12
+ str r3, [sp, #16] @ 4-byte Spill
+ umull r5, r3, r6, r12
+ str r3, [sp] @ 4-byte Spill
+ umull r3, r9, r4, r12
+ adds r3, r3, r7
+ str r3, [sp, #24] @ 4-byte Spill
+ ldr r3, [sp, #20] @ 4-byte Reload
+ adcs r5, r5, r3
+ ldr r3, [sp, #12] @ 4-byte Reload
+ adcs r12, lr, r3
+ ldr r3, [sp, #8] @ 4-byte Reload
+ adcs r7, r8, r3
+ ldr r3, [sp, #4] @ 4-byte Reload
+ adcs lr, r11, r3
+ mov r3, #0
+ adc r11, r3, #0
+ ldr r3, [sp] @ 4-byte Reload
+ adds r5, r5, r9
+ adcs r12, r12, r3
+ ldr r3, [sp, #16] @ 4-byte Reload
+ adcs r9, r7, r10
+ ldr r7, [sp, #28] @ 4-byte Reload
+ adcs r8, lr, r3
+ adc r11, r11, r7
+ umull r7, r3, r4, r2
+ adds r7, r7, r5
+ str r3, [sp, #20] @ 4-byte Spill
+ umull r5, r3, r6, r2
+ ldr r6, [r1, #8]
+ str r3, [sp, #16] @ 4-byte Spill
+ adcs r10, r5, r12
+ ldr r3, [sp, #24] @ 4-byte Reload
+ ldr r5, [r1, #16]
+ str r7, [sp, #28] @ 4-byte Spill
+ umull r4, lr, r6, r2
+ adcs r12, r4, r9
+ ldr r4, [sp, #20] @ 4-byte Reload
+ umull r7, r9, r2, r2
+ str r3, [r0, #8]
+ adcs r7, r7, r8
+ umull r3, r8, r5, r2
+ adcs r2, r3, r11
+ mov r3, #0
+ adc r3, r3, #0
+ adds r11, r10, r4
+ ldr r4, [sp, #16] @ 4-byte Reload
+ adcs r4, r12, r4
+ adcs r10, r7, lr
+ adcs r12, r2, r9
+ ldr r2, [sp, #28] @ 4-byte Reload
+ adc r8, r3, r8
+ ldr r3, [r1]
+ str r2, [r0, #12]
+ ldr r2, [r1, #4]
+ ldr r1, [r1, #12]
+ umull r7, r9, r3, r5
+ adds lr, r7, r11
+ str lr, [r0, #16]
+ umull r7, r11, r2, r5
+ adcs r2, r7, r4
+ umull r4, r7, r6, r5
+ adcs r4, r4, r10
+ umull r6, r10, r1, r5
+ adcs r1, r6, r12
+ umull r6, r3, r5, r5
+ mov r5, #0
+ adcs r6, r6, r8
+ adc r5, r5, #0
+ adds r2, r2, r9
+ adcs r4, r4, r11
+ str r2, [r0, #20]
+ adcs r1, r1, r7
+ str r4, [r0, #24]
+ adcs r7, r6, r10
+ str r1, [r0, #28]
+ adc r3, r5, r3
+ str r7, [r0, #32]
+ str r3, [r0, #36]
+ add sp, sp, #32
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end67:
+ .size mcl_fpDbl_sqrPre5L, .Lfunc_end67-mcl_fpDbl_sqrPre5L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mont5L
+ .align 2
+ .type mcl_fp_mont5L,%function
+mcl_fp_mont5L: @ @mcl_fp_mont5L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #100
+ sub sp, sp, #100
+ str r0, [sp, #52] @ 4-byte Spill
+ mov r0, r2
+ str r2, [sp, #48] @ 4-byte Spill
+ ldm r0, {r2, r8}
+ ldr r7, [r0, #8]
+ ldr r0, [r0, #12]
+ ldr r6, [r3, #-4]
+ ldr r5, [r3, #8]
+ ldr r9, [r3]
+ ldr r11, [r1, #8]
+ ldr r12, [r1, #12]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r1]
+ str r7, [sp, #40] @ 4-byte Spill
+ ldr r7, [r1, #4]
+ ldr r1, [r1, #16]
+ str r6, [sp, #84] @ 4-byte Spill
+ str r5, [sp, #88] @ 4-byte Spill
+ str r9, [sp, #80] @ 4-byte Spill
+ str r11, [sp, #60] @ 4-byte Spill
+ str r12, [sp, #56] @ 4-byte Spill
+ umull r4, lr, r0, r2
+ str r0, [sp, #72] @ 4-byte Spill
+ str r7, [sp, #96] @ 4-byte Spill
+ ldr r7, [r3, #4]
+ str r1, [sp, #64] @ 4-byte Spill
+ mul r0, r4, r6
+ str r4, [sp, #36] @ 4-byte Spill
+ umull r6, r4, r0, r5
+ str r4, [sp, #28] @ 4-byte Spill
+ umull r4, r5, r0, r9
+ mov r10, r6
+ mov r9, r5
+ str r4, [sp, #32] @ 4-byte Spill
+ str r7, [sp, #76] @ 4-byte Spill
+ str r5, [sp, #12] @ 4-byte Spill
+ mov r4, r7
+ umlal r9, r10, r0, r7
+ umull r7, r5, r1, r2
+ str r7, [sp, #68] @ 4-byte Spill
+ ldr r7, [sp, #96] @ 4-byte Reload
+ str r5, [sp, #92] @ 4-byte Spill
+ umull r5, r1, r12, r2
+ str r1, [sp, #20] @ 4-byte Spill
+ str r5, [sp, #24] @ 4-byte Spill
+ umull r12, r1, r11, r2
+ umull r11, r5, r7, r2
+ adds r7, lr, r11
+ adcs r5, r5, r12
+ ldr r5, [sp, #24] @ 4-byte Reload
+ adcs r1, r1, r5
+ ldr r5, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r1, r5, r1
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #92] @ 4-byte Reload
+ adc r1, r1, #0
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [r3, #16]
+ str r1, [sp, #68] @ 4-byte Spill
+ umull r7, r11, r0, r1
+ ldr r1, [r3, #12]
+ umull r3, r5, r0, r4
+ ldr r4, [sp, #12] @ 4-byte Reload
+ adds r3, r4, r3
+ str r1, [sp, #92] @ 4-byte Spill
+ umull r3, r4, r0, r1
+ adcs r0, r5, r6
+ mov r1, #0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r3
+ adcs r3, r4, r7
+ ldr r7, [sp, #96] @ 4-byte Reload
+ ldr r4, [sp, #32] @ 4-byte Reload
+ adc r5, r11, #0
+ umlal lr, r12, r7, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ adds r2, r4, r2
+ adcs r2, r9, lr
+ ldr r9, [sp, #64] @ 4-byte Reload
+ str r2, [sp, #36] @ 4-byte Spill
+ adcs r2, r10, r12
+ ldr r10, [sp, #72] @ 4-byte Reload
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r2, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r5, r0
+ umull r5, lr, r8, r9
+ str r0, [sp, #20] @ 4-byte Spill
+ adc r0, r1, #0
+ umull r6, r1, r8, r7
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ umull r12, r4, r8, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ umull r3, r2, r8, r0
+ umull r11, r0, r8, r10
+ ldr r10, [sp, #68] @ 4-byte Reload
+ adds r6, r0, r6
+ adcs r1, r1, r3
+ umlal r0, r3, r8, r7
+ ldr r7, [sp, #36] @ 4-byte Reload
+ adcs r1, r2, r12
+ adcs r2, r4, r5
+ adc r6, lr, #0
+ adds r8, r7, r11
+ ldr r7, [sp, #32] @ 4-byte Reload
+ adcs r11, r7, r0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r7, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #24] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ mul r4, r8, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ umull r6, r1, r4, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ str r1, [sp, #12] @ 4-byte Spill
+ umull r1, r5, r4, r0
+ mov r0, r6
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ mov r3, r5
+ umull r12, lr, r4, r1
+ umlal r3, r0, r4, r1
+ umull r1, r2, r4, r7
+ ldr r7, [sp, #12] @ 4-byte Reload
+ adds r5, r5, r12
+ adcs r6, lr, r6
+ umull r5, r12, r4, r10
+ adcs r1, r7, r1
+ ldr r7, [sp, #16] @ 4-byte Reload
+ adcs r2, r2, r5
+ adc r6, r12, #0
+ adds r7, r7, r8
+ ldr r8, [sp, #60] @ 4-byte Reload
+ adcs r3, r3, r11
+ ldr r11, [sp, #72] @ 4-byte Reload
+ str r3, [sp, #16] @ 4-byte Spill
+ ldr r3, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r1, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ umull r2, r1, r0, r9
+ ldr r9, [sp, #56] @ 4-byte Reload
+ umull r3, r12, r0, r8
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r2, [sp, #4] @ 4-byte Spill
+ mov r2, r0
+ umull r4, r5, r0, r9
+ umull r6, r7, r0, r1
+ umull lr, r0, r2, r11
+ adds r6, r0, r6
+ str lr, [sp, #8] @ 4-byte Spill
+ adcs r6, r7, r3
+ ldr r7, [sp, #4] @ 4-byte Reload
+ umlal r0, r3, r2, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #8] @ 4-byte Reload
+ adcs r12, r12, r4
+ adcs r4, r5, r7
+ ldr r7, [sp, #12] @ 4-byte Reload
+ adc r7, r7, #0
+ adds r2, r1, r2
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r2, [sp] @ 4-byte Spill
+ adcs r0, r1, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #24] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ mul r4, r2, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ umull r5, r1, r4, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ str r1, [sp, #12] @ 4-byte Spill
+ mov r2, r5
+ umull r1, r7, r4, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ str r1, [sp, #16] @ 4-byte Spill
+ umull r6, r1, r4, r10
+ mov r3, r7
+ str r1, [sp, #8] @ 4-byte Spill
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r6, [sp, #4] @ 4-byte Spill
+ umlal r3, r2, r4, r0
+ umull r12, lr, r4, r1
+ umull r10, r1, r4, r0
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adds r4, r7, r10
+ adcs r1, r1, r5
+ ldr r4, [sp, #64] @ 4-byte Reload
+ ldr r1, [sp] @ 4-byte Reload
+ adcs r10, r0, r12
+ ldr r0, [sp, #4] @ 4-byte Reload
+ adcs r12, lr, r0
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adc lr, r0, #0
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adds r6, r0, r1
+ ldr r0, [sp, #44] @ 4-byte Reload
+ umull r5, r1, r0, r4
+ mov r6, r0
+ str r1, [sp, #16] @ 4-byte Spill
+ umull r4, r1, r0, r9
+ str r5, [sp, #8] @ 4-byte Spill
+ umull r5, r9, r0, r8
+ str r1, [sp, #4] @ 4-byte Spill
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r4, [sp] @ 4-byte Spill
+ umull r4, r8, r0, r1
+ umull r7, r0, r6, r11
+ str r7, [sp, #12] @ 4-byte Spill
+ ldr r7, [sp, #40] @ 4-byte Reload
+ adcs r11, r3, r7
+ ldr r3, [sp, #36] @ 4-byte Reload
+ adcs r2, r2, r3
+ ldr r3, [sp, #32] @ 4-byte Reload
+ str r2, [sp, #40] @ 4-byte Spill
+ adcs r10, r10, r3
+ ldr r3, [sp, #28] @ 4-byte Reload
+ adcs r12, r12, r3
+ ldr r3, [sp, #24] @ 4-byte Reload
+ adcs r7, lr, r3
+ ldr r3, [sp, #20] @ 4-byte Reload
+ adc r2, r3, #0
+ adds r4, r0, r4
+ ldr r3, [sp, #4] @ 4-byte Reload
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r2, [sp] @ 4-byte Reload
+ adcs r4, r8, r5
+ umlal r0, r5, r6, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r4, r9, r2
+ ldr r2, [sp, #8] @ 4-byte Reload
+ adcs r3, r3, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ adc r8, r2, #0
+ adds lr, r11, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r9, r10, r5
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r10, [sp, #92] @ 4-byte Reload
+ adcs r0, r12, r4
+ str r0, [sp, #40] @ 4-byte Spill
+ adcs r0, r7, r3
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #32] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ mul r4, lr, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ umull r12, r3, r4, r1
+ umull r7, r11, r4, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ umull r8, r6, r4, r0
+ mov r0, r7
+ mov r5, r6
+ adds r6, r6, r12
+ umlal r5, r0, r4, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r3, r3, r7
+ umull r6, r12, r4, r1
+ umull r1, r2, r4, r10
+ adcs r1, r11, r1
+ adcs r2, r2, r6
+ adc r3, r12, #0
+ adds r7, r8, lr
+ ldr r7, [sp, #44] @ 4-byte Reload
+ adcs r7, r5, r7
+ adcs r0, r0, r9
+ ldr r9, [sp, #72] @ 4-byte Reload
+ str r7, [sp, #44] @ 4-byte Spill
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #96] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ ldr r5, [r0, #16]
+ ldr r0, [sp, #64] @ 4-byte Reload
+ umull r4, r8, r5, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ umull r7, r1, r5, r2
+ umull r12, lr, r5, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ umull r6, r3, r5, r0
+ umull r11, r0, r5, r9
+ ldr r9, [sp, #76] @ 4-byte Reload
+ adds r7, r0, r7
+ adcs r1, r1, r6
+ umlal r0, r6, r5, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ adcs r1, r3, r12
+ ldr r12, [sp, #80] @ 4-byte Reload
+ adcs r4, lr, r4
+ ldr lr, [sp, #88] @ 4-byte Reload
+ adc r3, r8, #0
+ adds r7, r2, r11
+ ldr r2, [sp, #24] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r11, r0, r6
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ mul r4, r7, r0
+ umull r0, r1, r4, r9
+ umull r8, r3, r4, r12
+ adds r0, r3, r0
+ umull r5, r0, r4, lr
+ adcs r1, r1, r5
+ umlal r3, r5, r4, r9
+ umull r1, r6, r4, r10
+ adcs r10, r0, r1
+ umull r1, r0, r4, r2
+ mov r4, r9
+ adcs r1, r6, r1
+ ldr r6, [sp, #96] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r7, r8, r7
+ adcs r3, r3, r6
+ adcs r7, r5, r11
+ ldr r5, [sp, #72] @ 4-byte Reload
+ adcs r11, r10, r5
+ ldr r5, [sp, #64] @ 4-byte Reload
+ adcs r8, r1, r5
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r10, r0, r1
+ ldr r0, [sp, #56] @ 4-byte Reload
+ ldr r1, [sp, #92] @ 4-byte Reload
+ adc r9, r0, #0
+ subs r5, r3, r12
+ sbcs r4, r7, r4
+ sbcs r0, r11, lr
+ sbcs r6, r8, r1
+ sbcs r1, r10, r2
+ sbc r2, r9, #0
+ ands r2, r2, #1
+ movne r5, r3
+ ldr r3, [sp, #52] @ 4-byte Reload
+ movne r4, r7
+ movne r0, r11
+ cmp r2, #0
+ movne r6, r8
+ movne r1, r10
+ str r5, [r3]
+ str r4, [r3, #4]
+ str r0, [r3, #8]
+ str r6, [r3, #12]
+ str r1, [r3, #16]
+ add sp, sp, #100
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end68:
+ .size mcl_fp_mont5L, .Lfunc_end68-mcl_fp_mont5L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montNF5L
+ .align 2
+ .type mcl_fp_montNF5L,%function
+mcl_fp_montNF5L: @ @mcl_fp_montNF5L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #76
+ sub sp, sp, #76
+ str r2, [sp, #24] @ 4-byte Spill
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r2, {r4, r9, r10}
+ ldr r6, [r1, #4]
+ ldr r0, [r2, #12]
+ ldr r7, [r1]
+ ldr r5, [r1, #8]
+ ldr lr, [r3, #8]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [r1, #12]
+ str r6, [sp, #32] @ 4-byte Spill
+ umull r2, r8, r6, r4
+ mov r11, r6
+ umull r6, r12, r7, r4
+ str r7, [sp, #56] @ 4-byte Spill
+ str r5, [sp, #48] @ 4-byte Spill
+ str lr, [sp, #36] @ 4-byte Spill
+ adds r7, r12, r2
+ umull r2, r7, r5, r4
+ adcs r5, r8, r2
+ umlal r12, r2, r11, r4
+ umull r5, r8, r0, r4
+ str r0, [sp, #68] @ 4-byte Spill
+ adcs r0, r7, r5
+ ldr r5, [r3, #4]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r1, #16]
+ str r5, [sp, #60] @ 4-byte Spill
+ umull r1, r7, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ adcs r0, r8, r1
+ ldr r1, [r3]
+ str r0, [sp, #16] @ 4-byte Spill
+ adc r0, r7, #0
+ ldr r7, [r3, #-4]
+ str r0, [sp, #12] @ 4-byte Spill
+ str r1, [sp, #40] @ 4-byte Spill
+ mul r0, r6, r7
+ str r7, [sp, #72] @ 4-byte Spill
+ umull r8, r7, r0, r1
+ ldr r1, [r3, #12]
+ ldr r3, [r3, #16]
+ adds r6, r8, r6
+ umull r4, r8, r0, r5
+ str r7, [sp, #8] @ 4-byte Spill
+ umull r5, r7, r0, lr
+ ldr lr, [sp, #64] @ 4-byte Reload
+ adcs r6, r4, r12
+ adcs r5, r5, r2
+ str r1, [sp, #52] @ 4-byte Spill
+ umull r2, r4, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r3, [sp, #44] @ 4-byte Spill
+ adcs r2, r2, r1
+ umull r12, r1, r0, r3
+ ldr r0, [sp, #16] @ 4-byte Reload
+ ldr r3, [sp, #12] @ 4-byte Reload
+ adcs r0, r12, r0
+ adc r12, r3, #0
+ ldr r3, [sp, #8] @ 4-byte Reload
+ adds r6, r6, r3
+ adcs r3, r5, r8
+ ldr r8, [sp, #56] @ 4-byte Reload
+ adcs r2, r2, r7
+ str r3, [sp, #16] @ 4-byte Spill
+ adcs r0, r0, r4
+ umull r7, r4, r9, r11
+ str r2, [sp, #12] @ 4-byte Spill
+ str r0, [sp, #8] @ 4-byte Spill
+ adc r0, r12, r1
+ ldr r12, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ umull r5, r1, r9, r8
+ adds r7, r1, r7
+ umull r2, r7, r9, r0
+ adcs r4, r4, r2
+ umlal r1, r2, r9, r11
+ ldr r11, [sp, #44] @ 4-byte Reload
+ umull r4, r0, r9, r12
+ adcs r4, r7, r4
+ umull r7, r3, r9, lr
+ ldr r9, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r7
+ adc r3, r3, #0
+ adds r7, r5, r6
+ ldr r5, [sp, #16] @ 4-byte Reload
+ adcs r1, r1, r5
+ ldr r5, [sp, #12] @ 4-byte Reload
+ adcs r2, r2, r5
+ ldr r5, [sp, #8] @ 4-byte Reload
+ adcs r6, r4, r5
+ ldr r4, [sp, #4] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #16] @ 4-byte Spill
+ adc r0, r3, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ mul r5, r7, r0
+ ldr r0, [sp, #40] @ 4-byte Reload
+ umull r4, r3, r5, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adds r7, r4, r7
+ ldr r4, [sp, #52] @ 4-byte Reload
+ str r3, [sp, #8] @ 4-byte Spill
+ umull r7, r3, r5, r0
+ adcs r1, r7, r1
+ umull r7, r0, r5, r9
+ str r3, [sp, #4] @ 4-byte Spill
+ ldr r3, [sp, #8] @ 4-byte Reload
+ str r0, [sp] @ 4-byte Spill
+ adcs r2, r7, r2
+ umull r7, r0, r5, r4
+ adcs r6, r7, r6
+ umull r7, r4, r5, r11
+ ldr r5, [sp, #16] @ 4-byte Reload
+ adcs r7, r7, r5
+ ldr r5, [sp, #12] @ 4-byte Reload
+ adc r5, r5, #0
+ adds r1, r1, r3
+ ldr r3, [sp, #48] @ 4-byte Reload
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #4] @ 4-byte Reload
+ adcs r1, r2, r1
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [sp] @ 4-byte Reload
+ adcs r1, r6, r1
+ adcs r0, r7, r0
+ ldr r7, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #8] @ 4-byte Spill
+ adc r11, r5, r4
+ str r0, [sp, #4] @ 4-byte Spill
+ umull r4, r0, r10, r8
+ ldr r8, [sp, #60] @ 4-byte Reload
+ umull r6, r5, r10, r7
+ adds r6, r0, r6
+ umull r1, r6, r10, r3
+ adcs r5, r5, r1
+ umlal r0, r1, r10, r7
+ umull r5, r2, r10, r12
+ adcs r12, r6, r5
+ umull r6, r5, r10, lr
+ mov lr, r7
+ adcs r2, r2, r6
+ ldr r6, [sp, #16] @ 4-byte Reload
+ adc r5, r5, #0
+ adds r6, r4, r6
+ ldr r4, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #8] @ 4-byte Reload
+ adcs r1, r1, r4
+ ldr r4, [sp, #4] @ 4-byte Reload
+ adcs r10, r12, r4
+ adcs r2, r2, r11
+ ldr r11, [sp, #40] @ 4-byte Reload
+ str r2, [sp, #8] @ 4-byte Spill
+ adc r2, r5, #0
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [sp, #72] @ 4-byte Reload
+ mul r7, r6, r2
+ umull r4, r2, r7, r11
+ adds r6, r4, r6
+ str r2, [sp, #12] @ 4-byte Spill
+ umull r6, r2, r7, r8
+ str r2, [sp, #4] @ 4-byte Spill
+ adcs r0, r6, r0
+ umull r6, r2, r7, r9
+ ldr r9, [sp, #52] @ 4-byte Reload
+ adcs r1, r6, r1
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [sp, #8] @ 4-byte Reload
+ umull r6, r12, r7, r9
+ adcs r5, r6, r10
+ ldr r10, [sp, #44] @ 4-byte Reload
+ umull r6, r4, r7, r10
+ adcs r7, r6, r2
+ ldr r6, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ adc r6, r6, #0
+ adds r0, r0, r2
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #4] @ 4-byte Reload
+ adcs r0, r1, r0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #8] @ 4-byte Spill
+ adcs r0, r7, r12
+ ldr r7, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #4] @ 4-byte Spill
+ adc r0, r6, r4
+ str r0, [sp] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ umull r1, r5, r7, r3
+ mov r6, r1
+ umull r4, r2, r7, r0
+ mov r0, lr
+ mov r12, r2
+ umull r3, lr, r7, r0
+ umlal r12, r6, r7, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adds r2, r2, r3
+ adcs r1, lr, r1
+ umull r1, r2, r7, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r1, r5, r1
+ umull r3, r5, r7, r0
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r2, r2, r3
+ adc r3, r5, #0
+ ldr r5, [sp, #8] @ 4-byte Reload
+ adds r7, r4, r0
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r12, r0
+ adcs r6, r6, r5
+ ldr r5, [sp, #4] @ 4-byte Reload
+ adcs r1, r1, r5
+ ldr r5, [sp] @ 4-byte Reload
+ adcs r2, r2, r5
+ str r2, [sp, #20] @ 4-byte Spill
+ adc r2, r3, #0
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [sp, #72] @ 4-byte Reload
+ mul r5, r7, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ umull r4, lr, r5, r11
+ adds r7, r4, r7
+ umull r7, r12, r5, r8
+ adcs r0, r7, r0
+ umull r7, r3, r5, r2
+ adcs r6, r7, r6
+ umull r7, r2, r5, r9
+ adcs r1, r7, r1
+ umull r7, r4, r5, r10
+ ldr r5, [sp, #20] @ 4-byte Reload
+ adcs r7, r7, r5
+ ldr r5, [sp, #16] @ 4-byte Reload
+ adc r5, r5, #0
+ adds r0, r0, lr
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r10, r6, r12
+ adcs lr, r1, r3
+ adcs r8, r7, r2
+ adc r9, r5, r4
+ ldr r4, [sp, #32] @ 4-byte Reload
+ ldr r7, [r0, #16]
+ ldr r0, [sp, #48] @ 4-byte Reload
+ umull r3, r11, r7, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ mov r5, r3
+ umull r12, r2, r7, r0
+ umull r6, r0, r7, r4
+ mov r1, r2
+ adds r2, r2, r6
+ ldr r6, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ ldr r3, [sp, #68] @ 4-byte Reload
+ umlal r1, r5, r7, r4
+ umull r0, r2, r7, r3
+ umull r3, r4, r7, r6
+ adcs r0, r11, r0
+ adcs r2, r2, r3
+ adc r3, r4, #0
+ ldr r4, [sp, #20] @ 4-byte Reload
+ adds r7, r12, r4
+ ldr r12, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r10
+ adcs r6, r5, lr
+ adcs r11, r0, r8
+ ldr r8, [sp, #40] @ 4-byte Reload
+ adcs r0, r2, r9
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ adc r0, r3, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ mul r5, r7, r0
+ umull r4, r0, r5, r8
+ umull r3, lr, r5, r12
+ adds r7, r4, r7
+ ldr r4, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ adcs r1, r3, r1
+ ldr r9, [sp, #72] @ 4-byte Reload
+ umull r7, r0, r5, r4
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r3, r7, r6
+ umull r6, r10, r5, r2
+ adcs r7, r6, r11
+ umull r6, r11, r5, r0
+ ldr r5, [sp, #68] @ 4-byte Reload
+ adcs r6, r6, r5
+ ldr r5, [sp, #64] @ 4-byte Reload
+ adc r5, r5, #0
+ adds r1, r1, r9
+ adcs lr, r3, lr
+ ldr r3, [sp, #56] @ 4-byte Reload
+ adcs r9, r7, r3
+ adcs r10, r6, r10
+ adc r11, r5, r11
+ subs r6, r1, r8
+ sbcs r5, lr, r12
+ sbcs r4, r9, r4
+ sbcs r7, r10, r2
+ sbc r3, r11, r0
+ asr r0, r3, #31
+ cmp r0, #0
+ movlt r6, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ movlt r5, lr
+ movlt r4, r9
+ cmp r0, #0
+ movlt r7, r10
+ movlt r3, r11
+ str r6, [r1]
+ str r5, [r1, #4]
+ str r4, [r1, #8]
+ str r7, [r1, #12]
+ str r3, [r1, #16]
+ add sp, sp, #76
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end69:
+ .size mcl_fp_montNF5L, .Lfunc_end69-mcl_fp_montNF5L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montRed5L
+ .align 2
+ .type mcl_fp_montRed5L,%function
+mcl_fp_montRed5L: @ @mcl_fp_montRed5L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #84
+ sub sp, sp, #84
+ ldr r6, [r1, #4]
+ ldr r9, [r2, #-4]
+ ldr r4, [r1]
+ ldr r8, [r2, #8]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [r2]
+ ldr r10, [r2, #4]
+ str r6, [sp, #48] @ 4-byte Spill
+ ldr r6, [r1, #8]
+ mul r5, r4, r9
+ str r4, [sp, #24] @ 4-byte Spill
+ str r0, [sp, #72] @ 4-byte Spill
+ str r9, [sp, #64] @ 4-byte Spill
+ str r8, [sp, #68] @ 4-byte Spill
+ umull lr, r4, r5, r8
+ str r4, [sp, #40] @ 4-byte Spill
+ umull r4, r3, r5, r0
+ mov r12, lr
+ str r4, [sp, #44] @ 4-byte Spill
+ ldr r4, [r2, #16]
+ ldr r2, [r2, #12]
+ mov r0, r3
+ str r6, [sp, #56] @ 4-byte Spill
+ ldr r6, [r1, #12]
+ umlal r0, r12, r5, r10
+ str r4, [sp, #76] @ 4-byte Spill
+ str r2, [sp, #80] @ 4-byte Spill
+ str r6, [sp, #52] @ 4-byte Spill
+ umull r7, r6, r5, r4
+ str r6, [sp, #28] @ 4-byte Spill
+ umull r4, r6, r5, r2
+ umull r11, r2, r5, r10
+ str r7, [sp, #32] @ 4-byte Spill
+ adds r3, r3, r11
+ ldr r11, [r1, #36]
+ adcs r2, r2, lr
+ ldr r3, [sp, #24] @ 4-byte Reload
+ add lr, r1, #16
+ ldr r2, [sp, #40] @ 4-byte Reload
+ adcs r2, r2, r4
+ str r2, [sp, #36] @ 4-byte Spill
+ ldr r2, [sp, #32] @ 4-byte Reload
+ adcs r2, r6, r2
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r2, [sp, #28] @ 4-byte Reload
+ adc r2, r2, #0
+ str r2, [sp, #28] @ 4-byte Spill
+ ldr r2, [sp, #44] @ 4-byte Reload
+ adds r5, r3, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ ldr r3, [sp, #72] @ 4-byte Reload
+ adcs r2, r2, r0
+ mul r0, r2, r9
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r9, [r1, #28]
+ umull r6, r2, r0, r8
+ str r2, [sp, #40] @ 4-byte Spill
+ umull r2, r4, r0, r3
+ mov r5, r6
+ mov r8, r6
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [r1, #32]
+ mov r7, r4
+ umlal r7, r5, r0, r10
+ str r2, [sp, #24] @ 4-byte Spill
+ ldm lr, {r1, r2, lr}
+ ldr r6, [sp, #56] @ 4-byte Reload
+ adcs r3, r6, r12
+ ldr r6, [sp, #52] @ 4-byte Reload
+ str r3, [sp, #12] @ 4-byte Spill
+ ldr r3, [sp, #36] @ 4-byte Reload
+ adcs r6, r6, r3
+ ldr r3, [sp, #32] @ 4-byte Reload
+ str r6, [sp, #56] @ 4-byte Spill
+ adcs r1, r1, r3
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r1, r2, r1
+ str r1, [sp, #36] @ 4-byte Spill
+ adcs r1, lr, #0
+ ldr lr, [sp, #76] @ 4-byte Reload
+ str r1, [sp, #32] @ 4-byte Spill
+ adcs r1, r9, #0
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #24] @ 4-byte Reload
+ adcs r1, r1, #0
+ str r1, [sp, #24] @ 4-byte Spill
+ adcs r1, r11, #0
+ umull r6, r11, r0, lr
+ str r1, [sp, #20] @ 4-byte Spill
+ mov r1, #0
+ adc r1, r1, #0
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ umull r2, r3, r0, r1
+ umull r9, r1, r0, r10
+ adds r0, r4, r9
+ adcs r0, r1, r8
+ ldr r1, [sp, #44] @ 4-byte Reload
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r9, r0, r2
+ ldr r2, [sp, #64] @ 4-byte Reload
+ adcs r0, r3, r6
+ ldr r6, [sp, #72] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r11, r11, #0
+ adds r3, r1, r0
+ ldr r0, [sp, #12] @ 4-byte Reload
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r0, r7, r0
+ mul r7, r0, r2
+ str r0, [sp, #12] @ 4-byte Spill
+ umull r8, r0, r7, r1
+ str r0, [sp, #4] @ 4-byte Spill
+ umull r3, r0, r7, r6
+ mov r12, r8
+ str r3, [sp, #8] @ 4-byte Spill
+ ldr r3, [sp, #56] @ 4-byte Reload
+ mov r4, r0
+ umlal r4, r12, r7, r10
+ adcs r3, r5, r3
+ ldr r5, [sp, #40] @ 4-byte Reload
+ str r3, [sp] @ 4-byte Spill
+ ldr r3, [sp, #52] @ 4-byte Reload
+ adcs r3, r9, r3
+ str r3, [sp, #56] @ 4-byte Spill
+ ldr r3, [sp, #36] @ 4-byte Reload
+ adcs r3, r5, r3
+ str r3, [sp, #52] @ 4-byte Spill
+ ldr r3, [sp, #32] @ 4-byte Reload
+ adcs r3, r11, r3
+ str r3, [sp, #48] @ 4-byte Spill
+ ldr r3, [sp, #28] @ 4-byte Reload
+ adcs r3, r3, #0
+ str r3, [sp, #44] @ 4-byte Spill
+ ldr r3, [sp, #24] @ 4-byte Reload
+ adcs r3, r3, #0
+ str r3, [sp, #40] @ 4-byte Spill
+ ldr r3, [sp, #20] @ 4-byte Reload
+ adcs r3, r3, #0
+ str r3, [sp, #36] @ 4-byte Spill
+ ldr r3, [sp, #16] @ 4-byte Reload
+ adc r3, r3, #0
+ str r3, [sp, #32] @ 4-byte Spill
+ umull r5, r3, r7, lr
+ ldr lr, [sp, #80] @ 4-byte Reload
+ str r3, [sp, #28] @ 4-byte Spill
+ umull r9, r3, r7, r10
+ str r5, [sp, #24] @ 4-byte Spill
+ adds r0, r0, r9
+ adcs r0, r3, r8
+ ldr r3, [sp, #8] @ 4-byte Reload
+ ldr r0, [sp, #4] @ 4-byte Reload
+ umull r5, r11, r7, lr
+ adcs r9, r0, r5
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r8, r0, #0
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adds r3, r3, r0
+ ldr r0, [sp] @ 4-byte Reload
+ adcs r11, r4, r0
+ mul r7, r11, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ umull r3, r0, r7, r1
+ str r0, [sp, #24] @ 4-byte Spill
+ umull r1, r0, r7, r6
+ mov r5, r3
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ mov r4, r0
+ umlal r4, r5, r7, r10
+ adcs r1, r12, r1
+ umull r12, r6, r7, lr
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r9, r1
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #76] @ 4-byte Reload
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r8, r1
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r1, #0
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r1, r1, #0
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adc r1, r1, #0
+ str r1, [sp, #32] @ 4-byte Spill
+ umull r9, r1, r7, r2
+ str r1, [sp, #20] @ 4-byte Spill
+ umull r8, r1, r7, r10
+ adds r0, r0, r8
+ ldr r8, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r3
+ ldr r3, [sp, #20] @ 4-byte Reload
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r12
+ adcs r1, r6, r9
+ adc r7, r3, #0
+ ldr r3, [sp, #28] @ 4-byte Reload
+ adds r3, r3, r11
+ ldr r3, [sp, #56] @ 4-byte Reload
+ adcs r12, r4, r3
+ ldr r3, [sp, #52] @ 4-byte Reload
+ adcs r3, r5, r3
+ str r3, [sp, #56] @ 4-byte Spill
+ ldr r3, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r7, r0
+ ldr r7, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ mul r4, r12, r0
+ umull r0, r1, r4, r10
+ umull r11, r5, r4, r8
+ adds r0, r5, r0
+ umull r6, r0, r4, r7
+ adcs r1, r1, r6
+ umlal r5, r6, r4, r10
+ umull r1, r3, r4, lr
+ adcs r9, r0, r1
+ umull r1, r0, r4, r2
+ adcs r1, r3, r1
+ ldr r3, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r2, r11, r12
+ ldr r2, [sp, #56] @ 4-byte Reload
+ adcs r2, r5, r2
+ adcs r3, r6, r3
+ ldr r6, [sp, #48] @ 4-byte Reload
+ adcs lr, r9, r6
+ ldr r6, [sp, #44] @ 4-byte Reload
+ adcs r9, r1, r6
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r11, r0, r1
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adc r12, r0, #0
+ subs r5, r2, r8
+ sbcs r4, r3, r10
+ sbcs r0, lr, r7
+ sbcs r6, r9, r1
+ ldr r1, [sp, #76] @ 4-byte Reload
+ sbcs r1, r11, r1
+ sbc r7, r12, #0
+ ands r7, r7, #1
+ movne r5, r2
+ ldr r2, [sp, #60] @ 4-byte Reload
+ movne r4, r3
+ movne r0, lr
+ cmp r7, #0
+ movne r6, r9
+ movne r1, r11
+ str r5, [r2]
+ str r4, [r2, #4]
+ str r0, [r2, #8]
+ str r6, [r2, #12]
+ str r1, [r2, #16]
+ add sp, sp, #84
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end70:
+ .size mcl_fp_montRed5L, .Lfunc_end70-mcl_fp_montRed5L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_addPre5L
+ .align 2
+ .type mcl_fp_addPre5L,%function
+mcl_fp_addPre5L: @ @mcl_fp_addPre5L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, lr}
+ push {r4, r5, r6, r7, r8, lr}
+ ldm r2, {r3, r12, lr}
+ ldr r4, [r2, #12]
+ ldr r8, [r2, #16]
+ ldm r1, {r5, r6, r7}
+ ldr r2, [r1, #12]
+ ldr r1, [r1, #16]
+ adds r3, r3, r5
+ adcs r6, r12, r6
+ adcs r7, lr, r7
+ adcs r2, r4, r2
+ stm r0, {r3, r6, r7}
+ adcs r1, r8, r1
+ str r2, [r0, #12]
+ str r1, [r0, #16]
+ mov r0, #0
+ adc r0, r0, #0
+ pop {r4, r5, r6, r7, r8, lr}
+ mov pc, lr
+.Lfunc_end71:
+ .size mcl_fp_addPre5L, .Lfunc_end71-mcl_fp_addPre5L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_subPre5L
+ .align 2
+ .type mcl_fp_subPre5L,%function
+mcl_fp_subPre5L: @ @mcl_fp_subPre5L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, lr}
+ push {r4, r5, r6, r7, r8, lr}
+ ldm r2, {r3, r12, lr}
+ ldr r4, [r2, #12]
+ ldr r8, [r2, #16]
+ ldm r1, {r5, r6, r7}
+ ldr r2, [r1, #12]
+ ldr r1, [r1, #16]
+ subs r3, r5, r3
+ sbcs r6, r6, r12
+ sbcs r7, r7, lr
+ sbcs r2, r2, r4
+ stm r0, {r3, r6, r7}
+ sbcs r1, r1, r8
+ str r2, [r0, #12]
+ str r1, [r0, #16]
+ mov r0, #0
+ sbc r0, r0, #0
+ and r0, r0, #1
+ pop {r4, r5, r6, r7, r8, lr}
+ mov pc, lr
+.Lfunc_end72:
+ .size mcl_fp_subPre5L, .Lfunc_end72-mcl_fp_subPre5L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_shr1_5L
+ .align 2
+ .type mcl_fp_shr1_5L,%function
+mcl_fp_shr1_5L: @ @mcl_fp_shr1_5L
+ .fnstart
+@ BB#0:
+ .save {r4, lr}
+ push {r4, lr}
+ ldr r3, [r1, #4]
+ ldr r12, [r1]
+ ldr lr, [r1, #12]
+ ldr r2, [r1, #8]
+ ldr r1, [r1, #16]
+ lsrs r4, r3, #1
+ lsr r3, r3, #1
+ rrx r12, r12
+ lsrs r4, lr, #1
+ orr r3, r3, r2, lsl #31
+ lsr r4, lr, #1
+ rrx r2, r2
+ str r12, [r0]
+ str r3, [r0, #4]
+ orr r4, r4, r1, lsl #31
+ lsr r1, r1, #1
+ str r2, [r0, #8]
+ str r4, [r0, #12]
+ str r1, [r0, #16]
+ pop {r4, lr}
+ mov pc, lr
+.Lfunc_end73:
+ .size mcl_fp_shr1_5L, .Lfunc_end73-mcl_fp_shr1_5L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_add5L
+ .align 2
+ .type mcl_fp_add5L,%function
+mcl_fp_add5L: @ @mcl_fp_add5L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r11, lr}
+ ldm r2, {r12, lr}
+ ldr r9, [r2, #8]
+ ldr r5, [r2, #12]
+ ldr r8, [r2, #16]
+ ldm r1, {r6, r7}
+ ldr r2, [r1, #8]
+ ldr r4, [r1, #12]
+ ldr r1, [r1, #16]
+ adds r6, r12, r6
+ adcs r7, lr, r7
+ adcs r2, r9, r2
+ stm r0, {r6, r7}
+ adcs r5, r5, r4
+ mov r4, #0
+ str r2, [r0, #8]
+ adcs r1, r8, r1
+ str r5, [r0, #12]
+ str r1, [r0, #16]
+ adc r8, r4, #0
+ ldm r3, {r4, r12, lr}
+ ldr r9, [r3, #12]
+ ldr r3, [r3, #16]
+ subs r6, r6, r4
+ sbcs r7, r7, r12
+ sbcs r2, r2, lr
+ sbcs r12, r5, r9
+ sbcs lr, r1, r3
+ sbc r1, r8, #0
+ tst r1, #1
+ stmeq r0!, {r6, r7}
+ stmeq r0, {r2, r12, lr}
+ pop {r4, r5, r6, r7, r8, r9, r11, lr}
+ mov pc, lr
+.Lfunc_end74:
+ .size mcl_fp_add5L, .Lfunc_end74-mcl_fp_add5L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_addNF5L
+ .align 2
+ .type mcl_fp_addNF5L,%function
+mcl_fp_addNF5L: @ @mcl_fp_addNF5L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, lr}
+ ldm r1, {r12, lr}
+ ldr r9, [r1, #8]
+ ldr r5, [r1, #12]
+ ldr r8, [r1, #16]
+ ldm r2, {r6, r7}
+ ldr r1, [r2, #8]
+ ldr r4, [r2, #12]
+ ldr r2, [r2, #16]
+ adds r6, r6, r12
+ adcs r10, r7, lr
+ adcs r9, r1, r9
+ adcs lr, r4, r5
+ ldr r4, [r3]
+ adc r12, r2, r8
+ ldmib r3, {r2, r5}
+ ldr r1, [r3, #12]
+ ldr r3, [r3, #16]
+ subs r4, r6, r4
+ sbcs r2, r10, r2
+ sbcs r5, r9, r5
+ sbcs r1, lr, r1
+ sbc r3, r12, r3
+ asr r7, r3, #31
+ cmp r7, #0
+ movlt r4, r6
+ movlt r2, r10
+ movlt r5, r9
+ cmp r7, #0
+ movlt r1, lr
+ movlt r3, r12
+ str r4, [r0]
+ str r2, [r0, #4]
+ str r5, [r0, #8]
+ str r1, [r0, #12]
+ str r3, [r0, #16]
+ pop {r4, r5, r6, r7, r8, r9, r10, lr}
+ mov pc, lr
+.Lfunc_end75:
+ .size mcl_fp_addNF5L, .Lfunc_end75-mcl_fp_addNF5L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_sub5L
+ .align 2
+ .type mcl_fp_sub5L,%function
+mcl_fp_sub5L: @ @mcl_fp_sub5L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r11, lr}
+ ldm r2, {r8, r12, lr}
+ ldr r9, [r2, #12]
+ ldr r6, [r2, #16]
+ ldm r1, {r2, r7}
+ ldr r4, [r1, #8]
+ ldr r5, [r1, #12]
+ ldr r1, [r1, #16]
+ subs r8, r2, r8
+ sbcs r2, r7, r12
+ str r8, [r0]
+ sbcs r12, r4, lr
+ sbcs lr, r5, r9
+ sbcs r4, r1, r6
+ mov r1, #0
+ stmib r0, {r2, r12, lr}
+ sbc r1, r1, #0
+ str r4, [r0, #16]
+ tst r1, #1
+ popeq {r4, r5, r6, r7, r8, r9, r11, lr}
+ moveq pc, lr
+ ldm r3, {r1, r5, r6, r7}
+ ldr r3, [r3, #16]
+ adds r1, r1, r8
+ adcs r2, r5, r2
+ adcs r6, r6, r12
+ adcs r7, r7, lr
+ adc r3, r3, r4
+ stm r0, {r1, r2, r6, r7}
+ str r3, [r0, #16]
+ pop {r4, r5, r6, r7, r8, r9, r11, lr}
+ mov pc, lr
+.Lfunc_end76:
+ .size mcl_fp_sub5L, .Lfunc_end76-mcl_fp_sub5L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_subNF5L
+ .align 2
+ .type mcl_fp_subNF5L,%function
+mcl_fp_subNF5L: @ @mcl_fp_subNF5L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ ldm r2, {r12, lr}
+ ldr r9, [r2, #8]
+ ldr r5, [r2, #12]
+ ldr r8, [r2, #16]
+ ldm r1, {r6, r7}
+ ldr r2, [r1, #8]
+ ldr r4, [r1, #12]
+ ldr r1, [r1, #16]
+ subs r11, r6, r12
+ sbcs r10, r7, lr
+ sbcs lr, r2, r9
+ add r9, r3, #8
+ sbcs r12, r4, r5
+ ldm r3, {r4, r5}
+ sbc r1, r1, r8
+ ldm r9, {r2, r8, r9}
+ asr r6, r1, #31
+ adds r4, r11, r4
+ adcs r5, r10, r5
+ adcs r2, lr, r2
+ adcs r3, r12, r8
+ adc r7, r1, r9
+ cmp r6, #0
+ movge r4, r11
+ movge r5, r10
+ movge r2, lr
+ cmp r6, #0
+ movge r3, r12
+ movge r7, r1
+ str r4, [r0]
+ str r5, [r0, #4]
+ str r2, [r0, #8]
+ str r3, [r0, #12]
+ str r7, [r0, #16]
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end77:
+ .size mcl_fp_subNF5L, .Lfunc_end77-mcl_fp_subNF5L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_add5L
+ .align 2
+ .type mcl_fpDbl_add5L,%function
+mcl_fpDbl_add5L: @ @mcl_fpDbl_add5L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #20
+ sub sp, sp, #20
+ ldr r12, [r1]
+ ldr r9, [r1, #4]
+ ldr r8, [r1, #8]
+ ldr r10, [r1, #12]
+ ldmib r2, {r6, r7}
+ ldr r5, [r2, #16]
+ ldr r11, [r2]
+ ldr r4, [r2, #12]
+ str r5, [sp] @ 4-byte Spill
+ ldr r5, [r2, #20]
+ adds lr, r11, r12
+ ldr r11, [r2, #32]
+ add r12, r1, #16
+ adcs r6, r6, r9
+ add r9, r1, #28
+ adcs r7, r7, r8
+ str r5, [sp, #4] @ 4-byte Spill
+ ldr r5, [r2, #24]
+ str r5, [sp, #12] @ 4-byte Spill
+ ldr r5, [r2, #28]
+ ldr r2, [r2, #36]
+ str r5, [sp, #16] @ 4-byte Spill
+ str r2, [sp, #8] @ 4-byte Spill
+ adcs r5, r4, r10
+ ldm r9, {r4, r8, r9}
+ ldm r12, {r1, r2, r12}
+ str lr, [r0]
+ stmib r0, {r6, r7}
+ ldr r7, [sp] @ 4-byte Reload
+ str r5, [r0, #12]
+ adcs r1, r7, r1
+ ldr r7, [sp, #4] @ 4-byte Reload
+ str r1, [r0, #16]
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r2, r7, r2
+ mov r7, #0
+ adcs r12, r1, r12
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r10, r1, r4
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r8, r11, r8
+ adcs lr, r1, r9
+ adc r1, r7, #0
+ ldr r7, [r3]
+ ldmib r3, {r4, r5, r6}
+ ldr r3, [r3, #16]
+ subs r7, r2, r7
+ sbcs r4, r12, r4
+ sbcs r5, r10, r5
+ sbcs r6, r8, r6
+ sbcs r3, lr, r3
+ sbc r1, r1, #0
+ ands r1, r1, #1
+ movne r7, r2
+ movne r4, r12
+ movne r5, r10
+ cmp r1, #0
+ movne r6, r8
+ movne r3, lr
+ str r7, [r0, #20]
+ str r4, [r0, #24]
+ str r5, [r0, #28]
+ str r6, [r0, #32]
+ str r3, [r0, #36]
+ add sp, sp, #20
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end78:
+ .size mcl_fpDbl_add5L, .Lfunc_end78-mcl_fpDbl_add5L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_sub5L
+ .align 2
+ .type mcl_fpDbl_sub5L,%function
+mcl_fpDbl_sub5L: @ @mcl_fpDbl_sub5L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #32
+ sub sp, sp, #32
+ ldr r7, [r2, #32]
+ add r8, r1, #12
+ str r7, [sp, #24] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r7, [sp, #28] @ 4-byte Spill
+ ldr r7, [r1, #32]
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r1, #36]
+ str r7, [sp, #20] @ 4-byte Spill
+ ldmib r2, {r9, r10, r11}
+ ldr r7, [r2, #16]
+ str r7, [sp] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ str r7, [sp, #4] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ str r7, [sp, #8] @ 4-byte Spill
+ ldr r7, [r2, #28]
+ ldr r2, [r2]
+ str r7, [sp, #12] @ 4-byte Spill
+ ldm r8, {r4, r5, r6, r7, r8}
+ ldm r1, {r1, r12, lr}
+ subs r1, r1, r2
+ sbcs r2, r12, r9
+ stm r0, {r1, r2}
+ sbcs r1, lr, r10
+ str r1, [r0, #8]
+ sbcs r1, r4, r11
+ ldr r2, [sp, #4] @ 4-byte Reload
+ str r1, [r0, #12]
+ ldr r1, [sp] @ 4-byte Reload
+ sbcs r1, r5, r1
+ ldr r5, [sp, #16] @ 4-byte Reload
+ sbcs r2, r6, r2
+ ldr r6, [sp, #8] @ 4-byte Reload
+ str r1, [r0, #16]
+ mov r1, #0
+ sbcs r7, r7, r6
+ ldr r6, [sp, #12] @ 4-byte Reload
+ sbcs r9, r8, r6
+ ldr r6, [sp, #24] @ 4-byte Reload
+ sbcs r8, r5, r6
+ ldr r6, [sp, #28] @ 4-byte Reload
+ ldr r5, [sp, #20] @ 4-byte Reload
+ sbcs lr, r5, r6
+ sbc r12, r1, #0
+ ldm r3, {r1, r4, r5, r6}
+ ldr r3, [r3, #16]
+ adds r1, r2, r1
+ adcs r4, r7, r4
+ adcs r5, r9, r5
+ adcs r6, r8, r6
+ adc r3, lr, r3
+ ands r12, r12, #1
+ moveq r1, r2
+ moveq r4, r7
+ moveq r5, r9
+ cmp r12, #0
+ moveq r6, r8
+ moveq r3, lr
+ str r1, [r0, #20]
+ str r4, [r0, #24]
+ str r5, [r0, #28]
+ str r6, [r0, #32]
+ str r3, [r0, #36]
+ add sp, sp, #32
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end79:
+ .size mcl_fpDbl_sub5L, .Lfunc_end79-mcl_fpDbl_sub5L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mulUnitPre6L
+ .align 2
+ .type mcl_fp_mulUnitPre6L,%function
+mcl_fp_mulUnitPre6L: @ @mcl_fp_mulUnitPre6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ ldr r12, [r1]
+ ldmib r1, {r3, lr}
+ ldr r11, [r1, #12]
+ ldr r9, [r1, #16]
+ ldr r8, [r1, #20]
+ umull r4, r10, lr, r2
+ umull r1, r7, r12, r2
+ mov r5, r7
+ mov r6, r4
+ umlal r5, r6, r3, r2
+ stm r0, {r1, r5, r6}
+ umull r5, r6, r3, r2
+ umull r1, r12, r11, r2
+ adds r3, r7, r5
+ adcs r3, r6, r4
+ adcs r1, r10, r1
+ str r1, [r0, #12]
+ umull r1, r3, r9, r2
+ adcs r1, r12, r1
+ str r1, [r0, #16]
+ umull r1, r7, r8, r2
+ adcs r1, r3, r1
+ str r1, [r0, #20]
+ adc r1, r7, #0
+ str r1, [r0, #24]
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end80:
+ .size mcl_fp_mulUnitPre6L, .Lfunc_end80-mcl_fp_mulUnitPre6L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_mulPre6L
+ .align 2
+ .type mcl_fpDbl_mulPre6L,%function
+mcl_fpDbl_mulPre6L: @ @mcl_fpDbl_mulPre6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #48
+ sub sp, sp, #48
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r3, [r2]
+ ldm r1, {r12, lr}
+ ldr r2, [r1, #8]
+ mov r8, r0
+ ldr r10, [r1, #12]
+ umull r0, r4, r12, r3
+ umull r6, r7, lr, r3
+ str r2, [sp, #24] @ 4-byte Spill
+ adds r6, r4, r6
+ str r0, [sp, #32] @ 4-byte Spill
+ umull r5, r6, r2, r3
+ adcs r7, r7, r5
+ umlal r4, r5, lr, r3
+ umull r7, r11, r10, r3
+ adcs r0, r6, r7
+ ldr r7, [r1, #16]
+ str r0, [sp, #40] @ 4-byte Spill
+ umull r6, r0, r7, r3
+ adcs r2, r11, r6
+ ldr r6, [r1, #20]
+ str r2, [sp, #36] @ 4-byte Spill
+ umull r11, r2, r6, r3
+ adcs r0, r0, r11
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r2, r2, #0
+ str r2, [sp, #12] @ 4-byte Spill
+ str r0, [r8]
+ ldr r0, [sp, #44] @ 4-byte Reload
+ ldr r3, [r0, #4]
+ umull r11, r9, r12, r3
+ adds r2, r11, r4
+ umull r4, r11, lr, r3
+ str r9, [sp, #28] @ 4-byte Spill
+ adcs lr, r4, r5
+ ldr r5, [sp, #24] @ 4-byte Reload
+ str r2, [sp, #32] @ 4-byte Spill
+ umull r4, r2, r10, r3
+ str r2, [sp, #20] @ 4-byte Spill
+ umull r2, r10, r5, r3
+ ldr r5, [sp, #40] @ 4-byte Reload
+ adcs r2, r2, r5
+ ldr r5, [sp, #36] @ 4-byte Reload
+ adcs r4, r4, r5
+ umull r5, r9, r7, r3
+ ldr r7, [sp, #16] @ 4-byte Reload
+ adcs r5, r5, r7
+ umull r7, r12, r6, r3
+ ldr r3, [sp, #12] @ 4-byte Reload
+ adcs r7, r7, r3
+ mov r3, #0
+ adc r6, r3, #0
+ ldr r3, [sp, #28] @ 4-byte Reload
+ adds r3, lr, r3
+ adcs r2, r2, r11
+ adcs lr, r4, r10
+ ldr r4, [sp, #20] @ 4-byte Reload
+ adcs r10, r5, r4
+ ldr r4, [r1, #8]
+ adcs r11, r7, r9
+ ldr r9, [r1, #4]
+ adc r7, r6, r12
+ ldr r6, [r0, #8]
+ ldr r0, [r1]
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [sp, #32] @ 4-byte Reload
+ str r9, [sp, #8] @ 4-byte Spill
+ umull r12, r5, r0, r6
+ str r0, [sp, #40] @ 4-byte Spill
+ adds r0, r12, r3
+ str r7, [r8, #4]
+ ldr r7, [r1, #12]
+ ldr r12, [r1, #20]
+ str r5, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #32] @ 4-byte Spill
+ umull r3, r0, r9, r6
+ str r0, [sp, #24] @ 4-byte Spill
+ adcs r0, r3, r2
+ str r0, [sp, #12] @ 4-byte Spill
+ umull r3, r0, r4, r6
+ str r0, [sp, #20] @ 4-byte Spill
+ adcs r0, r3, lr
+ ldr lr, [r1, #16]
+ ldr r9, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #4] @ 4-byte Spill
+ umull r2, r0, r7, r6
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r2, r2, r10
+ umull r10, r5, lr, r6
+ adcs r10, r10, r11
+ umull r11, r3, r12, r6
+ adcs r6, r11, r0
+ mov r0, #0
+ adc r11, r0, #0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adds r0, r9, r0
+ ldr r9, [sp, #4] @ 4-byte Reload
+ str r0, [sp] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r9, r2, r0
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r10, r10, r0
+ adcs r0, r6, r5
+ ldr r5, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ adc r0, r11, r3
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ str r0, [r8, #8]
+ ldr r0, [sp, #44] @ 4-byte Reload
+ ldr r6, [r0, #12]
+ umull r11, r3, r7, r6
+ str r3, [sp, #36] @ 4-byte Spill
+ umull r7, r3, r4, r6
+ str r3, [sp, #32] @ 4-byte Spill
+ umull r4, r3, r5, r6
+ str r3, [sp, #20] @ 4-byte Spill
+ ldr r3, [sp, #40] @ 4-byte Reload
+ umull r5, r2, r3, r6
+ ldr r3, [sp] @ 4-byte Reload
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [sp, #28] @ 4-byte Reload
+ adds r3, r5, r3
+ str r3, [sp, #40] @ 4-byte Spill
+ ldr r3, [sp, #12] @ 4-byte Reload
+ adcs r4, r4, r3
+ ldr r3, [sp, #24] @ 4-byte Reload
+ adcs r7, r7, r9
+ adcs r9, r11, r10
+ umull r5, r11, lr, r6
+ adcs r3, r5, r3
+ umull r5, r10, r12, r6
+ mov r6, #0
+ adcs r2, r5, r2
+ adc r5, r6, #0
+ ldr r6, [sp, #16] @ 4-byte Reload
+ adds r12, r4, r6
+ ldr r4, [sp, #20] @ 4-byte Reload
+ adcs lr, r7, r4
+ ldr r4, [sp, #32] @ 4-byte Reload
+ ldr r7, [sp, #36] @ 4-byte Reload
+ adcs r9, r9, r4
+ adcs r3, r3, r7
+ adcs r2, r2, r11
+ str r3, [sp, #20] @ 4-byte Spill
+ str r2, [sp, #28] @ 4-byte Spill
+ adc r2, r5, r10
+ ldr r5, [r0, #16]
+ str r2, [sp, #36] @ 4-byte Spill
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r2, [r8, #12]
+ ldr r2, [r1]
+ str r2, [sp, #40] @ 4-byte Spill
+ ldmib r1, {r0, r6}
+ umull r7, r4, r2, r5
+ ldr r3, [r1, #12]
+ adds r2, r7, r12
+ str r4, [sp, #24] @ 4-byte Spill
+ str r2, [sp, #32] @ 4-byte Spill
+ umull r7, r2, r0, r5
+ str r2, [sp, #16] @ 4-byte Spill
+ adcs r2, r7, lr
+ str r2, [sp, #4] @ 4-byte Spill
+ umull r4, r2, r6, r5
+ str r2, [sp, #12] @ 4-byte Spill
+ adcs r2, r4, r9
+ ldr r4, [sp, #28] @ 4-byte Reload
+ ldr r9, [sp, #4] @ 4-byte Reload
+ str r2, [sp] @ 4-byte Spill
+ umull r7, r2, r3, r5
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [sp, #20] @ 4-byte Reload
+ adcs r7, r7, r2
+ ldr r2, [r1, #16]
+ ldr r1, [r1, #20]
+ umull r10, lr, r2, r5
+ umull r11, r12, r1, r5
+ adcs r10, r10, r4
+ ldr r4, [sp, #36] @ 4-byte Reload
+ adcs r5, r11, r4
+ mov r4, #0
+ adc r11, r4, #0
+ ldr r4, [sp, #24] @ 4-byte Reload
+ adds r4, r9, r4
+ ldr r9, [sp] @ 4-byte Reload
+ str r4, [sp, #4] @ 4-byte Spill
+ ldr r4, [sp, #16] @ 4-byte Reload
+ adcs r4, r9, r4
+ str r4, [sp, #24] @ 4-byte Spill
+ ldr r4, [sp, #12] @ 4-byte Reload
+ adcs r4, r7, r4
+ str r4, [sp, #20] @ 4-byte Spill
+ ldr r4, [sp, #8] @ 4-byte Reload
+ adcs r10, r10, r4
+ adcs lr, r5, lr
+ ldr r5, [sp, #44] @ 4-byte Reload
+ adc r7, r11, r12
+ str r7, [sp, #28] @ 4-byte Spill
+ ldr r7, [sp, #32] @ 4-byte Reload
+ ldr r5, [r5, #20]
+ str r7, [r8, #16]
+ umull r11, r7, r3, r5
+ str r7, [sp, #44] @ 4-byte Spill
+ umull r3, r7, r6, r5
+ umull r6, r12, r0, r5
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [sp, #40] @ 4-byte Reload
+ umull r4, r0, r7, r5
+ ldr r7, [sp, #4] @ 4-byte Reload
+ adds r9, r4, r7
+ ldr r4, [sp, #24] @ 4-byte Reload
+ str r9, [r8, #20]
+ adcs r6, r6, r4
+ ldr r4, [sp, #20] @ 4-byte Reload
+ adcs r3, r3, r4
+ adcs r7, r11, r10
+ umull r4, r10, r2, r5
+ adcs r2, r4, lr
+ umull r4, lr, r1, r5
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r1, r4, r1
+ mov r4, #0
+ adc r4, r4, #0
+ adds r5, r6, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r3, r3, r12
+ str r5, [r8, #24]
+ str r3, [r8, #28]
+ adcs r3, r7, r0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ str r3, [r8, #32]
+ adcs r2, r2, r0
+ adcs r1, r1, r10
+ str r2, [r8, #36]
+ str r1, [r8, #40]
+ adc r1, r4, lr
+ str r1, [r8, #44]
+ add sp, sp, #48
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end81:
+ .size mcl_fpDbl_mulPre6L, .Lfunc_end81-mcl_fpDbl_mulPre6L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_sqrPre6L
+ .align 2
+ .type mcl_fpDbl_sqrPre6L,%function
+mcl_fpDbl_sqrPre6L: @ @mcl_fpDbl_sqrPre6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #56
+ sub sp, sp, #56
+ ldm r1, {r2, r3}
+ ldr r7, [r1, #12]
+ mov lr, r0
+ ldr r0, [r1, #8]
+ ldr r9, [r1, #16]
+ ldr r12, [r1, #20]
+ umull r10, r6, r7, r2
+ str r0, [sp, #48] @ 4-byte Spill
+ umull r4, r8, r0, r2
+ umull r5, r0, r2, r2
+ str r7, [sp, #44] @ 4-byte Spill
+ str r6, [sp, #36] @ 4-byte Spill
+ umull r6, r7, r3, r2
+ str r5, [sp, #24] @ 4-byte Spill
+ adds r11, r0, r6
+ ldr r5, [sp, #36] @ 4-byte Reload
+ str r7, [sp, #52] @ 4-byte Spill
+ adcs r7, r7, r4
+ umlal r0, r4, r3, r2
+ adcs r7, r8, r10
+ str r7, [sp, #40] @ 4-byte Spill
+ umull r7, r10, r9, r2
+ adcs r7, r5, r7
+ str r7, [sp, #32] @ 4-byte Spill
+ umull r7, r8, r12, r2
+ adcs r11, r10, r7
+ adc r2, r8, #0
+ adds r0, r6, r0
+ umull r6, r10, r3, r3
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r4, r6, r4
+ str r0, [lr]
+ umull r6, r0, r12, r3
+ str r0, [sp, #36] @ 4-byte Spill
+ umull r5, r0, r9, r3
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ umull r9, r12, r0, r3
+ ldr r0, [sp, #48] @ 4-byte Reload
+ umull r7, r8, r0, r3
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r3, r7, r0
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r7, r9, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r5, r5, r11
+ adcs r6, r6, r2
+ mov r2, #0
+ adc r2, r2, #0
+ adds r4, r4, r0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r11, r3, r10
+ adcs r8, r7, r8
+ ldr r7, [r1, #4]
+ adcs r10, r5, r12
+ ldr r5, [r1, #12]
+ str r0, [lr, #4]
+ ldr r0, [sp, #24] @ 4-byte Reload
+ str r7, [sp, #16] @ 4-byte Spill
+ adcs r0, r6, r0
+ ldr r6, [r1, #8]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r2, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r1]
+ umull r3, r2, r0, r6
+ str r0, [sp, #48] @ 4-byte Spill
+ adds r0, r3, r4
+ str r2, [sp, #36] @ 4-byte Spill
+ ldr r2, [r1, #16]
+ str r0, [sp, #52] @ 4-byte Spill
+ umull r3, r0, r7, r6
+ str r0, [sp, #32] @ 4-byte Spill
+ adcs r0, r3, r11
+ ldr r3, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ umull r4, r0, r6, r6
+ str r0, [sp, #28] @ 4-byte Spill
+ adcs r0, r4, r8
+ umull r12, r4, r5, r6
+ str r0, [sp, #20] @ 4-byte Spill
+ adcs r0, r12, r10
+ ldr r10, [sp, #24] @ 4-byte Reload
+ str r4, [sp, #40] @ 4-byte Spill
+ str r0, [sp, #8] @ 4-byte Spill
+ umull r9, r0, r2, r6
+ ldr r7, [sp, #20] @ 4-byte Reload
+ str r0, [sp] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r9, r9, r0
+ ldr r0, [r1, #20]
+ umull r11, r8, r0, r6
+ adcs r6, r11, r3
+ mov r3, #0
+ adc r11, r3, #0
+ ldr r3, [sp, #36] @ 4-byte Reload
+ adds r3, r10, r3
+ str r3, [sp, #24] @ 4-byte Spill
+ ldr r3, [sp, #32] @ 4-byte Reload
+ adcs r3, r7, r3
+ ldr r7, [sp, #8] @ 4-byte Reload
+ str r3, [sp, #4] @ 4-byte Spill
+ ldr r3, [sp, #28] @ 4-byte Reload
+ adcs r3, r7, r3
+ str r3, [sp, #28] @ 4-byte Spill
+ adcs r3, r9, r4
+ ldr r4, [sp, #16] @ 4-byte Reload
+ ldr r9, [sp, #48] @ 4-byte Reload
+ str r3, [sp, #20] @ 4-byte Spill
+ ldr r3, [sp] @ 4-byte Reload
+ ldr r7, [sp, #20] @ 4-byte Reload
+ adcs r3, r6, r3
+ str r3, [sp, #12] @ 4-byte Spill
+ umull r6, r3, r0, r5
+ adc r11, r11, r8
+ str r3, [sp, #44] @ 4-byte Spill
+ umull r3, r0, r2, r5
+ str r0, [sp, #36] @ 4-byte Spill
+ umull r2, r0, r5, r5
+ str r0, [sp, #32] @ 4-byte Spill
+ umull r0, r10, r4, r5
+ umull r4, r8, r9, r5
+ ldr r5, [sp, #24] @ 4-byte Reload
+ adds r4, r4, r5
+ ldr r5, [sp, #4] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #28] @ 4-byte Reload
+ adcs r5, r12, r5
+ adcs r2, r2, r7
+ ldr r7, [sp, #12] @ 4-byte Reload
+ adcs r3, r3, r7
+ mov r7, #0
+ adcs r6, r6, r11
+ adc r7, r7, #0
+ adds r9, r0, r8
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r11, r5, r10
+ adcs r0, r2, r0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r3, r0
+ add r3, r1, #8
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r12, r6, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ str r0, [lr, #8]
+ ldr r0, [sp, #44] @ 4-byte Reload
+ str r4, [lr, #12]
+ adc r0, r7, r0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r1, {r4, r6}
+ ldm r3, {r0, r2, r3}
+ ldr r1, [r1, #20]
+ umull r5, r7, r2, r1
+ str r5, [sp, #32] @ 4-byte Spill
+ str r7, [sp, #52] @ 4-byte Spill
+ umull r5, r7, r0, r1
+ str r5, [sp, #28] @ 4-byte Spill
+ str r7, [sp, #48] @ 4-byte Spill
+ umull r5, r7, r6, r1
+ str r5, [sp, #24] @ 4-byte Spill
+ str r7, [sp, #44] @ 4-byte Spill
+ umull r5, r7, r4, r1
+ str r5, [sp, #8] @ 4-byte Spill
+ str r7, [sp, #36] @ 4-byte Spill
+ umull r7, r5, r2, r3
+ str r5, [sp, #4] @ 4-byte Spill
+ umull r2, r5, r0, r3
+ umull r0, r10, r6, r3
+ umull r6, r8, r4, r3
+ adds r4, r6, r9
+ str r5, [sp] @ 4-byte Spill
+ adcs r11, r0, r11
+ ldr r0, [sp, #20] @ 4-byte Reload
+ str r4, [sp, #40] @ 4-byte Spill
+ umull r4, r9, r3, r3
+ adcs r5, r2, r0
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r6, r7, r0
+ umull r0, r2, r1, r3
+ ldr r3, [sp, #12] @ 4-byte Reload
+ mov r7, #0
+ adcs r12, r4, r12
+ ldr r4, [sp] @ 4-byte Reload
+ adcs r3, r0, r3
+ adc r7, r7, #0
+ adds r8, r11, r8
+ adcs r5, r5, r10
+ adcs r6, r6, r4
+ ldr r4, [sp, #4] @ 4-byte Reload
+ adcs r4, r12, r4
+ adcs r3, r3, r9
+ adc r10, r7, r2
+ ldr r7, [sp, #8] @ 4-byte Reload
+ adds r12, r7, r8
+ ldr r7, [sp, #24] @ 4-byte Reload
+ adcs r9, r7, r5
+ ldr r5, [sp, #28] @ 4-byte Reload
+ ldr r7, [sp, #36] @ 4-byte Reload
+ adcs r6, r5, r6
+ ldr r5, [sp, #32] @ 4-byte Reload
+ adcs r4, r5, r4
+ adcs r0, r0, r3
+ umull r3, r8, r1, r1
+ adcs r1, r3, r10
+ mov r3, #0
+ adc r3, r3, #0
+ adds r5, r9, r7
+ ldr r7, [sp, #44] @ 4-byte Reload
+ adcs r6, r6, r7
+ ldr r7, [sp, #48] @ 4-byte Reload
+ adcs r4, r4, r7
+ ldr r7, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ adcs r1, r1, r2
+ adc r2, r3, r8
+ ldr r3, [sp, #40] @ 4-byte Reload
+ str r3, [lr, #16]
+ add r3, lr, #36
+ str r12, [lr, #20]
+ str r5, [lr, #24]
+ str r6, [lr, #28]
+ str r4, [lr, #32]
+ stm r3, {r0, r1, r2}
+ add sp, sp, #56
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end82:
+ .size mcl_fpDbl_sqrPre6L, .Lfunc_end82-mcl_fpDbl_sqrPre6L
+ .cantunwind
+ .fnend
+
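+@ mcl_fp_mont6L: Montgomery multiplication on 6 x 32-bit limbs (192 bits).
+@ r0 = result, r1 = x, r2 = y, r3 = modulus p; the word just below p
+@ ([r3, #-4]) is used as the Montgomery constant for each reduction step.
+@ Multiplication and reduction are interleaved one word of y at a time,
+@ ending with a conditional subtraction of p selected via movne.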
+ .globl mcl_fp_mont6L
+ .align 2
+ .type mcl_fp_mont6L,%function
+mcl_fp_mont6L: @ @mcl_fp_mont6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #116
+ sub sp, sp, #116
+ str r0, [sp, #56] @ 4-byte Spill
+ mov r0, r2
+ str r2, [sp, #60] @ 4-byte Spill
+ ldm r0, {r2, r6, r7}
+ ldr r0, [r0, #12]
+ ldr r5, [r3, #8]
+ ldr r9, [r3]
+ ldr r11, [r1, #8]
+ ldr lr, [r1, #12]
+ ldr r12, [r3, #4]
+ str r7, [sp, #48] @ 4-byte Spill
+ ldr r7, [r1, #4]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [r1]
+ str r5, [sp, #92] @ 4-byte Spill
+ str r9, [sp, #84] @ 4-byte Spill
+ str r11, [sp, #100] @ 4-byte Spill
+ str lr, [sp, #64] @ 4-byte Spill
+ str r12, [sp, #112] @ 4-byte Spill
+ str r7, [sp, #108] @ 4-byte Spill
+ ldr r7, [r3, #-4]
+ umull r4, r8, r0, r2
+ str r0, [sp, #88] @ 4-byte Spill
+ str r4, [sp, #44] @ 4-byte Spill
+ mul r0, r4, r7
+ str r7, [sp, #80] @ 4-byte Spill
+ ldr r7, [r1, #20]
+ ldr r1, [r1, #16]
+ umull r10, r4, r0, r5
+ str r4, [sp, #36] @ 4-byte Spill
+ umull r4, r5, r0, r9
+ str r10, [sp, #16] @ 4-byte Spill
+ mov r9, r5
+ str r5, [sp, #12] @ 4-byte Spill
+ str r4, [sp, #40] @ 4-byte Spill
+ umull r5, r4, r7, r2
+ str r7, [sp, #104] @ 4-byte Spill
+ ldr r7, [sp, #108] @ 4-byte Reload
+ str r1, [sp, #96] @ 4-byte Spill
+ umlal r9, r10, r0, r12
+ str r5, [sp, #72] @ 4-byte Spill
+ str r4, [sp, #76] @ 4-byte Spill
+ umull r5, r4, r1, r2
+ str r4, [sp, #68] @ 4-byte Spill
+ umull r1, r4, lr, r2
+ str r5, [sp, #28] @ 4-byte Spill
+ umull lr, r5, r11, r2
+ str r4, [sp, #24] @ 4-byte Spill
+ umull r11, r4, r7, r2
+ adds r7, r8, r11
+ adcs r4, r4, lr
+ ldr r7, [r3, #12]
+ adcs r1, r5, r1
+ ldr r4, [sp, #24] @ 4-byte Reload
+ ldr r5, [sp, #12] @ 4-byte Reload
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r1, r4, r1
+ ldr r4, [sp, #68] @ 4-byte Reload
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ str r7, [sp, #72] @ 4-byte Spill
+ adcs r1, r4, r1
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adc r1, r1, #0
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [r3, #20]
+ umull r11, r4, r0, r1
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [r3, #16]
+ str r4, [sp, #8] @ 4-byte Spill
+ umull r3, r4, r0, r12
+ adds r3, r5, r3
+ str r1, [sp, #68] @ 4-byte Spill
+ umull r5, r12, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r4, r4, r1
+ umull r4, r3, r0, r7
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r7, [sp, #40] @ 4-byte Reload
+ adcs r1, r0, r4
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r3, r3, r5
+ adcs r4, r12, r11
+ mov r12, #0
+ adc r5, r0, #0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ umlal r8, lr, r0, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ adds r2, r7, r2
+ adcs r2, r9, r8
+ str r2, [sp, #44] @ 4-byte Spill
+ adcs r2, r10, lr
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [sp, #32] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #88] @ 4-byte Reload
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r1, r3, r1
+ mov r3, r0
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #24] @ 4-byte Reload
+ adcs r1, r4, r1
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #20] @ 4-byte Reload
+ adcs r1, r5, r1
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #104] @ 4-byte Reload
+ adc r11, r12, #0
+ umull lr, r10, r6, r1
+ ldr r1, [sp, #96] @ 4-byte Reload
+ umull r7, r4, r6, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ umull r5, r12, r6, r1
+ umull r1, r8, r6, r0
+ umull r9, r0, r6, r2
+ adds r1, r0, r1
+ adcs r1, r8, r5
+ ldr r8, [sp, #64] @ 4-byte Reload
+ umlal r0, r5, r6, r3
+ ldr r3, [sp, #44] @ 4-byte Reload
+ umull r1, r2, r6, r8
+ adcs r1, r12, r1
+ adcs r2, r2, r7
+ adcs r12, r4, lr
+ adc r4, r10, #0
+ adds r7, r3, r9
+ ldr r3, [sp, #40] @ 4-byte Reload
+ ldr r10, [sp, #68] @ 4-byte Reload
+ adcs r9, r3, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #32] @ 4-byte Spill
+ adcs r0, r11, r4
+ str r0, [sp, #28] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ mul r0, r7, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ umull lr, r3, r0, r5
+ umull r6, r12, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ umull r11, r2, r0, r1
+ mov r1, r6
+ mov r4, r2
+ adds r2, r2, lr
+ umlal r4, r1, r0, r5
+ ldr r5, [sp, #76] @ 4-byte Reload
+ adcs r3, r3, r6
+ umull r2, lr, r0, r5
+ ldr r5, [sp, #72] @ 4-byte Reload
+ umull r3, r6, r0, r5
+ adcs r12, r12, r3
+ umull r5, r3, r0, r10
+ adcs r0, r6, r5
+ adcs r2, r3, r2
+ adc r3, lr, #0
+ adds r7, r11, r7
+ adcs r7, r4, r9
+ str r7, [sp, #20] @ 4-byte Spill
+ ldr r7, [sp, #44] @ 4-byte Reload
+ adcs r1, r1, r7
+ ldr r7, [sp, #108] @ 4-byte Reload
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r12, r1
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ umull r4, r5, r2, r8
+ ldr r8, [sp, #88] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ umull r3, r1, r2, r0
+ ldr r0, [sp, #96] @ 4-byte Reload
+ str r3, [sp, #8] @ 4-byte Spill
+ mov r3, r2
+ str r1, [sp, #16] @ 4-byte Spill
+ umull r6, r9, r2, r0
+ ldr r0, [sp, #100] @ 4-byte Reload
+ umull r1, lr, r2, r0
+ umull r11, r0, r3, r8
+ umull r2, r12, r3, r7
+ adds r2, r0, r2
+ str r11, [sp, #12] @ 4-byte Spill
+ adcs r2, r12, r1
+ umlal r0, r1, r3, r7
+ ldr r3, [sp, #20] @ 4-byte Reload
+ ldr r7, [sp, #12] @ 4-byte Reload
+ adcs r2, lr, r4
+ adcs r4, r5, r6
+ ldr r6, [sp, #8] @ 4-byte Reload
+ ldr r5, [sp, #16] @ 4-byte Reload
+ adcs r6, r9, r6
+ adc r5, r5, #0
+ adds r8, r3, r7
+ ldr r3, [sp, #44] @ 4-byte Reload
+ ldr r7, [sp, #112] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #28] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ mul r0, r8, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ umull r2, r3, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r3, [sp, #16] @ 4-byte Spill
+ umull r3, r5, r0, r1
+ mov r1, r2
+ str r3, [sp, #20] @ 4-byte Spill
+ ldr r3, [sp, #76] @ 4-byte Reload
+ mov r4, r5
+ umlal r4, r1, r0, r7
+ umull r9, r6, r0, r3
+ ldr r3, [sp, #72] @ 4-byte Reload
+ str r6, [sp, #12] @ 4-byte Spill
+ umull r6, lr, r0, r10
+ umull r12, r10, r0, r3
+ umull r11, r3, r0, r7
+ adds r0, r5, r11
+ adcs r0, r3, r2
+ ldr r3, [sp, #52] @ 4-byte Reload
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r11, r0, r12
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r10, r10, r6
+ adcs lr, lr, r9
+ adc r9, r0, #0
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adds r6, r0, r8
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r8, [sp, #88] @ 4-byte Reload
+ umull r7, r2, r3, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ str r7, [sp, #12] @ 4-byte Spill
+ str r2, [sp, #20] @ 4-byte Spill
+ umull r7, r2, r3, r0
+ ldr r0, [sp, #100] @ 4-byte Reload
+ str r2, [sp, #8] @ 4-byte Spill
+ str r7, [sp, #4] @ 4-byte Spill
+ ldr r7, [sp, #108] @ 4-byte Reload
+ umull r5, r2, r3, r0
+ str r2, [sp] @ 4-byte Spill
+ umull r2, r0, r3, r8
+ umull r6, r12, r3, r7
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [sp, #48] @ 4-byte Reload
+ adcs r4, r4, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r11, r11, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r10, r10, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r1, lr, r1
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r1, r9, r1
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #24] @ 4-byte Reload
+ adc lr, r1, #0
+ adds r6, r0, r6
+ ldr r1, [sp, #4] @ 4-byte Reload
+ adcs r2, r12, r5
+ umlal r0, r5, r3, r7
+ ldr r2, [sp] @ 4-byte Reload
+ adcs r9, r2, r1
+ ldr r1, [sp, #96] @ 4-byte Reload
+ umull r6, r2, r3, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r6, r1, r6
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r2, r2, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ adc r8, r1, #0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adds r4, r4, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ adcs r0, r11, r5
+ ldr r5, [sp, #112] @ 4-byte Reload
+ ldr r11, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r10, r9
+ ldr r10, [sp, #80] @ 4-byte Reload
+ ldr r9, [sp, #72] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, lr, r8
+ ldr r8, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ mul r0, r4, r10
+ umull r2, r12, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ umull r3, r7, r0, r1
+ mov r1, r2
+ str r3, [sp, #24] @ 4-byte Spill
+ umull lr, r3, r0, r5
+ mov r6, r7
+ adds r7, r7, lr
+ umlal r6, r1, r0, r5
+ adcs r2, r3, r2
+ umull r7, lr, r0, r11
+ umull r2, r3, r0, r9
+ adcs r12, r12, r2
+ umull r5, r2, r0, r8
+ adcs r0, r3, r5
+ adcs r2, r2, r7
+ ldr r7, [sp, #24] @ 4-byte Reload
+ adc r3, lr, #0
+ adds r7, r7, r4
+ ldr r7, [sp, #52] @ 4-byte Reload
+ adcs r7, r6, r7
+ str r7, [sp, #52] @ 4-byte Spill
+ ldr r7, [sp, #48] @ 4-byte Reload
+ adcs r1, r1, r7
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r12, r1
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r3, r0
+ ldr r3, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r4, [r0, #16]
+ ldr r0, [sp, #104] @ 4-byte Reload
+ umull r12, lr, r4, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ umull r5, r6, r4, r3
+ umull r2, r8, r4, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ umull r7, r1, r4, r0
+ str r7, [sp, #24] @ 4-byte Spill
+ ldr r7, [sp, #100] @ 4-byte Reload
+ adds r5, r1, r5
+ umull r0, r5, r4, r7
+ ldr r7, [sp, #96] @ 4-byte Reload
+ adcs r6, r6, r0
+ umlal r1, r0, r4, r3
+ ldr r3, [sp, #52] @ 4-byte Reload
+ adcs r2, r5, r2
+ umull r5, r6, r4, r7
+ ldr r4, [sp, #24] @ 4-byte Reload
+ adcs r7, r8, r5
+ adcs r6, r6, r12
+ adc r5, lr, #0
+ adds r8, r3, r4
+ ldr r3, [sp, #48] @ 4-byte Reload
+ adcs r1, r3, r1
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #32] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ mul r0, r8, r10
+ umull r5, r12, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ umull lr, r3, r0, r6
+ umull r10, r2, r0, r1
+ mov r1, r5
+ mov r4, r2
+ adds r2, r2, lr
+ adcs r3, r3, r5
+ umlal r4, r1, r0, r6
+ umull r2, lr, r0, r11
+ ldr r11, [sp, #88] @ 4-byte Reload
+ umull r3, r5, r0, r9
+ adcs r12, r12, r3
+ umull r6, r3, r0, r7
+ adcs r0, r5, r6
+ adcs r2, r3, r2
+ adc r3, lr, #0
+ adds r7, r10, r8
+ ldr r7, [sp, #52] @ 4-byte Reload
+ adcs r7, r4, r7
+ str r7, [sp, #52] @ 4-byte Spill
+ ldr r7, [sp, #48] @ 4-byte Reload
+ adcs r1, r1, r7
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r12, r1
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r3, r0
+ ldr r3, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r4, [r0, #20]
+ ldr r0, [sp, #104] @ 4-byte Reload
+ umull r9, r1, r4, r0
+ ldr r0, [sp, #96] @ 4-byte Reload
+ umull r2, r12, r4, r3
+ str r1, [sp, #60] @ 4-byte Spill
+ umull r7, r8, r4, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ umull r5, r6, r4, r0
+ ldr r0, [sp, #100] @ 4-byte Reload
+ umull r1, lr, r4, r0
+ umull r10, r0, r4, r11
+ ldr r11, [sp, #92] @ 4-byte Reload
+ adds r2, r0, r2
+ adcs r2, r12, r1
+ umlal r0, r1, r4, r3
+ ldr r3, [sp, #52] @ 4-byte Reload
+ ldr r12, [sp, #112] @ 4-byte Reload
+ adcs r2, lr, r5
+ adcs r5, r6, r7
+ ldr r6, [sp, #60] @ 4-byte Reload
+ adcs r7, r8, r9
+ ldr r9, [sp, #68] @ 4-byte Reload
+ adc r6, r6, #0
+ adds r8, r3, r10
+ ldr r3, [sp, #48] @ 4-byte Reload
+ ldr r10, [sp, #84] @ 4-byte Reload
+ adcs lr, r3, r0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #72] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #88] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ mul r0, r8, r1
+ umull r3, r4, r0, r10
+ umull r1, r2, r0, r12
+ adds r1, r4, r1
+ str r3, [sp, #80] @ 4-byte Spill
+ umull r6, r1, r0, r11
+ adcs r2, r2, r6
+ umlal r4, r6, r0, r12
+ umull r2, r3, r0, r5
+ adcs r1, r1, r2
+ str r1, [sp, #60] @ 4-byte Spill
+ umull r2, r1, r0, r9
+ adcs r2, r3, r2
+ str r2, [sp, #52] @ 4-byte Spill
+ umull r3, r2, r0, r7
+ adcs r1, r1, r3
+ ldr r3, [sp, #60] @ 4-byte Reload
+ adc r0, r2, #0
+ ldr r2, [sp, #80] @ 4-byte Reload
+ adds r2, r2, r8
+ ldr r2, [sp, #108] @ 4-byte Reload
+ adcs r12, r4, lr
+ adcs lr, r6, r2
+ ldr r2, [sp, #104] @ 4-byte Reload
+ adcs r8, r3, r2
+ ldr r2, [sp, #100] @ 4-byte Reload
+ ldr r3, [sp, #52] @ 4-byte Reload
+ adcs r6, r3, r2
+ ldr r2, [sp, #96] @ 4-byte Reload
+ adcs r3, r1, r2
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adcs r2, r0, r1
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adc r0, r0, #0
+ subs r4, r12, r10
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ sbcs r0, lr, r0
+ sbcs r1, r8, r11
+ mov r11, r6
+ sbcs r5, r6, r5
+ sbcs r6, r3, r9
+ mov r9, r2
+ sbcs r10, r2, r7
+ ldr r2, [sp, #108] @ 4-byte Reload
+ sbc r7, r2, #0
+ ldr r2, [sp, #56] @ 4-byte Reload
+ ands r7, r7, #1
+ movne r4, r12
+ movne r0, lr
+ movne r1, r8
+ cmp r7, #0
+ movne r5, r11
+ movne r6, r3
+ movne r10, r9
+ str r4, [r2]
+ str r0, [r2, #4]
+ str r1, [r2, #8]
+ str r5, [r2, #12]
+ str r6, [r2, #16]
+ str r10, [r2, #20]
+ add sp, sp, #116
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end83:
+ .size mcl_fp_mont6L, .Lfunc_end83-mcl_fp_mont6L
+ .cantunwind
+ .fnend
+
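+@ mcl_fp_montNF6L: Montgomery multiplication variant for 6 limbs. Instead of
+@ testing a carry bit, it always computes result - p and picks the reduced or
+@ unreduced value from the sign of the top word (asr ... #31 / movlt).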
+ .globl mcl_fp_montNF6L
+ .align 2
+ .type mcl_fp_montNF6L,%function
+mcl_fp_montNF6L: @ @mcl_fp_montNF6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #88
+ sub sp, sp, #88
+ str r2, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r2, {r4, r12}
+ ldr r5, [r1, #4]
+ ldr r0, [r2, #12]
+ ldr r9, [r2, #8]
+ ldr r2, [r1]
+ ldr r7, [r1, #8]
+ ldr lr, [r3, #8]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [r1, #12]
+ str r5, [sp, #44] @ 4-byte Spill
+ umull r6, r8, r5, r4
+ mov r10, r5
+ umull r11, r5, r2, r4
+ str r2, [sp, #52] @ 4-byte Spill
+ str r7, [sp, #48] @ 4-byte Spill
+ str lr, [sp, #40] @ 4-byte Spill
+ adds r6, r5, r6
+ umull r2, r6, r7, r4
+ adcs r7, r8, r2
+ umlal r5, r2, r10, r4
+ umull r7, r8, r0, r4
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r6, r7
+ ldr r6, [r1, #16]
+ str r0, [sp, #64] @ 4-byte Spill
+ umull r7, r0, r6, r4
+ str r6, [sp, #72] @ 4-byte Spill
+ ldr r6, [r3]
+ adcs r7, r8, r7
+ str r7, [sp, #60] @ 4-byte Spill
+ ldr r7, [r1, #20]
+ str r6, [sp, #80] @ 4-byte Spill
+ umull r1, r8, r7, r4
+ str r7, [sp, #76] @ 4-byte Spill
+ adcs r0, r0, r1
+ ldr r1, [r3, #-4]
+ str r0, [sp, #20] @ 4-byte Spill
+ adc r0, r8, #0
+ ldr r8, [r3, #4]
+ str r0, [sp, #16] @ 4-byte Spill
+ mul r0, r11, r1
+ str r1, [sp, #56] @ 4-byte Spill
+ umull r1, r7, r0, r6
+ str r8, [sp, #68] @ 4-byte Spill
+ adds r1, r1, r11
+ str r7, [sp, #12] @ 4-byte Spill
+ umull r1, r4, r0, r8
+ adcs r8, r1, r5
+ ldr r1, [r3, #12]
+ umull r5, r11, r0, lr
+ str r4, [sp, #8] @ 4-byte Spill
+ adcs r6, r5, r2
+ str r1, [sp, #84] @ 4-byte Spill
+ umull r5, r7, r0, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs lr, r5, r1
+ ldr r1, [r3, #16]
+ str r1, [sp, #64] @ 4-byte Spill
+ umull r5, r4, r0, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r5, r5, r1
+ ldr r1, [r3, #20]
+ umull r3, r2, r0, r1
+ ldr r0, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r3, r0
+ adc r3, r1, #0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adds r1, r8, r1
+ ldr r8, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r1, r6, r1
+ adcs r11, lr, r11
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr lr, [sp, #76] @ 4-byte Reload
+ adcs r1, r5, r7
+ ldr r5, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r1, [sp, #12] @ 4-byte Spill
+ str r0, [sp, #8] @ 4-byte Spill
+ adc r0, r3, r2
+ umull r3, r6, r12, r10
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ umull r7, r1, r12, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adds r3, r1, r3
+ umull r2, r3, r12, r0
+ adcs r6, r6, r2
+ umlal r1, r2, r12, r10
+ ldr r10, [sp, #68] @ 4-byte Reload
+ umull r6, r0, r12, r8
+ adcs r4, r3, r6
+ umull r6, r3, r12, r5
+ adcs r5, r0, r6
+ umull r6, r0, r12, lr
+ ldr r12, [sp, #60] @ 4-byte Reload
+ adcs r3, r3, r6
+ ldr r6, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r7, r7, r6
+ ldr r6, [sp, #16] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #12] @ 4-byte Reload
+ adcs r2, r2, r11
+ adcs r6, r4, r6
+ ldr r4, [sp, #8] @ 4-byte Reload
+ adcs r11, r5, r4
+ ldr r5, [sp, #4] @ 4-byte Reload
+ adcs r3, r3, r5
+ adc r0, r0, #0
+ str r3, [sp, #20] @ 4-byte Spill
+ ldr r3, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ mul r4, r7, r0
+ umull r0, r5, r4, r3
+ adds r0, r0, r7
+ str r5, [sp, #12] @ 4-byte Spill
+ umull r0, r3, r4, r10
+ ldr r5, [sp, #12] @ 4-byte Reload
+ str r3, [sp, #8] @ 4-byte Spill
+ adcs r3, r0, r1
+ ldr r0, [sp, #40] @ 4-byte Reload
+ umull r1, r7, r4, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ str r7, [sp, #4] @ 4-byte Spill
+ adcs r1, r1, r2
+ umull r2, r7, r4, r0
+ str r7, [sp] @ 4-byte Spill
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adcs r2, r2, r6
+ umull r6, r0, r4, r7
+ adcs r6, r6, r11
+ umull r7, r11, r4, r12
+ ldr r4, [sp, #20] @ 4-byte Reload
+ ldr r12, [sp, #48] @ 4-byte Reload
+ adcs r4, r7, r4
+ ldr r7, [sp, #16] @ 4-byte Reload
+ adc r7, r7, #0
+ adds r3, r3, r5
+ str r3, [sp, #20] @ 4-byte Spill
+ ldr r3, [sp, #8] @ 4-byte Reload
+ adcs r1, r1, r3
+ ldr r3, [sp, #72] @ 4-byte Reload
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #4] @ 4-byte Reload
+ adcs r1, r2, r1
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [sp] @ 4-byte Reload
+ adcs r1, r6, r1
+ adcs r0, r4, r0
+ str r1, [sp, #8] @ 4-byte Spill
+ str r0, [sp, #4] @ 4-byte Spill
+ adc r0, r7, r11
+ ldr r11, [sp, #52] @ 4-byte Reload
+ str r0, [sp] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ umull r6, r1, r9, r11
+ umull r5, r4, r9, r0
+ adds r5, r1, r5
+ umull r2, r5, r9, r12
+ adcs r4, r4, r2
+ umlal r1, r2, r9, r0
+ ldr r0, [sp, #20] @ 4-byte Reload
+ umull r4, r7, r9, r8
+ adcs r8, r5, r4
+ umull r5, r4, r9, r3
+ adcs r5, r7, r5
+ umull r7, r3, r9, lr
+ ldr lr, [sp, #60] @ 4-byte Reload
+ adcs r4, r4, r7
+ adc r3, r3, #0
+ adds r7, r6, r0
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r1, r1, r0
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r2, r2, r0
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r6, r8, r0
+ ldr r0, [sp, #4] @ 4-byte Reload
+ ldr r8, [sp, #56] @ 4-byte Reload
+ adcs r9, r5, r0
+ ldr r0, [sp] @ 4-byte Reload
+ adcs r0, r4, r0
+ mul r4, r7, r8
+ str r0, [sp, #20] @ 4-byte Spill
+ adc r0, r3, #0
+ ldr r3, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #16] @ 4-byte Spill
+ umull r0, r5, r4, r3
+ adds r0, r0, r7
+ str r5, [sp, #12] @ 4-byte Spill
+ umull r0, r3, r4, r10
+ ldr r10, [sp, #40] @ 4-byte Reload
+ ldr r5, [sp, #12] @ 4-byte Reload
+ str r3, [sp, #8] @ 4-byte Spill
+ adcs r0, r0, r1
+ umull r1, r3, r4, r10
+ str r3, [sp, #4] @ 4-byte Spill
+ ldr r3, [sp, #84] @ 4-byte Reload
+ adcs r1, r1, r2
+ umull r2, r7, r4, r3
+ ldr r3, [sp, #64] @ 4-byte Reload
+ str r7, [sp] @ 4-byte Spill
+ adcs r2, r2, r6
+ umull r6, r7, r4, r3
+ adcs r6, r6, r9
+ umull r3, r9, r4, lr
+ ldr r4, [sp, #20] @ 4-byte Reload
+ adcs r3, r3, r4
+ ldr r4, [sp, #16] @ 4-byte Reload
+ adc r4, r4, #0
+ adds r0, r0, r5
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r0, r1, r0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #4] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #8] @ 4-byte Spill
+ adcs r0, r3, r7
+ str r0, [sp, #4] @ 4-byte Spill
+ adc r0, r4, r9
+ ldr r4, [sp, #44] @ 4-byte Reload
+ str r0, [sp] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ umull r3, lr, r0, r12
+ ldr r12, [sp, #36] @ 4-byte Reload
+ umull r9, r2, r0, r11
+ umull r6, r7, r0, r4
+ mov r1, r2
+ adds r2, r2, r6
+ mov r5, r3
+ adcs r2, r7, r3
+ umlal r1, r5, r0, r4
+ umull r2, r3, r0, r12
+ adcs r11, lr, r2
+ ldr lr, [sp, #72] @ 4-byte Reload
+ ldr r2, [sp, #76] @ 4-byte Reload
+ umull r4, r6, r0, lr
+ adcs r3, r3, r4
+ umull r4, r7, r0, r2
+ ldr r0, [sp, #20] @ 4-byte Reload
+ ldr r2, [sp, #16] @ 4-byte Reload
+ adcs r4, r6, r4
+ adc r6, r7, #0
+ adds r0, r9, r0
+ ldr r9, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #12] @ 4-byte Reload
+ adcs r7, r5, r2
+ ldr r2, [sp, #8] @ 4-byte Reload
+ ldr r5, [sp, #4] @ 4-byte Reload
+ adcs r2, r11, r2
+ adcs r11, r3, r5
+ ldr r3, [sp] @ 4-byte Reload
+ adcs r3, r4, r3
+ mul r4, r0, r8
+ ldr r8, [sp, #80] @ 4-byte Reload
+ str r3, [sp, #24] @ 4-byte Spill
+ adc r3, r6, #0
+ str r3, [sp, #20] @ 4-byte Spill
+ umull r5, r3, r4, r8
+ str r3, [sp, #16] @ 4-byte Spill
+ ldr r3, [sp, #68] @ 4-byte Reload
+ adds r0, r5, r0
+ umull r0, r5, r4, r3
+ str r5, [sp, #12] @ 4-byte Spill
+ ldr r5, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ umull r1, r3, r4, r10
+ ldr r10, [sp, #60] @ 4-byte Reload
+ str r3, [sp, #8] @ 4-byte Spill
+ adcs r1, r1, r7
+ umull r7, r3, r4, r5
+ adcs r2, r7, r2
+ umull r7, r5, r4, r9
+ str r3, [sp, #4] @ 4-byte Spill
+ ldr r3, [sp, #16] @ 4-byte Reload
+ adcs r7, r7, r11
+ umull r6, r11, r4, r10
+ ldr r4, [sp, #24] @ 4-byte Reload
+ adcs r4, r6, r4
+ ldr r6, [sp, #20] @ 4-byte Reload
+ adc r6, r6, #0
+ adds r0, r0, r3
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #4] @ 4-byte Reload
+ adcs r0, r7, r0
+ ldr r7, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #12] @ 4-byte Spill
+ adcs r0, r4, r5
+ str r0, [sp, #8] @ 4-byte Spill
+ adc r0, r6, r11
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r5, [r0, #16]
+ umull r11, r2, r5, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ umull r4, r0, r5, r7
+ adds r4, r2, r4
+ umull r3, r4, r5, r1
+ adcs r0, r0, r3
+ umlal r2, r3, r5, r7
+ ldr r7, [sp, #76] @ 4-byte Reload
+ umull r0, r6, r5, r12
+ adcs r12, r4, r0
+ umull r4, r1, r5, lr
+ adcs r4, r6, r4
+ umull r6, r0, r5, r7
+ ldr r7, [sp, #24] @ 4-byte Reload
+ adcs r1, r1, r6
+ adc r0, r0, #0
+ adds r6, r11, r7
+ ldr r7, [sp, #20] @ 4-byte Reload
+ adcs r2, r2, r7
+ ldr r7, [sp, #16] @ 4-byte Reload
+ adcs r3, r3, r7
+ ldr r7, [sp, #12] @ 4-byte Reload
+ adcs r5, r12, r7
+ ldr r7, [sp, #8] @ 4-byte Reload
+ adcs r7, r4, r7
+ str r7, [sp, #12] @ 4-byte Spill
+ ldr r7, [sp, #4] @ 4-byte Reload
+ adcs r1, r1, r7
+ adc r0, r0, #0
+ str r1, [sp, #24] @ 4-byte Spill
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ mul r4, r6, r0
+ umull r0, r1, r4, r8
+ ldr r8, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adds r0, r0, r6
+ ldr r7, [sp, #16] @ 4-byte Reload
+ umull r0, r11, r4, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r2
+ umull r2, lr, r4, r8
+ adcs r2, r2, r3
+ umull r3, r12, r4, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r3, r3, r5
+ umull r5, r6, r4, r9
+ adcs r5, r5, r1
+ umull r1, r9, r4, r10
+ ldr r4, [sp, #24] @ 4-byte Reload
+ adcs r1, r1, r4
+ ldr r4, [sp, #20] @ 4-byte Reload
+ adc r4, r4, #0
+ adds r0, r0, r7
+ ldr r7, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ adcs r0, r2, r11
+ adcs r11, r3, lr
+ str r0, [sp, #20] @ 4-byte Spill
+ adcs r10, r5, r12
+ adcs r0, r1, r6
+ str r0, [sp, #16] @ 4-byte Spill
+ adc r0, r4, r9
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r5, [r0, #20]
+ ldr r0, [sp, #48] @ 4-byte Reload
+ umull r6, r1, r5, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ mov r4, r6
+ umull lr, r3, r5, r0
+ umull r12, r0, r5, r7
+ mov r2, r3
+ adds r3, r3, r12
+ umlal r2, r4, r5, r7
+ ldr r7, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r6
+ umull r0, r3, r5, r7
+ ldr r7, [sp, #76] @ 4-byte Reload
+ adcs r12, r1, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ umull r1, r6, r5, r0
+ adcs r1, r3, r1
+ umull r3, r0, r5, r7
+ ldr r5, [sp, #24] @ 4-byte Reload
+ ldr r7, [sp, #20] @ 4-byte Reload
+ adcs r3, r6, r3
+ adc r0, r0, #0
+ adds r6, lr, r5
+ ldr r5, [sp, #16] @ 4-byte Reload
+ ldr lr, [sp, #68] @ 4-byte Reload
+ adcs r2, r2, r7
+ adcs r7, r4, r11
+ adcs r9, r12, r10
+ adcs r1, r1, r5
+ ldr r5, [sp, #80] @ 4-byte Reload
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r1, r3, r1
+ adc r0, r0, #0
+ str r1, [sp, #76] @ 4-byte Spill
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ mul r4, r6, r0
+ umull r0, r1, r4, r5
+ umull r3, r11, r4, lr
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adds r0, r0, r6
+ umull r6, r0, r4, r8
+ adcs r12, r3, r2
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ adcs r10, r6, r7
+ umull r3, r0, r4, r1
+ adcs r9, r3, r9
+ ldr r3, [sp, #64] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ umull r7, r0, r4, r3
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r7, r7, r0
+ umull r6, r0, r4, r2
+ ldr r4, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r6, r6, r4
+ ldr r4, [sp, #72] @ 4-byte Reload
+ adc r4, r4, #0
+ adds r12, r12, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r11, r10, r11
+ adcs r9, r9, r0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r10, r7, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r7, r6, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r4, r0
+ subs r5, r12, r5
+ sbcs r4, r11, lr
+ mov lr, r0
+ sbcs r6, r9, r8
+ sbcs r1, r10, r1
+ sbcs r8, r7, r3
+ sbc r3, r0, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ asr r0, r3, #31
+ cmp r0, #0
+ movlt r5, r12
+ movlt r4, r11
+ movlt r6, r9
+ cmp r0, #0
+ movlt r1, r10
+ movlt r8, r7
+ movlt r3, lr
+ str r5, [r2]
+ str r4, [r2, #4]
+ str r6, [r2, #8]
+ str r1, [r2, #12]
+ str r8, [r2, #16]
+ str r3, [r2, #20]
+ add sp, sp, #88
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end84:
+ .size mcl_fp_montNF6L, .Lfunc_end84-mcl_fp_montNF6L
+ .cantunwind
+ .fnend
+
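+@ mcl_fp_montRed6L: Montgomery reduction of a double-width (12-limb) input at
+@ r1 modulo the 6-limb p at r2 (Montgomery constant at [r2, #-4]); word-by-word
+@ reduction rounds followed by a final conditional subtraction of p.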
+ .globl mcl_fp_montRed6L
+ .align 2
+ .type mcl_fp_montRed6L,%function
+mcl_fp_montRed6L: @ @mcl_fp_montRed6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #100
+ sub sp, sp, #100
+ ldr r6, [r1, #4]
+ ldr r10, [r2, #-4]
+ ldr r9, [r1]
+ ldr r3, [r2, #8]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r2]
+ ldr r8, [r2, #4]
+ str r6, [sp, #68] @ 4-byte Spill
+ ldr r6, [r1, #8]
+ mul r4, r9, r10
+ str r3, [sp, #80] @ 4-byte Spill
+ str r0, [sp, #76] @ 4-byte Spill
+ str r10, [sp, #92] @ 4-byte Spill
+ umull r12, r7, r4, r3
+ str r7, [sp, #52] @ 4-byte Spill
+ umull r7, r3, r4, r0
+ mov lr, r12
+ str r7, [sp, #56] @ 4-byte Spill
+ mov r0, r3
+ str r6, [sp, #64] @ 4-byte Spill
+ ldr r6, [r1, #12]
+ umlal r0, lr, r4, r8
+ str r6, [sp, #60] @ 4-byte Spill
+ ldr r6, [r2, #20]
+ umull r5, r7, r4, r6
+ str r6, [sp, #84] @ 4-byte Spill
+ ldr r6, [r2, #16]
+ ldr r2, [r2, #12]
+ str r5, [sp, #44] @ 4-byte Spill
+ str r7, [sp, #48] @ 4-byte Spill
+ umull r5, r7, r4, r6
+ str r6, [sp, #96] @ 4-byte Spill
+ str r2, [sp, #88] @ 4-byte Spill
+ str r7, [sp, #40] @ 4-byte Spill
+ umull r6, r7, r4, r2
+ umull r11, r2, r4, r8
+ adds r3, r3, r11
+ adcs r2, r2, r12
+ ldr r3, [sp, #40] @ 4-byte Reload
+ ldr r2, [sp, #52] @ 4-byte Reload
+ adcs r12, r2, r6
+ ldr r2, [sp, #44] @ 4-byte Reload
+ adcs r11, r7, r5
+ adcs r2, r3, r2
+ str r2, [sp, #52] @ 4-byte Spill
+ ldr r2, [sp, #48] @ 4-byte Reload
+ adc r2, r2, #0
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r2, [sp, #56] @ 4-byte Reload
+ adds r6, r9, r2
+ ldr r2, [sp, #68] @ 4-byte Reload
+ add r9, r1, #16
+ adcs r0, r2, r0
+ mul r6, r0, r10
+ ldr r10, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ umull r3, r0, r6, r10
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [r1, #32]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r1, #36]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r1, #40]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r1, #44]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r9, {r2, r4, r7, r9}
+ ldr r5, [sp, #76] @ 4-byte Reload
+ umull r0, r1, r6, r5
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ ldr lr, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ mov r12, r3
+ adcs r2, r2, r11
+ str r0, [sp, #64] @ 4-byte Spill
+ mov r0, r1
+ str r2, [sp, #60] @ 4-byte Spill
+ ldr r2, [sp, #52] @ 4-byte Reload
+ umlal r0, r12, r6, r8
+ adcs r2, r4, r2
+ ldr r4, [sp, #96] @ 4-byte Reload
+ str r2, [sp, #52] @ 4-byte Spill
+ ldr r2, [sp, #48] @ 4-byte Reload
+ adcs r2, r7, r2
+ str r2, [sp, #48] @ 4-byte Spill
+ adcs r2, r9, #0
+ umull r9, r11, r6, lr
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [sp, #40] @ 4-byte Reload
+ adcs r2, r2, #0
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [sp, #36] @ 4-byte Reload
+ adcs r2, r2, #0
+ str r2, [sp, #36] @ 4-byte Spill
+ ldr r2, [sp, #32] @ 4-byte Reload
+ adcs r2, r2, #0
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r2, [sp, #28] @ 4-byte Reload
+ adcs r2, r2, #0
+ str r2, [sp, #28] @ 4-byte Spill
+ mov r2, #0
+ adc r2, r2, #0
+ str r2, [sp, #24] @ 4-byte Spill
+ umull r7, r2, r6, r8
+ adds r1, r1, r7
+ adcs r2, r2, r3
+ ldr r3, [sp, #88] @ 4-byte Reload
+ umull r1, r7, r6, r4
+ umull r2, r4, r6, r3
+ ldr r6, [sp, #56] @ 4-byte Reload
+ adcs r2, r6, r2
+ adcs r1, r4, r1
+ ldr r4, [sp, #20] @ 4-byte Reload
+ str r2, [sp, #56] @ 4-byte Spill
+ str r1, [sp, #4] @ 4-byte Spill
+ adcs r1, r7, r9
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r1, [sp] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adc r7, r11, #0
+ adds r6, r4, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r1, r0, r1
+ ldr r0, [sp, #92] @ 4-byte Reload
+ str r1, [sp, #20] @ 4-byte Spill
+ mul r6, r1, r0
+ umull r9, r0, r6, r10
+ str r0, [sp, #8] @ 4-byte Spill
+ umull r0, r1, r6, r5
+ ldr r5, [sp, #60] @ 4-byte Reload
+ mov r4, r9
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r12, r0
+ adcs r5, r2, r5
+ ldr r2, [sp, #4] @ 4-byte Reload
+ str r0, [sp, #12] @ 4-byte Spill
+ mov r0, r1
+ str r5, [sp, #68] @ 4-byte Spill
+ ldr r5, [sp, #52] @ 4-byte Reload
+ umlal r0, r4, r6, r8
+ adcs r2, r2, r5
+ ldr r5, [sp] @ 4-byte Reload
+ str r2, [sp, #64] @ 4-byte Spill
+ ldr r2, [sp, #48] @ 4-byte Reload
+ adcs r2, r5, r2
+ umull r5, r10, r6, lr
+ str r2, [sp, #60] @ 4-byte Spill
+ ldr r2, [sp, #44] @ 4-byte Reload
+ adcs r2, r7, r2
+ umull r7, r12, r6, r8
+ str r2, [sp, #56] @ 4-byte Spill
+ ldr r2, [sp, #40] @ 4-byte Reload
+ adcs r2, r2, #0
+ str r2, [sp, #52] @ 4-byte Spill
+ ldr r2, [sp, #36] @ 4-byte Reload
+ adcs r2, r2, #0
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r2, [sp, #32] @ 4-byte Reload
+ adcs r2, r2, #0
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [sp, #28] @ 4-byte Reload
+ adcs r2, r2, #0
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [sp, #24] @ 4-byte Reload
+ adc r2, r2, #0
+ adds r1, r1, r7
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r2, [sp, #36] @ 4-byte Spill
+ umull r7, r2, r6, r3
+ ldr r3, [sp, #8] @ 4-byte Reload
+ umull r11, lr, r6, r1
+ adcs r6, r12, r9
+ adcs r3, r3, r7
+ adcs r12, r2, r11
+ str r3, [sp, #8] @ 4-byte Spill
+ ldr r3, [sp, #16] @ 4-byte Reload
+ adcs r2, lr, r5
+ ldr r5, [sp, #80] @ 4-byte Reload
+ ldr lr, [sp, #76] @ 4-byte Reload
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [sp, #20] @ 4-byte Reload
+ adc r9, r10, #0
+ adds r6, r3, r2
+ ldr r2, [sp, #12] @ 4-byte Reload
+ ldr r3, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r6, r0, r3
+ str r0, [sp, #32] @ 4-byte Spill
+ umull r11, r0, r6, r5
+ str r0, [sp, #24] @ 4-byte Spill
+ umull r0, r7, r6, lr
+ mov r10, r11
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ mov r2, r7
+ umlal r2, r10, r6, r8
+ adcs r0, r4, r0
+ ldr r4, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #4] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r12, r0
+ ldr r12, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r4, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ umull r4, r0, r6, r12
+ str r4, [sp, #12] @ 4-byte Spill
+ str r0, [sp, #36] @ 4-byte Spill
+ umull r4, r0, r6, r1
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #8] @ 4-byte Spill
+ umull r9, r0, r6, r8
+ adds r7, r7, r9
+ adcs r0, r0, r11
+ ldr r0, [sp, #24] @ 4-byte Reload
+ umull r7, r9, r6, r1
+ ldr r6, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #24] @ 4-byte Spill
+ adcs r0, r9, r4
+ ldr r4, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r7, r4, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r4, [sp, #32] @ 4-byte Reload
+ adc r11, r0, #0
+ adds r4, r6, r4
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldr r4, [sp, #20] @ 4-byte Reload
+ adcs r2, r2, r4
+ mul r4, r2, r3
+ str r2, [sp, #36] @ 4-byte Spill
+ umull r9, r2, r4, r5
+ ldr r5, [sp, #68] @ 4-byte Reload
+ str r2, [sp, #28] @ 4-byte Spill
+ umull r3, r2, r4, lr
+ mov r6, r2
+ str r3, [sp, #32] @ 4-byte Spill
+ mov r3, r9
+ umlal r6, r3, r4, r8
+ adcs r5, r10, r5
+ str r5, [sp, #68] @ 4-byte Spill
+ ldr r5, [sp, #64] @ 4-byte Reload
+ adcs r5, r0, r5
+ ldr r0, [sp, #16] @ 4-byte Reload
+ str r5, [sp, #64] @ 4-byte Spill
+ ldr r5, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ umull r7, r0, r4, r12
+ mov r12, r1
+ str r0, [sp, #24] @ 4-byte Spill
+ umull r11, r0, r4, r8
+ str r7, [sp, #20] @ 4-byte Spill
+ ldr r7, [sp, #96] @ 4-byte Reload
+ umull r1, r5, r4, r12
+ adds r2, r2, r11
+ adcs r0, r0, r9
+ ldr r2, [sp, #20] @ 4-byte Reload
+ ldr r0, [sp, #28] @ 4-byte Reload
+ umull lr, r10, r4, r7
+ ldr r4, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ adcs r1, r5, lr
+ ldr r5, [sp, #24] @ 4-byte Reload
+ adcs r2, r10, r2
+ adc lr, r5, #0
+ ldr r5, [sp, #32] @ 4-byte Reload
+ adds r4, r5, r4
+ ldr r5, [sp, #76] @ 4-byte Reload
+ ldr r4, [sp, #68] @ 4-byte Reload
+ adcs r9, r6, r4
+ ldr r4, [sp, #64] @ 4-byte Reload
+ ldr r6, [sp, #80] @ 4-byte Reload
+ adcs r3, r3, r4
+ str r3, [sp, #68] @ 4-byte Spill
+ ldr r3, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ mul r0, r9, r1
+ umull r2, r4, r0, r5
+ umull r1, r3, r0, r8
+ adds r1, r4, r1
+ str r2, [sp, #92] @ 4-byte Spill
+ umull r1, r2, r0, r6
+ adcs r3, r3, r1
+ umlal r4, r1, r0, r8
+ umull r3, lr, r0, r12
+ adcs r10, r2, r3
+ umull r3, r2, r0, r7
+ adcs r11, lr, r3
+ ldr lr, [sp, #84] @ 4-byte Reload
+ umull r7, r3, r0, lr
+ adcs r2, r2, r7
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adc r0, r3, #0
+ ldr r3, [sp, #92] @ 4-byte Reload
+ adds r3, r3, r9
+ ldr r3, [sp, #68] @ 4-byte Reload
+ adcs r3, r4, r3
+ ldr r4, [sp, #88] @ 4-byte Reload
+ adcs r12, r1, r7
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r10, r10, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r9, r11, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r7, r2, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r0, r1
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r11, r0, #0
+ subs r0, r3, r5
+ sbcs r5, r12, r8
+ mov r8, r7
+ sbcs r2, r10, r6
+ ldr r6, [sp, #96] @ 4-byte Reload
+ sbcs r4, r9, r4
+ sbcs r6, r7, r6
+ sbcs r7, r1, lr
+ mov lr, r1
+ sbc r1, r11, #0
+ ands r1, r1, #1
+ movne r0, r3
+ ldr r3, [sp, #72] @ 4-byte Reload
+ movne r5, r12
+ movne r2, r10
+ cmp r1, #0
+ movne r4, r9
+ movne r6, r8
+ movne r7, lr
+ str r0, [r3]
+ str r5, [r3, #4]
+ str r2, [r3, #8]
+ str r4, [r3, #12]
+ str r6, [r3, #16]
+ str r7, [r3, #20]
+ add sp, sp, #100
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end85:
+ .size mcl_fp_montRed6L, .Lfunc_end85-mcl_fp_montRed6L
+ .cantunwind
+ .fnend
+
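+@ mcl_fp_addPre6L: raw 6-limb addition r0 = r1 + r2 with no reduction;
+@ the final carry is returned in r0.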
+ .globl mcl_fp_addPre6L
+ .align 2
+ .type mcl_fp_addPre6L,%function
+mcl_fp_addPre6L: @ @mcl_fp_addPre6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, lr}
+ ldm r1, {r9, r12, lr}
+ ldr r10, [r1, #12]
+ ldr r5, [r1, #16]
+ ldr r8, [r1, #20]
+ ldm r2, {r6, r7}
+ add r4, r2, #8
+ ldm r4, {r1, r3, r4}
+ ldr r2, [r2, #20]
+ adds r6, r6, r9
+ adcs r7, r7, r12
+ add r12, r0, #8
+ adcs r1, r1, lr
+ stm r0, {r6, r7}
+ adcs r3, r3, r10
+ adcs r5, r4, r5
+ adcs r2, r2, r8
+ stm r12, {r1, r3, r5}
+ str r2, [r0, #20]
+ mov r0, #0
+ adc r0, r0, #0
+ pop {r4, r5, r6, r7, r8, r9, r10, lr}
+ mov pc, lr
+.Lfunc_end86:
+ .size mcl_fp_addPre6L, .Lfunc_end86-mcl_fp_addPre6L
+ .cantunwind
+ .fnend
+
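+@ mcl_fp_subPre6L: raw 6-limb subtraction r0 = r1 - r2 with no reduction;
+@ the borrow (0 or 1) is returned in r0.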
+ .globl mcl_fp_subPre6L
+ .align 2
+ .type mcl_fp_subPre6L,%function
+mcl_fp_subPre6L: @ @mcl_fp_subPre6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, lr}
+ ldm r2, {r9, r12, lr}
+ ldr r10, [r2, #12]
+ ldr r5, [r2, #16]
+ ldr r8, [r2, #20]
+ ldm r1, {r6, r7}
+ add r4, r1, #8
+ ldm r4, {r2, r3, r4}
+ ldr r1, [r1, #20]
+ subs r6, r6, r9
+ sbcs r7, r7, r12
+ add r12, r0, #8
+ sbcs r2, r2, lr
+ stm r0, {r6, r7}
+ sbcs r3, r3, r10
+ sbcs r5, r4, r5
+ sbcs r1, r1, r8
+ stm r12, {r2, r3, r5}
+ str r1, [r0, #20]
+ mov r0, #0
+ sbc r0, r0, #0
+ and r0, r0, #1
+ pop {r4, r5, r6, r7, r8, r9, r10, lr}
+ mov pc, lr
+.Lfunc_end87:
+ .size mcl_fp_subPre6L, .Lfunc_end87-mcl_fp_subPre6L
+ .cantunwind
+ .fnend
+
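+@ mcl_fp_shr1_6L: shifts a 6-limb (192-bit) value right by one bit,
+@ propagating bits between limbs with lsrs/rrx and orr ... lsl #31.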
+ .globl mcl_fp_shr1_6L
+ .align 2
+ .type mcl_fp_shr1_6L,%function
+mcl_fp_shr1_6L: @ @mcl_fp_shr1_6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, lr}
+ push {r4, r5, r6, lr}
+ ldr r3, [r1, #4]
+ ldr r12, [r1]
+ ldr lr, [r1, #12]
+ ldr r2, [r1, #8]
+ ldr r4, [r1, #16]
+ ldr r1, [r1, #20]
+ lsrs r5, r3, #1
+ lsr r3, r3, #1
+ rrx r12, r12
+ lsrs r5, lr, #1
+ orr r6, r3, r2, lsl #31
+ lsr r5, lr, #1
+ rrx r2, r2
+ lsrs r3, r1, #1
+ lsr r1, r1, #1
+ str r12, [r0]
+ str r6, [r0, #4]
+ orr r5, r5, r4, lsl #31
+ rrx r3, r4
+ str r2, [r0, #8]
+ str r5, [r0, #12]
+ str r3, [r0, #16]
+ str r1, [r0, #20]
+ pop {r4, r5, r6, lr}
+ mov pc, lr
+.Lfunc_end88:
+ .size mcl_fp_shr1_6L, .Lfunc_end88-mcl_fp_shr1_6L
+ .cantunwind
+ .fnend
+
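+@ mcl_fp_add6L: modular addition; adds the 6-limb operands, then subtracts the
+@ modulus in r3 and keeps the reduced result only when no borrow occurred
+@ (streq / stmibeq).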
+ .globl mcl_fp_add6L
+ .align 2
+ .type mcl_fp_add6L,%function
+mcl_fp_add6L: @ @mcl_fp_add6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ ldm r1, {r9, r12, lr}
+ ldr r7, [r2]
+ ldr r10, [r1, #12]
+ ldr r11, [r1, #16]
+ ldr r8, [r1, #20]
+ ldmib r2, {r1, r4, r5, r6}
+ ldr r2, [r2, #20]
+ adds r7, r7, r9
+ adcs r12, r1, r12
+ add r1, r0, #8
+ adcs r4, r4, lr
+ stm r0, {r7, r12}
+ adcs r5, r5, r10
+ adcs r6, r6, r11
+ stm r1, {r4, r5, r6}
+ adcs r2, r2, r8
+ mov r1, #0
+ str r2, [r0, #20]
+ adc r9, r1, #0
+ ldm r3, {r1, lr}
+ ldr r10, [r3, #8]
+ ldr r11, [r3, #12]
+ ldr r8, [r3, #16]
+ ldr r3, [r3, #20]
+ subs r7, r7, r1
+ sbcs r1, r12, lr
+ sbcs r10, r4, r10
+ sbcs r12, r5, r11
+ sbcs lr, r6, r8
+ sbcs r4, r2, r3
+ sbc r2, r9, #0
+ tst r2, #1
+ streq r7, [r0]
+ stmibeq r0, {r1, r10, r12, lr}
+ streq r4, [r0, #20]
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end89:
+ .size mcl_fp_add6L, .Lfunc_end89-mcl_fp_add6L
+ .cantunwind
+ .fnend
+
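+@ mcl_fp_addNF6L: modular addition variant that always computes sum - p and
+@ selects sum or difference from the sign of the top word (movlt).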
+ .globl mcl_fp_addNF6L
+ .align 2
+ .type mcl_fp_addNF6L,%function
+mcl_fp_addNF6L: @ @mcl_fp_addNF6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ add r11, r1, #8
+ ldm r1, {r12, lr}
+ ldm r11, {r9, r10, r11}
+ ldr r7, [r2]
+ ldr r8, [r1, #20]
+ ldmib r2, {r1, r4, r5, r6}
+ ldr r2, [r2, #20]
+ adds r7, r7, r12
+ adcs r1, r1, lr
+ adcs r4, r4, r9
+ adcs r9, r5, r10
+ adcs lr, r6, r11
+ add r11, r3, #8
+ adc r12, r2, r8
+ ldm r3, {r2, r6}
+ ldm r11, {r5, r8, r10, r11}
+ subs r2, r7, r2
+ sbcs r6, r1, r6
+ sbcs r5, r4, r5
+ sbcs r3, r9, r8
+ sbcs r8, lr, r10
+ sbc r10, r12, r11
+ asr r11, r10, #31
+ cmp r11, #0
+ movlt r2, r7
+ movlt r6, r1
+ movlt r5, r4
+ cmp r11, #0
+ movlt r3, r9
+ movlt r8, lr
+ movlt r10, r12
+ str r2, [r0]
+ str r6, [r0, #4]
+ str r5, [r0, #8]
+ str r3, [r0, #12]
+ str r8, [r0, #16]
+ str r10, [r0, #20]
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end90:
+ .size mcl_fp_addNF6L, .Lfunc_end90-mcl_fp_addNF6L
+ .cantunwind
+ .fnend
+
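+@ mcl_fp_sub6L: modular subtraction; subtracts the 6-limb operands and, if a
+@ borrow occurred, adds the modulus in r3 back before storing the result.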
+ .globl mcl_fp_sub6L
+ .align 2
+ .type mcl_fp_sub6L,%function
+mcl_fp_sub6L: @ @mcl_fp_sub6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ ldr r9, [r2]
+ ldmib r2, {r8, r12, lr}
+ ldr r10, [r2, #16]
+ ldr r11, [r2, #20]
+ ldm r1, {r2, r4, r5, r6, r7}
+ ldr r1, [r1, #20]
+ subs r9, r2, r9
+ sbcs r2, r4, r8
+ str r9, [r0]
+ sbcs r12, r5, r12
+ sbcs lr, r6, lr
+ sbcs r4, r7, r10
+ stmib r0, {r2, r12, lr}
+ sbcs r5, r1, r11
+ mov r1, #0
+ str r4, [r0, #16]
+ sbc r1, r1, #0
+ str r5, [r0, #20]
+ tst r1, #1
+ popeq {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ moveq pc, lr
+ ldm r3, {r1, r6, r7, r8, r10}
+ ldr r3, [r3, #20]
+ adds r1, r1, r9
+ adcs r2, r6, r2
+ adcs r7, r7, r12
+ adcs r6, r8, lr
+ stm r0, {r1, r2, r7}
+ adcs r4, r10, r4
+ str r6, [r0, #12]
+ adc r3, r3, r5
+ str r4, [r0, #16]
+ str r3, [r0, #20]
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end91:
+ .size mcl_fp_sub6L, .Lfunc_end91-mcl_fp_sub6L
+ .cantunwind
+ .fnend
+
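+@ mcl_fp_subNF6L: modular subtraction variant that adds p to the raw
+@ difference and selects the final value from the sign of the difference
+@ (movge).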
+ .globl mcl_fp_subNF6L
+ .align 2
+ .type mcl_fp_subNF6L,%function
+mcl_fp_subNF6L: @ @mcl_fp_subNF6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ add r11, r2, #8
+ ldm r2, {r12, lr}
+ ldm r11, {r9, r10, r11}
+ ldr r7, [r1]
+ ldr r8, [r2, #20]
+ ldmib r1, {r2, r4, r5, r6}
+ ldr r1, [r1, #20]
+ subs r7, r7, r12
+ sbcs r2, r2, lr
+ sbcs r9, r4, r9
+ sbcs lr, r5, r10
+ ldr r5, [r3, #4]
+ sbcs r12, r6, r11
+ ldr r6, [r3]
+ add r11, r3, #8
+ sbc r1, r1, r8
+ ldm r11, {r4, r8, r10, r11}
+ adds r6, r7, r6
+ adcs r5, r2, r5
+ adcs r4, r9, r4
+ adcs r3, lr, r8
+ adcs r8, r12, r10
+ adc r10, r1, r11
+ asr r11, r1, #31
+ cmp r11, #0
+ movge r6, r7
+ movge r5, r2
+ movge r4, r9
+ cmp r11, #0
+ movge r3, lr
+ movge r8, r12
+ movge r10, r1
+ str r6, [r0]
+ str r5, [r0, #4]
+ str r4, [r0, #8]
+ str r3, [r0, #12]
+ str r8, [r0, #16]
+ str r10, [r0, #20]
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end92:
+ .size mcl_fp_subNF6L, .Lfunc_end92-mcl_fp_subNF6L
+ .cantunwind
+ .fnend
+
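+@ mcl_fpDbl_add6L: addition of two double-width (12-limb) values; the low six
+@ limbs are stored as-is and the high six limbs are conditionally reduced by
+@ the 6-limb modulus in r3.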
+ .globl mcl_fpDbl_add6L
+ .align 2
+ .type mcl_fpDbl_add6L,%function
+mcl_fpDbl_add6L: @ @mcl_fpDbl_add6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #32
+ sub sp, sp, #32
+ ldm r1, {r12, lr}
+ ldr r8, [r1, #8]
+ ldr r10, [r1, #12]
+ ldmib r2, {r6, r7}
+ ldr r5, [r2, #16]
+ ldr r11, [r2]
+ ldr r4, [r2, #12]
+ str r5, [sp] @ 4-byte Spill
+ ldr r5, [r2, #20]
+ adds r9, r11, r12
+ add r11, r1, #32
+ adcs r6, r6, lr
+ add lr, r1, #16
+ adcs r7, r7, r8
+ str r5, [sp, #4] @ 4-byte Spill
+ ldr r5, [r2, #24]
+ str r5, [sp, #16] @ 4-byte Spill
+ ldr r5, [r2, #28]
+ str r5, [sp, #28] @ 4-byte Spill
+ ldr r5, [r2, #32]
+ str r5, [sp, #8] @ 4-byte Spill
+ ldr r5, [r2, #36]
+ str r5, [sp, #12] @ 4-byte Spill
+ ldr r5, [r2, #40]
+ ldr r2, [r2, #44]
+ str r5, [sp, #20] @ 4-byte Spill
+ str r2, [sp, #24] @ 4-byte Spill
+ adcs r5, r4, r10
+ ldm r11, {r4, r8, r11}
+ ldr r10, [r1, #44]
+ ldm lr, {r1, r2, r12, lr}
+ str r9, [r0]
+ stmib r0, {r6, r7}
+ ldr r6, [sp] @ 4-byte Reload
+ str r5, [r0, #12]
+ ldr r5, [sp, #4] @ 4-byte Reload
+ ldr r7, [sp, #8] @ 4-byte Reload
+ adcs r1, r6, r1
+ adcs r2, r5, r2
+ str r1, [r0, #16]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r5, [r3]
+ str r2, [r0, #20]
+ ldr r2, [sp, #28] @ 4-byte Reload
+ adcs r1, r1, r12
+ adcs r2, r2, lr
+ adcs r12, r7, r4
+ ldr r7, [sp, #12] @ 4-byte Reload
+ mov r4, #0
+ adcs r9, r7, r8
+ ldr r7, [sp, #20] @ 4-byte Reload
+ adcs r8, r7, r11
+ ldr r7, [sp, #24] @ 4-byte Reload
+ adcs lr, r7, r10
+ adc r7, r4, #0
+ ldmib r3, {r4, r6, r10, r11}
+ subs r5, r1, r5
+ ldr r3, [r3, #20]
+ sbcs r4, r2, r4
+ sbcs r6, r12, r6
+ sbcs r10, r9, r10
+ sbcs r11, r8, r11
+ sbcs r3, lr, r3
+ sbc r7, r7, #0
+ ands r7, r7, #1
+ movne r5, r1
+ movne r4, r2
+ movne r6, r12
+ cmp r7, #0
+ add r1, r0, #32
+ movne r10, r9
+ movne r11, r8
+ movne r3, lr
+ str r5, [r0, #24]
+ str r4, [r0, #28]
+ stm r1, {r6, r10, r11}
+ str r3, [r0, #44]
+ add sp, sp, #32
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end93:
+ .size mcl_fpDbl_add6L, .Lfunc_end93-mcl_fpDbl_add6L
+ .cantunwind
+ .fnend
+
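+@ mcl_fpDbl_sub6L: subtraction of two double-width (12-limb) values; the low
+@ six limbs are stored as-is and, on borrow, the modulus in r3 is added back
+@ into the high six limbs.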
+ .globl mcl_fpDbl_sub6L
+ .align 2
+ .type mcl_fpDbl_sub6L,%function
+mcl_fpDbl_sub6L: @ @mcl_fpDbl_sub6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #44
+ sub sp, sp, #44
+ ldr r6, [r2, #8]
+ ldr r7, [r2, #32]
+ add r10, r1, #12
+ str r6, [sp] @ 4-byte Spill
+ ldr r6, [r2, #12]
+ str r7, [sp, #28] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r6, [sp, #4] @ 4-byte Spill
+ ldr r6, [r2, #16]
+ str r7, [sp, #32] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ str r6, [sp, #8] @ 4-byte Spill
+ ldr r6, [r2, #20]
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [r2, #44]
+ str r6, [sp, #12] @ 4-byte Spill
+ ldr r6, [r2, #24]
+ str r7, [sp, #40] @ 4-byte Spill
+ ldr r7, [r1, #44]
+ str r6, [sp, #16] @ 4-byte Spill
+ ldr r6, [r2, #28]
+ str r7, [sp, #24] @ 4-byte Spill
+ ldr r7, [r2, #4]
+ ldr r2, [r2]
+ str r6, [sp, #20] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldm r1, {r11, r12, lr}
+ ldr r6, [sp] @ 4-byte Reload
+ subs r2, r11, r2
+ ldr r11, [r1, #40]
+ sbcs r7, r12, r7
+ ldr r12, [r1, #36]
+ ldr r1, [r1, #32]
+ sbcs lr, lr, r6
+ ldr r6, [sp, #4] @ 4-byte Reload
+ stm r0, {r2, r7, lr}
+ mov lr, #0
+ ldr r2, [sp, #8] @ 4-byte Reload
+ sbcs r4, r4, r6
+ str r4, [r0, #12]
+ sbcs r2, r5, r2
+ ldr r5, [sp, #24] @ 4-byte Reload
+ str r2, [r0, #16]
+ ldr r2, [sp, #12] @ 4-byte Reload
+ sbcs r2, r8, r2
+ str r2, [r0, #20]
+ ldr r2, [sp, #16] @ 4-byte Reload
+ sbcs r7, r9, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ sbcs r6, r10, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ sbcs r1, r1, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ sbcs r10, r12, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ sbcs r9, r11, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ sbcs r8, r5, r2
+ sbc r12, lr, #0
+ ldm r3, {r2, r4, r5, lr}
+ ldr r11, [r3, #16]
+ ldr r3, [r3, #20]
+ adds r2, r7, r2
+ adcs r4, r6, r4
+ adcs r5, r1, r5
+ adcs lr, r10, lr
+ adcs r11, r9, r11
+ adc r3, r8, r3
+ ands r12, r12, #1
+ moveq r2, r7
+ moveq r4, r6
+ moveq r5, r1
+ cmp r12, #0
+ moveq lr, r10
+ moveq r11, r9
+ moveq r3, r8
+ str r2, [r0, #24]
+ str r4, [r0, #28]
+ str r5, [r0, #32]
+ str lr, [r0, #36]
+ str r11, [r0, #40]
+ str r3, [r0, #44]
+ add sp, sp, #44
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end94:
+ .size mcl_fpDbl_sub6L, .Lfunc_end94-mcl_fpDbl_sub6L
+ .cantunwind
+ .fnend
+
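+@ mcl_fp_mulUnitPre7L: multiplies the 7-limb operand at r1 by the single word
+@ in r2 and stores the 8-limb product at r0.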
+ .globl mcl_fp_mulUnitPre7L
+ .align 2
+ .type mcl_fp_mulUnitPre7L,%function
+mcl_fp_mulUnitPre7L: @ @mcl_fp_mulUnitPre7L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ ldr r12, [r1]
+ ldmib r1, {r3, lr}
+ ldr r11, [r1, #12]
+ ldr r10, [r1, #16]
+ ldr r9, [r1, #20]
+ ldr r8, [r1, #24]
+ umull r7, r1, lr, r2
+ umull lr, r4, r12, r2
+ mov r5, r4
+ mov r6, r7
+ str lr, [r0]
+ umlal r5, r6, r3, r2
+ stmib r0, {r5, r6}
+ umull r6, r5, r3, r2
+ adds r3, r4, r6
+ umull r3, r6, r11, r2
+ adcs r7, r5, r7
+ adcs r1, r1, r3
+ str r1, [r0, #12]
+ umull r1, r3, r10, r2
+ adcs r1, r6, r1
+ str r1, [r0, #16]
+ umull r1, r7, r9, r2
+ adcs r1, r3, r1
+ str r1, [r0, #20]
+ umull r1, r3, r8, r2
+ adcs r1, r7, r1
+ str r1, [r0, #24]
+ adc r1, r3, #0
+ str r1, [r0, #28]
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end95:
+ .size mcl_fp_mulUnitPre7L, .Lfunc_end95-mcl_fp_mulUnitPre7L
+ .cantunwind
+ .fnend
+
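+@ mcl_fpDbl_mulPre7L: schoolbook 7x7-limb multiplication producing a 14-limb
+@ product at r0; one word of the multiplier (r2) is processed per pass.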
+ .globl mcl_fpDbl_mulPre7L
+ .align 2
+ .type mcl_fpDbl_mulPre7L,%function
+mcl_fpDbl_mulPre7L: @ @mcl_fpDbl_mulPre7L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #68
+ sub sp, sp, #68
+ mov r3, r2
+ ldr r7, [r1]
+ ldr lr, [r1, #4]
+ mov r9, r0
+ ldr r0, [r1, #8]
+ ldr r2, [r1, #12]
+ ldr r10, [r1, #16]
+ ldr r8, [r1, #20]
+ str r3, [sp, #64] @ 4-byte Spill
+ ldr r3, [r3]
+ str r9, [sp, #60] @ 4-byte Spill
+ str r7, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #56] @ 4-byte Spill
+ str r2, [sp, #44] @ 4-byte Spill
+ umull r5, r4, r7, r3
+ umull r6, r12, lr, r3
+ adds r6, r4, r6
+ str r5, [sp, #48] @ 4-byte Spill
+ umull r5, r6, r0, r3
+ adcs r7, r12, r5
+ umlal r4, r5, lr, r3
+ umull r7, r11, r2, r3
+ adcs r0, r6, r7
+ str r0, [sp, #52] @ 4-byte Spill
+ umull r6, r0, r10, r3
+ adcs r2, r11, r6
+ umull r11, r7, r8, r3
+ ldr r6, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r2, [sp, #40] @ 4-byte Spill
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r1, #24]
+ umull r11, r12, r0, r3
+ adcs r2, r7, r11
+ str r2, [sp, #20] @ 4-byte Spill
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r2, [r9]
+ ldr r2, [sp, #64] @ 4-byte Reload
+ ldr r3, [r2, #4]
+ umull r11, r7, r6, r3
+ str r7, [sp, #32] @ 4-byte Spill
+ adc r7, r12, #0
+ str r7, [sp, #16] @ 4-byte Spill
+ adds r7, r11, r4
+ str r7, [sp, #48] @ 4-byte Spill
+ umull r4, r7, lr, r3
+ str r7, [sp, #28] @ 4-byte Spill
+ adcs r7, r4, r5
+ str r7, [sp, #12] @ 4-byte Spill
+ ldr r7, [sp, #44] @ 4-byte Reload
+ umull r4, r5, r7, r3
+ ldr r7, [sp, #56] @ 4-byte Reload
+ str r5, [sp, #24] @ 4-byte Spill
+ umull r5, r6, r7, r3
+ ldr r7, [sp, #52] @ 4-byte Reload
+ str r6, [sp, #44] @ 4-byte Spill
+ ldr r6, [sp, #20] @ 4-byte Reload
+ adcs r11, r5, r7
+ ldr r7, [sp, #40] @ 4-byte Reload
+ ldr r5, [sp, #12] @ 4-byte Reload
+ adcs lr, r4, r7
+ umull r9, r7, r10, r3
+ str r7, [sp, #56] @ 4-byte Spill
+ ldr r7, [sp, #36] @ 4-byte Reload
+ adcs r7, r9, r7
+ umull r4, r9, r8, r3
+ adcs r4, r4, r6
+ umull r6, r12, r0, r3
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r3, r6, r0
+ mov r0, #0
+ adc r6, r0, #0
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adds r8, r5, r0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r5, r11, r0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, lr, r0
+ ldr lr, [r1, #12]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r7, r7, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ str r7, [sp, #24] @ 4-byte Spill
+ adcs r7, r4, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r4, [r1, #4]
+ adcs r3, r3, r9
+ ldr r9, [r1, #8]
+ str r7, [sp, #36] @ 4-byte Spill
+ str r3, [sp, #40] @ 4-byte Spill
+ adc r3, r6, r12
+ ldr r6, [r2, #8]
+ str r3, [sp, #44] @ 4-byte Spill
+ ldr r3, [sp, #48] @ 4-byte Reload
+ str r4, [sp, #52] @ 4-byte Spill
+ str r3, [r0, #4]
+ ldr r3, [r1]
+ umull r12, r7, r3, r6
+ str r3, [sp, #56] @ 4-byte Spill
+ str r7, [sp, #32] @ 4-byte Spill
+ adds r3, r12, r8
+ umull r7, r0, r4, r6
+ ldr r12, [r1, #24]
+ str r0, [sp, #28] @ 4-byte Spill
+ adcs r2, r7, r5
+ umull r7, r0, r9, r6
+ str r3, [sp, #48] @ 4-byte Spill
+ ldr r10, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #12] @ 4-byte Spill
+ umull r5, r0, lr, r6
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r5, r0
+ ldr r5, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [r1, #16]
+ umull r11, r3, r0, r6
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ str r3, [sp, #8] @ 4-byte Spill
+ ldr r3, [r1, #20]
+ adcs r11, r11, r0
+ ldr r0, [sp, #40] @ 4-byte Reload
+ umull r8, r4, r3, r6
+ adcs r8, r8, r0
+ umull r7, r0, r12, r6
+ ldr r6, [sp, #44] @ 4-byte Reload
+ adcs r6, r7, r6
+ mov r7, #0
+ adc r7, r7, #0
+ adds r2, r2, r10
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [sp, #28] @ 4-byte Reload
+ adcs r2, r5, r2
+ ldr r5, [sp, #4] @ 4-byte Reload
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [sp, #20] @ 4-byte Reload
+ adcs r10, r5, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ adcs r11, r11, r2
+ ldr r2, [sp, #8] @ 4-byte Reload
+ adcs r2, r8, r2
+ ldr r8, [sp, #56] @ 4-byte Reload
+ str r2, [sp, #28] @ 4-byte Spill
+ adcs r2, r6, r4
+ adc r0, r7, r0
+ ldr r7, [sp, #60] @ 4-byte Reload
+ str r2, [sp, #36] @ 4-byte Spill
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ str r0, [r7, #8]
+ ldr r0, [sp, #64] @ 4-byte Reload
+ ldr r6, [r0, #12]
+ umull r2, r4, lr, r6
+ str r4, [sp, #48] @ 4-byte Spill
+ umull lr, r4, r9, r6
+ str r4, [sp, #44] @ 4-byte Spill
+ ldr r4, [sp, #52] @ 4-byte Reload
+ umull r9, r5, r4, r6
+ str r5, [sp, #32] @ 4-byte Spill
+ umull r4, r5, r8, r6
+ str r5, [sp, #52] @ 4-byte Spill
+ ldr r5, [sp] @ 4-byte Reload
+ adds r4, r4, r5
+ umull r5, r8, r3, r6
+ str r4, [sp, #56] @ 4-byte Spill
+ ldr r4, [sp, #12] @ 4-byte Reload
+ adcs r9, r9, r4
+ adcs lr, lr, r10
+ adcs r11, r2, r11
+ ldr r2, [sp, #24] @ 4-byte Reload
+ umull r4, r10, r2, r6
+ ldr r2, [sp, #28] @ 4-byte Reload
+ adcs r4, r4, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ adcs r3, r5, r2
+ umull r5, r2, r12, r6
+ ldr r6, [sp, #40] @ 4-byte Reload
+ adcs r12, r5, r6
+ ldr r6, [sp, #52] @ 4-byte Reload
+ mov r5, #0
+ adc r5, r5, #0
+ adds r9, r9, r6
+ ldr r6, [sp, #32] @ 4-byte Reload
+ adcs lr, lr, r6
+ ldr r6, [sp, #44] @ 4-byte Reload
+ adcs r6, r11, r6
+ ldr r11, [r1, #8]
+ str r6, [sp, #20] @ 4-byte Spill
+ ldr r6, [sp, #48] @ 4-byte Reload
+ adcs r4, r4, r6
+ adcs r3, r3, r10
+ str r4, [sp, #28] @ 4-byte Spill
+ ldr r4, [r1, #12]
+ adcs r12, r12, r8
+ str r3, [sp, #40] @ 4-byte Spill
+ adc r2, r5, r2
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r2, [r7, #12]
+ ldr r7, [r0, #16]
+ ldr r0, [r1]
+ ldr r2, [r1, #4]
+ umull r8, r3, r0, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ str r2, [sp, #52] @ 4-byte Spill
+ adds r0, r8, r9
+ str r3, [sp, #36] @ 4-byte Spill
+ str r0, [sp, #48] @ 4-byte Spill
+ umull r6, r0, r2, r7
+ ldr r2, [r1, #24]
+ str r0, [sp, #32] @ 4-byte Spill
+ adcs r0, r6, lr
+ ldr lr, [r1, #16]
+ str r0, [sp, #16] @ 4-byte Spill
+ umull r6, r0, r11, r7
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r6, r0
+ mov r6, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ umull r3, r0, r4, r7
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r3, r0
+ ldr r3, [r1, #20]
+ str r0, [sp, #8] @ 4-byte Spill
+ umull r10, r0, lr, r7
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ umull r9, r5, r3, r7
+ adcs r10, r10, r0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r9, r9, r12
+ umull r8, r12, r2, r7
+ adcs r7, r8, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r8, r6, #0
+ ldr r6, [sp, #16] @ 4-byte Reload
+ adds r0, r6, r0
+ ldr r6, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r6, r0
+ ldr r6, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r10, r10, r0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ adcs r0, r7, r5
+ ldr r7, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ adc r0, r8, r12
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ str r7, [r0, #16]
+ ldr r7, [sp, #64] @ 4-byte Reload
+ ldr r7, [r7, #20]
+ umull r8, r6, r4, r7
+ str r6, [sp, #48] @ 4-byte Spill
+ umull r4, r6, r11, r7
+ str r6, [sp, #40] @ 4-byte Spill
+ ldr r6, [sp, #52] @ 4-byte Reload
+ umull r11, r5, r6, r7
+ ldr r6, [sp, #56] @ 4-byte Reload
+ str r5, [sp, #28] @ 4-byte Spill
+ umull r5, r9, r6, r7
+ ldr r6, [sp, #44] @ 4-byte Reload
+ adds r6, r5, r6
+ str r6, [sp, #44] @ 4-byte Spill
+ ldr r6, [sp, #16] @ 4-byte Reload
+ adcs r11, r11, r6
+ ldr r6, [sp, #12] @ 4-byte Reload
+ adcs r12, r4, r6
+ ldr r6, [sp, #24] @ 4-byte Reload
+ adcs r10, r8, r10
+ umull r5, r8, lr, r7
+ umull r4, lr, r3, r7
+ ldr r3, [sp, #32] @ 4-byte Reload
+ adcs r5, r5, r6
+ adcs r3, r4, r3
+ umull r4, r6, r2, r7
+ ldr r2, [sp, #36] @ 4-byte Reload
+ adcs r2, r4, r2
+ mov r4, #0
+ adc r4, r4, #0
+ adds r7, r11, r9
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [sp, #28] @ 4-byte Reload
+ adcs r7, r12, r7
+ str r7, [sp, #32] @ 4-byte Spill
+ ldr r7, [sp, #40] @ 4-byte Reload
+ adcs r9, r10, r7
+ ldr r7, [sp, #48] @ 4-byte Reload
+ adcs r11, r5, r7
+ adcs r3, r3, r8
+ adcs r2, r2, lr
+ str r3, [sp, #40] @ 4-byte Spill
+ str r2, [sp, #52] @ 4-byte Spill
+ adc r2, r4, r6
+ ldr r6, [r1]
+ str r2, [sp, #56] @ 4-byte Spill
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r2, [r0, #20]
+ ldr r0, [sp, #64] @ 4-byte Reload
+ ldr r4, [r0, #24]
+ ldmib r1, {r0, r3, r5}
+ umull r12, r2, r5, r4
+ str r2, [sp, #64] @ 4-byte Spill
+ umull r5, r2, r3, r4
+ umull r3, r10, r0, r4
+ umull r0, r8, r6, r4
+ ldr r6, [r1, #16]
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [sp, #36] @ 4-byte Reload
+ adds r0, r0, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs lr, r3, r0
+ adcs r9, r5, r9
+ adcs r11, r12, r11
+ umull r0, r12, r6, r4
+ ldr r6, [r1, #20]
+ ldr r1, [r1, #24]
+ adcs r0, r0, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ umull r3, r5, r6, r4
+ umull r6, r7, r1, r4
+ ldr r1, [sp, #56] @ 4-byte Reload
+ mov r4, #0
+ adcs r3, r3, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ adcs r1, r6, r1
+ adc r4, r4, #0
+ adds r6, lr, r8
+ adcs lr, r9, r10
+ adcs r8, r11, r2
+ ldr r2, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #60] @ 4-byte Reload
+ adcs r3, r3, r12
+ adcs r1, r1, r5
+ ldr r5, [sp, #48] @ 4-byte Reload
+ adc r7, r4, r7
+ add r12, r2, #24
+ stm r12, {r5, r6, lr}
+ str r8, [r2, #36]
+ str r0, [r2, #40]
+ str r3, [r2, #44]
+ str r1, [r2, #48]
+ str r7, [r2, #52]
+ add sp, sp, #68
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end96:
+ .size mcl_fpDbl_mulPre7L, .Lfunc_end96-mcl_fpDbl_mulPre7L
+ .cantunwind
+ .fnend
+
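+@ mcl_fpDbl_sqrPre7L: full (unreduced) squaring of a 7-limb (224-bit) operand;
+@ reads 7 words from [r1] and stores the 14-word product at [r0].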
+ .globl mcl_fpDbl_sqrPre7L
+ .align 2
+ .type mcl_fpDbl_sqrPre7L,%function
+mcl_fpDbl_sqrPre7L: @ @mcl_fpDbl_sqrPre7L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #60
+ sub sp, sp, #60
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [r1, #16]
+ ldr r9, [r1, #20]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r1, {r2, r3}
+ ldr r0, [r1, #8]
+ ldr r11, [r1, #12]
+ umull r6, r7, r2, r2
+ str r0, [sp, #48] @ 4-byte Spill
+ umull r5, r4, r0, r2
+ umull r12, r0, r3, r2
+ umull r8, r10, r11, r2
+ adds lr, r7, r12
+ str r6, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #52] @ 4-byte Spill
+ adcs r6, r0, r5
+ umlal r7, r5, r3, r2
+ adcs r0, r4, r8
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ umull r4, r6, r0, r2
+ adcs r4, r10, r4
+ mov r10, r9
+ str r4, [sp, #40] @ 4-byte Spill
+ umull r4, r8, r10, r2
+ adcs r6, r6, r4
+ str r6, [sp, #28] @ 4-byte Spill
+ ldr r6, [r1, #24]
+ umull lr, r9, r6, r2
+ adcs r4, r8, lr
+ str r4, [sp, #20] @ 4-byte Spill
+ adc r4, r9, #0
+ adds r2, r12, r7
+ ldr r12, [sp, #56] @ 4-byte Reload
+ str r2, [sp, #36] @ 4-byte Spill
+ umull r2, r7, r3, r3
+ adcs r2, r2, r5
+ str r7, [sp, #16] @ 4-byte Spill
+ umull r5, r8, r11, r3
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r2, [r12]
+ umull lr, r2, r6, r3
+ str r2, [sp, #32] @ 4-byte Spill
+ umull r6, r2, r10, r3
+ str r2, [sp, #24] @ 4-byte Spill
+ umull r2, r10, r0, r3
+ ldr r0, [sp, #48] @ 4-byte Reload
+ umull r7, r9, r0, r3
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r3, r7, r0
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r7, r5, r0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ mov r5, #0
+ adcs r2, r2, r0
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r6, r6, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs lr, lr, r4
+ ldr r4, [sp, #12] @ 4-byte Reload
+ adc r5, r5, #0
+ adds r11, r4, r0
+ ldr r0, [sp, #16] @ 4-byte Reload
+ ldr r4, [r1, #4]
+ adcs r3, r3, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ str r4, [sp, #44] @ 4-byte Spill
+ adcs r7, r7, r9
+ adcs r9, r2, r8
+ ldr r2, [r1, #12]
+ str r0, [r12, #4]
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r12, r6, r10
+ adcs r10, lr, r0
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr lr, [r1, #8]
+ adc r0, r5, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r1]
+ umull r8, r5, r0, lr
+ str r0, [sp, #48] @ 4-byte Spill
+ adds r0, r8, r11
+ str r5, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #52] @ 4-byte Spill
+ umull r5, r0, r4, lr
+ ldr r4, [r1, #16]
+ str r0, [sp, #28] @ 4-byte Spill
+ adcs r0, r5, r3
+ str r0, [sp, #20] @ 4-byte Spill
+ umull r3, r0, lr, lr
+ str r0, [sp, #24] @ 4-byte Spill
+ adcs r0, r3, r7
+ ldr r3, [r1, #20]
+ ldr r7, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #16] @ 4-byte Spill
+ umull r0, r5, r2, lr
+ str r0, [sp, #12] @ 4-byte Spill
+ adcs r0, r0, r9
+ ldr r9, [sp, #20] @ 4-byte Reload
+ str r5, [sp, #36] @ 4-byte Spill
+ str r0, [sp, #4] @ 4-byte Spill
+ umull r11, r0, r4, lr
+ str r0, [sp, #8] @ 4-byte Spill
+ umull r8, r0, r3, lr
+ adcs r11, r11, r12
+ str r0, [sp] @ 4-byte Spill
+ ldr r0, [r1, #24]
+ adcs r8, r8, r10
+ umull r10, r12, r0, lr
+ adcs lr, r10, r7
+ mov r7, #0
+ adc r10, r7, #0
+ ldr r7, [sp, #32] @ 4-byte Reload
+ adds r6, r9, r7
+ ldr r7, [sp, #28] @ 4-byte Reload
+ str r6, [sp, #20] @ 4-byte Spill
+ ldr r6, [sp, #16] @ 4-byte Reload
+ adcs r6, r6, r7
+ ldr r7, [sp, #24] @ 4-byte Reload
+ str r6, [sp, #16] @ 4-byte Spill
+ ldr r6, [sp, #4] @ 4-byte Reload
+ adcs r6, r6, r7
+ adcs r11, r11, r5
+ ldr r5, [sp, #8] @ 4-byte Reload
+ adcs r5, r8, r5
+ str r5, [sp, #8] @ 4-byte Spill
+ ldr r5, [sp] @ 4-byte Reload
+ adcs r7, lr, r5
+ str r7, [sp, #4] @ 4-byte Spill
+ adc r7, r10, r12
+ ldr r10, [sp, #48] @ 4-byte Reload
+ str r7, [sp] @ 4-byte Spill
+ umull r9, r7, r0, r2
+ umull r5, r0, r3, r2
+ str r7, [sp, #40] @ 4-byte Spill
+ ldr r7, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ umull r3, r0, r4, r2
+ str r0, [sp, #28] @ 4-byte Spill
+ umull r4, r0, r2, r2
+ str r0, [sp, #24] @ 4-byte Spill
+ umull r8, lr, r10, r2
+ umull r0, r12, r7, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ mov r7, #0
+ adds r8, r8, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #12] @ 4-byte Reload
+ adcs r6, r2, r6
+ ldr r2, [sp, #8] @ 4-byte Reload
+ adcs r4, r4, r11
+ adcs r3, r3, r2
+ ldr r2, [sp, #4] @ 4-byte Reload
+ adcs r5, r5, r2
+ ldr r2, [sp] @ 4-byte Reload
+ adcs r2, r9, r2
+ adc r9, r7, #0
+ adds r0, r0, lr
+ adcs r7, r6, r12
+ ldr r6, [sp, #36] @ 4-byte Reload
+ adcs r4, r4, r6
+ ldr r6, [sp, #24] @ 4-byte Reload
+ adcs r11, r3, r6
+ ldr r3, [sp, #28] @ 4-byte Reload
+ adcs r12, r5, r3
+ ldr r3, [sp, #32] @ 4-byte Reload
+ ldr r5, [r1, #12]
+ adcs r10, r2, r3
+ ldr r2, [sp, #40] @ 4-byte Reload
+ ldr r3, [sp, #56] @ 4-byte Reload
+ adc r2, r9, r2
+ ldr r9, [r1, #4]
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r9, [sp, #16] @ 4-byte Spill
+ str r2, [r3, #8]
+ str r8, [r3, #12]
+ ldr r2, [r1]
+ ldr r3, [r1, #16]
+ ldr r8, [r1, #8]
+ umull lr, r6, r2, r3
+ str r2, [sp, #48] @ 4-byte Spill
+ str r8, [sp, #4] @ 4-byte Spill
+ adds r0, lr, r0
+ ldr lr, [r1, #24]
+ str r6, [sp, #36] @ 4-byte Spill
+ str r0, [sp, #52] @ 4-byte Spill
+ umull r0, r2, r9, r3
+ adcs r0, r0, r7
+ str r2, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #20] @ 4-byte Spill
+ umull r7, r0, r8, r3
+ str r0, [sp, #28] @ 4-byte Spill
+ adcs r0, r7, r4
+ ldr r9, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #8] @ 4-byte Spill
+ umull r7, r0, r5, r3
+ str r0, [sp, #24] @ 4-byte Spill
+ adcs r0, r7, r11
+ mov r7, #0
+ str r0, [sp] @ 4-byte Spill
+ umull r11, r0, r3, r3
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [r1, #20]
+ adcs r11, r11, r12
+ umull r12, r2, r0, r3
+ adcs r4, r12, r10
+ umull r10, r8, lr, r3
+ ldr r3, [sp, #44] @ 4-byte Reload
+ str r2, [sp, #40] @ 4-byte Spill
+ adcs r3, r10, r3
+ adc r10, r7, #0
+ ldr r7, [sp, #36] @ 4-byte Reload
+ adds r6, r9, r7
+ ldr r7, [sp, #32] @ 4-byte Reload
+ str r6, [sp, #36] @ 4-byte Spill
+ ldr r6, [sp, #8] @ 4-byte Reload
+ adcs r6, r6, r7
+ ldr r7, [sp, #28] @ 4-byte Reload
+ str r6, [sp, #20] @ 4-byte Spill
+ ldr r6, [sp] @ 4-byte Reload
+ adcs r6, r6, r7
+ ldr r7, [sp, #24] @ 4-byte Reload
+ str r6, [sp, #8] @ 4-byte Spill
+ adcs r11, r11, r7
+ ldr r7, [sp, #12] @ 4-byte Reload
+ adcs r4, r4, r7
+ adcs r2, r3, r2
+ ldr r3, [sp, #4] @ 4-byte Reload
+ str r2, [sp, #24] @ 4-byte Spill
+ umull r6, r2, r5, r0
+ adc r10, r10, r8
+ str r2, [sp, #44] @ 4-byte Spill
+ umull r5, r2, r3, r0
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r2, [sp, #16] @ 4-byte Reload
+ umull r8, r3, r2, r0
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r3, [sp, #28] @ 4-byte Spill
+ umull r3, r9, r2, r0
+ ldr r2, [sp, #36] @ 4-byte Reload
+ adds r2, r3, r2
+ ldr r3, [sp, #24] @ 4-byte Reload
+ str r2, [sp, #36] @ 4-byte Spill
+ ldr r2, [sp, #20] @ 4-byte Reload
+ adcs r7, r8, r2
+ ldr r2, [sp, #8] @ 4-byte Reload
+ adcs r5, r5, r2
+ adcs r6, r6, r11
+ adcs r2, r12, r4
+ umull r4, r8, r0, r0
+ adcs r4, r4, r3
+ umull r3, r11, lr, r0
+ adcs r0, r3, r10
+ mov r3, #0
+ adc r3, r3, #0
+ adds r7, r7, r9
+ str r7, [sp, #24] @ 4-byte Spill
+ ldr r7, [sp, #28] @ 4-byte Reload
+ adcs r9, r5, r7
+ ldr r5, [sp, #32] @ 4-byte Reload
+ adcs r6, r6, r5
+ str r6, [sp, #32] @ 4-byte Spill
+ ldr r6, [sp, #44] @ 4-byte Reload
+ adcs r10, r2, r6
+ ldr r2, [sp, #40] @ 4-byte Reload
+ adcs r12, r4, r2
+ ldr r2, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #44] @ 4-byte Spill
+ adc r0, r3, r11
+ ldr r3, [r1, #24]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ str r0, [r2, #16]
+ ldr r0, [sp, #36] @ 4-byte Reload
+ str r0, [r2, #20]
+ ldm r1, {r0, r4}
+ ldr r5, [r1, #12]
+ ldr r2, [r1, #8]
+ umull lr, r6, r5, r3
+ umull r5, r11, r2, r3
+ umull r2, r8, r4, r3
+ str r6, [sp, #52] @ 4-byte Spill
+ umull r4, r6, r0, r3
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adds r0, r4, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r9, r2, r9
+ ldr r2, [sp, #52] @ 4-byte Reload
+ adcs r4, r5, r0
+ ldr r0, [r1, #16]
+ ldr r1, [r1, #20]
+ adcs r10, lr, r10
+ umull r7, lr, r0, r3
+ adcs r0, r7, r12
+ umull r7, r12, r1, r3
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r7, r1
+ umull r7, r5, r3, r3
+ ldr r3, [sp, #48] @ 4-byte Reload
+ adcs r3, r7, r3
+ mov r7, #0
+ adc r7, r7, #0
+ adds r6, r9, r6
+ adcs r4, r4, r8
+ adcs r8, r10, r11
+ adcs r0, r0, r2
+ adcs r1, r1, lr
+ adcs r2, r3, r12
+ adc r3, r7, r5
+ ldr r7, [sp, #56] @ 4-byte Reload
+ ldr r5, [sp, #40] @ 4-byte Reload
+ add r12, r7, #40
+ str r5, [r7, #24]
+ str r6, [r7, #28]
+ str r4, [r7, #32]
+ str r8, [r7, #36]
+ stm r12, {r0, r1, r2, r3}
+ add sp, sp, #60
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end97:
+ .size mcl_fpDbl_sqrPre7L, .Lfunc_end97-mcl_fpDbl_sqrPre7L
+ .cantunwind
+ .fnend
+
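+@ mcl_fp_mont7L: 7-limb Montgomery multiplication with interleaved reduction;
+@ the inverse word is loaded from [r3, #-4] and the result is corrected by a
+@ final conditional subtraction of the modulus (the movne sequence at the end).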
+ .globl mcl_fp_mont7L
+ .align 2
+ .type mcl_fp_mont7L,%function
+mcl_fp_mont7L: @ @mcl_fp_mont7L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #124
+ sub sp, sp, #124
+ str r0, [sp, #56] @ 4-byte Spill
+ mov r0, r2
+ str r2, [sp, #60] @ 4-byte Spill
+ ldm r0, {r2, lr}
+ ldr r7, [r0, #8]
+ ldr r0, [r0, #12]
+ ldr r5, [r3, #-4]
+ ldr r6, [r3, #8]
+ ldr r9, [r3, #4]
+ ldr r11, [r1, #8]
+ ldr r12, [r1, #12]
+ str r7, [sp, #48] @ 4-byte Spill
+ ldr r7, [r1, #4]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [r1]
+ str r5, [sp, #80] @ 4-byte Spill
+ str r6, [sp, #116] @ 4-byte Spill
+ str r9, [sp, #108] @ 4-byte Spill
+ str r11, [sp, #104] @ 4-byte Spill
+ str r12, [sp, #72] @ 4-byte Spill
+ str r7, [sp, #68] @ 4-byte Spill
+ ldr r7, [r3]
+ umull r4, r8, r0, r2
+ str r0, [sp, #84] @ 4-byte Spill
+ mul r0, r4, r5
+ str r4, [sp, #44] @ 4-byte Spill
+ umull r10, r4, r0, r6
+ str r4, [sp, #32] @ 4-byte Spill
+ str r10, [sp, #8] @ 4-byte Spill
+ umull r4, r5, r0, r7
+ str r7, [sp, #112] @ 4-byte Spill
+ ldr r7, [sp, #68] @ 4-byte Reload
+ str r4, [sp, #40] @ 4-byte Spill
+ mov r4, r5
+ str r5, [sp, #4] @ 4-byte Spill
+ umlal r4, r10, r0, r9
+ str r4, [sp, #36] @ 4-byte Spill
+ ldr r4, [r1, #24]
+ umull r6, r5, r4, r2
+ str r4, [sp, #88] @ 4-byte Spill
+ ldr r4, [r1, #20]
+ ldr r1, [r1, #16]
+ str r6, [sp, #96] @ 4-byte Spill
+ str r5, [sp, #120] @ 4-byte Spill
+ umull r6, r5, r4, r2
+ str r4, [sp, #64] @ 4-byte Spill
+ umull r9, r4, r1, r2
+ str r1, [sp, #100] @ 4-byte Spill
+ str r6, [sp, #76] @ 4-byte Spill
+ str r5, [sp, #92] @ 4-byte Spill
+ str r4, [sp, #20] @ 4-byte Spill
+ umull r6, r5, r12, r2
+ umull r12, r4, r11, r2
+ umull r11, r1, r7, r2
+ adds r7, r8, r11
+ adcs r7, r1, r12
+ adcs r1, r4, r6
+ ldr r4, [sp, #20] @ 4-byte Reload
+ ldr r6, [sp, #108] @ 4-byte Reload
+ str r1, [sp, #28] @ 4-byte Spill
+ adcs r1, r5, r9
+ ldr r5, [r3, #12]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str r5, [sp, #76] @ 4-byte Spill
+ adcs r1, r4, r1
+ ldr r4, [sp, #92] @ 4-byte Reload
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #96] @ 4-byte Reload
+ adcs r1, r4, r1
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #120] @ 4-byte Reload
+ adc r1, r1, #0
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [r3, #24]
+ umull r9, r4, r0, r1
+ str r1, [sp, #96] @ 4-byte Spill
+ ldr r1, [r3, #16]
+ str r4, [sp] @ 4-byte Spill
+ ldr r4, [r3, #20]
+ umull r3, r7, r0, r6
+ ldr r6, [sp, #4] @ 4-byte Reload
+ str r1, [sp, #120] @ 4-byte Spill
+ adds r3, r6, r3
+ str r4, [sp, #92] @ 4-byte Spill
+ umull r3, r6, r0, r5
+ ldr r5, [sp, #8] @ 4-byte Reload
+ adcs r7, r7, r5
+ ldr r5, [sp, #32] @ 4-byte Reload
+ adcs r11, r5, r3
+ umull r7, r5, r0, r1
+ adcs r1, r6, r7
+ umull r7, r3, r0, r4
+ ldr r4, [sp] @ 4-byte Reload
+ ldr r6, [sp, #40] @ 4-byte Reload
+ adcs r0, r5, r7
+ ldr r5, [sp, #68] @ 4-byte Reload
+ adcs r3, r3, r9
+ adc r7, r4, #0
+ mov r4, #0
+ umlal r8, r12, r5, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ adds r2, r6, r2
+ mov r6, r5
+ ldr r2, [sp, #36] @ 4-byte Reload
+ adcs r2, r2, r8
+ str r2, [sp, #44] @ 4-byte Spill
+ adcs r2, r10, r12
+ ldr r10, [sp, #84] @ 4-byte Reload
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [sp, #28] @ 4-byte Reload
+ adcs r2, r11, r2
+ str r2, [sp, #36] @ 4-byte Spill
+ ldr r2, [sp, #24] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r3, r0
+ umull r2, r3, lr, r5
+ ldr r5, [sp, #72] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #20] @ 4-byte Spill
+ adc r0, r4, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ umull r12, r9, lr, r0
+ ldr r0, [sp, #100] @ 4-byte Reload
+ umull r8, r4, lr, r0
+ ldr r0, [sp, #104] @ 4-byte Reload
+ umull r1, r7, lr, r0
+ umull r11, r0, lr, r10
+ adds r2, r0, r2
+ adcs r2, r3, r1
+ umlal r0, r1, lr, r6
+ ldr r6, [sp, #40] @ 4-byte Reload
+ umull r2, r3, lr, r5
+ adcs r2, r7, r2
+ adcs r10, r3, r8
+ ldr r8, [sp, #64] @ 4-byte Reload
+ umull r7, r3, lr, r8
+ adcs r4, r4, r7
+ ldr r7, [sp, #44] @ 4-byte Reload
+ adcs r3, r3, r12
+ adc r5, r9, #0
+ adds r7, r7, r11
+ adcs r0, r6, r0
+ ldr r6, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #96] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #20] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ mul r0, r7, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ umull lr, r12, r0, r6
+ umull r3, r4, r0, r1
+ ldr r1, [sp, #112] @ 4-byte Reload
+ mov r2, r3
+ umull r9, r5, r0, r1
+ mov r1, r5
+ adds r5, r5, lr
+ umlal r1, r2, r0, r6
+ ldr r6, [sp, #120] @ 4-byte Reload
+ adcs r3, r12, r3
+ umull r5, lr, r0, r6
+ ldr r6, [sp, #76] @ 4-byte Reload
+ umull r3, r12, r0, r6
+ ldr r6, [sp, #92] @ 4-byte Reload
+ adcs r3, r4, r3
+ adcs r12, r12, r5
+ umull r4, r5, r0, r6
+ adcs lr, lr, r4
+ umull r6, r4, r0, r10
+ adcs r0, r5, r6
+ adc r4, r4, #0
+ adds r5, r9, r7
+ ldr r9, [sp, #84] @ 4-byte Reload
+ ldr r5, [sp, #44] @ 4-byte Reload
+ adcs r1, r1, r5
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r2, r1
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r1, r3, r1
+ ldr r3, [sp, #68] @ 4-byte Reload
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r1, r12, r1
+ ldr r12, [sp, #48] @ 4-byte Reload
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r1, lr, r1
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r4, r0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ umull r2, r1, r12, r0
+ umull r10, r0, r12, r8
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ str r2, [sp, #8] @ 4-byte Spill
+ str r1, [sp, #12] @ 4-byte Spill
+ umull r2, lr, r12, r3
+ umull r7, r8, r12, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ umull r5, r6, r12, r0
+ ldr r0, [sp, #104] @ 4-byte Reload
+ umull r1, r4, r12, r0
+ umull r11, r0, r12, r9
+ adds r2, r0, r2
+ str r11, [sp] @ 4-byte Spill
+ adcs r2, lr, r1
+ umlal r0, r1, r12, r3
+ adcs lr, r4, r5
+ ldmib sp, {r4, r5}
+ ldr r3, [sp, #44] @ 4-byte Reload
+ ldr r2, [sp] @ 4-byte Reload
+ adcs r7, r6, r7
+ adcs r6, r8, r10
+ adcs r4, r4, r5
+ ldr r5, [sp, #12] @ 4-byte Reload
+ adc r5, r5, #0
+ adds r9, r3, r2
+ ldr r3, [sp, #40] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #24] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ mul r0, r9, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ umull r3, r2, r0, r1
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [sp, #112] @ 4-byte Reload
+ umull r7, r1, r0, r2
+ mov r2, r3
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [sp, #96] @ 4-byte Reload
+ mov r5, r1
+ umlal r5, r2, r0, r6
+ umull r10, r4, r0, r7
+ ldr r7, [sp, #92] @ 4-byte Reload
+ str r4, [sp, #8] @ 4-byte Spill
+ umull r12, r8, r0, r7
+ ldr r7, [sp, #120] @ 4-byte Reload
+ umull lr, r4, r0, r7
+ umull r11, r7, r0, r6
+ ldr r6, [sp, #8] @ 4-byte Reload
+ adds r1, r1, r11
+ ldr r11, [sp, #76] @ 4-byte Reload
+ adcs r1, r7, r3
+ umull r1, r3, r0, r11
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r1
+ adcs r1, r3, lr
+ adcs r3, r4, r12
+ ldr r4, [sp, #16] @ 4-byte Reload
+ adcs r7, r8, r10
+ ldr r10, [sp, #52] @ 4-byte Reload
+ ldr r8, [sp, #64] @ 4-byte Reload
+ adc r6, r6, #0
+ adds r4, r4, r9
+ ldr r9, [sp, #72] @ 4-byte Reload
+ ldr r4, [sp, #48] @ 4-byte Reload
+ adcs r5, r5, r4
+ str r5, [sp, #48] @ 4-byte Spill
+ ldr r5, [sp, #44] @ 4-byte Reload
+ adcs r2, r2, r5
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r7, r0
+ ldr r7, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ umull r4, r5, r10, r7
+ adcs r0, r6, r0
+ str r4, [sp, #16] @ 4-byte Spill
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ umull r1, r6, r10, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ umull r2, r3, r10, r0
+ adds r2, r5, r2
+ adcs r2, r3, r1
+ umull r2, r3, r10, r9
+ adcs r7, r6, r2
+ ldr r6, [sp, #100] @ 4-byte Reload
+ umull r2, r12, r10, r6
+ adcs r6, r3, r2
+ umull r3, lr, r10, r8
+ mov r2, r10
+ ldr r10, [sp, #88] @ 4-byte Reload
+ adcs r4, r12, r3
+ umlal r5, r1, r2, r0
+ umull r3, r12, r2, r10
+ mov r10, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ ldr r2, [sp, #16] @ 4-byte Reload
+ adcs r3, lr, r3
+ adc r12, r12, #0
+ adds lr, r0, r2
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #28] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ mul r0, lr, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ umull r5, r12, r0, r7
+ umull r3, r6, r0, r1
+ ldr r1, [sp, #112] @ 4-byte Reload
+ umull r2, r4, r0, r1
+ str r2, [sp, #20] @ 4-byte Spill
+ mov r1, r4
+ mov r2, r3
+ adds r4, r4, r5
+ umlal r1, r2, r0, r7
+ ldr r7, [sp, #120] @ 4-byte Reload
+ adcs r3, r12, r3
+ umull r3, r12, r0, r11
+ adcs r11, r6, r3
+ ldr r3, [sp, #92] @ 4-byte Reload
+ umull r4, r5, r0, r7
+ ldr r7, [sp, #96] @ 4-byte Reload
+ adcs r12, r12, r4
+ umull r4, r6, r0, r3
+ adcs r4, r5, r4
+ umull r5, r3, r0, r7
+ ldr r7, [sp, #52] @ 4-byte Reload
+ adcs r0, r6, r5
+ ldr r5, [sp, #20] @ 4-byte Reload
+ adc r3, r3, #0
+ adds r6, r5, lr
+ adcs r1, r1, r7
+ ldr r7, [sp, #104] @ 4-byte Reload
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r2, r1
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r11, r1
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r12, r1
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r11, r4, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r0, [r0, #16]
+ umull lr, r6, r0, r8
+ umull r5, r3, r0, r10
+ umull r8, r2, r0, r1
+ umull r12, r4, r0, r9
+ adds r5, r2, r5
+ umull r1, r5, r0, r7
+ ldr r7, [sp, #100] @ 4-byte Reload
+ adcs r3, r3, r1
+ umlal r2, r1, r0, r10
+ adcs r9, r5, r12
+ umull r5, r3, r0, r7
+ ldr r7, [sp, #108] @ 4-byte Reload
+ adcs r12, r4, r5
+ ldr r4, [sp, #88] @ 4-byte Reload
+ adcs lr, r3, lr
+ umull r5, r3, r0, r4
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r5, r6, r5
+ adc r3, r3, #0
+ adds r4, r0, r8
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #44] @ 4-byte Spill
+ adcs r0, r11, r12
+ ldr r11, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ mul r1, r4, r11
+ adcs r0, r0, lr
+ umull lr, r12, r1, r7
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #28] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ umull r2, r6, r1, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ mov r3, r2
+ umull r8, r5, r1, r0
+ mov r0, r5
+ adds r5, r5, lr
+ umlal r0, r3, r1, r7
+ ldr r7, [sp, #120] @ 4-byte Reload
+ adcs r2, r12, r2
+ umull r5, lr, r1, r7
+ ldr r7, [sp, #76] @ 4-byte Reload
+ umull r2, r12, r1, r7
+ ldr r7, [sp, #92] @ 4-byte Reload
+ adcs r9, r6, r2
+ ldr r2, [sp, #96] @ 4-byte Reload
+ adcs r12, r12, r5
+ umull r5, r6, r1, r7
+ adcs lr, lr, r5
+ umull r7, r5, r1, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ adcs r1, r6, r7
+ ldr r7, [sp, #104] @ 4-byte Reload
+ adc r5, r5, #0
+ adds r4, r8, r4
+ adcs r0, r0, r2
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r12, r0
+ mov r12, r10
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #64] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r0, [r0, #20]
+ umull lr, r8, r0, r1
+ ldr r1, [sp, #72] @ 4-byte Reload
+ umull r6, r3, r0, r12
+ umull r4, r5, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ umull r10, r2, r0, r1
+ adds r6, r2, r6
+ umull r1, r6, r0, r7
+ ldr r7, [sp, #88] @ 4-byte Reload
+ adcs r3, r3, r1
+ umlal r2, r1, r0, r12
+ ldr r3, [sp, #100] @ 4-byte Reload
+ adcs r9, r6, r4
+ umull r4, r6, r0, r3
+ adcs r4, r5, r4
+ adcs r3, r6, lr
+ umull r5, r6, r0, r7
+ ldr r0, [sp, #52] @ 4-byte Reload
+ ldr r7, [sp, #108] @ 4-byte Reload
+ adcs r5, r8, r5
+ adc r6, r6, #0
+ adds lr, r0, r10
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r10, r0, r2
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ mul r1, lr, r11
+ ldr r11, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r6
+ umull r6, r12, r1, r7
+ str r0, [sp, #32] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ umull r3, r4, r1, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ mov r2, r3
+ umull r8, r5, r1, r0
+ mov r0, r5
+ adds r5, r5, r6
+ umlal r0, r2, r1, r7
+ ldr r7, [sp, #120] @ 4-byte Reload
+ adcs r3, r12, r3
+ umull r5, r6, r1, r7
+ ldr r7, [sp, #76] @ 4-byte Reload
+ umull r3, r12, r1, r7
+ ldr r7, [sp, #96] @ 4-byte Reload
+ adcs r9, r4, r3
+ ldr r3, [sp, #92] @ 4-byte Reload
+ adcs r12, r12, r5
+ umull r4, r5, r1, r3
+ adcs r4, r6, r4
+ umull r6, r3, r1, r7
+ adcs r1, r5, r6
+ adc r3, r3, #0
+ adds r6, r8, lr
+ adcs r0, r0, r10
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r12, r0
+ ldr r12, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r4, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r0, [r0, #24]
+ umull r3, r2, r0, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ str r2, [sp, #60] @ 4-byte Spill
+ str r3, [sp, #20] @ 4-byte Spill
+ umull r3, lr, r0, r12
+ umull r9, r2, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r2, [sp, #88] @ 4-byte Spill
+ umull r7, r8, r0, r1
+ ldr r1, [sp, #72] @ 4-byte Reload
+ umull r5, r6, r0, r1
+ ldr r1, [sp, #104] @ 4-byte Reload
+ umull r2, r4, r0, r1
+ umull r10, r1, r0, r11
+ ldr r11, [sp, #92] @ 4-byte Reload
+ adds r3, r1, r3
+ str r10, [sp, #104] @ 4-byte Spill
+ ldr r10, [sp, #96] @ 4-byte Reload
+ adcs r3, lr, r2
+ umlal r1, r2, r0, r12
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs lr, r4, r5
+ ldr r5, [sp, #20] @ 4-byte Reload
+ ldr r3, [sp, #88] @ 4-byte Reload
+ ldr r4, [sp, #60] @ 4-byte Reload
+ adcs r6, r6, r7
+ adcs r7, r8, r9
+ ldr r8, [sp, #108] @ 4-byte Reload
+ adcs r5, r3, r5
+ ldr r3, [sp, #104] @ 4-byte Reload
+ adc r4, r4, #0
+ adds r9, r0, r3
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, lr
+ ldr lr, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r1, r9, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ umull r2, r3, r1, r8
+ umull r4, r5, r1, r0
+ adds r2, r5, r2
+ umull r0, r2, r1, r7
+ ldr r7, [sp, #120] @ 4-byte Reload
+ adcs r3, r3, r0
+ umull r3, r12, r1, lr
+ adcs r6, r2, r3
+ umull r3, r2, r1, r7
+ adcs r12, r12, r3
+ umull r7, r3, r1, r11
+ adcs r2, r2, r7
+ str r2, [sp, #80] @ 4-byte Spill
+ umull r7, r2, r1, r10
+ adcs r3, r3, r7
+ mov r7, r8
+ umlal r5, r0, r1, r7
+ adc r1, r2, #0
+ adds r2, r4, r9
+ ldr r2, [sp, #104] @ 4-byte Reload
+ adcs r8, r5, r2
+ ldr r2, [sp, #100] @ 4-byte Reload
+ ldr r5, [sp, #116] @ 4-byte Reload
+ adcs r9, r0, r2
+ ldr r0, [sp, #88] @ 4-byte Reload
+ ldr r2, [sp, #80] @ 4-byte Reload
+ adcs r4, r6, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ str r4, [sp, #88] @ 4-byte Spill
+ adcs r6, r12, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ str r6, [sp, #100] @ 4-byte Spill
+ adcs r12, r2, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r2, r3, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ str r2, [sp, #104] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adc r1, r1, #0
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #112] @ 4-byte Reload
+ subs r1, r8, r1
+ sbcs r3, r9, r7
+ ldr r7, [sp, #120] @ 4-byte Reload
+ sbcs r5, r4, r5
+ sbcs r6, r6, lr
+ sbcs r4, r12, r7
+ sbcs r11, r2, r11
+ ldr r2, [sp, #84] @ 4-byte Reload
+ sbcs lr, r0, r10
+ sbc r7, r2, #0
+ ldr r2, [sp, #56] @ 4-byte Reload
+ ands r7, r7, #1
+ movne r1, r8
+ movne r3, r9
+ str r1, [r2]
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r3, [r2, #4]
+ movne r5, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ cmp r7, #0
+ movne r4, r12
+ str r5, [r2, #8]
+ movne r6, r1
+ ldr r1, [sp, #104] @ 4-byte Reload
+ str r6, [r2, #12]
+ str r4, [r2, #16]
+ movne r11, r1
+ cmp r7, #0
+ movne lr, r0
+ str r11, [r2, #20]
+ str lr, [r2, #24]
+ add sp, sp, #124
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end98:
+ .size mcl_fp_mont7L, .Lfunc_end98-mcl_fp_mont7L
+ .cantunwind
+ .fnend
+
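+@ mcl_fp_montNF7L: Montgomery multiplication variant (NF); same interleaved
+@ structure as mcl_fp_mont7L, but the final correction selects between the
+@ reduced and unreduced results on the sign of the trial subtraction (movlt).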
+ .globl mcl_fp_montNF7L
+ .align 2
+ .type mcl_fp_montNF7L,%function
+mcl_fp_montNF7L: @ @mcl_fp_montNF7L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #104
+ sub sp, sp, #104
+ str r0, [sp, #36] @ 4-byte Spill
+ mov r0, r2
+ str r2, [sp, #40] @ 4-byte Spill
+ ldm r0, {r4, r12}
+ ldr r6, [r1, #4]
+ ldr r2, [r0, #8]
+ ldr r7, [r1]
+ ldr r0, [r0, #12]
+ ldr r5, [r1, #8]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r1, #12]
+ umull r9, r8, r6, r4
+ umull lr, r10, r7, r4
+ str r6, [sp, #52] @ 4-byte Spill
+ mov r11, r6
+ str r7, [sp, #96] @ 4-byte Spill
+ str r5, [sp, #80] @ 4-byte Spill
+ str r2, [sp] @ 4-byte Spill
+ adds r6, r10, r9
+ umull r6, r9, r5, r4
+ ldr r5, [r1, #20]
+ adcs r7, r8, r6
+ umlal r10, r6, r11, r4
+ umull r7, r8, r0, r4
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r9, r7
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [r1, #16]
+ str r5, [sp, #44] @ 4-byte Spill
+ umull r7, r9, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ adcs r0, r8, r7
+ str r0, [sp, #84] @ 4-byte Spill
+ umull r7, r0, r5, r4
+ adcs r5, r9, r7
+ ldr r7, [r3, #4]
+ str r5, [sp, #76] @ 4-byte Spill
+ ldr r5, [r1, #24]
+ str r7, [sp, #72] @ 4-byte Spill
+ umull r1, r9, r5, r4
+ str r5, [sp, #68] @ 4-byte Spill
+ ldr r5, [r3]
+ adcs r0, r0, r1
+ ldr r1, [r3, #-4]
+ str r0, [sp, #28] @ 4-byte Spill
+ adc r0, r9, #0
+ ldr r9, [r3, #8]
+ str r0, [sp, #24] @ 4-byte Spill
+ str r5, [sp, #56] @ 4-byte Spill
+ mul r0, lr, r1
+ str r1, [sp, #60] @ 4-byte Spill
+ umull r1, r2, r0, r5
+ str r9, [sp, #100] @ 4-byte Spill
+ adds r1, r1, lr
+ str r2, [sp, #20] @ 4-byte Spill
+ umull r1, lr, r0, r7
+ adcs r11, r1, r10
+ umull r5, r1, r0, r9
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [r3, #12]
+ adcs r9, r5, r6
+ str r1, [sp, #92] @ 4-byte Spill
+ umull r5, r10, r0, r1
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adcs r7, r5, r1
+ ldr r1, [r3, #16]
+ str r1, [sp, #88] @ 4-byte Spill
+ umull r5, r8, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r4, r5, r1
+ ldr r1, [r3, #20]
+ str r1, [sp, #84] @ 4-byte Spill
+ umull r5, r6, r0, r1
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r5, r5, r1
+ ldr r1, [r3, #24]
+ umull r3, r2, r0, r1
+ ldr r0, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #24] @ 4-byte Reload
+ adcs r0, r3, r0
+ adc r3, r1, #0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ adds r11, r11, r1
+ adcs r1, r9, lr
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r1, r7, r1
+ ldr r7, [sp, #80] @ 4-byte Reload
+ str r1, [sp, #24] @ 4-byte Spill
+ adcs r1, r4, r10
+ str r1, [sp, #20] @ 4-byte Spill
+ adcs r1, r5, r8
+ ldr r5, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r0, [sp, #12] @ 4-byte Spill
+ adc r0, r3, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #8] @ 4-byte Spill
+ umull r9, r0, r12, r1
+ umull r3, r4, r12, r2
+ adds r3, r0, r3
+ umull r1, r3, r12, r7
+ ldr r7, [sp, #44] @ 4-byte Reload
+ adcs r4, r4, r1
+ umlal r0, r1, r12, r2
+ umull r4, r6, r12, r5
+ ldr r5, [sp, #64] @ 4-byte Reload
+ adcs r10, r3, r4
+ umull r4, r3, r12, r5
+ adcs r8, r6, r4
+ umull r6, r4, r12, r7
+ ldr r7, [sp, #68] @ 4-byte Reload
+ adcs r5, r3, r6
+ umull r6, r3, r12, r7
+ ldr r7, [sp, #28] @ 4-byte Reload
+ adcs r4, r4, r6
+ adc r2, r3, #0
+ adds r3, r9, r11
+ adcs r0, r0, r7
+ ldr r7, [sp, #24] @ 4-byte Reload
+ adcs r1, r1, r7
+ ldr r7, [sp, #20] @ 4-byte Reload
+ adcs r6, r10, r7
+ ldr r7, [sp, #16] @ 4-byte Reload
+ adcs r11, r8, r7
+ ldr r7, [sp, #12] @ 4-byte Reload
+ ldr r8, [sp, #72] @ 4-byte Reload
+ adcs r7, r5, r7
+ ldr r5, [sp, #8] @ 4-byte Reload
+ str r7, [sp, #16] @ 4-byte Spill
+ adcs r7, r4, r5
+ ldr r5, [sp, #60] @ 4-byte Reload
+ adc r2, r2, #0
+ str r7, [sp, #20] @ 4-byte Spill
+ str r2, [sp, #28] @ 4-byte Spill
+ mul r2, r3, r5
+ ldr r5, [sp, #56] @ 4-byte Reload
+ umull r4, r7, r2, r5
+ adds r3, r4, r3
+ str r7, [sp, #24] @ 4-byte Spill
+ umull r3, r7, r2, r8
+ ldr r4, [sp, #24] @ 4-byte Reload
+ adcs lr, r3, r0
+ ldr r0, [sp, #100] @ 4-byte Reload
+ str r7, [sp, #12] @ 4-byte Spill
+ umull r3, r7, r2, r0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r12, r3, r1
+ str r7, [sp, #8] @ 4-byte Spill
+ umull r3, r10, r2, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r3, r3, r6
+ umull r6, r9, r2, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r5, r6, r11
+ ldr r11, [sp, #76] @ 4-byte Reload
+ umull r6, r1, r2, r0
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r6, r6, r0
+ umull r7, r0, r2, r11
+ ldr r2, [sp, #20] @ 4-byte Reload
+ adcs r2, r7, r2
+ ldr r7, [sp, #28] @ 4-byte Reload
+ adc r7, r7, #0
+ adds r4, lr, r4
+ str r4, [sp, #28] @ 4-byte Spill
+ ldr r4, [sp, #12] @ 4-byte Reload
+ adcs r4, r12, r4
+ ldr r12, [sp, #52] @ 4-byte Reload
+ str r4, [sp, #24] @ 4-byte Spill
+ ldr r4, [sp, #8] @ 4-byte Reload
+ adcs r3, r3, r4
+ ldr r4, [sp, #64] @ 4-byte Reload
+ str r3, [sp, #20] @ 4-byte Spill
+ adcs r3, r5, r10
+ ldr r5, [sp, #48] @ 4-byte Reload
+ str r3, [sp, #16] @ 4-byte Spill
+ adcs r3, r6, r9
+ ldr r9, [sp, #68] @ 4-byte Reload
+ adcs r1, r2, r1
+ str r3, [sp, #12] @ 4-byte Spill
+ ldr r3, [sp, #80] @ 4-byte Reload
+ adc r0, r7, r0
+ str r1, [sp, #8] @ 4-byte Spill
+ ldr r1, [sp, #96] @ 4-byte Reload
+ ldr r7, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp] @ 4-byte Reload
+ umull r2, r6, r0, r12
+ umull r11, lr, r0, r1
+ adds r2, lr, r2
+ umull r1, r2, r0, r3
+ adcs r6, r6, r1
+ umlal lr, r1, r0, r12
+ umull r6, r3, r0, r5
+ adcs r5, r2, r6
+ umull r6, r2, r0, r4
+ adcs r10, r3, r6
+ umull r6, r3, r0, r7
+ ldr r7, [sp, #28] @ 4-byte Reload
+ adcs r4, r2, r6
+ umull r6, r2, r0, r9
+ ldr r9, [sp, #56] @ 4-byte Reload
+ adcs r3, r3, r6
+ ldr r6, [sp, #24] @ 4-byte Reload
+ adc r2, r2, #0
+ adds r7, r11, r7
+ adcs r0, lr, r6
+ ldr r6, [sp, #20] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #16] @ 4-byte Reload
+ adcs r6, r5, r6
+ ldr r5, [sp, #12] @ 4-byte Reload
+ adcs r11, r10, r5
+ ldr r5, [sp, #8] @ 4-byte Reload
+ adcs r10, r4, r5
+ ldr r5, [sp, #4] @ 4-byte Reload
+ ldr r4, [sp, #92] @ 4-byte Reload
+ adcs r3, r3, r5
+ str r3, [sp, #28] @ 4-byte Spill
+ ldr r3, [sp, #60] @ 4-byte Reload
+ adc r2, r2, #0
+ str r2, [sp, #24] @ 4-byte Spill
+ mul r2, r7, r3
+ umull r3, r5, r2, r9
+ adds r3, r3, r7
+ str r5, [sp, #20] @ 4-byte Spill
+ umull r3, r7, r2, r8
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [sp, #100] @ 4-byte Reload
+ adcs r8, r3, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ umull r3, lr, r2, r7
+ ldr r7, [sp, #84] @ 4-byte Reload
+ adcs r1, r3, r1
+ umull r3, r12, r2, r4
+ ldr r4, [sp, #88] @ 4-byte Reload
+ adcs r3, r3, r6
+ umull r6, r5, r2, r4
+ adcs r6, r6, r11
+ umull r4, r11, r2, r7
+ adcs r4, r4, r10
+ umull r7, r10, r2, r0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r2, r7, r0
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r7, r0, #0
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adds r0, r8, r0
+ ldr r8, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ adcs r0, r3, lr
+ ldr r3, [sp, #96] @ 4-byte Reload
+ str r0, [sp, #20] @ 4-byte Spill
+ adcs r0, r6, r12
+ ldr r6, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #16] @ 4-byte Spill
+ adcs r0, r4, r5
+ str r0, [sp, #12] @ 4-byte Spill
+ adcs r0, r2, r11
+ str r0, [sp, #8] @ 4-byte Spill
+ adc r0, r7, r10
+ ldr r7, [sp, #80] @ 4-byte Reload
+ ldr r10, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #4] @ 4-byte Spill
+ umull r4, r0, r6, r1
+ umull r11, r2, r6, r3
+ adds r4, r2, r4
+ umull r3, r4, r6, r7
+ adcs r0, r0, r3
+ umlal r2, r3, r6, r1
+ umull r0, r7, r6, r8
+ adcs r5, r4, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ umull r4, r1, r6, r0
+ mov r0, r6
+ adcs r4, r7, r4
+ umull r7, r12, r6, r10
+ ldr r6, [sp, #68] @ 4-byte Reload
+ adcs lr, r1, r7
+ umull r7, r1, r0, r6
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r7, r12, r7
+ adc r12, r1, #0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ adds r0, r11, r0
+ adcs r2, r2, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ adcs r3, r3, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r6, r5, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r11, r4, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r1, lr, r1
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #4] @ 4-byte Reload
+ adcs r1, r7, r1
+ str r1, [sp, #24] @ 4-byte Spill
+ adc r1, r12, #0
+ ldr r12, [sp, #76] @ 4-byte Reload
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ mul r4, r0, r1
+ umull r7, r1, r4, r9
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adds r0, r7, r0
+ umull r0, r7, r4, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ adcs lr, r0, r2
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [sp, #84] @ 4-byte Reload
+ umull r2, r0, r4, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #12] @ 4-byte Spill
+ adcs r2, r2, r3
+ umull r3, r0, r4, r1
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r3, r3, r6
+ umull r6, r5, r4, r1
+ adcs r6, r6, r11
+ umull r1, r11, r4, r7
+ umull r7, r9, r4, r12
+ ldr r12, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r0
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r4, r7, r0
+ ldr r7, [sp, #32] @ 4-byte Reload
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r7, r7, #0
+ adds r0, lr, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #96] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r0, r6, r0
+ ldr r6, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #20] @ 4-byte Spill
+ adcs r0, r1, r5
+ str r0, [sp, #16] @ 4-byte Spill
+ adcs r0, r4, r11
+ str r0, [sp, #12] @ 4-byte Spill
+ adc r0, r7, r9
+ ldr r9, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r4, [r9, #16]
+ umull r11, r3, r4, r2
+ ldr r2, [sp, #80] @ 4-byte Reload
+ umull r0, r1, r4, r12
+ adds r0, r3, r0
+ umull r5, r0, r4, r2
+ ldr r2, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r5
+ umlal r3, r5, r4, r12
+ umull r1, r7, r4, r8
+ adcs r8, r0, r1
+ umull r1, r0, r4, r2
+ adcs lr, r7, r1
+ umull r7, r1, r4, r10
+ adcs r2, r0, r7
+ umull r7, r0, r4, r6
+ ldr r6, [sp, #16] @ 4-byte Reload
+ adcs r1, r1, r7
+ ldr r7, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r4, r11, r7
+ ldr r7, [sp, #28] @ 4-byte Reload
+ adcs r3, r3, r7
+ ldr r7, [sp, #24] @ 4-byte Reload
+ adcs r5, r5, r7
+ ldr r7, [sp, #20] @ 4-byte Reload
+ adcs r7, r8, r7
+ adcs r11, lr, r6
+ ldr r6, [sp, #12] @ 4-byte Reload
+ adcs r10, r2, r6
+ ldr r2, [sp, #8] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ mul r0, r4, r1
+ umull r1, r6, r0, r2
+ ldr r2, [sp, #72] @ 4-byte Reload
+ adds r1, r1, r4
+ str r6, [sp, #24] @ 4-byte Spill
+ ldr r4, [sp, #84] @ 4-byte Reload
+ umull r1, r6, r0, r2
+ adcs lr, r1, r3
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r6, [sp, #20] @ 4-byte Spill
+ umull r3, r2, r0, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ adcs r3, r3, r5
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [sp, #24] @ 4-byte Reload
+ umull r5, r8, r0, r1
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adcs r5, r5, r7
+ umull r7, r12, r0, r1
+ adcs r6, r7, r11
+ ldr r11, [sp, #76] @ 4-byte Reload
+ umull r7, r1, r0, r4
+ adcs r7, r7, r10
+ umull r4, r10, r0, r11
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #28] @ 4-byte Reload
+ adc r4, r4, #0
+ adds r2, lr, r2
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r2, [sp, #20] @ 4-byte Reload
+ adcs r2, r3, r2
+ ldr r3, [sp, #52] @ 4-byte Reload
+ str r2, [sp, #28] @ 4-byte Spill
+ ldr r2, [sp, #16] @ 4-byte Reload
+ adcs r11, r5, r2
+ adcs r2, r6, r8
+ ldr r6, [sp, #48] @ 4-byte Reload
+ ldr r8, [sp, #76] @ 4-byte Reload
+ str r2, [sp, #24] @ 4-byte Spill
+ adcs r2, r7, r12
+ ldr r7, [r9, #20]
+ adcs r0, r0, r1
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r2, [sp, #20] @ 4-byte Spill
+ str r0, [sp, #16] @ 4-byte Spill
+ adc r0, r4, r10
+ str r0, [sp, #12] @ 4-byte Spill
+ umull r4, r0, r7, r3
+ umull r10, r2, r7, r1
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adds r4, r2, r4
+ umull r5, r4, r7, r1
+ adcs r0, r0, r5
+ umlal r2, r5, r7, r3
+ ldr r3, [sp, #68] @ 4-byte Reload
+ umull r0, r1, r7, r6
+ ldr r6, [sp, #64] @ 4-byte Reload
+ adcs lr, r4, r0
+ umull r4, r0, r7, r6
+ ldr r6, [sp, #44] @ 4-byte Reload
+ adcs r12, r1, r4
+ umull r4, r1, r7, r6
+ adcs r9, r0, r4
+ umull r4, r0, r7, r3
+ ldr r3, [sp, #32] @ 4-byte Reload
+ adcs r1, r1, r4
+ adc r0, r0, #0
+ adds r4, r10, r3
+ ldr r3, [sp, #28] @ 4-byte Reload
+ adcs r2, r2, r3
+ ldr r3, [sp, #24] @ 4-byte Reload
+ adcs r5, r5, r11
+ adcs r7, lr, r3
+ ldr r3, [sp, #20] @ 4-byte Reload
+ adcs r11, r12, r3
+ ldr r3, [sp, #16] @ 4-byte Reload
+ adcs r9, r9, r3
+ ldr r3, [sp, #12] @ 4-byte Reload
+ adcs r1, r1, r3
+ ldr r3, [sp, #56] @ 4-byte Reload
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ mul r0, r4, r1
+ umull r1, r6, r0, r3
+ ldr r3, [sp, #72] @ 4-byte Reload
+ adds r1, r1, r4
+ str r6, [sp, #24] @ 4-byte Spill
+ ldr r4, [sp, #84] @ 4-byte Reload
+ umull r1, r6, r0, r3
+ ldr r3, [sp, #100] @ 4-byte Reload
+ adcs r12, r1, r2
+ str r6, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #24] @ 4-byte Reload
+ umull r2, r10, r0, r3
+ ldr r3, [sp, #92] @ 4-byte Reload
+ adcs r2, r2, r5
+ umull r5, lr, r0, r3
+ ldr r3, [sp, #88] @ 4-byte Reload
+ adcs r5, r5, r7
+ umull r7, r6, r0, r3
+ adcs r7, r7, r11
+ umull r3, r11, r0, r4
+ adcs r3, r3, r9
+ umull r4, r9, r0, r8
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #28] @ 4-byte Reload
+ adc r4, r4, #0
+ adds r8, r12, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ adcs r1, r2, r1
+ str r1, [sp, #16] @ 4-byte Spill
+ adcs r1, r5, r10
+ ldr r5, [sp, #52] @ 4-byte Reload
+ str r1, [sp, #32] @ 4-byte Spill
+ adcs r1, r7, lr
+ ldr r7, [sp, #64] @ 4-byte Reload
+ str r1, [sp, #28] @ 4-byte Spill
+ adcs r1, r3, r6
+ adcs r0, r0, r11
+ str r1, [sp, #24] @ 4-byte Spill
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r9, r4, r9
+ ldr r4, [r0, #24]
+ ldr r0, [sp, #80] @ 4-byte Reload
+ umull r6, lr, r4, r0
+ ldr r0, [sp, #96] @ 4-byte Reload
+ umull r12, r1, r4, r5
+ umull r11, r2, r4, r0
+ mov r0, r6
+ mov r3, r2
+ adds r2, r2, r12
+ adcs r1, r1, r6
+ ldr r6, [sp, #48] @ 4-byte Reload
+ umlal r3, r0, r4, r5
+ umull r1, r2, r4, r6
+ adcs r5, lr, r1
+ umull r6, r1, r4, r7
+ ldr r7, [sp, #44] @ 4-byte Reload
+ adcs lr, r2, r6
+ umull r6, r2, r4, r7
+ ldr r7, [sp, #68] @ 4-byte Reload
+ adcs r12, r1, r6
+ umull r6, r1, r4, r7
+ ldr r7, [sp, #20] @ 4-byte Reload
+ adcs r2, r2, r6
+ ldr r6, [sp, #16] @ 4-byte Reload
+ adc r1, r1, #0
+ adds r4, r11, r8
+ ldr r11, [sp, #88] @ 4-byte Reload
+ adcs r3, r3, r6
+ ldr r6, [sp, #32] @ 4-byte Reload
+ adcs r6, r0, r6
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r5, r5, r0
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r10, lr, r0
+ adcs r7, r12, r7
+ adcs r12, r2, r9
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r7, [sp, #96] @ 4-byte Spill
+ ldr r7, [sp, #56] @ 4-byte Reload
+ adc lr, r1, #0
+ mul r1, r4, r2
+ umull r2, r8, r1, r7
+ ldr r7, [sp, #100] @ 4-byte Reload
+ adds r2, r2, r4
+ umull r2, r9, r1, r7
+ ldr r7, [sp, #72] @ 4-byte Reload
+ umull r4, r0, r1, r7
+ ldr r7, [sp, #92] @ 4-byte Reload
+ adcs r3, r4, r3
+ str r0, [sp, #80] @ 4-byte Spill
+ adcs r0, r2, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ umull r2, r0, r1, r7
+ str r0, [sp, #68] @ 4-byte Spill
+ adcs r0, r2, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ umull r5, r0, r1, r11
+ adcs r2, r5, r10
+ ldr r10, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r5, [sp, #76] @ 4-byte Reload
+ umull r6, r0, r1, r10
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r6, r6, r0
+ umull r4, r0, r1, r5
+ adcs r1, r4, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adc r4, lr, #0
+ adds r8, r3, r8
+ ldr r3, [sp, #60] @ 4-byte Reload
+ adcs r0, r3, r0
+ ldr r3, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ adcs lr, r3, r9
+ ldr r3, [sp, #68] @ 4-byte Reload
+ adcs r12, r2, r3
+ ldr r2, [sp, #64] @ 4-byte Reload
+ adcs r3, r6, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r3, [sp, #96] @ 4-byte Spill
+ adcs r2, r1, r2
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adc r9, r4, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ subs r4, r8, r1
+ ldr r1, [sp, #72] @ 4-byte Reload
+ sbcs r6, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ sbcs r1, lr, r1
+ sbcs r7, r12, r7
+ sbcs r11, r3, r11
+ ldr r3, [sp, #36] @ 4-byte Reload
+ sbcs r10, r2, r10
+ sbc r5, r9, r5
+ asr r0, r5, #31
+ cmp r0, #0
+ movlt r4, r8
+ movlt r1, lr
+ str r4, [r3]
+ ldr r4, [sp, #80] @ 4-byte Reload
+ movlt r6, r4
+ cmp r0, #0
+ str r6, [r3, #4]
+ str r1, [r3, #8]
+ ldr r1, [sp, #96] @ 4-byte Reload
+ movlt r7, r12
+ movlt r10, r2
+ str r7, [r3, #12]
+ movlt r11, r1
+ cmp r0, #0
+ movlt r5, r9
+ str r11, [r3, #16]
+ str r10, [r3, #20]
+ str r5, [r3, #24]
+ add sp, sp, #104
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end99:
+ .size mcl_fp_montNF7L, .Lfunc_end99-mcl_fp_montNF7L
+ .cantunwind
+ .fnend
+
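+@ mcl_fp_montRed7L: Montgomery reduction of a 14-word (double-width) value at
+@ [r1] modulo the 7-limb modulus at [r2], using the inverse word at [r2, #-4].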
+ .globl mcl_fp_montRed7L
+ .align 2
+ .type mcl_fp_montRed7L,%function
+mcl_fp_montRed7L: @ @mcl_fp_montRed7L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #120
+ sub sp, sp, #120
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [r1, #4]
+ ldr r10, [r2, #-4]
+ ldr r4, [r1]
+ ldr r3, [r2]
+ ldr r7, [r2, #8]
+ ldr r5, [r2, #4]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [r1, #8]
+ str r4, [sp, #60] @ 4-byte Spill
+ str r7, [sp, #108] @ 4-byte Spill
+ str r3, [sp, #116] @ 4-byte Spill
+ str r5, [sp, #24] @ 4-byte Spill
+ str r10, [sp, #92] @ 4-byte Spill
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [r1, #12]
+ str r0, [sp, #76] @ 4-byte Spill
+ mul r0, r4, r10
+ umull r4, r12, r0, r3
+ umull lr, r6, r0, r7
+ str r4, [sp, #52] @ 4-byte Spill
+ ldr r4, [r2, #24]
+ str r6, [sp, #72] @ 4-byte Spill
+ mov r9, lr
+ mov r3, r12
+ umlal r3, r9, r0, r5
+ umull r7, r6, r0, r4
+ str r4, [sp, #104] @ 4-byte Spill
+ ldr r4, [r2, #20]
+ str r7, [sp, #68] @ 4-byte Spill
+ str r6, [sp, #64] @ 4-byte Spill
+ umull r7, r6, r0, r4
+ str r4, [sp, #112] @ 4-byte Spill
+ ldr r4, [r2, #16]
+ ldr r2, [r2, #12]
+ str r7, [sp, #44] @ 4-byte Spill
+ str r6, [sp, #48] @ 4-byte Spill
+ str r4, [sp, #96] @ 4-byte Spill
+ umull r8, r7, r0, r4
+ str r2, [sp, #100] @ 4-byte Spill
+ umull r4, r6, r0, r2
+ umull r11, r2, r0, r5
+ adds r0, r12, r11
+ ldr r11, [r1, #36]
+ adcs r0, r2, lr
+ ldr r2, [sp, #48] @ 4-byte Reload
+ ldr lr, [r1, #28]
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r5, r6, r8
+ ldr r8, [sp, #108] @ 4-byte Reload
+ ldr r6, [sp, #56] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adds r0, r0, r2
+ ldr r2, [r1, #24]
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ ldr r3, [r1, #20]
+ mul r4, r0, r10
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [r1, #32]
+ ldr r10, [r1, #40]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [r1, #44]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r1, #48]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r1, #52]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r1, #16]
+ umull r12, r1, r4, r8
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r9
+ ldr r9, [sp, #96] @ 4-byte Reload
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #80] @ 4-byte Spill
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ umull r7, r1, r4, r6
+ str r7, [sp, #28] @ 4-byte Spill
+ mov r7, r12
+ adcs r0, r3, r0
+ ldr r3, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ mov r0, r1
+ umlal r0, r7, r4, r5
+ adcs r2, r2, r3
+ str r2, [sp, #68] @ 4-byte Spill
+ ldr r2, [sp, #64] @ 4-byte Reload
+ adcs r2, lr, r2
+ ldr lr, [sp, #100] @ 4-byte Reload
+ str r2, [sp, #64] @ 4-byte Spill
+ ldr r2, [sp, #60] @ 4-byte Reload
+ adcs r2, r2, #0
+ str r2, [sp, #60] @ 4-byte Spill
+ adcs r2, r11, #0
+ mov r11, r5
+ str r2, [sp, #56] @ 4-byte Spill
+ adcs r2, r10, #0
+ str r2, [sp, #52] @ 4-byte Spill
+ ldr r2, [sp, #48] @ 4-byte Reload
+ adcs r2, r2, #0
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r2, [sp, #44] @ 4-byte Reload
+ adcs r2, r2, #0
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [sp, #40] @ 4-byte Reload
+ adcs r2, r2, #0
+ str r2, [sp, #40] @ 4-byte Spill
+ mov r2, #0
+ adc r2, r2, #0
+ str r2, [sp, #36] @ 4-byte Spill
+ umull r3, r2, r4, r5
+ ldr r5, [sp, #20] @ 4-byte Reload
+ adds r1, r1, r3
+ adcs r2, r2, r12
+ umull r1, r3, r4, r9
+ umull r2, r12, r4, lr
+ adcs r2, r5, r2
+ adcs r10, r12, r1
+ ldr r1, [sp, #112] @ 4-byte Reload
+ str r2, [sp] @ 4-byte Spill
+ ldr r12, [sp, #92] @ 4-byte Reload
+ umull r5, r2, r4, r1
+ adcs r1, r3, r5
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [sp, #104] @ 4-byte Reload
+ umull r5, r3, r4, r1
+ adcs r2, r2, r5
+ ldr r5, [sp] @ 4-byte Reload
+ str r2, [sp, #8] @ 4-byte Spill
+ adc r2, r3, #0
+ ldr r3, [sp, #28] @ 4-byte Reload
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [sp, #84] @ 4-byte Reload
+ adds r4, r3, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r4, r0, r12
+ str r0, [sp, #32] @ 4-byte Spill
+ umull r3, r0, r4, r8
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r7, r0
+ ldr r7, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ umull r0, r2, r4, r6
+ ldr r6, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r5, r0
+ mov r5, r3
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r10, r0
+ adcs r6, r7, r6
+ ldr r7, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ mov r0, r2
+ str r6, [sp, #76] @ 4-byte Spill
+ ldr r6, [sp, #64] @ 4-byte Reload
+ umlal r0, r5, r4, r11
+ adcs r6, r7, r6
+ ldr r7, [sp, #4] @ 4-byte Reload
+ str r6, [sp, #72] @ 4-byte Spill
+ ldr r6, [sp, #60] @ 4-byte Reload
+ adcs r6, r7, r6
+ umull r7, r8, r4, r1
+ str r6, [sp, #68] @ 4-byte Spill
+ ldr r6, [sp, #56] @ 4-byte Reload
+ adcs r6, r6, #0
+ str r6, [sp, #64] @ 4-byte Spill
+ ldr r6, [sp, #52] @ 4-byte Reload
+ adcs r6, r6, #0
+ str r6, [sp, #60] @ 4-byte Spill
+ ldr r6, [sp, #48] @ 4-byte Reload
+ adcs r6, r6, #0
+ str r6, [sp, #56] @ 4-byte Spill
+ ldr r6, [sp, #44] @ 4-byte Reload
+ adcs r6, r6, #0
+ str r6, [sp, #52] @ 4-byte Spill
+ ldr r6, [sp, #40] @ 4-byte Reload
+ adcs r6, r6, #0
+ str r6, [sp, #48] @ 4-byte Spill
+ ldr r6, [sp, #36] @ 4-byte Reload
+ adc r6, r6, #0
+ str r6, [sp, #44] @ 4-byte Spill
+ umull r6, r10, r4, r11
+ adds r1, r2, r6
+ adcs r2, r10, r3
+ umull r1, r6, r4, lr
+ ldr lr, [sp, #108] @ 4-byte Reload
+ ldr r2, [sp, #16] @ 4-byte Reload
+ adcs r10, r2, r1
+ umull r2, r3, r4, r9
+ adcs r9, r6, r2
+ ldr r2, [sp, #112] @ 4-byte Reload
+ umull r6, r1, r4, r2
+ adcs r3, r3, r6
+ adcs r1, r1, r7
+ str r3, [sp, #16] @ 4-byte Spill
+ ldr r3, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adc r8, r8, #0
+ ldr r6, [sp, #16] @ 4-byte Reload
+ adds r7, r3, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ mul r7, r0, r12
+ str r0, [sp, #40] @ 4-byte Spill
+ umull r3, r0, r7, lr
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ umull r4, r1, r7, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ str r4, [sp, #36] @ 4-byte Spill
+ mov r4, r3
+ adcs r0, r5, r0
+ ldr r5, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r10, r0
+ ldr r10, [sp, #104] @ 4-byte Reload
+ adcs r5, r9, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ mov r0, r1
+ str r5, [sp, #80] @ 4-byte Spill
+ ldr r5, [sp, #72] @ 4-byte Reload
+ umlal r0, r4, r7, r11
+ adcs r5, r6, r5
+ ldr r6, [sp, #12] @ 4-byte Reload
+ str r5, [sp, #76] @ 4-byte Spill
+ ldr r5, [sp, #68] @ 4-byte Reload
+ adcs r5, r6, r5
+ str r5, [sp, #72] @ 4-byte Spill
+ ldr r5, [sp, #64] @ 4-byte Reload
+ adcs r6, r8, r5
+ ldr r8, [sp, #100] @ 4-byte Reload
+ str r6, [sp, #68] @ 4-byte Spill
+ ldr r6, [sp, #60] @ 4-byte Reload
+ adcs r6, r6, #0
+ str r6, [sp, #64] @ 4-byte Spill
+ ldr r6, [sp, #56] @ 4-byte Reload
+ adcs r6, r6, #0
+ str r6, [sp, #60] @ 4-byte Spill
+ ldr r6, [sp, #52] @ 4-byte Reload
+ adcs r6, r6, #0
+ str r6, [sp, #56] @ 4-byte Spill
+ ldr r6, [sp, #48] @ 4-byte Reload
+ adcs r6, r6, #0
+ str r6, [sp, #52] @ 4-byte Spill
+ ldr r6, [sp, #44] @ 4-byte Reload
+ adc r6, r6, #0
+ str r6, [sp, #48] @ 4-byte Spill
+ umull r9, r6, r7, r10
+ str r6, [sp, #44] @ 4-byte Spill
+ umull r6, r5, r7, r11
+ adds r1, r1, r6
+ umull r6, r12, r7, r2
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r2, r5, r3
+ umull r2, r3, r7, r8
+ adcs r1, r1, r2
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #96] @ 4-byte Reload
+ umull r5, r2, r7, r1
+ ldr r7, [sp, #36] @ 4-byte Reload
+ adcs r3, r3, r5
+ ldr r5, [sp, #116] @ 4-byte Reload
+ adcs r2, r2, r6
+ str r3, [sp, #20] @ 4-byte Spill
+ ldr r3, [sp, #28] @ 4-byte Reload
+ str r2, [sp, #16] @ 4-byte Spill
+ adcs r2, r12, r9
+ ldr r9, [sp, #92] @ 4-byte Reload
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [sp, #44] @ 4-byte Reload
+ adc r2, r2, #0
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [sp, #40] @ 4-byte Reload
+ adds r6, r7, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r6, r0, r9
+ str r0, [sp, #44] @ 4-byte Spill
+ umull r7, r0, r6, lr
+ str r0, [sp, #32] @ 4-byte Spill
+ umull r0, r2, r6, r5
+ mov r12, r7
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r4, r0
+ mov r4, r2
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ umlal r4, r12, r6, r11
+ adcs r0, r3, r0
+ ldr r3, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r3, r0
+ ldr r3, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r3, r0
+ ldr r3, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r3, r0
+ ldr r3, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ umull r3, r0, r6, r10
+ str r3, [sp, #28] @ 4-byte Spill
+ ldr r3, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ umull lr, r0, r6, r3
+ str r0, [sp, #20] @ 4-byte Spill
+ umull r10, r0, r6, r11
+ adds r2, r2, r10
+ adcs r0, r0, r7
+ umull r2, r10, r6, r1
+ umull r0, r1, r6, r8
+ ldr r6, [sp, #32] @ 4-byte Reload
+ adcs r8, r6, r0
+ adcs r0, r1, r2
+ ldr r1, [sp, #20] @ 4-byte Reload
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r10, r10, lr
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc lr, r0, #0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adds r7, r2, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r4, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ mul r4, r0, r9
+ ldr r0, [sp, #108] @ 4-byte Reload
+ umull r7, r2, r4, r0
+ str r2, [sp, #40] @ 4-byte Spill
+ umull r2, r0, r4, r5
+ ldr r5, [sp, #84] @ 4-byte Reload
+ str r2, [sp, #44] @ 4-byte Spill
+ mov r6, r0
+ mov r2, r7
+ umlal r6, r2, r4, r11
+ adcs r5, r12, r5
+ ldr r12, [sp, #100] @ 4-byte Reload
+ str r5, [sp, #84] @ 4-byte Spill
+ ldr r5, [sp, #80] @ 4-byte Reload
+ adcs r5, r8, r5
+ ldr r8, [sp, #104] @ 4-byte Reload
+ str r5, [sp, #80] @ 4-byte Spill
+ ldr r5, [sp, #76] @ 4-byte Reload
+ adcs r5, r1, r5
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r5, [sp, #76] @ 4-byte Spill
+ ldr r5, [sp, #72] @ 4-byte Reload
+ adcs r5, r10, r5
+ str r5, [sp, #72] @ 4-byte Spill
+ ldr r5, [sp, #68] @ 4-byte Reload
+ adcs r1, r1, r5
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, lr, r1
+ ldr lr, [sp, #96] @ 4-byte Reload
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, #0
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, #0
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adc r1, r1, #0
+ str r1, [sp, #52] @ 4-byte Spill
+ umull r5, r1, r4, r8
+ str r5, [sp, #32] @ 4-byte Spill
+ str r1, [sp, #36] @ 4-byte Spill
+ umull r5, r1, r4, r3
+ str r5, [sp, #20] @ 4-byte Spill
+ umull r9, r5, r4, r11
+ str r1, [sp, #28] @ 4-byte Spill
+ adds r0, r0, r9
+ umull r3, r9, r4, lr
+ umull r0, r1, r4, r12
+ adcs r4, r5, r7
+ ldr r4, [sp, #40] @ 4-byte Reload
+ adcs r10, r4, r0
+ ldr r0, [sp, #20] @ 4-byte Reload
+ ldr r4, [sp, #28] @ 4-byte Reload
+ adcs r1, r1, r3
+ adcs r3, r9, r0
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r9, [sp, #112] @ 4-byte Reload
+ adcs r7, r4, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r4, [sp, #48] @ 4-byte Reload
+ adc r5, r0, #0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adds r4, r0, r4
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r4, r6, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ ldr r6, [sp, #108] @ 4-byte Reload
+ adcs r2, r2, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ str r2, [sp, #84] @ 4-byte Spill
+ adcs r0, r10, r0
+ mov r10, r4
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ mul r0, r4, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ umull r2, r7, r0, r11
+ umull r4, r3, r0, r1
+ adds r2, r3, r2
+ str r4, [sp, #92] @ 4-byte Spill
+ umull r1, r2, r0, r6
+ adcs r4, r7, r1
+ umlal r3, r1, r0, r11
+ umull r4, r5, r0, r12
+ adcs r2, r2, r4
+ str r2, [sp, #52] @ 4-byte Spill
+ umull r4, r2, r0, lr
+ adcs r7, r5, r4
+ str r7, [sp, #48] @ 4-byte Spill
+ umull r7, r4, r0, r9
+ adcs r5, r2, r7
+ umull r7, r2, r0, r8
+ adcs r7, r4, r7
+ adc r0, r2, #0
+ ldr r2, [sp, #92] @ 4-byte Reload
+ adds r2, r2, r10
+ ldr r2, [sp, #84] @ 4-byte Reload
+ adcs r12, r3, r2
+ ldr r2, [sp, #80] @ 4-byte Reload
+ adcs lr, r1, r2
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [sp, #52] @ 4-byte Reload
+ adcs r10, r2, r1
+ ldr r1, [sp, #72] @ 4-byte Reload
+ ldr r2, [sp, #48] @ 4-byte Reload
+ adcs r4, r2, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r8, r5, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ str r8, [sp, #84] @ 4-byte Spill
+ adcs r2, r7, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ ldr r7, [sp, #100] @ 4-byte Reload
+ str r2, [sp, #92] @ 4-byte Spill
+ adcs r1, r0, r1
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r3, r0, #0
+ ldr r0, [sp, #116] @ 4-byte Reload
+ subs r0, r12, r0
+ sbcs r5, lr, r11
+ mov r11, r4
+ sbcs r6, r10, r6
+ sbcs r7, r4, r7
+ ldr r4, [sp, #96] @ 4-byte Reload
+ sbcs r4, r8, r4
+ sbcs r8, r2, r9
+ ldr r2, [sp, #104] @ 4-byte Reload
+ sbcs r9, r1, r2
+ ldr r2, [sp, #88] @ 4-byte Reload
+ sbc r3, r3, #0
+ ands r3, r3, #1
+ movne r0, r12
+ movne r5, lr
+ movne r6, r10
+ cmp r3, #0
+ str r0, [r2]
+ ldr r0, [sp, #84] @ 4-byte Reload
+ movne r7, r11
+ str r5, [r2, #4]
+ str r6, [r2, #8]
+ str r7, [r2, #12]
+ movne r4, r0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ str r4, [r2, #16]
+ movne r8, r0
+ cmp r3, #0
+ movne r9, r1
+ str r8, [r2, #20]
+ str r9, [r2, #24]
+ add sp, sp, #120
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end100:
+ .size mcl_fp_montRed7L, .Lfunc_end100-mcl_fp_montRed7L
+ .cantunwind
+ .fnend
+
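+@ mcl_fp_addPre7L: 7-limb (7 x 32-bit) addition without modular reduction.
+@ r0 = result, r1/r2 = operands; returns the final carry (0 or 1) in r0.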
+ .globl mcl_fp_addPre7L
+ .align 2
+ .type mcl_fp_addPre7L,%function
+mcl_fp_addPre7L: @ @mcl_fp_addPre7L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #8
+ sub sp, sp, #8
+ ldr r3, [r1, #4]
+ ldr r9, [r1]
+ ldr r7, [r2]
+ ldr lr, [r1, #8]
+ ldr r10, [r1, #12]
+ ldr r11, [r1, #16]
+ ldr r8, [r1, #24]
+ str r3, [sp, #4] @ 4-byte Spill
+ ldr r3, [r1, #20]
+ adds r7, r7, r9
+ str r3, [sp] @ 4-byte Spill
+ ldmib r2, {r1, r3, r4, r5, r12}
+ ldr r6, [sp, #4] @ 4-byte Reload
+ ldr r2, [r2, #24]
+ str r7, [r0]
+ adcs r1, r1, r6
+ ldr r6, [sp] @ 4-byte Reload
+ adcs r3, r3, lr
+ adcs r4, r4, r10
+ adcs r5, r5, r11
+ adcs r6, r12, r6
+ adcs r2, r2, r8
+ stmib r0, {r1, r3, r4, r5, r6}
+ str r2, [r0, #24]
+ mov r0, #0
+ adc r0, r0, #0
+ add sp, sp, #8
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end101:
+ .size mcl_fp_addPre7L, .Lfunc_end101-mcl_fp_addPre7L
+ .cantunwind
+ .fnend
+
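+@ mcl_fp_subPre7L: 7-limb subtraction without modular reduction.
+@ r0 = result, r1 = minuend, r2 = subtrahend; returns the borrow (0 or 1) in r0.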
+ .globl mcl_fp_subPre7L
+ .align 2
+ .type mcl_fp_subPre7L,%function
+mcl_fp_subPre7L: @ @mcl_fp_subPre7L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #8
+ sub sp, sp, #8
+ ldr r3, [r2, #4]
+ ldr r9, [r2]
+ ldr r7, [r1]
+ ldr lr, [r2, #8]
+ ldr r10, [r2, #12]
+ ldr r11, [r2, #16]
+ ldr r8, [r2, #24]
+ str r3, [sp, #4] @ 4-byte Spill
+ ldr r3, [r2, #20]
+ subs r7, r7, r9
+ str r3, [sp] @ 4-byte Spill
+ ldmib r1, {r2, r3, r4, r5, r12}
+ ldr r6, [sp, #4] @ 4-byte Reload
+ ldr r1, [r1, #24]
+ str r7, [r0]
+ sbcs r2, r2, r6
+ ldr r6, [sp] @ 4-byte Reload
+ sbcs r3, r3, lr
+ sbcs r4, r4, r10
+ sbcs r5, r5, r11
+ sbcs r6, r12, r6
+ sbcs r1, r1, r8
+ stmib r0, {r2, r3, r4, r5, r6}
+ str r1, [r0, #24]
+ mov r0, #0
+ sbc r0, r0, #0
+ and r0, r0, #1
+ add sp, sp, #8
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end102:
+ .size mcl_fp_subPre7L, .Lfunc_end102-mcl_fp_subPre7L
+ .cantunwind
+ .fnend
+
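+@ mcl_fp_shr1_7L: logical right shift by one bit of a 7-limb (224-bit) value.
+@ r0 = result, r1 = source.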
+ .globl mcl_fp_shr1_7L
+ .align 2
+ .type mcl_fp_shr1_7L,%function
+mcl_fp_shr1_7L: @ @mcl_fp_shr1_7L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r11, lr}
+ push {r4, r5, r6, r7, r11, lr}
+ ldr r3, [r1, #4]
+ ldr r12, [r1]
+ ldr lr, [r1, #12]
+ ldr r2, [r1, #8]
+ ldr r5, [r1, #20]
+ ldr r4, [r1, #16]
+ ldr r1, [r1, #24]
+ lsrs r6, r3, #1
+ lsr r3, r3, #1
+ rrx r12, r12
+ lsrs r6, lr, #1
+ orr r7, r3, r2, lsl #31
+ lsr r6, lr, #1
+ rrx r2, r2
+ lsrs r3, r5, #1
+ lsr r5, r5, #1
+ str r12, [r0]
+ str r7, [r0, #4]
+ orr r5, r5, r1, lsl #31
+ orr r6, r6, r4, lsl #31
+ rrx r3, r4
+ lsr r1, r1, #1
+ str r2, [r0, #8]
+ str r6, [r0, #12]
+ str r3, [r0, #16]
+ str r5, [r0, #20]
+ str r1, [r0, #24]
+ pop {r4, r5, r6, r7, r11, lr}
+ mov pc, lr
+.Lfunc_end103:
+ .size mcl_fp_shr1_7L, .Lfunc_end103-mcl_fp_shr1_7L
+ .cantunwind
+ .fnend
+
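+@ mcl_fp_add7L: modular addition of 7-limb values.
+@ r0 = result, r1/r2 = operands, r3 = modulus; stores r1+r2, then overwrites it
+@ with the reduced value when subtracting the modulus does not borrow.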
+ .globl mcl_fp_add7L
+ .align 2
+ .type mcl_fp_add7L,%function
+mcl_fp_add7L: @ @mcl_fp_add7L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #12
+ sub sp, sp, #12
+ ldr r7, [r1, #8]
+ ldr r10, [r1]
+ ldr r9, [r1, #4]
+ ldr r11, [r1, #16]
+ ldr r8, [r1, #24]
+ str r7, [sp, #8] @ 4-byte Spill
+ ldr r7, [r1, #12]
+ str r7, [sp, #4] @ 4-byte Spill
+ ldr r7, [r1, #20]
+ ldm r2, {r1, r4, r5, r6, r12, lr}
+ ldr r2, [r2, #24]
+ adds r10, r1, r10
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r4, r4, r9
+ str r10, [r0]
+ adcs r5, r5, r1
+ ldr r1, [sp, #4] @ 4-byte Reload
+ adcs r6, r6, r1
+ mov r1, #0
+ adcs r9, r12, r11
+ adcs r7, lr, r7
+ stmib r0, {r4, r5, r6, r9}
+ adcs r2, r2, r8
+ str r7, [r0, #20]
+ adc r1, r1, #0
+ str r2, [r0, #24]
+ str r1, [sp, #8] @ 4-byte Spill
+ ldr r1, [r3]
+ str r1, [sp] @ 4-byte Spill
+ ldmib r3, {r12, lr}
+ ldr r1, [r3, #20]
+ ldr r8, [r3, #12]
+ ldr r11, [r3, #16]
+ ldr r3, [r3, #24]
+ str r1, [sp, #4] @ 4-byte Spill
+ ldr r1, [sp] @ 4-byte Reload
+ subs r10, r10, r1
+ sbcs r1, r4, r12
+ ldr r4, [sp, #4] @ 4-byte Reload
+ sbcs r5, r5, lr
+ sbcs r12, r6, r8
+ str r5, [sp] @ 4-byte Spill
+ sbcs lr, r9, r11
+ sbcs r4, r7, r4
+ sbcs r5, r2, r3
+ ldr r2, [sp, #8] @ 4-byte Reload
+ sbc r2, r2, #0
+ tst r2, #1
+ bne .LBB104_2
+@ BB#1: @ %nocarry
+ str r10, [r0]
+ str r1, [r0, #4]
+ ldr r1, [sp] @ 4-byte Reload
+ add r2, r0, #8
+ stm r2, {r1, r12, lr}
+ str r4, [r0, #20]
+ str r5, [r0, #24]
+.LBB104_2: @ %carry
+ add sp, sp, #12
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end104:
+ .size mcl_fp_add7L, .Lfunc_end104-mcl_fp_add7L
+ .cantunwind
+ .fnend
+
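+@ mcl_fp_addNF7L: modular addition, 'NF' variant: computes the sum and the sum
+@ minus the modulus (r3), then selects per limb from the sign of the trial
+@ subtraction. r0 = result, r1/r2 = operands.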
+ .globl mcl_fp_addNF7L
+ .align 2
+ .type mcl_fp_addNF7L,%function
+mcl_fp_addNF7L: @ @mcl_fp_addNF7L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #20
+ sub sp, sp, #20
+ ldm r1, {r6, r7}
+ ldr r11, [r1, #16]
+ ldr r9, [r1, #20]
+ ldr r8, [r1, #24]
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r1, #8]
+ str r7, [sp, #12] @ 4-byte Spill
+ ldr r7, [r1, #12]
+ str r7, [sp, #8] @ 4-byte Spill
+ ldm r2, {r1, r4, r5, r10, r12, lr}
+ ldr r2, [r2, #24]
+ adds r7, r1, r6
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r7, [sp, #4] @ 4-byte Spill
+ adcs r6, r4, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r6, [sp, #16] @ 4-byte Spill
+ adcs r5, r5, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r4, r10, r1
+ ldr r10, [r3, #8]
+ adcs r12, r12, r11
+ ldr r11, [r3, #16]
+ adcs lr, lr, r9
+ ldr r9, [r3, #20]
+ adc r1, r2, r8
+ ldr r2, [r3]
+ ldr r8, [r3, #12]
+ str r1, [sp, #12] @ 4-byte Spill
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r3, #4]
+ ldr r3, [r3, #24]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [sp] @ 4-byte Reload
+ subs r2, r7, r2
+ ldr r7, [sp, #8] @ 4-byte Reload
+ sbcs r7, r6, r7
+ sbcs r6, r5, r10
+ mov r10, r12
+ sbcs r8, r4, r8
+ sbcs r11, r12, r11
+ sbcs r12, lr, r9
+ ldr r9, [sp, #4] @ 4-byte Reload
+ sbc r3, r1, r3
+ asr r1, r3, #31
+ cmp r1, #0
+ movlt r2, r9
+ movlt r6, r5
+ str r2, [r0]
+ ldr r2, [sp, #16] @ 4-byte Reload
+ movlt r7, r2
+ cmp r1, #0
+ movlt r8, r4
+ movlt r11, r10
+ movlt r12, lr
+ cmp r1, #0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r7, [r0, #4]
+ str r6, [r0, #8]
+ str r8, [r0, #12]
+ str r11, [r0, #16]
+ str r12, [r0, #20]
+ movlt r3, r1
+ str r3, [r0, #24]
+ add sp, sp, #20
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end105:
+ .size mcl_fp_addNF7L, .Lfunc_end105-mcl_fp_addNF7L
+ .cantunwind
+ .fnend
+
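+@ mcl_fp_sub7L: modular subtraction of 7-limb values.
+@ r0 = result, r1 = minuend, r2 = subtrahend, r3 = modulus; on borrow the
+@ modulus is added back to the stored difference.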
+ .globl mcl_fp_sub7L
+ .align 2
+ .type mcl_fp_sub7L,%function
+mcl_fp_sub7L: @ @mcl_fp_sub7L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #12
+ sub sp, sp, #12
+ ldr r7, [r2, #8]
+ ldr r11, [r2]
+ ldr r9, [r2, #4]
+ ldr r8, [r2, #20]
+ ldr r10, [r2, #24]
+ str r7, [sp, #8] @ 4-byte Spill
+ ldr r7, [r2, #12]
+ str r7, [sp, #4] @ 4-byte Spill
+ ldr r7, [r2, #16]
+ str r7, [sp] @ 4-byte Spill
+ ldm r1, {r2, r4, r5, r6, r7, lr}
+ ldr r1, [r1, #24]
+ subs r12, r2, r11
+ ldr r2, [sp, #8] @ 4-byte Reload
+ sbcs r9, r4, r9
+ ldr r4, [sp, #4] @ 4-byte Reload
+ str r12, [r0]
+ str r9, [r0, #4]
+ sbcs r2, r5, r2
+ sbcs r11, r6, r4
+ ldr r4, [sp] @ 4-byte Reload
+ str r2, [r0, #8]
+ str r11, [r0, #12]
+ sbcs r4, r7, r4
+ sbcs r5, lr, r8
+ sbcs r6, r1, r10
+ add r1, r0, #16
+ stm r1, {r4, r5, r6}
+ mov r1, #0
+ sbc r1, r1, #0
+ tst r1, #1
+ beq .LBB106_2
+@ BB#1: @ %carry
+ ldr r1, [r3]
+ ldr r7, [r3, #4]
+ ldr lr, [r3, #12]
+ ldr r8, [r3, #16]
+ ldr r10, [r3, #20]
+ str r1, [sp, #4] @ 4-byte Spill
+ ldr r1, [r3, #8]
+ ldr r3, [r3, #24]
+ str r1, [sp, #8] @ 4-byte Spill
+ ldr r1, [sp, #4] @ 4-byte Reload
+ adds r1, r1, r12
+ str r1, [sp, #4] @ 4-byte Spill
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r7, r7, r9
+ adcs r2, r1, r2
+ ldr r1, [sp, #4] @ 4-byte Reload
+ adcs r12, lr, r11
+ adcs r4, r8, r4
+ adcs r5, r10, r5
+ adc r3, r3, r6
+ stm r0, {r1, r7}
+ str r2, [r0, #8]
+ str r12, [r0, #12]
+ str r4, [r0, #16]
+ str r5, [r0, #20]
+ str r3, [r0, #24]
+.LBB106_2: @ %nocarry
+ add sp, sp, #12
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end106:
+ .size mcl_fp_sub7L, .Lfunc_end106-mcl_fp_sub7L
+ .cantunwind
+ .fnend
+
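+@ mcl_fp_subNF7L: modular subtraction, 'NF' variant: computes the difference
+@ and the difference plus the modulus (r3), then selects per limb from the
+@ sign of the difference. r0 = result, r1/r2 = operands.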
+ .globl mcl_fp_subNF7L
+ .align 2
+ .type mcl_fp_subNF7L,%function
+mcl_fp_subNF7L: @ @mcl_fp_subNF7L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #16
+ sub sp, sp, #16
+ ldm r2, {r5, lr}
+ ldr r7, [r2, #8]
+ ldr r11, [r2, #16]
+ ldr r10, [r2, #24]
+ add r9, r1, #12
+ str r7, [sp, #12] @ 4-byte Spill
+ ldr r7, [r2, #12]
+ str r7, [sp, #4] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ str r7, [sp, #8] @ 4-byte Spill
+ ldm r1, {r2, r4, r12}
+ ldm r9, {r6, r8, r9}
+ ldr r7, [r1, #24]
+ ldr r1, [sp, #12] @ 4-byte Reload
+ subs r5, r2, r5
+ sbcs lr, r4, lr
+ sbcs r4, r12, r1
+ ldr r1, [sp, #4] @ 4-byte Reload
+ str lr, [sp] @ 4-byte Spill
+ sbcs r12, r6, r1
+ ldr r6, [r3, #4]
+ ldr r1, [sp, #8] @ 4-byte Reload
+ sbcs r2, r8, r11
+ ldr r8, [r3, #12]
+ ldr r11, [r3, #16]
+ str r2, [sp, #12] @ 4-byte Spill
+ str r6, [sp, #4] @ 4-byte Spill
+ ldr r6, [r3, #20]
+ sbcs r1, r9, r1
+ sbc r9, r7, r10
+ ldr r7, [r3]
+ ldr r10, [r3, #8]
+ ldr r3, [r3, #24]
+ str r6, [sp, #8] @ 4-byte Spill
+ ldr r6, [sp, #4] @ 4-byte Reload
+ adds r7, r5, r7
+ adcs r6, lr, r6
+ adcs lr, r4, r10
+ mov r10, r1
+ adcs r8, r12, r8
+ adcs r11, r2, r11
+ ldr r2, [sp, #8] @ 4-byte Reload
+ adcs r2, r1, r2
+ asr r1, r9, #31
+ adc r3, r9, r3
+ cmp r1, #0
+ movge r7, r5
+ ldr r5, [sp] @ 4-byte Reload
+ movge lr, r4
+ str r7, [r0]
+ ldr r7, [sp, #12] @ 4-byte Reload
+ movge r6, r5
+ cmp r1, #0
+ movge r8, r12
+ movge r11, r7
+ movge r2, r10
+ cmp r1, #0
+ str r6, [r0, #4]
+ str lr, [r0, #8]
+ movge r3, r9
+ str r8, [r0, #12]
+ str r11, [r0, #16]
+ str r2, [r0, #20]
+ str r3, [r0, #24]
+ add sp, sp, #16
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end107:
+ .size mcl_fp_subNF7L, .Lfunc_end107-mcl_fp_subNF7L
+ .cantunwind
+ .fnend
+
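+@ mcl_fpDbl_add7L: addition of double-width (14-limb) values; the low half is
+@ stored as-is and the high half is conditionally reduced by the modulus (r3).
+@ r0 = result, r1/r2 = operands.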
+ .globl mcl_fpDbl_add7L
+ .align 2
+ .type mcl_fpDbl_add7L,%function
+mcl_fpDbl_add7L: @ @mcl_fpDbl_add7L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #48
+ sub sp, sp, #48
+ ldm r1, {r12, lr}
+ ldr r8, [r1, #8]
+ ldr r10, [r1, #12]
+ ldmib r2, {r6, r7}
+ ldr r4, [r2, #16]
+ ldr r11, [r2]
+ ldr r5, [r2, #12]
+ str r4, [sp, #8] @ 4-byte Spill
+ ldr r4, [r2, #20]
+ adds r9, r11, r12
+ ldr r11, [r1, #44]
+ str r4, [sp, #12] @ 4-byte Spill
+ ldr r4, [r2, #24]
+ str r4, [sp, #20] @ 4-byte Spill
+ ldr r4, [r2, #28]
+ str r4, [sp, #40] @ 4-byte Spill
+ ldr r4, [r2, #32]
+ str r4, [sp, #16] @ 4-byte Spill
+ ldr r4, [r2, #36]
+ str r4, [sp, #24] @ 4-byte Spill
+ ldr r4, [r2, #40]
+ str r4, [sp, #28] @ 4-byte Spill
+ ldr r4, [r2, #44]
+ str r4, [sp, #32] @ 4-byte Spill
+ ldr r4, [r2, #48]
+ ldr r2, [r2, #52]
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [r1, #36]
+ str r4, [sp, #36] @ 4-byte Spill
+ adcs r4, r6, lr
+ add lr, r1, #16
+ adcs r7, r7, r8
+ ldr r8, [r1, #52]
+ adcs r6, r5, r10
+ ldr r5, [r1, #32]
+ ldr r10, [r1, #48]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #40]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldm lr, {r1, r2, r12, lr}
+ str r9, [r0]
+ stmib r0, {r4, r7}
+ str r6, [r0, #12]
+ ldr r4, [sp, #8] @ 4-byte Reload
+ ldr r7, [sp] @ 4-byte Reload
+ adcs r1, r4, r1
+ ldr r4, [sp, #12] @ 4-byte Reload
+ str r1, [r0, #16]
+ ldr r1, [sp, #20] @ 4-byte Reload
+ adcs r2, r4, r2
+ str r2, [r0, #20]
+ adcs r1, r1, r12
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [r0, #24]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r2, r2, lr
+ str r2, [sp, #20] @ 4-byte Spill
+ adcs r2, r1, r5
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r2, [sp, #16] @ 4-byte Spill
+ adcs r5, r1, r7
+ ldr r1, [sp, #28] @ 4-byte Reload
+ ldr r7, [sp, #4] @ 4-byte Reload
+ adcs r12, r1, r7
+ ldr r1, [sp, #32] @ 4-byte Reload
+ mov r7, #0
+ str r12, [sp, #40] @ 4-byte Spill
+ adcs lr, r1, r11
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r4, r1, r10
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r9, r1, r8
+ adc r1, r7, #0
+ str r1, [sp, #44] @ 4-byte Spill
+ ldm r3, {r1, r7, r11}
+ ldr r10, [r3, #12]
+ ldr r8, [r3, #16]
+ ldr r6, [r3, #20]
+ ldr r3, [r3, #24]
+ str r3, [sp, #36] @ 4-byte Spill
+ ldr r3, [sp, #20] @ 4-byte Reload
+ subs r1, r3, r1
+ sbcs r7, r2, r7
+ sbcs r2, r5, r11
+ mov r11, lr
+ sbcs r10, r12, r10
+ sbcs r12, lr, r8
+ sbcs lr, r4, r6
+ ldr r6, [sp, #36] @ 4-byte Reload
+ sbcs r8, r9, r6
+ ldr r6, [sp, #44] @ 4-byte Reload
+ sbc r6, r6, #0
+ ands r6, r6, #1
+ movne r1, r3
+ movne r2, r5
+ str r1, [r0, #28]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ movne r7, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ cmp r6, #0
+ movne r12, r11
+ movne lr, r4
+ str r7, [r0, #32]
+ str r2, [r0, #36]
+ movne r10, r1
+ cmp r6, #0
+ movne r8, r9
+ str r10, [r0, #40]
+ str r12, [r0, #44]
+ str lr, [r0, #48]
+ str r8, [r0, #52]
+ add sp, sp, #48
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end108:
+ .size mcl_fpDbl_add7L, .Lfunc_end108-mcl_fpDbl_add7L
+ .cantunwind
+ .fnend
+
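+@ mcl_fpDbl_sub7L: subtraction of double-width (14-limb) values; the modulus
+@ (r3) is conditionally added back into the high half on borrow.
+@ r0 = result, r1 = minuend, r2 = subtrahend.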
+ .globl mcl_fpDbl_sub7L
+ .align 2
+ .type mcl_fpDbl_sub7L,%function
+mcl_fpDbl_sub7L: @ @mcl_fpDbl_sub7L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #68
+ sub sp, sp, #68
+ ldr r7, [r2, #32]
+ add r8, r1, #16
+ str r7, [sp, #44] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r7, [sp, #48] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ str r7, [sp, #52] @ 4-byte Spill
+ ldr r7, [r2, #44]
+ str r7, [sp, #56] @ 4-byte Spill
+ ldr r7, [r2, #48]
+ str r7, [sp, #60] @ 4-byte Spill
+ ldr r7, [r2, #52]
+ str r7, [sp, #64] @ 4-byte Spill
+ ldm r2, {r4, r7}
+ str r7, [sp, #20] @ 4-byte Spill
+ ldr r7, [r2, #8]
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r2, #12]
+ str r7, [sp, #12] @ 4-byte Spill
+ ldr r7, [r2, #16]
+ str r7, [sp, #32] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ str r7, [sp, #40] @ 4-byte Spill
+ ldr r7, [r2, #28]
+ ldr r2, [r2, #20]
+ str r7, [sp, #36] @ 4-byte Spill
+ str r2, [sp, #24] @ 4-byte Spill
+ ldmib r1, {r2, r12, lr}
+ ldm r8, {r5, r6, r8}
+ ldr r7, [r1, #28]
+ ldr r11, [r1]
+ ldr r9, [r1, #32]
+ ldr r10, [r1, #44]
+ str r7, [sp, #28] @ 4-byte Spill
+ ldr r7, [r1, #36]
+ subs r4, r11, r4
+ str r4, [r0]
+ str r7, [sp, #4] @ 4-byte Spill
+ ldr r7, [r1, #40]
+ str r7, [sp, #8] @ 4-byte Spill
+ ldr r7, [r1, #48]
+ ldr r1, [r1, #52]
+ str r7, [sp] @ 4-byte Spill
+ ldr r7, [sp, #20] @ 4-byte Reload
+ sbcs r2, r2, r7
+ ldr r7, [sp, #16] @ 4-byte Reload
+ sbcs r12, r12, r7
+ ldr r7, [sp, #12] @ 4-byte Reload
+ stmib r0, {r2, r12}
+ ldr r2, [sp, #32] @ 4-byte Reload
+ sbcs lr, lr, r7
+ ldr r7, [sp, #28] @ 4-byte Reload
+ str lr, [r0, #12]
+ sbcs r2, r5, r2
+ str r2, [r0, #16]
+ ldr r2, [sp, #24] @ 4-byte Reload
+ sbcs r2, r6, r2
+ ldr r6, [sp, #8] @ 4-byte Reload
+ str r2, [r0, #20]
+ ldr r2, [sp, #40] @ 4-byte Reload
+ sbcs r2, r8, r2
+ mov r8, #0
+ str r2, [r0, #24]
+ ldr r2, [sp, #36] @ 4-byte Reload
+ sbcs lr, r7, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ ldr r7, [sp, #4] @ 4-byte Reload
+ sbcs r4, r9, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ ldr r9, [r3, #20]
+ str r4, [sp, #44] @ 4-byte Spill
+ sbcs r7, r7, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ sbcs r12, r6, r2
+ ldr r2, [sp, #56] @ 4-byte Reload
+ ldr r6, [sp] @ 4-byte Reload
+ str r12, [sp, #52] @ 4-byte Spill
+ sbcs r11, r10, r2
+ ldr r2, [sp, #60] @ 4-byte Reload
+ ldr r10, [r3, #12]
+ sbcs r6, r6, r2
+ ldr r2, [sp, #64] @ 4-byte Reload
+ sbcs r5, r1, r2
+ ldr r2, [r3, #8]
+ sbc r1, r8, #0
+ ldr r8, [r3, #4]
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [r3]
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [r3, #16]
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [r3, #24]
+ ldr r3, [sp, #60] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adds r1, lr, r1
+ adcs r4, r4, r8
+ adcs r2, r7, r2
+ adcs r10, r12, r10
+ adcs r12, r11, r3
+ ldr r3, [sp, #56] @ 4-byte Reload
+ adcs r8, r6, r9
+ adc r9, r5, r3
+ ldr r3, [sp, #64] @ 4-byte Reload
+ ands r3, r3, #1
+ moveq r1, lr
+ moveq r2, r7
+ str r1, [r0, #28]
+ ldr r1, [sp, #44] @ 4-byte Reload
+ moveq r4, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ cmp r3, #0
+ moveq r12, r11
+ moveq r8, r6
+ str r4, [r0, #32]
+ str r2, [r0, #36]
+ moveq r10, r1
+ cmp r3, #0
+ moveq r9, r5
+ str r10, [r0, #40]
+ str r12, [r0, #44]
+ str r8, [r0, #48]
+ str r9, [r0, #52]
+ add sp, sp, #68
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end109:
+ .size mcl_fpDbl_sub7L, .Lfunc_end109-mcl_fpDbl_sub7L
+ .cantunwind
+ .fnend
+
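+@ .LmulPv256x32: local helper; multiplies the 8-limb (256-bit) value at r1 by
+@ the 32-bit word in r2 and stores the 9-limb product at r0.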
+ .align 2
+ .type .LmulPv256x32,%function
+.LmulPv256x32: @ @mulPv256x32
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r11, lr}
+ ldr r12, [r1]
+ ldmib r1, {r3, lr}
+ ldr r9, [r1, #12]
+ umull r4, r8, lr, r2
+ umull lr, r6, r12, r2
+ mov r5, r4
+ mov r7, r6
+ str lr, [r0]
+ umull lr, r12, r9, r2
+ umlal r7, r5, r3, r2
+ str r5, [r0, #8]
+ str r7, [r0, #4]
+ umull r5, r7, r3, r2
+ adds r3, r6, r5
+ adcs r3, r7, r4
+ adcs r3, r8, lr
+ str r3, [r0, #12]
+ ldr r3, [r1, #16]
+ umull r7, r6, r3, r2
+ adcs r3, r12, r7
+ str r3, [r0, #16]
+ ldr r3, [r1, #20]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #20]
+ ldr r3, [r1, #24]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #24]
+ ldr r1, [r1, #28]
+ umull r3, r7, r1, r2
+ adcs r1, r6, r3
+ str r1, [r0, #28]
+ adc r1, r7, #0
+ str r1, [r0, #32]
+ pop {r4, r5, r6, r7, r8, r9, r11, lr}
+ mov pc, lr
+.Lfunc_end110:
+ .size .LmulPv256x32, .Lfunc_end110-.LmulPv256x32
+ .cantunwind
+ .fnend
+
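+@ mcl_fp_mulUnitPre8L: 8-limb by 32-bit multiplication; wraps .LmulPv256x32 and
+@ copies the 9-limb product to r0.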
+ .globl mcl_fp_mulUnitPre8L
+ .align 2
+ .type mcl_fp_mulUnitPre8L,%function
+mcl_fp_mulUnitPre8L: @ @mcl_fp_mulUnitPre8L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r11, lr}
+ push {r4, r5, r6, r7, r11, lr}
+ .pad #40
+ sub sp, sp, #40
+ mov r4, r0
+ mov r0, sp
+ bl .LmulPv256x32(PLT)
+ ldr r0, [sp, #32]
+ add lr, sp, #16
+ ldr r12, [sp, #28]
+ ldm lr, {r1, r3, lr}
+ ldm sp, {r2, r5, r6, r7}
+ str r0, [r4, #32]
+ add r0, r4, #16
+ stm r4, {r2, r5, r6, r7}
+ stm r0, {r1, r3, lr}
+ str r12, [r4, #28]
+ add sp, sp, #40
+ pop {r4, r5, r6, r7, r11, lr}
+ mov pc, lr
+.Lfunc_end111:
+ .size mcl_fp_mulUnitPre8L, .Lfunc_end111-mcl_fp_mulUnitPre8L
+ .cantunwind
+ .fnend
+
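+@ mcl_fpDbl_mulPre8L: full 8-limb x 8-limb multiplication (16-limb product),
+@ split Karatsuba-style into three mcl_fpDbl_mulPre4L calls on the halves plus
+@ carry/sign fix-up. r0 = product, r1/r2 = operands.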
+ .globl mcl_fpDbl_mulPre8L
+ .align 2
+ .type mcl_fpDbl_mulPre8L,%function
+mcl_fpDbl_mulPre8L: @ @mcl_fpDbl_mulPre8L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #124
+ sub sp, sp, #124
+ mov r6, r2
+ mov r5, r1
+ mov r4, r0
+ bl mcl_fpDbl_mulPre4L(PLT)
+ add r0, r4, #32
+ add r1, r5, #16
+ add r2, r6, #16
+ bl mcl_fpDbl_mulPre4L(PLT)
+ ldm r6, {r12, lr}
+ ldr r7, [r6, #16]
+ ldr r9, [r6, #8]
+ ldr r3, [r6, #12]
+ add r6, r6, #20
+ mov r8, #0
+ ldm r6, {r0, r1, r6}
+ adds r2, r12, r7
+ adcs r0, lr, r0
+ str r2, [sp, #56] @ 4-byte Spill
+ adcs r1, r9, r1
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r9, [r5]
+ str r1, [sp, #44] @ 4-byte Spill
+ adcs r1, r3, r6
+ str r1, [sp, #48] @ 4-byte Spill
+ adc r6, r8, #0
+ ldmib r5, {r8, r10, r12}
+ ldr r7, [r5, #16]
+ ldr r3, [r5, #20]
+ ldr lr, [r5, #24]
+ ldr r11, [r5, #28]
+ str r2, [sp, #60]
+ str r0, [sp, #64]
+ mov r0, #0
+ add r2, sp, #60
+ adds r5, r9, r7
+ ldr r7, [sp, #44] @ 4-byte Reload
+ adcs r8, r8, r3
+ str r5, [sp, #76]
+ adcs r10, r10, lr
+ str r8, [sp, #80]
+ adcs r9, r12, r11
+ str r10, [sp, #84]
+ str r7, [sp, #68]
+ str r1, [sp, #72]
+ adc r11, r0, #0
+ add r0, sp, #92
+ add r1, sp, #76
+ str r9, [sp, #88]
+ bl mcl_fpDbl_mulPre4L(PLT)
+ ldr r0, [sp, #56] @ 4-byte Reload
+ ldr r1, [sp, #52] @ 4-byte Reload
+ cmp r6, #0
+ ldr r3, [sp, #48] @ 4-byte Reload
+ and r12, r6, r11
+ ldr lr, [sp, #120]
+ moveq r5, r6
+ moveq r9, r6
+ moveq r10, r6
+ moveq r8, r6
+ ldr r6, [sp, #116]
+ adds r0, r5, r0
+ adcs r1, r8, r1
+ adcs r2, r10, r7
+ mov r7, #0
+ adcs r3, r9, r3
+ adc r7, r7, #0
+ cmp r11, #0
+ moveq r0, r5
+ ldr r5, [sp, #108]
+ moveq r2, r10
+ moveq r3, r9
+ moveq r7, r11
+ moveq r1, r8
+ adds r8, r0, r5
+ ldr r5, [sp, #112]
+ adcs r10, r1, r5
+ adcs r9, r2, r6
+ ldr r6, [r4]
+ ldmib r4, {r5, r11}
+ ldr r2, [sp, #92]
+ adcs lr, r3, lr
+ add r3, sp, #96
+ adc r12, r7, r12
+ ldr r7, [r4, #12]
+ ldm r3, {r0, r1, r3}
+ subs r2, r2, r6
+ str r2, [sp, #52] @ 4-byte Spill
+ sbcs r0, r0, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ sbcs r0, r1, r11
+ str r0, [sp, #44] @ 4-byte Spill
+ sbcs r0, r3, r7
+ ldr r7, [r4, #20]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r4, #16]
+ str r0, [sp, #56] @ 4-byte Spill
+ sbcs r0, r8, r0
+ ldr r8, [r4, #28]
+ str r0, [sp, #28] @ 4-byte Spill
+ sbcs r0, r10, r7
+ ldr r10, [r4, #24]
+ str r0, [sp, #24] @ 4-byte Spill
+ sbcs r0, r9, r10
+ str r0, [sp, #20] @ 4-byte Spill
+ sbcs r0, lr, r8
+ add lr, r4, #32
+ str r0, [sp, #16] @ 4-byte Spill
+ sbc r0, r12, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm lr, {r5, r9, lr}
+ ldr r6, [sp, #52] @ 4-byte Reload
+ ldr r12, [r4, #44]
+ ldr r2, [r4, #48]
+ ldr r0, [r4, #52]
+ ldr r1, [r4, #56]
+ ldr r3, [r4, #60]
+ subs r6, r6, r5
+ str r1, [sp, #36] @ 4-byte Spill
+ str r3, [sp, #32] @ 4-byte Spill
+ str r6, [sp] @ 4-byte Spill
+ ldr r6, [sp, #48] @ 4-byte Reload
+ sbcs r11, r6, r9
+ ldr r6, [sp, #44] @ 4-byte Reload
+ sbcs r6, r6, lr
+ str r6, [sp, #4] @ 4-byte Spill
+ ldr r6, [sp, #40] @ 4-byte Reload
+ sbcs r6, r6, r12
+ str r6, [sp, #8] @ 4-byte Spill
+ ldr r6, [sp, #28] @ 4-byte Reload
+ sbcs r6, r6, r2
+ str r6, [sp, #28] @ 4-byte Spill
+ ldr r6, [sp, #24] @ 4-byte Reload
+ sbcs r6, r6, r0
+ str r6, [sp, #40] @ 4-byte Spill
+ mov r6, r0
+ ldr r0, [sp, #20] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ sbcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ sbc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adds r3, r0, r1
+ ldr r0, [sp, #4] @ 4-byte Reload
+ adcs r7, r7, r11
+ str r3, [r4, #16]
+ str r7, [r4, #20]
+ adcs r3, r10, r0
+ ldr r0, [sp, #8] @ 4-byte Reload
+ str r3, [r4, #24]
+ ldr r3, [sp, #32] @ 4-byte Reload
+ adcs r1, r8, r0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ str r1, [r4, #28]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [r4, #32]
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r1, r9, r1
+ str r1, [r4, #36]
+ adcs r0, lr, r0
+ str r0, [r4, #40]
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r12, r0
+ add r12, r4, #48
+ str r0, [r4, #44]
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #36] @ 4-byte Reload
+ adcs r1, r6, #0
+ adcs r2, r2, #0
+ adc r3, r3, #0
+ stm r12, {r0, r1, r2, r3}
+ add sp, sp, #124
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end112:
+ .size mcl_fpDbl_mulPre8L, .Lfunc_end112-mcl_fpDbl_mulPre8L
+ .cantunwind
+ .fnend
+
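+@ mcl_fpDbl_sqrPre8L: full squaring of an 8-limb value; same three-call
+@ mcl_fpDbl_mulPre4L structure as mcl_fpDbl_mulPre8L, with r1 used for both
+@ operands. r0 = 16-limb result.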
+ .globl mcl_fpDbl_sqrPre8L
+ .align 2
+ .type mcl_fpDbl_sqrPre8L,%function
+mcl_fpDbl_sqrPre8L: @ @mcl_fpDbl_sqrPre8L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #124
+ sub sp, sp, #124
+ mov r5, r1
+ mov r4, r0
+ mov r2, r5
+ bl mcl_fpDbl_mulPre4L(PLT)
+ add r1, r5, #16
+ add r0, r4, #32
+ mov r2, r1
+ bl mcl_fpDbl_mulPre4L(PLT)
+ ldm r5, {r0, r8, lr}
+ ldr r3, [r5, #16]
+ ldr r2, [r5, #20]
+ ldr r6, [r5, #24]
+ ldr r12, [r5, #12]
+ ldr r1, [r5, #28]
+ adds r9, r0, r3
+ add r0, sp, #64
+ adcs r5, r8, r2
+ str r9, [sp, #76]
+ str r9, [sp, #60]
+ add r2, sp, #60
+ adcs r6, lr, r6
+ str r5, [sp, #80]
+ adcs r7, r12, r1
+ str r6, [sp, #84]
+ add r1, sp, #76
+ str r7, [sp, #88]
+ stm r0, {r5, r6, r7}
+ mov r0, #0
+ adc r8, r0, #0
+ add r0, sp, #92
+ bl mcl_fpDbl_mulPre4L(PLT)
+ adds r12, r9, r9
+ adcs lr, r5, r5
+ adcs r9, r6, r6
+ add r6, sp, #112
+ ldm r6, {r0, r5, r6}
+ ldr r1, [sp, #108]
+ adc r10, r7, r7
+ adds r2, r1, r12
+ adcs r3, r0, lr
+ adcs r12, r5, r9
+ adcs lr, r6, r10
+ adc r7, r8, r7, lsr #31
+ cmp r8, #0
+ moveq lr, r6
+ add r6, sp, #92
+ moveq r7, r8
+ moveq r12, r5
+ moveq r3, r0
+ moveq r2, r1
+ ldm r4, {r8, r9, r10, r11}
+ ldm r6, {r0, r1, r5, r6}
+ subs r0, r0, r8
+ ldr r8, [r4, #20]
+ str r0, [sp, #52] @ 4-byte Spill
+ sbcs r0, r1, r9
+ ldr r9, [r4, #24]
+ str r0, [sp, #48] @ 4-byte Spill
+ sbcs r0, r5, r10
+ ldr r10, [r4, #28]
+ str r0, [sp, #44] @ 4-byte Spill
+ sbcs r0, r6, r11
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r4, #16]
+ str r0, [sp, #56] @ 4-byte Spill
+ sbcs r0, r2, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ sbcs r0, r3, r8
+ str r0, [sp, #24] @ 4-byte Spill
+ sbcs r0, r12, r9
+ str r0, [sp, #20] @ 4-byte Spill
+ sbcs r0, lr, r10
+ add lr, r4, #32
+ str r0, [sp, #16] @ 4-byte Spill
+ sbc r0, r7, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm lr, {r5, r7, lr}
+ ldr r6, [sp, #52] @ 4-byte Reload
+ ldr r12, [r4, #44]
+ ldr r2, [r4, #48]
+ ldr r0, [r4, #52]
+ ldr r1, [r4, #56]
+ ldr r3, [r4, #60]
+ subs r6, r6, r5
+ str r1, [sp, #36] @ 4-byte Spill
+ str r3, [sp, #32] @ 4-byte Spill
+ str r6, [sp] @ 4-byte Spill
+ ldr r6, [sp, #48] @ 4-byte Reload
+ sbcs r11, r6, r7
+ ldr r6, [sp, #44] @ 4-byte Reload
+ sbcs r6, r6, lr
+ str r6, [sp, #4] @ 4-byte Spill
+ ldr r6, [sp, #40] @ 4-byte Reload
+ sbcs r6, r6, r12
+ str r6, [sp, #8] @ 4-byte Spill
+ ldr r6, [sp, #28] @ 4-byte Reload
+ sbcs r6, r6, r2
+ str r6, [sp, #28] @ 4-byte Spill
+ ldr r6, [sp, #24] @ 4-byte Reload
+ sbcs r6, r6, r0
+ str r6, [sp, #40] @ 4-byte Spill
+ mov r6, r0
+ ldr r0, [sp, #20] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ sbcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ sbc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adds r3, r1, r0
+ ldr r0, [sp, #4] @ 4-byte Reload
+ adcs r1, r11, r8
+ str r3, [r4, #16]
+ str r1, [r4, #20]
+ adcs r3, r0, r9
+ ldr r0, [sp, #8] @ 4-byte Reload
+ str r3, [r4, #24]
+ ldr r3, [sp, #32] @ 4-byte Reload
+ adcs r1, r0, r10
+ ldr r0, [sp, #28] @ 4-byte Reload
+ str r1, [r4, #28]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [r4, #32]
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r1, r1, r7
+ str r1, [r4, #36]
+ adcs r0, r0, lr
+ str r0, [r4, #40]
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ add r12, r4, #48
+ str r0, [r4, #44]
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ adcs r1, r6, #0
+ adcs r2, r2, #0
+ adc r3, r3, #0
+ stm r12, {r0, r1, r2, r3}
+ add sp, sp, #124
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end113:
+ .size mcl_fpDbl_sqrPre8L, .Lfunc_end113-mcl_fpDbl_sqrPre8L
+ .cantunwind
+ .fnend
+
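+@ mcl_fp_mont8L: 8-limb Montgomery multiplication. Interleaves 32-bit partial
+@ products (.LmulPv256x32) with reduction steps driven by the constant loaded
+@ from [r3, #-4], and finishes with a conditional subtraction of the modulus.
+@ r0 = result, r1/r2 = operands, r3 = modulus.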
+ .globl mcl_fp_mont8L
+ .align 2
+ .type mcl_fp_mont8L,%function
+mcl_fp_mont8L: @ @mcl_fp_mont8L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #724
+ sub sp, sp, #724
+ mov r7, r2
+ ldr r5, [r3, #-4]
+ str r0, [sp, #60] @ 4-byte Spill
+ add r0, sp, #680
+ str r3, [sp, #64] @ 4-byte Spill
+ str r1, [sp, #68] @ 4-byte Spill
+ mov r4, r3
+ mov r11, r1
+ ldr r2, [r7]
+ str r7, [sp, #76] @ 4-byte Spill
+ str r5, [sp, #72] @ 4-byte Spill
+ bl .LmulPv256x32(PLT)
+ ldr r0, [sp, #684]
+ ldr r9, [sp, #680]
+ mov r1, r4
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #688]
+ mul r2, r9, r5
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #692]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #712]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #708]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #704]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #700]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #696]
+ str r0, [sp, #28] @ 4-byte Spill
+ add r0, sp, #640
+ bl .LmulPv256x32(PLT)
+ ldr r0, [sp, #672]
+ add r10, sp, #644
+ ldr r4, [sp, #656]
+ ldr r6, [sp, #640]
+ mov r1, r11
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #668]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #664]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #660]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r5, r8, r10}
+ ldr r2, [r7, #4]
+ add r0, sp, #600
+ bl .LmulPv256x32(PLT)
+ adds r0, r6, r9
+ ldr r2, [sp, #12] @ 4-byte Reload
+ mov r1, #0
+ add r12, sp, #604
+ ldr r9, [sp, #628]
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r8, r0
+ ldr r8, [sp, #632]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r10, r10, r0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #600]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r11, r2, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ ldr r2, [sp, #20] @ 4-byte Reload
+ adcs r7, r2, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ ldr r2, [sp, #24] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ adc r0, r1, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r12, {r0, r1, r2, r3, r6, r12}
+ ldr lr, [sp, #48] @ 4-byte Reload
+ ldr r5, [sp, #44] @ 4-byte Reload
+ adds r4, lr, r4
+ adcs r0, r5, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ adcs r0, r10, r1
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #44] @ 4-byte Spill
+ adcs r0, r11, r6
+ ldr r6, [sp, #64] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ adcs r0, r7, r12
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ mov r1, r6
+ adcs r0, r0, r9
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #28] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ mul r2, r4, r0
+ add r0, sp, #560
+ bl .LmulPv256x32(PLT)
+ ldr r0, [sp, #592]
+ ldr r5, [sp, #76] @ 4-byte Reload
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r7, [sp, #576]
+ ldr r10, [sp, #560]
+ ldr r11, [sp, #564]
+ ldr r8, [sp, #568]
+ ldr r9, [sp, #572]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #588]
+ ldr r2, [r5, #8]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #584]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #580]
+ str r0, [sp, #8] @ 4-byte Spill
+ add r0, sp, #520
+ bl .LmulPv256x32(PLT)
+ adds r0, r4, r10
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #520
+ ldr r4, [sp, #544]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r10, r0, r8
+ ldr r0, [sp, #48] @ 4-byte Reload
+ ldr r8, [sp, #552]
+ adcs r11, r0, r9
+ ldr r0, [sp, #44] @ 4-byte Reload
+ ldr r9, [sp, #548]
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #56] @ 4-byte Reload
+ adds r7, r7, r0
+ adcs r0, r10, r1
+ mov r1, r6
+ str r0, [sp, #56] @ 4-byte Spill
+ adcs r0, r11, r2
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #28] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #480
+ bl .LmulPv256x32(PLT)
+ ldr r0, [sp, #512]
+ ldr r2, [r5, #12]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r4, [sp, #500]
+ ldr r6, [sp, #496]
+ ldr r10, [sp, #480]
+ ldr r11, [sp, #484]
+ ldr r8, [sp, #488]
+ ldr r9, [sp, #492]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #508]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #504]
+ str r0, [sp, #12] @ 4-byte Spill
+ add r0, sp, #440
+ bl .LmulPv256x32(PLT)
+ adds r0, r7, r10
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #440
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r5, r0, r11
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r10, r0, r8
+ ldr r0, [sp, #48] @ 4-byte Reload
+ ldr r8, [sp, #472]
+ adcs r11, r0, r9
+ ldr r0, [sp, #44] @ 4-byte Reload
+ ldr r9, [sp, #468]
+ adcs r0, r0, r6
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #464]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r6, r0, r1
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r7, r5, r0
+ adcs r0, r10, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ adcs r0, r11, r2
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ adcs r0, r6, r4
+ ldr r6, [sp, #72] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ mul r2, r7, r6
+ adcs r0, r0, r9
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #28] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #400
+ bl .LmulPv256x32(PLT)
+ ldr r0, [sp, #432]
+ ldr r5, [sp, #68] @ 4-byte Reload
+ ldr r4, [sp, #416]
+ ldr r10, [sp, #400]
+ ldr r11, [sp, #404]
+ ldr r8, [sp, #408]
+ ldr r9, [sp, #412]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #428]
+ mov r1, r5
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #424]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #420]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ ldr r2, [r0, #16]
+ add r0, sp, #360
+ bl .LmulPv256x32(PLT)
+ adds r0, r7, r10
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #360
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r7, r0, r11
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r10, r0, r8
+ ldr r0, [sp, #48] @ 4-byte Reload
+ ldr r8, [sp, #392]
+ adcs r11, r0, r9
+ ldr r0, [sp, #44] @ 4-byte Reload
+ ldr r9, [sp, #388]
+ adcs r0, r0, r4
+ ldr r4, [sp, #384]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r7, r7, r0
+ adcs r0, r10, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ adcs r0, r11, r2
+ mul r2, r7, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #28] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #320
+ bl .LmulPv256x32(PLT)
+ ldr r0, [sp, #352]
+ ldr r6, [sp, #340]
+ ldr r4, [sp, #336]
+ ldr r10, [sp, #320]
+ ldr r11, [sp, #324]
+ ldr r8, [sp, #328]
+ ldr r9, [sp, #332]
+ mov r1, r5
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #348]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #344]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ ldr r2, [r0, #20]
+ add r0, sp, #280
+ bl .LmulPv256x32(PLT)
+ adds r0, r7, r10
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #280
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r5, r0, r11
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r10, r0, r8
+ ldr r0, [sp, #48] @ 4-byte Reload
+ ldr r8, [sp, #312]
+ adcs r11, r0, r9
+ ldr r0, [sp, #44] @ 4-byte Reload
+ ldr r9, [sp, #308]
+ adcs r0, r0, r4
+ ldr r4, [sp, #304]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r6, r0, r1
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r7, r5, r0
+ ldr r5, [sp, #64] @ 4-byte Reload
+ adcs r0, r10, r1
+ str r0, [sp, #56] @ 4-byte Spill
+ adcs r0, r11, r2
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ mov r1, r5
+ adcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ adcs r0, r6, r4
+ ldr r6, [sp, #72] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ mul r2, r7, r6
+ adcs r0, r0, r9
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #28] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #240
+ bl .LmulPv256x32(PLT)
+ ldr r0, [sp, #272]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r4, [sp, #256]
+ ldr r10, [sp, #240]
+ ldr r11, [sp, #244]
+ ldr r8, [sp, #248]
+ ldr r9, [sp, #252]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #268]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #264]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #260]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ ldr r2, [r0, #24]
+ add r0, sp, #200
+ bl .LmulPv256x32(PLT)
+ adds r0, r7, r10
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #200
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r7, r0, r11
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r10, r0, r8
+ ldr r0, [sp, #48] @ 4-byte Reload
+ ldr r8, [sp, #232]
+ adcs r11, r0, r9
+ ldr r0, [sp, #44] @ 4-byte Reload
+ ldr r9, [sp, #228]
+ adcs r0, r0, r4
+ ldr r4, [sp, #224]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r7, r7, r0
+ adcs r0, r10, r1
+ mov r1, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ adcs r0, r11, r2
+ mul r2, r7, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #28] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #160
+ bl .LmulPv256x32(PLT)
+ ldr r0, [sp, #192]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r6, [sp, #184]
+ ldr r4, [sp, #180]
+ ldr r5, [sp, #176]
+ ldr r10, [sp, #160]
+ ldr r11, [sp, #164]
+ ldr r8, [sp, #168]
+ ldr r9, [sp, #172]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #188]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ ldr r2, [r0, #28]
+ add r0, sp, #120
+ bl .LmulPv256x32(PLT)
+ adds r0, r7, r10
+ ldr r1, [sp, #52] @ 4-byte Reload
+ ldr r2, [sp, #16] @ 4-byte Reload
+ ldr r12, [sp, #124]
+ ldr r3, [sp, #128]
+ add lr, sp, #136
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r11
+ adcs r8, r1, r8
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r9, r1, r9
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r1, r5
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r11, r1, r4
+ ldr r1, [sp, #36] @ 4-byte Reload
+ ldr r4, [sp, #132]
+ adcs r1, r1, r6
+ ldr r6, [sp, #152]
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r10, r1, r2
+ ldr r1, [sp, #28] @ 4-byte Reload
+ ldr r2, [sp, #20] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #120]
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #24] @ 4-byte Reload
+ adc r1, r1, #0
+ adds r5, r0, r2
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r8, r8, r12
+ str r1, [sp, #52] @ 4-byte Spill
+ adcs r3, r9, r3
+ mul r7, r5, r0
+ ldm lr, {r0, r1, r2, lr}
+ str r3, [sp, #48] @ 4-byte Spill
+ ldr r3, [sp, #76] @ 4-byte Reload
+ adcs r3, r3, r4
+ ldr r4, [sp, #64] @ 4-byte Reload
+ adcs r9, r11, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ str r3, [sp, #44] @ 4-byte Spill
+ adcs r0, r0, r1
+ mov r1, r4
+ str r0, [sp, #68] @ 4-byte Spill
+ adcs r0, r10, r2
+ mov r2, r7
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r10, r0, r6
+ mov r0, #0
+ adc r11, r0, #0
+ add r0, sp, #80
+ bl .LmulPv256x32(PLT)
+ add r3, sp, #80
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r5, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs lr, r8, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str lr, [sp, #40] @ 4-byte Spill
+ adcs r0, r0, r2
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r7, r0, r3
+ ldr r0, [sp, #96]
+ str r7, [sp, #52] @ 4-byte Spill
+ adcs r9, r9, r0
+ ldr r0, [sp, #100]
+ adcs r12, r1, r0
+ ldr r0, [sp, #104]
+ ldr r1, [sp, #72] @ 4-byte Reload
+ str r12, [sp, #68] @ 4-byte Spill
+ adcs r8, r1, r0
+ ldr r0, [sp, #108]
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str r8, [sp, #72] @ 4-byte Spill
+ adcs r6, r1, r0
+ ldr r0, [sp, #112]
+ adcs r5, r10, r0
+ adc r0, r11, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldm r4, {r1, r2, r3, r11}
+ ldr r0, [r4, #16]
+ ldr r10, [r4, #24]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r4, #20]
+ subs r1, lr, r1
+ ldr lr, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r4, #28]
+ sbcs r2, lr, r2
+ ldr r4, [sp, #48] @ 4-byte Reload
+ sbcs r3, r7, r3
+ sbcs r7, r9, r11
+ mov r11, r6
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ sbcs r0, r12, r0
+ sbcs r12, r8, r4
+ ldr r4, [sp, #64] @ 4-byte Reload
+ sbcs r8, r6, r10
+ mov r10, r5
+ sbcs r4, r5, r4
+ ldr r5, [sp, #76] @ 4-byte Reload
+ sbc r6, r5, #0
+ ldr r5, [sp, #40] @ 4-byte Reload
+ ands r6, r6, #1
+ movne r2, lr
+ movne r1, r5
+ ldr r5, [sp, #60] @ 4-byte Reload
+ str r1, [r5]
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r2, [r5, #4]
+ movne r3, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ cmp r6, #0
+ movne r7, r9
+ str r3, [r5, #8]
+ str r7, [r5, #12]
+ movne r0, r1
+ str r0, [r5, #16]
+ ldr r0, [sp, #72] @ 4-byte Reload
+ movne r12, r0
+ cmp r6, #0
+ movne r8, r11
+ movne r4, r10
+ str r12, [r5, #20]
+ str r8, [r5, #24]
+ str r4, [r5, #28]
+ add sp, sp, #724
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end114:
+ .size mcl_fp_mont8L, .Lfunc_end114-mcl_fp_mont8L
+ .cantunwind
+ .fnend
+
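+@ mcl_fp_montNF8L: 'NF' variant of mcl_fp_mont8L; same word-by-word structure
+@ built on .LmulPv256x32 with the constant loaded from [r3, #-4].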
+ .globl mcl_fp_montNF8L
+ .align 2
+ .type mcl_fp_montNF8L,%function
+mcl_fp_montNF8L: @ @mcl_fp_montNF8L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #716
+ sub sp, sp, #716
+ mov r7, r2
+ ldr r5, [r3, #-4]
+ str r0, [sp, #52] @ 4-byte Spill
+ add r0, sp, #672
+ str r3, [sp, #60] @ 4-byte Spill
+ str r1, [sp, #68] @ 4-byte Spill
+ mov r4, r3
+ mov r10, r1
+ ldr r2, [r7]
+ str r7, [sp, #56] @ 4-byte Spill
+ str r5, [sp, #64] @ 4-byte Spill
+ bl .LmulPv256x32(PLT)
+ ldr r0, [sp, #676]
+ ldr r11, [sp, #672]
+ mov r1, r4
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #680]
+ mul r2, r11, r5
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #684]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #704]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #700]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #696]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #692]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #688]
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #632
+ bl .LmulPv256x32(PLT)
+ ldr r0, [sp, #664]
+ ldr r2, [r7, #4]
+ ldr r4, [sp, #648]
+ ldr r6, [sp, #632]
+ ldr r8, [sp, #636]
+ ldr r5, [sp, #640]
+ ldr r9, [sp, #644]
+ mov r1, r10
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #660]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #656]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #652]
+ str r0, [sp, #4] @ 4-byte Spill
+ add r0, sp, #592
+ bl .LmulPv256x32(PLT)
+ adds r0, r6, r11
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add r6, sp, #596
+ ldr r12, [sp, #616]
+ ldr r3, [sp, #612]
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r8, r0
+ ldr r8, [sp, #620]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r9, r9, r0
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r11, r4, r0
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldr r4, [sp, #592]
+ adcs r0, r1, r0
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r10, r1, r0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r7, r1, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adc r0, r1, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #624]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm r6, {r0, r1, r2, r6}
+ ldr lr, [sp, #40] @ 4-byte Reload
+ ldr r5, [sp, #36] @ 4-byte Reload
+ adds r4, lr, r4
+ adcs r0, r5, r0
+ ldr r5, [sp, #64] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r9, r1
+ str r0, [sp, #44] @ 4-byte Spill
+ adcs r0, r11, r2
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ mul r2, r4, r5
+ adcs r0, r0, r6
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r10, r3
+ str r0, [sp, #32] @ 4-byte Spill
+ adcs r0, r7, r12
+ ldr r7, [sp, #60] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ mov r1, r7
+ adcs r0, r0, r8
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #552
+ bl .LmulPv256x32(PLT)
+ ldr r0, [sp, #584]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r9, [sp, #568]
+ ldr r10, [sp, #552]
+ ldr r11, [sp, #556]
+ ldr r8, [sp, #560]
+ ldr r6, [sp, #564]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #580]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #576]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #572]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ ldr r2, [r0, #8]
+ add r0, sp, #512
+ bl .LmulPv256x32(PLT)
+ adds r0, r4, r10
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #516
+ ldr r4, [sp, #536]
+ ldr r3, [sp, #512]
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r10, r0, r8
+ ldr r0, [sp, #40] @ 4-byte Reload
+ ldr r8, [sp, #540]
+ adcs r11, r0, r6
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #544]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r12, lr}
+ ldr r6, [sp, #48] @ 4-byte Reload
+ adds r9, r6, r3
+ adcs r0, r10, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r11, r1
+ mov r1, r7
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r9, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #472
+ bl .LmulPv256x32(PLT)
+ ldr r0, [sp, #504]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r4, [sp, #492]
+ ldr r7, [sp, #488]
+ ldr r10, [sp, #472]
+ ldr r11, [sp, #476]
+ ldr r8, [sp, #480]
+ ldr r6, [sp, #484]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #500]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #496]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ ldr r2, [r0, #12]
+ add r0, sp, #432
+ bl .LmulPv256x32(PLT)
+ adds r0, r9, r10
+ ldr r1, [sp, #8] @ 4-byte Reload
+ ldr r3, [sp, #432]
+ add lr, sp, #436
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r5, r0, r11
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r10, r0, r8
+ ldr r0, [sp, #40] @ 4-byte Reload
+ ldr r8, [sp, #460]
+ adcs r11, r0, r6
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r7, r0, r7
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #456]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r6, r0, r1
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r9, r5, r3
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #464]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r12, lr}
+ adcs r0, r10, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r11, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ adcs r0, r7, r2
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r6, lr
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ mul r2, r9, r0
+ add r0, sp, #392
+ bl .LmulPv256x32(PLT)
+ ldr r0, [sp, #424]
+ ldr r5, [sp, #56] @ 4-byte Reload
+ ldr r7, [sp, #68] @ 4-byte Reload
+ ldr r4, [sp, #408]
+ ldr r10, [sp, #392]
+ ldr r11, [sp, #396]
+ ldr r8, [sp, #400]
+ ldr r6, [sp, #404]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #420]
+ ldr r2, [r5, #16]
+ mov r1, r7
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #416]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #412]
+ str r0, [sp, #4] @ 4-byte Spill
+ add r0, sp, #352
+ bl .LmulPv256x32(PLT)
+ adds r0, r9, r10
+ ldr r1, [sp, #4] @ 4-byte Reload
+ ldr r3, [sp, #352]
+ add lr, sp, #356
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r9, r0, r11
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r10, r0, r8
+ ldr r0, [sp, #40] @ 4-byte Reload
+ ldr r8, [sp, #380]
+ adcs r11, r0, r6
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #376]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r6, r0, r1
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r9, r9, r3
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #384]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r12, lr}
+ adcs r0, r10, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r11, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r6, lr
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ mul r2, r9, r0
+ add r0, sp, #312
+ bl .LmulPv256x32(PLT)
+ ldr r0, [sp, #344]
+ ldr r2, [r5, #20]
+ ldr r4, [sp, #328]
+ ldr r10, [sp, #312]
+ ldr r11, [sp, #316]
+ ldr r8, [sp, #320]
+ ldr r6, [sp, #324]
+ mov r1, r7
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #340]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #336]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #332]
+ str r0, [sp, #4] @ 4-byte Spill
+ add r0, sp, #272
+ bl .LmulPv256x32(PLT)
+ adds r0, r9, r10
+ ldr r1, [sp, #4] @ 4-byte Reload
+ ldr r3, [sp, #272]
+ add lr, sp, #276
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r5, r0, r11
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r10, r0, r8
+ ldr r0, [sp, #40] @ 4-byte Reload
+ ldr r8, [sp, #300]
+ adcs r11, r0, r6
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r7, r0, r4
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r4, [sp, #296]
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r6, r0, r1
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r9, r5, r3
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #304]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r12, lr}
+ ldr r5, [sp, #60] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r11, r1
+ mov r1, r5
+ str r0, [sp, #44] @ 4-byte Spill
+ adcs r0, r7, r2
+ ldr r7, [sp, #64] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ mul r2, r9, r7
+ adcs r0, r0, r12
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r6, lr
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #232
+ bl .LmulPv256x32(PLT)
+ ldr r0, [sp, #264]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r4, [sp, #248]
+ ldr r10, [sp, #232]
+ ldr r11, [sp, #236]
+ ldr r8, [sp, #240]
+ ldr r6, [sp, #244]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #260]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #256]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #252]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ ldr r2, [r0, #24]
+ add r0, sp, #192
+ bl .LmulPv256x32(PLT)
+ adds r0, r9, r10
+ ldr r1, [sp, #4] @ 4-byte Reload
+ ldr r3, [sp, #192]
+ add lr, sp, #196
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r9, r0, r11
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r10, r0, r8
+ ldr r0, [sp, #40] @ 4-byte Reload
+ ldr r8, [sp, #220]
+ adcs r11, r0, r6
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #216]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r6, r0, r1
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r9, r9, r3
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #224]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r12, lr}
+ adcs r0, r10, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r11, r1
+ mov r1, r5
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r9, r7
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r6, lr
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #152
+ bl .LmulPv256x32(PLT)
+ ldr r0, [sp, #184]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r5, [sp, #176]
+ ldr r4, [sp, #172]
+ ldr r7, [sp, #168]
+ ldr r10, [sp, #152]
+ ldr r11, [sp, #156]
+ ldr r8, [sp, #160]
+ ldr r6, [sp, #164]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #180]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ ldr r2, [r0, #28]
+ add r0, sp, #112
+ bl .LmulPv256x32(PLT)
+ adds r0, r9, r10
+ ldr r1, [sp, #44] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ add lr, sp, #128
+ ldr r12, [sp, #116]
+ ldr r3, [sp, #120]
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r11
+ adcs r1, r1, r8
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r1, r6
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r11, r1, r7
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r10, r1, r4
+ ldr r1, [sp, #28] @ 4-byte Reload
+ ldr r4, [sp, #124]
+ adcs r1, r1, r5
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #24] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #20] @ 4-byte Reload
+ adc r1, r1, r2
+ ldr r2, [sp, #112]
+ str r1, [sp, #40] @ 4-byte Spill
+ adds r5, r0, r2
+ ldr r0, [sp, #64] @ 4-byte Reload
+ mul r9, r5, r0
+ ldm lr, {r0, r1, r2, r6, lr}
+ ldr r8, [sp, #68] @ 4-byte Reload
+ adcs r7, r8, r12
+ ldr r8, [sp, #60] @ 4-byte Reload
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [sp, #56] @ 4-byte Reload
+ adcs r3, r7, r3
+ adcs r11, r11, r4
+ str r3, [sp, #56] @ 4-byte Spill
+ adcs r4, r10, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r2
+ mov r2, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r10, r0, r6
+ add r0, sp, #72
+ adc r7, lr, #0
+ bl .LmulPv256x32(PLT)
+ add r3, sp, #72
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r5, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r5, r0, r1
+ ldr r0, [sp, #56] @ 4-byte Reload
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r11, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #88]
+ adcs r3, r4, r0
+ ldr r0, [sp, #92]
+ str r3, [sp, #40] @ 4-byte Spill
+ adcs r6, r1, r0
+ ldr r0, [sp, #96]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r6, [sp, #64] @ 4-byte Spill
+ adcs r12, r1, r0
+ ldr r0, [sp, #100]
+ ldr r1, [sp, #104]
+ str r12, [sp, #68] @ 4-byte Spill
+ adcs r11, r10, r0
+ adc r4, r7, r1
+ ldm r8, {r1, r2, r9, r10}
+ ldr r0, [r8, #20]
+ ldr r7, [r8, #16]
+ ldr lr, [r8, #28]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r8, #24]
+ str r0, [sp, #44] @ 4-byte Spill
+ mov r0, r5
+ subs r5, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ sbcs r8, r1, r2
+ ldr r2, [sp, #56] @ 4-byte Reload
+ sbcs r9, r2, r9
+ sbcs r10, r3, r10
+ ldr r3, [sp, #36] @ 4-byte Reload
+ sbcs r7, r6, r7
+ sbcs r6, r12, r3
+ ldr r3, [sp, #44] @ 4-byte Reload
+ sbcs r12, r11, r3
+ sbc lr, r4, lr
+ cmp lr, #0
+ movlt r5, r0
+ ldr r0, [sp, #40] @ 4-byte Reload
+ movlt r8, r1
+ movlt r9, r2
+ cmp lr, #0
+ movlt r10, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ movlt r7, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ movlt r6, r0
+ cmp lr, #0
+ movlt lr, r4
+ ldr r4, [sp, #52] @ 4-byte Reload
+ movlt r12, r11
+ add r0, r4, #20
+ stm r4, {r5, r8, r9, r10}
+ str r7, [r4, #16]
+ stm r0, {r6, r12, lr}
+ add sp, sp, #716
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end115:
+ .size mcl_fp_montNF8L, .Lfunc_end115-mcl_fp_montNF8L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montRed8L
+ .align 2
+ .type mcl_fp_montRed8L,%function
+mcl_fp_montRed8L: @ @mcl_fp_montRed8L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #420
+ sub sp, sp, #420
+ mov r5, r2
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r2, [r1, #4]
+ ldr r4, [r1]
+ ldr r9, [r1, #40]
+ ldr r10, [r1, #44]
+ ldr r0, [r5]
+ ldr r11, [r5, #-4]
+ str r2, [sp, #56] @ 4-byte Spill
+ ldr r2, [r1, #8]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [r5, #4]
+ str r2, [sp, #52] @ 4-byte Spill
+ ldr r2, [r1, #12]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [r5, #8]
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r2, [r1, #16]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [r5, #12]
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r2, [r1, #20]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [r5, #16]
+ str r2, [sp, #36] @ 4-byte Spill
+ ldr r2, [r1, #24]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [r5, #20]
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [r1, #28]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r5, #24]
+ str r2, [sp, #44] @ 4-byte Spill
+ mul r2, r4, r11
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r5, #28]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [r1, #32]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [r1, #36]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [r1, #48]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [r1, #52]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [r1, #56]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [r1, #60]
+ mov r1, r5
+ str r0, [sp, #28] @ 4-byte Spill
+ add r0, sp, #376
+ bl .LmulPv256x32(PLT)
+ add lr, sp, #396
+ ldr r8, [sp, #408]
+ add r6, sp, #384
+ ldm lr, {r3, r12, lr}
+ ldr r7, [sp, #376]
+ ldr r1, [sp, #380]
+ ldm r6, {r0, r2, r6}
+ adds r4, r4, r7
+ ldr r4, [sp, #56] @ 4-byte Reload
+ adcs r4, r4, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r4, r11
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, #0
+ adcs r9, r9, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ adcs r0, r10, #0
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #4] @ 4-byte Spill
+ add r0, sp, #336
+ bl .LmulPv256x32(PLT)
+ add lr, sp, #356
+ ldr r8, [sp, #368]
+ add r6, sp, #340
+ ldm lr, {r3, r12, lr}
+ ldr r7, [sp, #336]
+ ldm r6, {r0, r1, r2, r6}
+ adds r4, r4, r7
+ ldr r4, [sp, #56] @ 4-byte Reload
+ adcs r4, r4, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r10, r0, r2
+ ldr r0, [sp, #36] @ 4-byte Reload
+ mul r2, r4, r11
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r9, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #4] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #8] @ 4-byte Spill
+ add r0, sp, #296
+ bl .LmulPv256x32(PLT)
+ add r8, sp, #320
+ add lr, sp, #300
+ ldm r8, {r6, r7, r8}
+ ldr r1, [sp, #296]
+ ldm lr, {r0, r2, r3, r12, lr}
+ adds r1, r4, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r4, r1, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ mov r1, r5
+ adcs r10, r10, r2
+ mul r2, r4, r11
+ adcs r9, r0, r3
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ add r0, sp, #256
+ bl .LmulPv256x32(PLT)
+ add lr, sp, #276
+ ldr r8, [sp, #288]
+ add r6, sp, #260
+ ldm lr, {r3, r12, lr}
+ ldr r7, [sp, #256]
+ ldm r6, {r0, r1, r2, r6}
+ adds r4, r4, r7
+ adcs r4, r10, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r9, r9, r1
+ mov r1, r5
+ adcs r10, r0, r2
+ ldr r0, [sp, #52] @ 4-byte Reload
+ mul r2, r4, r11
+ adcs r0, r0, r6
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #216
+ bl .LmulPv256x32(PLT)
+ add r8, sp, #240
+ add lr, sp, #220
+ ldm r8, {r6, r7, r8}
+ ldr r1, [sp, #216]
+ ldm lr, {r0, r2, r3, r12, lr}
+ adds r1, r4, r1
+ adcs r4, r9, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ mov r1, r5
+ adcs r10, r10, r2
+ mul r2, r4, r11
+ adcs r9, r0, r3
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #176
+ bl .LmulPv256x32(PLT)
+ add lr, sp, #196
+ ldr r8, [sp, #208]
+ add r6, sp, #180
+ ldm lr, {r3, r12, lr}
+ ldr r7, [sp, #176]
+ ldm r6, {r0, r1, r2, r6}
+ adds r4, r4, r7
+ adcs r4, r10, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r7, r9, r1
+ mov r1, r5
+ adcs r9, r0, r2
+ ldr r0, [sp, #52] @ 4-byte Reload
+ mul r2, r4, r11
+ adcs r6, r0, r6
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r10, r0, r3
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #136
+ bl .LmulPv256x32(PLT)
+ add r12, sp, #136
+ ldm r12, {r0, r1, r3, r12}
+ adds r0, r4, r0
+ adcs r4, r7, r1
+ ldr r7, [sp, #152]
+ ldr r0, [sp, #168]
+ adcs r1, r9, r3
+ ldr r3, [sp, #160]
+ mul r2, r4, r11
+ adcs r9, r6, r12
+ ldr r6, [sp, #156]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #164]
+ adcs r10, r10, r7
+ ldr r7, [sp, #56] @ 4-byte Reload
+ adcs r6, r7, r6
+ ldr r7, [sp, #52] @ 4-byte Reload
+ adcs r8, r7, r3
+ ldr r3, [sp, #48] @ 4-byte Reload
+ adcs r11, r3, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ add r0, sp, #96
+ bl .LmulPv256x32(PLT)
+ add r3, sp, #96
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r4, r0
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r5, r0, r1
+ ldr r0, [sp, #112]
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r9, r9, r2
+ adcs r10, r10, r3
+ adcs r3, r6, r0
+ ldr r0, [sp, #116]
+ str r3, [sp, #36] @ 4-byte Spill
+ adcs lr, r8, r0
+ ldr r0, [sp, #120]
+ str lr, [sp, #40] @ 4-byte Spill
+ adcs r7, r11, r0
+ ldr r0, [sp, #124]
+ str r7, [sp, #44] @ 4-byte Spill
+ adcs r4, r1, r0
+ ldr r0, [sp, #128]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r4, [sp, #48] @ 4-byte Spill
+ adcs r12, r1, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r8, r0, #0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ subs r1, r5, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ sbcs r2, r9, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ sbcs r6, r10, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ sbcs r0, r3, r0
+ ldr r3, [sp, #68] @ 4-byte Reload
+ sbcs r11, lr, r3
+ ldr r3, [sp, #72] @ 4-byte Reload
+ sbcs r3, r7, r3
+ ldr r7, [sp, #76] @ 4-byte Reload
+ sbcs lr, r4, r7
+ ldr r7, [sp, #60] @ 4-byte Reload
+ sbcs r4, r12, r7
+ sbc r7, r8, #0
+ ands r7, r7, #1
+ movne r1, r5
+ ldr r5, [sp, #92] @ 4-byte Reload
+ movne r2, r9
+ movne r6, r10
+ cmp r7, #0
+ str r1, [r5]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r2, [r5, #4]
+ str r6, [r5, #8]
+ movne r0, r1
+ str r0, [r5, #12]
+ ldr r0, [sp, #40] @ 4-byte Reload
+ movne r11, r0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ str r11, [r5, #16]
+ movne r3, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ cmp r7, #0
+ movne r4, r12
+ str r3, [r5, #20]
+ movne lr, r0
+ str lr, [r5, #24]
+ str r4, [r5, #28]
+ add sp, sp, #420
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end116:
+ .size mcl_fp_montRed8L, .Lfunc_end116-mcl_fp_montRed8L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_addPre8L
+ .align 2
+ .type mcl_fp_addPre8L,%function
+mcl_fp_addPre8L: @ @mcl_fp_addPre8L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #16
+ sub sp, sp, #16
+ ldr r3, [r1, #4]
+ ldr r9, [r1]
+ ldr r10, [r1, #12]
+ ldr r11, [r1, #16]
+ ldr r8, [r1, #28]
+ str r3, [sp, #12] @ 4-byte Spill
+ ldr r3, [r1, #8]
+ str r3, [sp, #8] @ 4-byte Spill
+ ldr r3, [r1, #20]
+ str r3, [sp] @ 4-byte Spill
+ ldr r3, [r1, #24]
+ str r3, [sp, #4] @ 4-byte Spill
+ ldm r2, {r1, r3, r4, r5, r12, lr}
+ ldr r7, [sp, #12] @ 4-byte Reload
+ ldr r6, [r2, #24]
+ ldr r2, [r2, #28]
+ adds r1, r1, r9
+ adcs r3, r3, r7
+ ldr r7, [sp, #8] @ 4-byte Reload
+ adcs r4, r4, r7
+ ldr r7, [sp] @ 4-byte Reload
+ adcs r5, r5, r10
+ adcs r12, r12, r11
+ adcs lr, lr, r7
+ ldr r7, [sp, #4] @ 4-byte Reload
+ stm r0, {r1, r3, r4, r5, r12, lr}
+ adcs r6, r6, r7
+ adcs r2, r2, r8
+ str r6, [r0, #24]
+ str r2, [r0, #28]
+ mov r0, #0
+ adc r0, r0, #0
+ add sp, sp, #16
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end117:
+ .size mcl_fp_addPre8L, .Lfunc_end117-mcl_fp_addPre8L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_subPre8L
+ .align 2
+ .type mcl_fp_subPre8L,%function
+mcl_fp_subPre8L: @ @mcl_fp_subPre8L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #16
+ sub sp, sp, #16
+ ldr r3, [r2, #4]
+ ldr r9, [r2]
+ ldr r10, [r2, #12]
+ ldr r11, [r2, #16]
+ ldr r8, [r2, #28]
+ str r3, [sp, #12] @ 4-byte Spill
+ ldr r3, [r2, #8]
+ str r3, [sp, #8] @ 4-byte Spill
+ ldr r3, [r2, #20]
+ str r3, [sp] @ 4-byte Spill
+ ldr r3, [r2, #24]
+ str r3, [sp, #4] @ 4-byte Spill
+ ldm r1, {r2, r3, r4, r5, r12, lr}
+ ldr r7, [sp, #12] @ 4-byte Reload
+ ldr r6, [r1, #24]
+ ldr r1, [r1, #28]
+ subs r2, r2, r9
+ sbcs r3, r3, r7
+ ldr r7, [sp, #8] @ 4-byte Reload
+ sbcs r4, r4, r7
+ ldr r7, [sp] @ 4-byte Reload
+ sbcs r5, r5, r10
+ sbcs r12, r12, r11
+ sbcs lr, lr, r7
+ ldr r7, [sp, #4] @ 4-byte Reload
+ stm r0, {r2, r3, r4, r5, r12, lr}
+ sbcs r6, r6, r7
+ sbcs r1, r1, r8
+ str r6, [r0, #24]
+ str r1, [r0, #28]
+ mov r0, #0
+ sbc r0, r0, #0
+ and r0, r0, #1
+ add sp, sp, #16
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end118:
+ .size mcl_fp_subPre8L, .Lfunc_end118-mcl_fp_subPre8L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_shr1_8L
+ .align 2
+ .type mcl_fp_shr1_8L,%function
+mcl_fp_shr1_8L: @ @mcl_fp_shr1_8L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, lr}
+ push {r4, r5, r6, r7, r8, lr}
+ ldr r3, [r1, #4]
+ ldr r12, [r1]
+ ldr lr, [r1, #12]
+ add r6, r1, #16
+ ldr r2, [r1, #8]
+ ldm r6, {r4, r5, r6}
+ ldr r1, [r1, #28]
+ lsrs r7, r3, #1
+ lsr r3, r3, #1
+ rrx r12, r12
+ lsrs r7, lr, #1
+ orr r8, r3, r2, lsl #31
+ lsr r7, lr, #1
+ rrx r2, r2
+ lsrs r3, r5, #1
+ lsr r5, r5, #1
+ str r12, [r0]
+ str r8, [r0, #4]
+ orr r7, r7, r4, lsl #31
+ rrx r3, r4
+ lsrs r4, r1, #1
+ str r2, [r0, #8]
+ orr r5, r5, r6, lsl #31
+ lsr r1, r1, #1
+ add r2, r0, #16
+ rrx r6, r6
+ str r7, [r0, #12]
+ stm r2, {r3, r5, r6}
+ str r1, [r0, #28]
+ pop {r4, r5, r6, r7, r8, lr}
+ mov pc, lr
+.Lfunc_end119:
+ .size mcl_fp_shr1_8L, .Lfunc_end119-mcl_fp_shr1_8L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_add8L
+ .align 2
+ .type mcl_fp_add8L,%function
+mcl_fp_add8L: @ @mcl_fp_add8L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #28
+ sub sp, sp, #28
+ ldr r7, [r1, #12]
+ ldr lr, [r1]
+ ldr r11, [r1, #4]
+ ldr r10, [r1, #8]
+ add r8, r2, #20
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r1, #16]
+ str r7, [sp] @ 4-byte Spill
+ ldr r7, [r1, #20]
+ str r7, [sp, #8] @ 4-byte Spill
+ ldr r7, [r1, #24]
+ ldr r1, [r1, #28]
+ str r7, [sp, #12] @ 4-byte Spill
+ str r1, [sp, #4] @ 4-byte Spill
+ ldm r2, {r1, r4, r5, r12}
+ ldr r9, [r2, #16]
+ ldm r8, {r6, r7, r8}
+ ldr r2, [sp] @ 4-byte Reload
+ adds lr, r1, lr
+ adcs r1, r4, r11
+ str lr, [r0]
+ adcs r4, r5, r10
+ ldr r5, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #24] @ 4-byte Spill
+ str r4, [sp, #20] @ 4-byte Spill
+ adcs r10, r12, r5
+ adcs r5, r9, r2
+ ldr r2, [sp, #8] @ 4-byte Reload
+ str r5, [sp, #16] @ 4-byte Spill
+ adcs r12, r6, r2
+ ldr r6, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #4] @ 4-byte Reload
+ stmib r0, {r1, r4, r10}
+ mov r1, #0
+ str r5, [r0, #16]
+ str r12, [r0, #20]
+ adcs r7, r7, r6
+ mov r6, r12
+ adcs r11, r8, r2
+ str r7, [r0, #24]
+ mov r8, lr
+ adc r1, r1, #0
+ str r11, [r0, #28]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldm r3, {r1, r2, r9, r12, lr}
+ ldr r4, [r3, #20]
+ ldr r5, [r3, #24]
+ ldr r3, [r3, #28]
+ subs r1, r8, r1
+ str r1, [sp, #8] @ 4-byte Spill
+ ldr r1, [sp, #24] @ 4-byte Reload
+ sbcs r8, r1, r2
+ ldr r1, [sp, #20] @ 4-byte Reload
+ sbcs r2, r1, r9
+ ldr r1, [sp, #16] @ 4-byte Reload
+ sbcs r12, r10, r12
+ sbcs lr, r1, lr
+ ldr r1, [sp, #12] @ 4-byte Reload
+ sbcs r4, r6, r4
+ sbcs r5, r7, r5
+ sbcs r6, r11, r3
+ sbc r3, r1, #0
+ tst r3, #1
+ bne .LBB120_2
+@ BB#1: @ %nocarry
+ ldr r1, [sp, #8] @ 4-byte Reload
+ stm r0, {r1, r8}
+ add r1, r0, #8
+ add r0, r0, #20
+ stm r1, {r2, r12, lr}
+ stm r0, {r4, r5, r6}
+.LBB120_2: @ %carry
+ add sp, sp, #28
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end120:
+ .size mcl_fp_add8L, .Lfunc_end120-mcl_fp_add8L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_addNF8L
+ .align 2
+ .type mcl_fp_addNF8L,%function
+mcl_fp_addNF8L: @ @mcl_fp_addNF8L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #32
+ sub sp, sp, #32
+ ldm r1, {r6, r8}
+ ldr r7, [r1, #8]
+ ldr r9, [r1, #28]
+ str r7, [sp, #28] @ 4-byte Spill
+ ldr r7, [r1, #12]
+ str r7, [sp, #24] @ 4-byte Spill
+ ldr r7, [r1, #16]
+ str r7, [sp, #12] @ 4-byte Spill
+ ldr r7, [r1, #20]
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r1, #24]
+ str r7, [sp, #20] @ 4-byte Spill
+ ldm r2, {r1, r4, r5, r12, lr}
+ ldr r10, [r2, #20]
+ ldr r11, [r2, #24]
+ ldr r2, [r2, #28]
+ adds r7, r1, r6
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r6, r4, r8
+ ldr r4, [sp, #20] @ 4-byte Reload
+ str r7, [sp, #4] @ 4-byte Spill
+ str r6, [sp, #8] @ 4-byte Spill
+ adcs r8, r5, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ ldr r5, [sp, #12] @ 4-byte Reload
+ adcs r1, r12, r1
+ adcs r12, lr, r5
+ ldr r5, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #28] @ 4-byte Spill
+ adcs lr, r10, r5
+ adcs r5, r11, r4
+ ldr r4, [r3, #4]
+ ldr r11, [r3, #16]
+ str lr, [sp, #24] @ 4-byte Spill
+ adc r10, r2, r9
+ ldr r2, [r3]
+ ldr r9, [r3, #12]
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [r3, #8]
+ str r2, [sp, #20] @ 4-byte Spill
+ ldr r2, [r3, #20]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r3, #24]
+ ldr r3, [r3, #28]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [sp, #16] @ 4-byte Reload
+ subs r2, r7, r2
+ sbcs r7, r6, r4
+ ldr r4, [sp, #20] @ 4-byte Reload
+ sbcs r6, r8, r4
+ sbcs r9, r1, r9
+ ldr r1, [sp] @ 4-byte Reload
+ sbcs r4, r12, r11
+ mov r11, r12
+ sbcs r12, lr, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ sbcs lr, r5, r1
+ ldr r1, [sp, #4] @ 4-byte Reload
+ sbc r3, r10, r3
+ cmp r3, #0
+ movlt r6, r8
+ movlt r2, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ movlt r7, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ cmp r3, #0
+ movlt r4, r11
+ movlt r9, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ stm r0, {r2, r7}
+ str r6, [r0, #8]
+ str r9, [r0, #12]
+ movlt r12, r1
+ cmp r3, #0
+ add r1, r0, #16
+ movlt lr, r5
+ movlt r3, r10
+ stm r1, {r4, r12, lr}
+ str r3, [r0, #28]
+ add sp, sp, #32
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end121:
+ .size mcl_fp_addNF8L, .Lfunc_end121-mcl_fp_addNF8L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_sub8L
+ .align 2
+ .type mcl_fp_sub8L,%function
+mcl_fp_sub8L: @ @mcl_fp_sub8L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #16
+ sub sp, sp, #16
+ ldm r2, {r12, lr}
+ ldr r4, [r2, #8]
+ ldr r9, [r2, #20]
+ ldr r10, [r2, #24]
+ add r8, r1, #12
+ str r4, [sp, #12] @ 4-byte Spill
+ ldr r4, [r2, #12]
+ str r4, [sp, #8] @ 4-byte Spill
+ ldr r4, [r2, #16]
+ ldr r2, [r2, #28]
+ str r4, [sp] @ 4-byte Spill
+ str r2, [sp, #4] @ 4-byte Spill
+ ldm r1, {r4, r5, r11}
+ ldm r8, {r2, r7, r8}
+ ldr r6, [r1, #24]
+ ldr r1, [r1, #28]
+ subs r12, r4, r12
+ ldr r4, [sp, #12] @ 4-byte Reload
+ sbcs lr, r5, lr
+ sbcs r11, r11, r4
+ ldr r4, [sp, #8] @ 4-byte Reload
+ sbcs r2, r2, r4
+ ldr r4, [sp] @ 4-byte Reload
+ sbcs r4, r7, r4
+ ldr r7, [sp, #4] @ 4-byte Reload
+ stm r0, {r12, lr}
+ str r11, [r0, #8]
+ sbcs r5, r8, r9
+ sbcs r6, r6, r10
+ sbcs r7, r1, r7
+ add r1, r0, #12
+ stm r1, {r2, r4, r5, r6, r7}
+ mov r1, #0
+ sbc r1, r1, #0
+ tst r1, #1
+ beq .LBB122_2
+@ BB#1: @ %carry
+ ldr r1, [r3]
+ add r10, r3, #12
+ str r1, [sp, #8] @ 4-byte Spill
+ ldr r1, [r3, #4]
+ str r1, [sp, #4] @ 4-byte Spill
+ ldr r1, [r3, #8]
+ str r1, [sp] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr r1, [r3, #24]
+ ldr r3, [r3, #28]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adds r1, r1, r12
+ str r1, [sp, #8] @ 4-byte Spill
+ ldr r1, [sp, #4] @ 4-byte Reload
+ adcs r12, r1, lr
+ ldr r1, [sp] @ 4-byte Reload
+ adcs lr, r1, r11
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r2, r8, r2
+ adcs r4, r9, r4
+ adcs r5, r10, r5
+ adcs r6, r1, r6
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adc r3, r3, r7
+ stm r0, {r1, r12, lr}
+ add r1, r0, #12
+ stm r1, {r2, r4, r5, r6}
+ str r3, [r0, #28]
+.LBB122_2: @ %nocarry
+ add sp, sp, #16
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end122:
+ .size mcl_fp_sub8L, .Lfunc_end122-mcl_fp_sub8L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_subNF8L
+ .align 2
+ .type mcl_fp_subNF8L,%function
+mcl_fp_subNF8L: @ @mcl_fp_subNF8L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #36
+ sub sp, sp, #36
+ ldm r2, {r6, r8}
+ ldr r7, [r2, #8]
+ ldr r11, [r2, #12]
+ ldr r9, [r2, #28]
+ str r7, [sp, #20] @ 4-byte Spill
+ ldr r7, [r2, #16]
+ str r7, [sp, #24] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ str r7, [sp, #28] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ str r7, [sp, #32] @ 4-byte Spill
+ ldm r1, {r2, r4, r5, r12, lr}
+ ldr r10, [r1, #20]
+ ldr r7, [r1, #24]
+ ldr r1, [r1, #28]
+ subs r6, r2, r6
+ ldr r2, [sp, #20] @ 4-byte Reload
+ sbcs r8, r4, r8
+ ldr r4, [sp, #24] @ 4-byte Reload
+ str r6, [sp, #16] @ 4-byte Spill
+ sbcs r5, r5, r2
+ sbcs r2, r12, r11
+ ldr r11, [r3, #12]
+ sbcs r12, lr, r4
+ ldr r4, [sp, #28] @ 4-byte Reload
+ str r2, [sp, #20] @ 4-byte Spill
+ str r12, [sp, #24] @ 4-byte Spill
+ sbcs lr, r10, r4
+ ldr r4, [sp, #32] @ 4-byte Reload
+ ldr r10, [r3, #16]
+ str lr, [sp, #28] @ 4-byte Spill
+ sbcs r4, r7, r4
+ ldr r7, [r3]
+ sbc r1, r1, r9
+ ldr r9, [r3, #8]
+ str r7, [sp, #4] @ 4-byte Spill
+ ldr r7, [r3, #4]
+ str r7, [sp] @ 4-byte Spill
+ ldr r7, [r3, #20]
+ str r7, [sp, #8] @ 4-byte Spill
+ ldr r7, [r3, #24]
+ ldr r3, [r3, #28]
+ str r3, [sp, #12] @ 4-byte Spill
+ ldr r3, [sp, #4] @ 4-byte Reload
+ str r7, [sp, #32] @ 4-byte Spill
+ adds r7, r6, r3
+ ldr r3, [sp] @ 4-byte Reload
+ adcs r6, r8, r3
+ ldr r3, [sp, #8] @ 4-byte Reload
+ adcs r9, r5, r9
+ adcs r11, r2, r11
+ adcs r2, r12, r10
+ ldr r10, [sp, #16] @ 4-byte Reload
+ adcs r12, lr, r3
+ ldr r3, [sp, #32] @ 4-byte Reload
+ adcs lr, r4, r3
+ ldr r3, [sp, #12] @ 4-byte Reload
+ adc r3, r1, r3
+ cmp r1, #0
+ movge r9, r5
+ ldr r5, [sp, #20] @ 4-byte Reload
+ movge r7, r10
+ movge r6, r8
+ cmp r1, #0
+ str r7, [r0]
+ movge r11, r5
+ ldr r5, [sp, #24] @ 4-byte Reload
+ movge r2, r5
+ ldr r5, [sp, #28] @ 4-byte Reload
+ stmib r0, {r6, r9, r11}
+ movge r12, r5
+ cmp r1, #0
+ movge r3, r1
+ movge lr, r4
+ add r1, r0, #16
+ stm r1, {r2, r12, lr}
+ str r3, [r0, #28]
+ add sp, sp, #36
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end123:
+ .size mcl_fp_subNF8L, .Lfunc_end123-mcl_fp_subNF8L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_add8L
+ .align 2
+ .type mcl_fpDbl_add8L,%function
+mcl_fpDbl_add8L: @ @mcl_fpDbl_add8L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #68
+ sub sp, sp, #68
+ ldm r1, {r7, r9}
+ ldr r6, [r1, #8]
+ ldr r8, [r1, #12]
+ ldm r2, {r4, r12, lr}
+ ldr r5, [r2, #12]
+ adds r4, r4, r7
+ str r4, [sp, #32] @ 4-byte Spill
+ ldr r4, [r2, #32]
+ adcs r7, r12, r9
+ adcs r6, lr, r6
+ add lr, r1, #16
+ adcs r9, r5, r8
+ ldr r5, [r2, #28]
+ add r8, r2, #16
+ str r4, [sp, #36] @ 4-byte Spill
+ ldr r4, [r2, #36]
+ str r5, [sp, #28] @ 4-byte Spill
+ str r4, [sp, #40] @ 4-byte Spill
+ ldr r4, [r2, #40]
+ str r4, [sp, #44] @ 4-byte Spill
+ ldr r4, [r2, #44]
+ str r4, [sp, #48] @ 4-byte Spill
+ ldr r4, [r2, #48]
+ str r4, [sp, #52] @ 4-byte Spill
+ ldr r4, [r2, #52]
+ str r4, [sp, #56] @ 4-byte Spill
+ ldr r4, [r2, #56]
+ str r4, [sp, #60] @ 4-byte Spill
+ ldr r4, [r2, #60]
+ str r4, [sp, #64] @ 4-byte Spill
+ ldm r8, {r4, r5, r8}
+ ldr r2, [r1, #36]
+ ldr r10, [r1, #32]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #40]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r1, #44]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #48]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [r1, #52]
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [r1, #56]
+ str r2, [sp, #20] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #24] @ 4-byte Spill
+ ldm lr, {r1, r2, r12, lr}
+ ldr r11, [sp, #32] @ 4-byte Reload
+ adcs r1, r4, r1
+ str r11, [r0]
+ str r7, [r0, #4]
+ str r6, [r0, #8]
+ str r9, [r0, #12]
+ ldr r6, [sp, #8] @ 4-byte Reload
+ ldr r4, [sp, #16] @ 4-byte Reload
+ adcs r2, r5, r2
+ str r1, [r0, #16]
+ str r2, [r0, #20]
+ adcs r1, r8, r12
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [r0, #24]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r2, r2, lr
+ adcs r1, r1, r10
+ str r2, [r0, #28]
+ ldr r2, [sp] @ 4-byte Reload
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r7, r1, r2
+ ldr r1, [sp, #44] @ 4-byte Reload
+ ldr r2, [sp, #4] @ 4-byte Reload
+ adcs r2, r1, r2
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r2, [sp, #44] @ 4-byte Spill
+ adcs r12, r1, r6
+ ldr r1, [sp, #52] @ 4-byte Reload
+ ldr r6, [sp, #12] @ 4-byte Reload
+ str r12, [sp, #48] @ 4-byte Spill
+ adcs lr, r1, r6
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str lr, [sp, #52] @ 4-byte Spill
+ adcs r5, r1, r4
+ ldr r1, [sp, #60] @ 4-byte Reload
+ ldr r4, [sp, #20] @ 4-byte Reload
+ str r5, [sp, #56] @ 4-byte Spill
+ adcs r8, r1, r4
+ ldr r1, [sp, #64] @ 4-byte Reload
+ ldr r4, [sp, #24] @ 4-byte Reload
+ adcs r10, r1, r4
+ mov r1, #0
+ adc r1, r1, #0
+ str r10, [sp, #60] @ 4-byte Spill
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [r3]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldmib r3, {r4, r11}
+ ldr r6, [r3, #12]
+ ldr r1, [r3, #24]
+ ldr r9, [r3, #16]
+ str r6, [sp, #40] @ 4-byte Spill
+ ldr r6, [r3, #20]
+ ldr r3, [r3, #28]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r3, [sp, #32] @ 4-byte Spill
+ ldr r3, [sp, #36] @ 4-byte Reload
+ subs r1, r3, r1
+ sbcs r4, r7, r4
+ sbcs r11, r2, r11
+ ldr r2, [sp, #40] @ 4-byte Reload
+ sbcs r2, r12, r2
+ sbcs r12, lr, r9
+ mov r9, r8
+ sbcs lr, r5, r6
+ ldr r5, [sp, #28] @ 4-byte Reload
+ sbcs r6, r8, r5
+ ldr r5, [sp, #32] @ 4-byte Reload
+ sbcs r8, r10, r5
+ ldr r5, [sp, #64] @ 4-byte Reload
+ sbc r10, r5, #0
+ ands r10, r10, #1
+ movne r1, r3
+ movne r4, r7
+ str r1, [r0, #32]
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r4, [r0, #36]
+ movne r11, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ cmp r10, #0
+ str r11, [r0, #40]
+ movne r2, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r2, [r0, #44]
+ movne r12, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r12, [r0, #48]
+ movne lr, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ cmp r10, #0
+ movne r6, r9
+ str lr, [r0, #52]
+ str r6, [r0, #56]
+ movne r8, r1
+ str r8, [r0, #60]
+ add sp, sp, #68
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end124:
+ .size mcl_fpDbl_add8L, .Lfunc_end124-mcl_fpDbl_add8L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_sub8L
+ .align 2
+ .type mcl_fpDbl_sub8L,%function
+mcl_fpDbl_sub8L: @ @mcl_fpDbl_sub8L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #64
+ sub sp, sp, #64
+ ldr r7, [r2, #32]
+ str r7, [sp, #32] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ str r7, [sp, #40] @ 4-byte Spill
+ ldr r7, [r2, #44]
+ str r7, [sp, #44] @ 4-byte Spill
+ ldr r7, [r2, #48]
+ str r7, [sp, #48] @ 4-byte Spill
+ ldr r7, [r2, #52]
+ str r7, [sp, #52] @ 4-byte Spill
+ ldr r7, [r2, #56]
+ str r7, [sp, #56] @ 4-byte Spill
+ ldr r7, [r2, #60]
+ str r7, [sp, #60] @ 4-byte Spill
+ ldm r2, {r4, r5, r8}
+ ldr r6, [r2, #20]
+ ldr r7, [r2, #12]
+ ldr r9, [r2, #16]
+ ldr r11, [r2, #24]
+ ldr r10, [r2, #28]
+ str r6, [sp, #28] @ 4-byte Spill
+ ldm r1, {r2, r12, lr}
+ ldr r6, [r1, #12]
+ subs r4, r2, r4
+ ldr r2, [r1, #32]
+ sbcs r5, r12, r5
+ ldr r12, [r1, #36]
+ sbcs lr, lr, r8
+ add r8, r1, #16
+ sbcs r6, r6, r7
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #40]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r1, #44]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #48]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [r1, #52]
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [r1, #56]
+ str r2, [sp, #20] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #24] @ 4-byte Spill
+ ldm r8, {r1, r2, r7, r8}
+ stm r0, {r4, r5, lr}
+ str r6, [r0, #12]
+ mov r4, #0
+ ldr r6, [sp, #28] @ 4-byte Reload
+ ldr r5, [sp, #20] @ 4-byte Reload
+ sbcs r1, r1, r9
+ sbcs r2, r2, r6
+ str r1, [r0, #16]
+ sbcs r1, r7, r11
+ str r2, [r0, #20]
+ ldr r2, [sp, #32] @ 4-byte Reload
+ ldr r7, [sp, #8] @ 4-byte Reload
+ str r1, [r0, #24]
+ sbcs r1, r8, r10
+ str r1, [r0, #28]
+ ldr r1, [sp] @ 4-byte Reload
+ sbcs r1, r1, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #4] @ 4-byte Reload
+ sbcs r6, r12, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r6, [sp, #36] @ 4-byte Spill
+ sbcs r1, r1, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r1, [sp, #40] @ 4-byte Spill
+ sbcs r9, r7, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ ldr r7, [sp, #12] @ 4-byte Reload
+ sbcs r12, r7, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ ldr r7, [sp, #16] @ 4-byte Reload
+ str r12, [sp, #48] @ 4-byte Spill
+ sbcs lr, r7, r2
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str lr, [sp, #52] @ 4-byte Spill
+ sbcs r8, r5, r2
+ ldr r2, [sp, #60] @ 4-byte Reload
+ ldr r5, [sp, #24] @ 4-byte Reload
+ sbcs r11, r5, r2
+ sbc r2, r4, #0
+ str r2, [sp, #60] @ 4-byte Spill
+ ldm r3, {r4, r5}
+ ldr r2, [r3, #8]
+ ldr r10, [r3, #20]
+ ldr r7, [r3, #24]
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [r3, #12]
+ str r2, [sp, #28] @ 4-byte Spill
+ ldr r2, [r3, #16]
+ ldr r3, [r3, #28]
+ str r3, [sp, #56] @ 4-byte Spill
+ ldr r3, [sp, #32] @ 4-byte Reload
+ adds r4, r3, r4
+ adcs r5, r6, r5
+ ldr r6, [sp, #44] @ 4-byte Reload
+ adcs r6, r1, r6
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r1, r9, r1
+ adcs r2, r12, r2
+ adcs r12, lr, r10
+ adcs lr, r8, r7
+ ldr r7, [sp, #56] @ 4-byte Reload
+ adc r10, r11, r7
+ ldr r7, [sp, #60] @ 4-byte Reload
+ ands r7, r7, #1
+ moveq r4, r3
+ ldr r3, [sp, #36] @ 4-byte Reload
+ str r4, [r0, #32]
+ moveq r5, r3
+ ldr r3, [sp, #40] @ 4-byte Reload
+ str r5, [r0, #36]
+ moveq r6, r3
+ cmp r7, #0
+ moveq r1, r9
+ str r6, [r0, #40]
+ str r1, [r0, #44]
+ ldr r1, [sp, #48] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r2, [r0, #48]
+ moveq r12, r1
+ cmp r7, #0
+ moveq lr, r8
+ moveq r10, r11
+ str r12, [r0, #52]
+ str lr, [r0, #56]
+ str r10, [r0, #60]
+ add sp, sp, #64
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end125:
+ .size mcl_fpDbl_sub8L, .Lfunc_end125-mcl_fpDbl_sub8L
+ .cantunwind
+ .fnend
+
+ .align 2
+ .type .LmulPv288x32,%function
+.LmulPv288x32: @ @mulPv288x32
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r11, lr}
+ ldr r12, [r1]
+ ldmib r1, {r3, lr}
+ ldr r9, [r1, #12]
+ umull r4, r8, lr, r2
+ umull lr, r6, r12, r2
+ mov r5, r4
+ mov r7, r6
+ str lr, [r0]
+ umull lr, r12, r9, r2
+ umlal r7, r5, r3, r2
+ str r5, [r0, #8]
+ str r7, [r0, #4]
+ umull r5, r7, r3, r2
+ adds r3, r6, r5
+ adcs r3, r7, r4
+ adcs r3, r8, lr
+ str r3, [r0, #12]
+ ldr r3, [r1, #16]
+ umull r7, r6, r3, r2
+ adcs r3, r12, r7
+ str r3, [r0, #16]
+ ldr r3, [r1, #20]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #20]
+ ldr r3, [r1, #24]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #24]
+ ldr r3, [r1, #28]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #28]
+ ldr r1, [r1, #32]
+ umull r3, r7, r1, r2
+ adcs r1, r5, r3
+ adc r2, r7, #0
+ str r1, [r0, #32]
+ str r2, [r0, #36]
+ pop {r4, r5, r6, r7, r8, r9, r11, lr}
+ mov pc, lr
+.Lfunc_end126:
+ .size .LmulPv288x32, .Lfunc_end126-.LmulPv288x32
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mulUnitPre9L
+ .align 2
+ .type mcl_fp_mulUnitPre9L,%function
+mcl_fp_mulUnitPre9L: @ @mcl_fp_mulUnitPre9L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, lr}
+ push {r4, r5, r6, r7, r8, lr}
+ .pad #40
+ sub sp, sp, #40
+ mov r4, r0
+ mov r0, sp
+ bl .LmulPv288x32(PLT)
+ add lr, sp, #20
+ ldr r12, [sp, #36]
+ ldm lr, {r0, r3, r8, lr}
+ ldr r1, [sp, #16]
+ ldm sp, {r5, r6, r7}
+ ldr r2, [sp, #12]
+ stm r4, {r5, r6, r7}
+ str r2, [r4, #12]
+ str r1, [r4, #16]
+ add r1, r4, #20
+ stm r1, {r0, r3, r8, lr}
+ str r12, [r4, #36]
+ add sp, sp, #40
+ pop {r4, r5, r6, r7, r8, lr}
+ mov pc, lr
+.Lfunc_end127:
+ .size mcl_fp_mulUnitPre9L, .Lfunc_end127-mcl_fp_mulUnitPre9L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_mulPre9L
+ .align 2
+ .type mcl_fpDbl_mulPre9L,%function
+mcl_fpDbl_mulPre9L: @ @mcl_fpDbl_mulPre9L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #412
+ sub sp, sp, #412
+ mov r10, r2
+ mov r8, r0
+ add r0, sp, #368
+ str r1, [sp, #44] @ 4-byte Spill
+ mov r4, r1
+ ldr r2, [r10]
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #404]
+ ldr r1, [sp, #376]
+ ldr r2, [r10, #4]
+ ldr r9, [sp, #372]
+ ldr r11, [sp, #380]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #400]
+ str r1, [sp, #16] @ 4-byte Spill
+ mov r1, r4
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #396]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #392]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #388]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #384]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #368]
+ str r0, [r8]
+ add r0, sp, #328
+ bl .LmulPv288x32(PLT)
+ add lr, sp, #352
+ ldr r4, [sp, #364]
+ add r7, sp, #332
+ ldm lr, {r3, r12, lr}
+ ldr r6, [sp, #328]
+ ldm r7, {r0, r1, r2, r5, r7}
+ adds r6, r6, r9
+ str r6, [r8, #4]
+ ldr r6, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #12] @ 4-byte Spill
+ adcs r0, r1, r11
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r10, #8]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ adc r0, r4, #0
+ ldr r4, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #16] @ 4-byte Spill
+ add r0, sp, #288
+ mov r1, r4
+ bl .LmulPv288x32(PLT)
+ add r9, sp, #312
+ add lr, sp, #288
+ ldm r9, {r5, r6, r7, r9}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #12] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r8, #8]
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r11, r1, r0
+ ldr r0, [sp, #20] @ 4-byte Reload
+ mov r1, r4
+ adcs r0, r2, r0
+ ldr r2, [r10, #12]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #16] @ 4-byte Spill
+ adc r0, r9, #0
+ mov r9, r4
+ str r0, [sp, #12] @ 4-byte Spill
+ add r0, sp, #248
+ bl .LmulPv288x32(PLT)
+ add lr, sp, #272
+ ldr r4, [sp, #284]
+ add r6, sp, #252
+ ldm lr, {r3, r12, lr}
+ ldr r7, [sp, #248]
+ ldr r5, [sp, #268]
+ ldm r6, {r0, r1, r2, r6}
+ adds r7, r7, r11
+ str r7, [r8, #12]
+ ldr r7, [sp, #20] @ 4-byte Reload
+ adcs r11, r0, r7
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r1, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r10, #16]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #16] @ 4-byte Spill
+ adc r0, r4, #0
+ mov r4, r9
+ str r0, [sp, #12] @ 4-byte Spill
+ add r0, sp, #208
+ mov r1, r4
+ bl .LmulPv288x32(PLT)
+ add r9, sp, #232
+ add lr, sp, #208
+ ldm r9, {r5, r6, r7, r9}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r0, r11
+ str r0, [r8, #16]
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r11, r1, r0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ mov r1, r4
+ adcs r0, r2, r0
+ ldr r2, [r10, #20]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #16] @ 4-byte Spill
+ adc r0, r9, #0
+ mov r9, r4
+ str r0, [sp, #12] @ 4-byte Spill
+ add r0, sp, #168
+ bl .LmulPv288x32(PLT)
+ add lr, sp, #192
+ ldr r4, [sp, #204]
+ add r6, sp, #172
+ ldm lr, {r3, r12, lr}
+ ldr r7, [sp, #168]
+ ldr r5, [sp, #188]
+ ldm r6, {r0, r1, r2, r6}
+ adds r7, r7, r11
+ str r7, [r8, #20]
+ ldr r7, [sp, #28] @ 4-byte Reload
+ adcs r11, r0, r7
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r1, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r10, #24]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #12] @ 4-byte Spill
+ adc r0, r4, #0
+ mov r4, r9
+ str r0, [sp, #8] @ 4-byte Spill
+ add r0, sp, #128
+ mov r1, r4
+ bl .LmulPv288x32(PLT)
+ add r9, sp, #152
+ add lr, sp, #128
+ ldm r9, {r5, r6, r7, r9}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r0, r11
+ str r0, [r8, #24]
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r11, r1, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ mov r1, r4
+ adcs r0, r2, r0
+ ldr r2, [r10, #28]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #16] @ 4-byte Spill
+ adc r0, r9, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ add r0, sp, #88
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #124]
+ add lr, sp, #112
+ add r7, sp, #92
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm lr, {r5, r12, lr}
+ ldr r2, [sp, #88]
+ ldr r6, [sp, #108]
+ ldm r7, {r0, r1, r3, r7}
+ ldr r4, [sp, #40] @ 4-byte Reload
+ adds r2, r2, r11
+ adcs r9, r0, r4
+ ldr r0, [sp, #36] @ 4-byte Reload
+ str r2, [r8, #28]
+ ldr r2, [r10, #32]
+ adcs r10, r1, r0
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r11, r3, r0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r7, r7, r0
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r6, r6, r0
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r5, r5, r0
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adc r4, r0, #0
+ add r0, sp, #48
+ bl .LmulPv288x32(PLT)
+ add r3, sp, #48
+ ldm r3, {r0, r1, r2, r3}
+ ldr r12, [sp, #84]
+ ldr lr, [sp, #80]
+ adds r0, r0, r9
+ ldr r9, [sp, #76]
+ adcs r1, r1, r10
+ adcs r2, r2, r11
+ ldr r11, [sp, #72]
+ adcs r10, r3, r7
+ ldr r7, [sp, #64]
+ ldr r3, [sp, #68]
+ str r0, [r8, #32]
+ str r1, [r8, #36]
+ str r2, [r8, #40]
+ str r10, [r8, #44]
+ adcs r0, r7, r6
+ str r0, [r8, #48]
+ adcs r0, r3, r5
+ str r0, [r8, #52]
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [r8, #56]
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [r8, #60]
+ adcs r0, lr, r4
+ adc r1, r12, #0
+ str r0, [r8, #64]
+ str r1, [r8, #68]
+ add sp, sp, #412
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end128:
+ .size mcl_fpDbl_mulPre9L, .Lfunc_end128-mcl_fpDbl_mulPre9L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_sqrPre9L
+ .align 2
+ .type mcl_fpDbl_sqrPre9L,%function
+mcl_fpDbl_sqrPre9L: @ @mcl_fpDbl_sqrPre9L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #412
+ sub sp, sp, #412
+ mov r5, r1
+ mov r4, r0
+ add r0, sp, #368
+ ldr r2, [r5]
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #404]
+ add r11, sp, #368
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #400]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #396]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #392]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #388]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #384]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r11, {r0, r10, r11}
+ ldr r1, [sp, #380]
+ ldr r2, [r5, #4]
+ str r1, [sp, #20] @ 4-byte Spill
+ str r0, [r4]
+ add r0, sp, #328
+ mov r1, r5
+ bl .LmulPv288x32(PLT)
+ add lr, sp, #348
+ add r7, sp, #328
+ ldr r9, [sp, #364]
+ ldr r8, [sp, #360]
+ ldm lr, {r6, r12, lr}
+ ldm r7, {r0, r1, r2, r3, r7}
+ adds r0, r0, r10
+ str r0, [r4, #4]
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r10, r1, r11
+ mov r1, r5
+ adcs r11, r2, r0
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldr r2, [r5, #8]
+ adcs r0, r3, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ adc r0, r9, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #288
+ bl .LmulPv288x32(PLT)
+ add r9, sp, #312
+ add lr, sp, #288
+ ldm r9, {r6, r7, r8, r9}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r0, r10
+ str r0, [r4, #8]
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r10, r1, r11
+ mov r1, r5
+ adcs r11, r2, r0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r2, [r5, #12]
+ adcs r0, r3, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ adc r0, r9, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #248
+ bl .LmulPv288x32(PLT)
+ add lr, sp, #268
+ add r7, sp, #248
+ ldr r9, [sp, #284]
+ ldr r8, [sp, #280]
+ ldm lr, {r6, r12, lr}
+ ldm r7, {r0, r1, r2, r3, r7}
+ adds r0, r0, r10
+ str r0, [r4, #12]
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r10, r1, r11
+ mov r1, r5
+ adcs r11, r2, r0
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r2, [r5, #16]
+ adcs r0, r3, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ adc r0, r9, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #208
+ bl .LmulPv288x32(PLT)
+ add r9, sp, #232
+ add lr, sp, #208
+ ldm r9, {r6, r7, r8, r9}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r0, r10
+ str r0, [r4, #16]
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r10, r1, r11
+ mov r1, r5
+ adcs r11, r2, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r2, [r5, #20]
+ adcs r0, r3, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ adc r0, r9, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #168
+ bl .LmulPv288x32(PLT)
+ add lr, sp, #188
+ add r7, sp, #168
+ ldr r9, [sp, #204]
+ ldr r8, [sp, #200]
+ ldm lr, {r6, r12, lr}
+ ldm r7, {r0, r1, r2, r3, r7}
+ adds r0, r0, r10
+ str r0, [r4, #20]
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r10, r1, r11
+ mov r1, r5
+ adcs r11, r2, r0
+ ldr r0, [sp, #40] @ 4-byte Reload
+ ldr r2, [r5, #24]
+ adcs r0, r3, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ adc r0, r9, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #128
+ bl .LmulPv288x32(PLT)
+ add r9, sp, #152
+ add lr, sp, #128
+ ldm r9, {r6, r7, r8, r9}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r0, r10
+ str r0, [r4, #24]
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r10, r1, r11
+ mov r1, r5
+ adcs r11, r2, r0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ ldr r2, [r5, #28]
+ adcs r0, r3, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ adc r0, r9, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #88
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #124]
+ ldr r2, [sp, #88]
+ ldr r1, [sp, #92]
+ add r12, sp, #96
+ ldr lr, [sp, #116]
+ ldr r6, [sp, #112]
+ ldr r7, [sp, #108]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #120]
+ adds r2, r2, r10
+ adcs r10, r1, r11
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r12, {r0, r3, r12}
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r2, [r4, #28]
+ ldr r2, [r5, #32]
+ adcs r11, r0, r1
+ ldr r0, [sp, #40] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r8, r3, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r9, r12, r0
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #48
+ bl .LmulPv288x32(PLT)
+ add r3, sp, #48
+ add lr, sp, #72
+ ldm r3, {r0, r1, r2, r3}
+ ldr r12, [sp, #84]
+ adds r0, r0, r10
+ adcs r1, r1, r11
+ adcs r2, r2, r8
+ ldm lr, {r5, r8, lr}
+ ldr r6, [sp, #68]
+ ldr r7, [sp, #64]
+ adcs r3, r3, r9
+ add r9, r4, #32
+ stm r9, {r0, r1, r2}
+ str r3, [r4, #44]
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [r4, #48]
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [r4, #52]
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [r4, #56]
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [r4, #60]
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, lr, r0
+ adc r1, r12, #0
+ str r0, [r4, #64]
+ str r1, [r4, #68]
+ add sp, sp, #412
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end129:
+ .size mcl_fpDbl_sqrPre9L, .Lfunc_end129-mcl_fpDbl_sqrPre9L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mont9L
+ .align 2
+ .type mcl_fp_mont9L,%function
+mcl_fp_mont9L: @ @mcl_fp_mont9L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #804
+ sub sp, sp, #804
+ str r2, [sp, #64] @ 4-byte Spill
+ ldr r6, [r3, #-4]
+ ldr r2, [r2]
+ str r0, [sp, #60] @ 4-byte Spill
+ add r0, sp, #760
+ str r3, [sp, #76] @ 4-byte Spill
+ str r1, [sp, #68] @ 4-byte Spill
+ mov r4, r3
+ mov r7, r1
+ str r6, [sp, #72] @ 4-byte Spill
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #764]
+ ldr r5, [sp, #760]
+ mov r1, r4
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #768]
+ mul r2, r5, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #772]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #796]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #792]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #788]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #784]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #780]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #776]
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #720
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #756]
+ add r11, sp, #724
+ ldr r4, [sp, #736]
+ ldr r9, [sp, #720]
+ mov r1, r7
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #752]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #748]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #744]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #740]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r11, {r8, r10, r11}
+ ldr r6, [sp, #64] @ 4-byte Reload
+ add r0, sp, #680
+ ldr r2, [r6, #4]
+ bl .LmulPv288x32(PLT)
+ adds r0, r9, r5
+ ldr r2, [sp, #4] @ 4-byte Reload
+ mov r1, #0
+ add lr, sp, #680
+ ldr r9, [sp, #716]
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r5, r8, r0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r10, r0
+ ldr r10, [sp, #712]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r11, r0
+ ldr r11, [sp, #708]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #704]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r7, r2, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ ldr r2, [sp, #16] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #20] @ 4-byte Spill
+ adc r8, r1, #0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r5, r5, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ adcs r0, r7, r4
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ adcs r0, r8, r9
+ str r0, [sp, #24] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ mul r2, r5, r0
+ add r0, sp, #640
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #676]
+ add r10, sp, #640
+ ldr r11, [sp, #660]
+ ldr r7, [sp, #656]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #672]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #668]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #664]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r10, {r4, r8, r9, r10}
+ ldr r2, [r6, #8]
+ ldr r6, [sp, #68] @ 4-byte Reload
+ add r0, sp, #600
+ mov r1, r6
+ bl .LmulPv288x32(PLT)
+ adds r0, r5, r4
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #600
+ ldr r4, [sp, #624]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r5, r0, r8
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #636]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #632]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #628]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r7, r0, r1
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r8, r0, #0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r5, r5, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ adcs r0, r7, r4
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ adcs r0, r8, r9
+ str r0, [sp, #24] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ mul r2, r5, r0
+ add r0, sp, #560
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #596]
+ add r10, sp, #560
+ ldr r11, [sp, #580]
+ ldr r7, [sp, #576]
+ mov r1, r6
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #592]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #588]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #584]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r10, {r4, r8, r9, r10}
+ ldr r0, [sp, #64] @ 4-byte Reload
+ ldr r2, [r0, #12]
+ add r0, sp, #520
+ bl .LmulPv288x32(PLT)
+ adds r0, r5, r4
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #520
+ ldr r4, [sp, #544]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r5, r0, r8
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #556]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #552]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #548]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r6, r0, r1
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r7, r0, r1
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r8, r0, #0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r5, r5, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ adcs r0, r6, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ adcs r0, r7, r4
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ adcs r0, r8, r9
+ str r0, [sp, #24] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ mul r2, r5, r0
+ add r0, sp, #480
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #516]
+ add r10, sp, #480
+ ldr r11, [sp, #500]
+ ldr r7, [sp, #496]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #512]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #508]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #504]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r10, {r4, r8, r9, r10}
+ ldr r6, [sp, #64] @ 4-byte Reload
+ ldr r1, [sp, #68] @ 4-byte Reload
+ add r0, sp, #440
+ ldr r2, [r6, #16]
+ bl .LmulPv288x32(PLT)
+ adds r0, r5, r4
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #440
+ ldr r4, [sp, #464]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r5, r0, r8
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #476]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #472]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #468]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r7, r0, r1
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r8, r0, #0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r5, r5, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ adcs r0, r7, r4
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ adcs r0, r8, r9
+ str r0, [sp, #24] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ mul r2, r5, r0
+ add r0, sp, #400
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #436]
+ add r10, sp, #400
+ ldr r11, [sp, #420]
+ ldr r7, [sp, #416]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #432]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #428]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #424]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r10, {r4, r8, r9, r10}
+ ldr r2, [r6, #20]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ add r0, sp, #360
+ bl .LmulPv288x32(PLT)
+ adds r0, r5, r4
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #360
+ ldr r4, [sp, #384]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r5, r0, r8
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #396]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #392]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #388]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r6, r0, r1
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r7, r0, r1
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r8, r0, #0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r5, r5, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ adcs r0, r6, lr
+ ldr r6, [sp, #72] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ adcs r0, r7, r4
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ mul r2, r5, r6
+ adcs r0, r0, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ adcs r0, r8, r9
+ str r0, [sp, #24] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #320
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #356]
+ add r10, sp, #320
+ ldr r11, [sp, #340]
+ ldr r7, [sp, #336]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #352]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #348]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #344]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r10, {r4, r8, r9, r10}
+ ldr r0, [sp, #64] @ 4-byte Reload
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r2, [r0, #24]
+ add r0, sp, #280
+ bl .LmulPv288x32(PLT)
+ adds r0, r5, r4
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #280
+ ldr r4, [sp, #304]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r5, r0, r8
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #316]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #312]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #308]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r7, r0, r1
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r8, r0, #0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r5, r5, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r5, r6
+ ldr r6, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ mov r1, r6
+ adcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ adcs r0, r7, r4
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ adcs r0, r8, r9
+ str r0, [sp, #24] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #240
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #276]
+ add r10, sp, #240
+ ldr r11, [sp, #260]
+ ldr r7, [sp, #256]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #272]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #268]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #264]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r10, {r4, r8, r9, r10}
+ ldr r0, [sp, #64] @ 4-byte Reload
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r2, [r0, #28]
+ add r0, sp, #200
+ bl .LmulPv288x32(PLT)
+ adds r0, r5, r4
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #200
+ ldr r4, [sp, #224]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r5, r0, r8
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #236]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #232]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #228]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r7, r0, r1
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r8, r0, #0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r5, r5, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r6
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ adcs r0, r7, r4
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ adcs r0, r8, r9
+ str r0, [sp, #24] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ mul r2, r5, r0
+ add r0, sp, #160
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #196]
+ add r10, sp, #160
+ ldr r11, [sp, #184]
+ ldr r6, [sp, #180]
+ ldr r7, [sp, #176]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #192]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #188]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r10, {r4, r8, r9, r10}
+ ldr r0, [sp, #64] @ 4-byte Reload
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r2, [r0, #32]
+ add r0, sp, #120
+ bl .LmulPv288x32(PLT)
+ adds r0, r5, r4
+ ldr r1, [sp, #8] @ 4-byte Reload
+ ldr r2, [sp, #120]
+ ldr lr, [sp, #124]
+ ldr r5, [sp, #128]
+ ldr r12, [sp, #132]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r4, r0, r8
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r9, r0, r9
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r10, r0, r10
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ add r7, sp, #136
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r11, r0, r11
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r4, r4, r2
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r9, r9, lr
+ adcs r10, r10, r5
+ mul r8, r4, r0
+ ldm r7, {r0, r1, r2, r3, r6, r7}
+ ldr r5, [sp, #68] @ 4-byte Reload
+ adcs r5, r5, r12
+ str r5, [sp, #36] @ 4-byte Spill
+ ldr r5, [sp, #64] @ 4-byte Reload
+ adcs r5, r5, r0
+ adcs r0, r11, r1
+ ldr r11, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ mov r1, r11
+ adcs r0, r0, r2
+ mov r2, r8
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #72] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ add r0, sp, #80
+ bl .LmulPv288x32(PLT)
+ add r3, sp, #80
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r4, r0
+ adcs r0, r9, r1
+ ldr r1, [sp, #96]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r7, r10, r2
+ str r7, [sp, #40] @ 4-byte Spill
+ adcs r8, r0, r3
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r10, r5, r1
+ ldr r1, [sp, #100]
+ adcs r4, r0, r1
+ ldr r1, [sp, #104]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ str r4, [sp, #44] @ 4-byte Spill
+ adcs r6, r0, r1
+ ldr r1, [sp, #108]
+ ldr r0, [sp, #68] @ 4-byte Reload
+ str r6, [sp, #48] @ 4-byte Spill
+ adcs r12, r0, r1
+ ldr r1, [sp, #112]
+ ldr r0, [sp, #32] @ 4-byte Reload
+ str r12, [sp, #56] @ 4-byte Spill
+ adcs lr, r0, r1
+ ldr r1, [sp, #116]
+ ldr r0, [sp, #72] @ 4-byte Reload
+ str lr, [sp, #68] @ 4-byte Spill
+ adcs r5, r0, r1
+ ldr r0, [sp, #64] @ 4-byte Reload
+ str r5, [sp, #72] @ 4-byte Spill
+ adc r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ mov r0, r11
+ ldmib r0, {r2, r3, r11}
+ ldr r1, [r0, #16]
+ ldr r9, [r0]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [r0, #20]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [r0, #24]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [r0, #28]
+ str r1, [sp, #36] @ 4-byte Spill
+ mov r1, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ subs r9, r0, r9
+ sbcs r2, r7, r2
+ sbcs r3, r8, r3
+ sbcs r7, r10, r11
+ ldr r11, [r1, #32]
+ ldr r1, [sp, #24] @ 4-byte Reload
+ sbcs r1, r4, r1
+ ldr r4, [sp, #28] @ 4-byte Reload
+ sbcs r4, r6, r4
+ ldr r6, [sp, #32] @ 4-byte Reload
+ sbcs r12, r12, r6
+ ldr r6, [sp, #36] @ 4-byte Reload
+ sbcs lr, lr, r6
+ sbcs r11, r5, r11
+ ldr r5, [sp, #64] @ 4-byte Reload
+ sbc r6, r5, #0
+ ldr r5, [sp, #60] @ 4-byte Reload
+ ands r6, r6, #1
+ movne r9, r0
+ ldr r0, [sp, #40] @ 4-byte Reload
+ movne r3, r8
+ str r9, [r5]
+ movne r2, r0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ cmp r6, #0
+ movne r7, r10
+ str r2, [r5, #4]
+ str r3, [r5, #8]
+ str r7, [r5, #12]
+ movne r1, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ str r1, [r5, #16]
+ movne r4, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ cmp r6, #0
+ str r4, [r5, #20]
+ movne r12, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ str r12, [r5, #24]
+ movne lr, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ str lr, [r5, #28]
+ movne r11, r0
+ str r11, [r5, #32]
+ add sp, sp, #804
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end130:
+ .size mcl_fp_mont9L, .Lfunc_end130-mcl_fp_mont9L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montNF9L
+ .align 2
+ .type mcl_fp_montNF9L,%function
+mcl_fp_montNF9L: @ @mcl_fp_montNF9L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #804
+ sub sp, sp, #804
+ add r12, sp, #60
+ str r2, [sp, #72] @ 4-byte Spill
+ mov r4, r3
+ mov r7, r1
+ stm r12, {r0, r1, r3}
+ add r0, sp, #760
+ ldr r6, [r3, #-4]
+ ldr r2, [r2]
+ str r6, [sp, #76] @ 4-byte Spill
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #764]
+ ldr r5, [sp, #760]
+ mov r1, r4
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #768]
+ mul r2, r5, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #772]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #796]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #792]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #788]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #784]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #780]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #776]
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #720
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #756]
+ add r10, sp, #724
+ ldr r6, [sp, #736]
+ ldr r11, [sp, #720]
+ mov r1, r7
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #752]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #748]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #744]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #740]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr r4, [sp, #72] @ 4-byte Reload
+ add r0, sp, #680
+ ldr r2, [r4, #4]
+ bl .LmulPv288x32(PLT)
+ adds r0, r11, r5
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #680
+ ldr r11, [sp, #704]
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r8, r0
+ ldr r8, [sp, #716]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r9, r0
+ ldr r9, [sp, #712]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r10, r0
+ ldr r10, [sp, #708]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r5, r1, r0
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r7, r1, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r1, r0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r6, [sp, #48] @ 4-byte Reload
+ adds r6, r6, r0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r5, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ adcs r0, r7, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #28] @ 4-byte Spill
+ adc r0, r8, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #640
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #676]
+ add r10, sp, #644
+ ldr r7, [sp, #656]
+ ldr r11, [sp, #640]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #672]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #668]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #664]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #660]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr r5, [sp, #64] @ 4-byte Reload
+ ldr r2, [r4, #8]
+ add r0, sp, #600
+ mov r1, r5
+ bl .LmulPv288x32(PLT)
+ adds r0, r6, r11
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #600
+ ldr r11, [sp, #624]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r4, r0, r8
+ ldr r0, [sp, #52] @ 4-byte Reload
+ ldr r8, [sp, #636]
+ adcs r0, r0, r9
+ ldr r9, [sp, #632]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #628]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r7, r0, r1
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r6, r4, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ ldr r4, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r4
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ adcs r0, r7, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #28] @ 4-byte Spill
+ adc r0, r8, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #560
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #596]
+ add r10, sp, #564
+ ldr r7, [sp, #576]
+ ldr r11, [sp, #560]
+ mov r1, r5
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #592]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #588]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #584]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #580]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r2, [r0, #12]
+ add r0, sp, #520
+ bl .LmulPv288x32(PLT)
+ adds r0, r6, r11
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #520
+ ldr r11, [sp, #544]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r6, r0, r8
+ ldr r0, [sp, #52] @ 4-byte Reload
+ ldr r8, [sp, #556]
+ adcs r0, r0, r9
+ ldr r9, [sp, #552]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #548]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r5, r0, r1
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r7, r0, r1
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r6, r6, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r4
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ adcs r0, r5, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ adcs r0, r7, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #28] @ 4-byte Spill
+ adc r0, r8, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #480
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #516]
+ add r10, sp, #484
+ ldr r7, [sp, #496]
+ ldr r11, [sp, #480]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #512]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #508]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #504]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #500]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr r5, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #64] @ 4-byte Reload
+ add r0, sp, #440
+ ldr r2, [r5, #16]
+ bl .LmulPv288x32(PLT)
+ adds r0, r6, r11
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #440
+ ldr r11, [sp, #464]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r4, r0, r8
+ ldr r0, [sp, #52] @ 4-byte Reload
+ ldr r8, [sp, #476]
+ adcs r0, r0, r9
+ ldr r9, [sp, #472]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #468]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r7, r0, r1
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r6, r4, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ ldr r4, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r6, r4
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ adcs r0, r7, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #28] @ 4-byte Spill
+ adc r0, r8, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #400
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #436]
+ add r10, sp, #404
+ ldr r7, [sp, #416]
+ ldr r11, [sp, #400]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #432]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #428]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #424]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #420]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr r2, [r5, #20]
+ ldr r1, [sp, #64] @ 4-byte Reload
+ add r0, sp, #360
+ bl .LmulPv288x32(PLT)
+ adds r0, r6, r11
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #360
+ ldr r11, [sp, #384]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r5, r0, r8
+ ldr r0, [sp, #52] @ 4-byte Reload
+ ldr r8, [sp, #396]
+ adcs r0, r0, r9
+ ldr r9, [sp, #392]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #388]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r7, r0, r1
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r6, r5, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r6, r4
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ adcs r0, r7, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #28] @ 4-byte Spill
+ adc r0, r8, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #320
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #356]
+ add r10, sp, #324
+ ldr r7, [sp, #336]
+ ldr r11, [sp, #320]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #352]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #348]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #344]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #340]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r5, [sp, #64] @ 4-byte Reload
+ ldr r2, [r0, #24]
+ add r0, sp, #280
+ mov r1, r5
+ bl .LmulPv288x32(PLT)
+ adds r0, r6, r11
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #280
+ ldr r11, [sp, #304]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r4, r0, r8
+ ldr r0, [sp, #52] @ 4-byte Reload
+ ldr r8, [sp, #316]
+ adcs r0, r0, r9
+ ldr r9, [sp, #312]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #308]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r7, r0, r1
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r6, r4, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ ldr r4, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r4
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ adcs r0, r7, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #28] @ 4-byte Spill
+ adc r0, r8, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #240
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #276]
+ add r10, sp, #244
+ ldr r7, [sp, #256]
+ ldr r11, [sp, #240]
+ mov r1, r5
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #272]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #268]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #264]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #260]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r2, [r0, #28]
+ add r0, sp, #200
+ bl .LmulPv288x32(PLT)
+ adds r0, r6, r11
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #200
+ ldr r11, [sp, #224]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r5, r0, r8
+ ldr r0, [sp, #52] @ 4-byte Reload
+ ldr r8, [sp, #236]
+ adcs r0, r0, r9
+ ldr r9, [sp, #232]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #228]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r6, r0, r1
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r7, r0, r1
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r5, r5, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r4
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r6, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ adcs r0, r7, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #28] @ 4-byte Spill
+ adc r0, r8, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ mul r2, r5, r0
+ add r0, sp, #160
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #196]
+ add r10, sp, #164
+ ldr r4, [sp, #184]
+ ldr r6, [sp, #180]
+ ldr r7, [sp, #176]
+ ldr r11, [sp, #160]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #192]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #188]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #64] @ 4-byte Reload
+ ldr r2, [r0, #32]
+ add r0, sp, #120
+ bl .LmulPv288x32(PLT)
+ adds r0, r5, r11
+ ldr r1, [sp, #52] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ add lr, sp, #120
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r8
+ add r8, sp, #136
+ adcs r1, r1, r9
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r10, r1, r10
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r11, r1, r7
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r1, r6
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r1, r1, r4
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #24] @ 4-byte Reload
+ adc r1, r1, r2
+ str r1, [sp, #44] @ 4-byte Spill
+ ldm lr, {r2, r12, lr}
+ ldr r4, [sp, #132]
+ adds r5, r0, r2
+ ldr r0, [sp, #76] @ 4-byte Reload
+ mul r9, r5, r0
+ ldm r8, {r0, r1, r2, r3, r6, r8}
+ ldr r7, [sp, #56] @ 4-byte Reload
+ adcs r7, r7, r12
+ str r7, [sp, #32] @ 4-byte Spill
+ adcs r7, r10, lr
+ ldr r10, [sp, #68] @ 4-byte Reload
+ adcs r11, r11, r4
+ ldr r4, [sp, #72] @ 4-byte Reload
+ str r7, [sp, #36] @ 4-byte Spill
+ adcs r0, r4, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r10
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ mov r2, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r4, r0, r3
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #76] @ 4-byte Spill
+ adc r0, r8, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ add r0, sp, #80
+ bl .LmulPv288x32(PLT)
+ add r3, sp, #80
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r5, r0
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r9, r0, r1
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r1, [sp, #96]
+ str r9, [sp, #32] @ 4-byte Spill
+ adcs r2, r0, r2
+ adcs r0, r11, r3
+ str r2, [sp, #44] @ 4-byte Spill
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r7, r0, r1
+ ldr r1, [sp, #100]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ str r7, [sp, #48] @ 4-byte Spill
+ adcs r6, r0, r1
+ ldr r1, [sp, #104]
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs lr, r0, r1
+ ldr r1, [sp, #108]
+ ldr r0, [sp, #76] @ 4-byte Reload
+ str lr, [sp, #56] @ 4-byte Spill
+ adcs r4, r4, r1
+ ldr r1, [sp, #112]
+ str r4, [sp, #64] @ 4-byte Spill
+ adcs r5, r0, r1
+ ldr r1, [sp, #116]
+ ldr r0, [sp, #72] @ 4-byte Reload
+ str r5, [sp, #76] @ 4-byte Spill
+ adc r12, r0, r1
+ mov r0, r10
+ ldr r1, [r0, #16]
+ ldr r8, [r0]
+ ldr r11, [r0, #4]
+ ldr r10, [r0, #8]
+ ldr r3, [r0, #12]
+ str r12, [sp, #72] @ 4-byte Spill
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [r0, #20]
+ subs r8, r9, r8
+ ldr r9, [sp, #52] @ 4-byte Reload
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [r0, #24]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [r0, #28]
+ ldr r0, [r0, #32]
+ str r1, [sp, #40] @ 4-byte Spill
+ sbcs r1, r2, r11
+ sbcs r2, r9, r10
+ mov r10, r6
+ sbcs r3, r7, r3
+ ldr r7, [sp, #24] @ 4-byte Reload
+ sbcs r7, r6, r7
+ ldr r6, [sp, #28] @ 4-byte Reload
+ sbcs r11, lr, r6
+ ldr r6, [sp, #36] @ 4-byte Reload
+ sbcs lr, r4, r6
+ ldr r4, [sp, #40] @ 4-byte Reload
+ ldr r6, [sp, #44] @ 4-byte Reload
+ sbcs r4, r5, r4
+ ldr r5, [sp, #32] @ 4-byte Reload
+ sbc r0, r12, r0
+ asr r12, r0, #31
+ cmp r12, #0
+ movlt r8, r5
+ ldr r5, [sp, #60] @ 4-byte Reload
+ movlt r1, r6
+ movlt r2, r9
+ cmp r12, #0
+ movlt r7, r10
+ str r8, [r5]
+ str r1, [r5, #4]
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r2, [r5, #8]
+ movlt r3, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r3, [r5, #12]
+ str r7, [r5, #16]
+ movlt r11, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ cmp r12, #0
+ str r11, [r5, #20]
+ movlt lr, r1
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str lr, [r5, #24]
+ movlt r4, r1
+ ldr r1, [sp, #72] @ 4-byte Reload
+ str r4, [r5, #28]
+ movlt r0, r1
+ str r0, [r5, #32]
+ add sp, sp, #804
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end131:
+ .size mcl_fp_montNF9L, .Lfunc_end131-mcl_fp_montNF9L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montRed9L
+ .align 2
+ .type mcl_fp_montRed9L,%function
+mcl_fp_montRed9L: @ @mcl_fp_montRed9L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #476
+ sub sp, sp, #476
+ mov r5, r2
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r2, [r1, #4]
+ ldr r4, [r1]
+ ldr r11, [r1, #32]
+ ldr r10, [r1, #36]
+ ldr r0, [r5]
+ str r2, [sp, #56] @ 4-byte Spill
+ ldr r2, [r1, #8]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [r5, #4]
+ str r2, [sp, #52] @ 4-byte Spill
+ ldr r2, [r1, #12]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [r5, #8]
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r2, [r1, #16]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [r5, #12]
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r2, [r1, #20]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r5, #16]
+ str r2, [sp, #36] @ 4-byte Spill
+ ldr r2, [r1, #24]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [r5, #20]
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [r1, #28]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [r5, #24]
+ str r2, [sp, #44] @ 4-byte Spill
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [r5, #-4]
+ str r0, [sp, #108] @ 4-byte Spill
+ mul r2, r4, r0
+ ldr r0, [r5, #28]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [r5, #32]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r1, #64]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [r1, #68]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [r1, #40]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r1, #44]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [r1, #48]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [r1, #52]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [r1, #56]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [r1, #60]
+ mov r1, r5
+ str r0, [sp, #8] @ 4-byte Spill
+ add r0, sp, #432
+ bl .LmulPv288x32(PLT)
+ ldr r1, [sp, #432]
+ add lr, sp, #436
+ ldr r9, [sp, #468]
+ ldr r8, [sp, #464]
+ ldm lr, {r0, r2, r3, r6, r7, r12, lr}
+ adds r1, r4, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r4, r1, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ mov r1, r5
+ adcs r0, r0, r2
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ mul r2, r4, r7
+ adcs r0, r0, r12
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #44] @ 4-byte Spill
+ adcs r0, r11, r8
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r9, r10, r9
+ adcs r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #4] @ 4-byte Spill
+ add r0, sp, #392
+ bl .LmulPv288x32(PLT)
+ add r11, sp, #408
+ add r6, sp, #392
+ ldr r12, [sp, #428]
+ ldr lr, [sp, #424]
+ ldr r8, [sp, #420]
+ ldm r11, {r2, r10, r11}
+ ldm r6, {r0, r1, r3, r6}
+ adds r0, r4, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r4, r0, r1
+ ldr r0, [sp, #52] @ 4-byte Reload
+ mov r1, r5
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r4, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r11, r0, r11
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r8
+ mov r8, r7
+ str r0, [sp, #44] @ 4-byte Spill
+ adcs r0, r9, lr
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #4] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #8] @ 4-byte Spill
+ add r0, sp, #352
+ bl .LmulPv288x32(PLT)
+ add lr, sp, #372
+ add r7, sp, #352
+ ldr r10, [sp, #388]
+ ldr r9, [sp, #384]
+ ldm lr, {r6, r12, lr}
+ ldm r7, {r0, r1, r2, r3, r7}
+ adds r0, r4, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r4, r0, r1
+ ldr r0, [sp, #52] @ 4-byte Reload
+ mov r1, r5
+ adcs r0, r0, r2
+ mul r2, r4, r8
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r11, r6
+ mov r11, r8
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #8] @ 4-byte Spill
+ add r0, sp, #312
+ bl .LmulPv288x32(PLT)
+ add lr, sp, #332
+ ldr r7, [sp, #348]
+ add r9, sp, #320
+ ldm lr, {r6, r8, r12, lr}
+ ldr r1, [sp, #312]
+ ldr r3, [sp, #316]
+ ldm r9, {r0, r2, r9}
+ adds r1, r4, r1
+ mov r4, r11
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r10, r1, r3
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r10, r4
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ add r0, sp, #272
+ bl .LmulPv288x32(PLT)
+ add lr, sp, #272
+ ldr r11, [sp, #308]
+ ldr r9, [sp, #304]
+ ldm lr, {r0, r1, r2, r3, r6, r7, r12, lr}
+ adds r0, r10, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r8, r0, r1
+ ldr r0, [sp, #52] @ 4-byte Reload
+ mov r1, r5
+ adcs r0, r0, r2
+ mul r2, r8, r4
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ mov r6, r4
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ add r0, sp, #232
+ bl .LmulPv288x32(PLT)
+ add r11, sp, #256
+ add lr, sp, #232
+ ldm r11, {r7, r9, r10, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r8, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r4, r0, r1
+ ldr r0, [sp, #52] @ 4-byte Reload
+ mov r1, r5
+ adcs r0, r0, r2
+ mul r2, r4, r6
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ add r0, sp, #192
+ bl .LmulPv288x32(PLT)
+ add lr, sp, #212
+ add r7, sp, #192
+ ldr r9, [sp, #228]
+ ldr r8, [sp, #224]
+ ldm lr, {r6, r12, lr}
+ ldm r7, {r0, r1, r2, r3, r7}
+ adds r0, r4, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r4, r0, r1
+ ldr r0, [sp, #52] @ 4-byte Reload
+ mov r1, r5
+ adcs r10, r0, r2
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r11, r0, r3
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r7, r0, r7
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r6, r0, r6
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ mul r2, r4, r8
+ adcs r9, r0, r9
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #152
+ bl .LmulPv288x32(PLT)
+ add r12, sp, #152
+ ldm r12, {r0, r1, r3, r12}
+ ldr lr, [sp, #188]
+ adds r0, r4, r0
+ adcs r4, r10, r1
+ ldr r1, [sp, #168]
+ adcs r11, r11, r3
+ mul r2, r4, r8
+ ldr r3, [sp, #180]
+ adcs r0, r7, r12
+ ldr r7, [sp, #176]
+ ldr r12, [sp, #184]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #172]
+ adcs r10, r6, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r8, r1, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ mov r1, r5
+ adcs r7, r0, r7
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r9, r12
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ add r0, sp, #112
+ bl .LmulPv288x32(PLT)
+ add r3, sp, #112
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r4, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r6, r11, r1
+ ldr r1, [sp, #128]
+ adcs r9, r0, r2
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r11, r10, r3
+ adcs lr, r8, r1
+ ldr r1, [sp, #132]
+ str r11, [sp, #28] @ 4-byte Spill
+ str lr, [sp, #32] @ 4-byte Spill
+ adcs r7, r7, r1
+ ldr r1, [sp, #136]
+ str r7, [sp, #44] @ 4-byte Spill
+ adcs r8, r0, r1
+ ldr r1, [sp, #140]
+ ldr r0, [sp, #40] @ 4-byte Reload
+ str r8, [sp, #48] @ 4-byte Spill
+ adcs r4, r0, r1
+ ldr r1, [sp, #144]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ str r4, [sp, #52] @ 4-byte Spill
+ adcs r5, r0, r1
+ ldr r1, [sp, #148]
+ ldr r0, [sp, #64] @ 4-byte Reload
+ str r5, [sp, #108] @ 4-byte Spill
+ adcs r12, r0, r1
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adc r10, r0, #0
+ ldr r0, [sp, #100] @ 4-byte Reload
+ subs r2, r6, r0
+ ldr r0, [sp, #96] @ 4-byte Reload
+ sbcs r3, r9, r0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ sbcs r1, r11, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ sbcs r11, lr, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ sbcs r0, r7, r0
+ ldr r7, [sp, #84] @ 4-byte Reload
+ sbcs lr, r8, r7
+ ldr r7, [sp, #88] @ 4-byte Reload
+ sbcs r8, r4, r7
+ ldr r4, [sp, #68] @ 4-byte Reload
+ sbcs r4, r5, r4
+ ldr r5, [sp, #72] @ 4-byte Reload
+ sbcs r5, r12, r5
+ sbc r7, r10, #0
+ ands r7, r7, #1
+ movne r2, r6
+ ldr r6, [sp, #104] @ 4-byte Reload
+ movne r3, r9
+ str r2, [r6]
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r3, [r6, #4]
+ movne r1, r2
+ cmp r7, #0
+ str r1, [r6, #8]
+ ldr r1, [sp, #32] @ 4-byte Reload
+ movne r11, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r11, [r6, #12]
+ movne r0, r1
+ str r0, [r6, #16]
+ ldr r0, [sp, #48] @ 4-byte Reload
+ movne lr, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ cmp r7, #0
+ movne r5, r12
+ str lr, [r6, #20]
+ movne r8, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ str r8, [r6, #24]
+ movne r4, r0
+ str r4, [r6, #28]
+ str r5, [r6, #32]
+ add sp, sp, #476
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end132:
+ .size mcl_fp_montRed9L, .Lfunc_end132-mcl_fp_montRed9L
+ .cantunwind
+ .fnend
+
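+@ mcl_fp_addPre9L: plain 9-word addition with no modular reduction; the
+@ 9-word sum is stored through r0 and the final carry is returned in r0.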
+ .globl mcl_fp_addPre9L
+ .align 2
+ .type mcl_fp_addPre9L,%function
+mcl_fp_addPre9L: @ @mcl_fp_addPre9L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #16
+ sub sp, sp, #16
+ ldm r1, {r3, r12, lr}
+ ldr r9, [r1, #12]
+ ldmib r2, {r5, r6, r7}
+ ldr r4, [r2, #16]
+ ldr r8, [r2]
+ ldr r11, [r2, #28]
+ str r4, [sp, #12] @ 4-byte Spill
+ ldr r4, [r2, #20]
+ adds r10, r8, r3
+ adcs r5, r5, r12
+ ldr r12, [r1, #32]
+ ldr r8, [sp, #12] @ 4-byte Reload
+ str r10, [r0]
+ adcs lr, r6, lr
+ ldr r6, [r1, #20]
+ adcs r7, r7, r9
+ str r4, [sp, #4] @ 4-byte Spill
+ ldr r4, [r2, #24]
+ ldr r2, [r2, #32]
+ ldr r3, [sp, #4] @ 4-byte Reload
+ str r4, [sp] @ 4-byte Spill
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #28]
+ ldr r4, [r1, #24]
+ ldr r1, [r1, #16]
+ adcs r1, r8, r1
+ adcs r6, r3, r6
+ ldr r3, [sp] @ 4-byte Reload
+ stmib r0, {r5, lr}
+ str r7, [r0, #12]
+ str r1, [r0, #16]
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r6, [r0, #20]
+ adcs r4, r3, r4
+ adcs r2, r11, r2
+ str r4, [r0, #24]
+ adcs r1, r1, r12
+ str r2, [r0, #28]
+ str r1, [r0, #32]
+ mov r0, #0
+ adc r0, r0, #0
+ add sp, sp, #16
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end133:
+ .size mcl_fp_addPre9L, .Lfunc_end133-mcl_fp_addPre9L
+ .cantunwind
+ .fnend
+
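+@ mcl_fp_subPre9L: plain 9-word subtraction with no reduction; the difference
+@ is stored through r0 and the borrow (0 or 1) is returned in r0.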
+ .globl mcl_fp_subPre9L
+ .align 2
+ .type mcl_fp_subPre9L,%function
+mcl_fp_subPre9L: @ @mcl_fp_subPre9L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #20
+ sub sp, sp, #20
+ ldr r3, [r2, #8]
+ add lr, r1, #16
+ ldr r11, [r2, #4]
+ ldr r10, [r2, #12]
+ ldr r4, [r2]
+ str r3, [sp] @ 4-byte Spill
+ ldr r3, [r2, #16]
+ str r3, [sp, #4] @ 4-byte Spill
+ ldr r3, [r2, #20]
+ str r3, [sp, #8] @ 4-byte Spill
+ ldr r3, [r2, #24]
+ str r3, [sp, #12] @ 4-byte Spill
+ ldr r3, [r2, #28]
+ str r3, [sp, #16] @ 4-byte Spill
+ ldmib r1, {r5, r6, r7}
+ ldm lr, {r3, r12, lr}
+ ldr r9, [r1]
+ ldr r8, [r1, #28]
+ subs r4, r9, r4
+ ldr r9, [r2, #32]
+ ldr r2, [sp] @ 4-byte Reload
+ sbcs r11, r5, r11
+ ldr r5, [sp, #16] @ 4-byte Reload
+ sbcs r6, r6, r2
+ sbcs r7, r7, r10
+ ldr r10, [r1, #32]
+ ldr r1, [sp, #4] @ 4-byte Reload
+ sbcs r3, r3, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ sbcs r2, r12, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ stm r0, {r4, r11}
+ str r6, [r0, #8]
+ str r7, [r0, #12]
+ str r3, [r0, #16]
+ str r2, [r0, #20]
+ sbcs r1, lr, r1
+ sbcs r5, r8, r5
+ str r1, [r0, #24]
+ sbcs r1, r10, r9
+ str r5, [r0, #28]
+ str r1, [r0, #32]
+ mov r0, #0
+ sbc r0, r0, #0
+ and r0, r0, #1
+ add sp, sp, #20
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end134:
+ .size mcl_fp_subPre9L, .Lfunc_end134-mcl_fp_subPre9L
+ .cantunwind
+ .fnend
+
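+@ mcl_fp_shr1_9L: shift a 9-word value right by one bit; each word takes the
+@ low bit of the word above it via lsrs/rrx and "orr ..., lsl #31" pairs.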
+ .globl mcl_fp_shr1_9L
+ .align 2
+ .type mcl_fp_shr1_9L,%function
+mcl_fp_shr1_9L: @ @mcl_fp_shr1_9L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, lr}
+ push {r4, r5, r6, r7, r8, lr}
+ add r12, r1, #16
+ ldr r2, [r1, #8]
+ ldr lr, [r1, #12]
+ ldm r12, {r4, r5, r6, r8, r12}
+ ldm r1, {r1, r3}
+ lsrs r7, r3, #1
+ rrx r1, r1
+ str r1, [r0]
+ lsr r1, r3, #1
+ orr r1, r1, r2, lsl #31
+ str r1, [r0, #4]
+ lsrs r1, lr, #1
+ rrx r1, r2
+ str r1, [r0, #8]
+ lsr r1, lr, #1
+ orr r1, r1, r4, lsl #31
+ str r1, [r0, #12]
+ lsrs r1, r5, #1
+ rrx r1, r4
+ str r1, [r0, #16]
+ lsr r1, r5, #1
+ orr r1, r1, r6, lsl #31
+ str r1, [r0, #20]
+ lsrs r1, r8, #1
+ rrx r1, r6
+ str r1, [r0, #24]
+ lsr r1, r8, #1
+ orr r1, r1, r12, lsl #31
+ str r1, [r0, #28]
+ lsr r1, r12, #1
+ str r1, [r0, #32]
+ pop {r4, r5, r6, r7, r8, lr}
+ mov pc, lr
+.Lfunc_end135:
+ .size mcl_fp_shr1_9L, .Lfunc_end135-mcl_fp_shr1_9L
+ .cantunwind
+ .fnend
+
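+@ mcl_fp_add9L: modular addition (reading of the code below): the 9-word sum
+@ a+b is stored first, p is trial-subtracted, and when that subtraction does
+@ not borrow (%nocarry) the reduced value overwrites the stored sum.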
+ .globl mcl_fp_add9L
+ .align 2
+ .type mcl_fp_add9L,%function
+mcl_fp_add9L: @ @mcl_fp_add9L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #16
+ sub sp, sp, #16
+ ldm r1, {r12, lr}
+ ldr r5, [r2]
+ ldr r9, [r1, #8]
+ ldr r8, [r1, #12]
+ ldmib r2, {r4, r6, r7}
+ adds r12, r5, r12
+ ldr r5, [r1, #24]
+ adcs lr, r4, lr
+ ldr r4, [r1, #20]
+ str r12, [sp, #8] @ 4-byte Spill
+ adcs r10, r6, r9
+ ldr r6, [r1, #16]
+ adcs r9, r7, r8
+ ldr r7, [r2, #16]
+ str r10, [sp, #4] @ 4-byte Spill
+ adcs r6, r7, r6
+ ldr r7, [r2, #20]
+ adcs r7, r7, r4
+ ldr r4, [r2, #24]
+ adcs r11, r4, r5
+ ldr r5, [r1, #28]
+ ldr r4, [r2, #28]
+ ldr r1, [r1, #32]
+ ldr r2, [r2, #32]
+ adcs r8, r4, r5
+ adcs r4, r2, r1
+ mov r2, lr
+ add r1, r0, #16
+ str r4, [r0, #32]
+ str r12, [r0]
+ stmib r0, {r2, r10}
+ str r9, [r0, #12]
+ stm r1, {r6, r7, r11}
+ mov r1, #0
+ str r8, [r0, #28]
+ adc r1, r1, #0
+ str r1, [sp, #12] @ 4-byte Spill
+ ldm r3, {r1, r5, lr}
+ ldr r10, [sp, #8] @ 4-byte Reload
+ ldr r12, [r3, #12]
+ subs r1, r10, r1
+ str r1, [sp, #8] @ 4-byte Spill
+ sbcs r1, r2, r5
+ ldr r5, [r3, #20]
+ str r1, [sp] @ 4-byte Spill
+ ldr r1, [sp, #4] @ 4-byte Reload
+ sbcs r2, r1, lr
+ ldr r1, [r3, #16]
+ sbcs r12, r9, r12
+ sbcs r1, r6, r1
+ ldr r6, [r3, #24]
+ sbcs r5, r7, r5
+ ldr r7, [r3, #28]
+ ldr r3, [r3, #32]
+ sbcs r6, r11, r6
+ sbcs r7, r8, r7
+ sbcs r3, r4, r3
+ ldr r4, [sp, #12] @ 4-byte Reload
+ sbc r4, r4, #0
+ tst r4, #1
+ bne .LBB136_2
+@ BB#1: @ %nocarry
+ str r3, [r0, #32]
+ ldr r3, [sp, #8] @ 4-byte Reload
+ str r3, [r0]
+ ldr r3, [sp] @ 4-byte Reload
+ str r3, [r0, #4]
+ str r2, [r0, #8]
+ str r12, [r0, #12]
+ add r0, r0, #16
+ stm r0, {r1, r5, r6, r7}
+.LBB136_2: @ %carry
+ add sp, sp, #16
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end136:
+ .size mcl_fp_add9L, .Lfunc_end136-mcl_fp_add9L
+ .cantunwind
+ .fnend
+
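+@ mcl_fp_addNF9L: branch-free variant of modular addition: both a+b and
+@ a+b-p are computed and movlt on the sign of the trial subtraction selects
+@ the result word by word.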
+ .globl mcl_fp_addNF9L
+ .align 2
+ .type mcl_fp_addNF9L,%function
+mcl_fp_addNF9L: @ @mcl_fp_addNF9L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #52
+ sub sp, sp, #52
+ ldr r9, [r1]
+ ldmib r1, {r8, lr}
+ ldr r5, [r2]
+ ldr r12, [r1, #12]
+ ldmib r2, {r4, r6, r7}
+ ldr r10, [r3, #4]
+ adds r5, r5, r9
+ adcs r9, r4, r8
+ ldr r4, [r1, #16]
+ ldr r8, [r1, #20]
+ str r5, [sp, #16] @ 4-byte Spill
+ ldr r5, [r1, #24]
+ adcs r11, r6, lr
+ ldr lr, [sp, #16] @ 4-byte Reload
+ str r9, [sp, #28] @ 4-byte Spill
+ adcs r12, r7, r12
+ ldr r7, [r2, #16]
+ str r12, [sp, #32] @ 4-byte Spill
+ adcs r6, r7, r4
+ ldr r7, [r2, #20]
+ str r6, [sp, #36] @ 4-byte Spill
+ adcs r4, r7, r8
+ ldr r7, [r2, #24]
+ ldr r8, [r3]
+ str r4, [sp, #40] @ 4-byte Spill
+ adcs r7, r7, r5
+ ldr r5, [r2, #28]
+ ldr r2, [r2, #32]
+ str r7, [sp, #48] @ 4-byte Spill
+ ldr r7, [r1, #28]
+ ldr r1, [r1, #32]
+ adcs r7, r5, r7
+ ldr r5, [r3, #8]
+ adc r1, r2, r1
+ ldr r2, [r3, #16]
+ str r7, [sp, #44] @ 4-byte Spill
+ ldr r7, [r3, #12]
+ subs r8, lr, r8
+ str r1, [sp, #24] @ 4-byte Spill
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r3, #20]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r3, #24]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [r3, #28]
+ ldr r3, [r3, #32]
+ str r3, [sp] @ 4-byte Spill
+ ldr r3, [sp, #4] @ 4-byte Reload
+ str r2, [sp, #20] @ 4-byte Spill
+ sbcs r2, r9, r10
+ sbcs r5, r11, r5
+ sbcs r7, r12, r7
+ sbcs r12, r6, r3
+ ldr r3, [sp, #8] @ 4-byte Reload
+ sbcs r6, r4, r3
+ ldr r4, [sp, #48] @ 4-byte Reload
+ ldr r3, [sp, #12] @ 4-byte Reload
+ sbcs r9, r4, r3
+ ldr r3, [sp, #44] @ 4-byte Reload
+ ldr r4, [sp, #20] @ 4-byte Reload
+ sbcs r10, r3, r4
+ ldr r3, [sp] @ 4-byte Reload
+ ldr r4, [sp, #28] @ 4-byte Reload
+ sbc r3, r1, r3
+ asr r1, r3, #31
+ cmp r1, #0
+ movlt r8, lr
+ movlt r2, r4
+ movlt r5, r11
+ cmp r1, #0
+ str r8, [r0]
+ str r2, [r0, #4]
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r5, [r0, #8]
+ movlt r7, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r7, [r0, #12]
+ movlt r12, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r12, [r0, #16]
+ movlt r6, r2
+ cmp r1, #0
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r6, [r0, #20]
+ movlt r9, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r9, [r0, #24]
+ movlt r10, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r10, [r0, #28]
+ movlt r3, r1
+ str r3, [r0, #32]
+ add sp, sp, #52
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end137:
+ .size mcl_fp_addNF9L, .Lfunc_end137-mcl_fp_addNF9L
+ .cantunwind
+ .fnend
+
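+@ mcl_fp_sub9L: modular subtraction: the 9-word difference a-b is stored,
+@ and if the subtraction borrowed (%carry) the modulus p is added back into
+@ the stored result.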
+ .globl mcl_fp_sub9L
+ .align 2
+ .type mcl_fp_sub9L,%function
+mcl_fp_sub9L: @ @mcl_fp_sub9L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #24
+ sub sp, sp, #24
+ ldm r2, {r12, lr}
+ ldr r5, [r1]
+ ldr r8, [r2, #8]
+ ldr r9, [r2, #12]
+ ldmib r1, {r4, r6, r7}
+ subs r12, r5, r12
+ ldr r5, [r2, #24]
+ sbcs lr, r4, lr
+ ldr r4, [r2, #20]
+ sbcs r8, r6, r8
+ ldr r6, [r2, #16]
+ sbcs r9, r7, r9
+ ldr r7, [r1, #16]
+ sbcs r10, r7, r6
+ ldr r7, [r1, #20]
+ ldr r6, [r1, #28]
+ sbcs r7, r7, r4
+ ldr r4, [r1, #24]
+ ldr r1, [r1, #32]
+ sbcs r4, r4, r5
+ ldr r5, [r2, #28]
+ ldr r2, [r2, #32]
+ sbcs r5, r6, r5
+ sbcs r1, r1, r2
+ add r2, r0, #8
+ str r1, [r0, #32]
+ stm r0, {r12, lr}
+ stm r2, {r8, r9, r10}
+ mov r2, #0
+ str r7, [r0, #20]
+ str r4, [r0, #24]
+ str r5, [r0, #28]
+ sbc r2, r2, #0
+ tst r2, #1
+ beq .LBB138_2
+@ BB#1: @ %carry
+ ldr r2, [r3, #32]
+ ldr r6, [r3, #4]
+ ldr r11, [r3, #12]
+ str r2, [sp, #20] @ 4-byte Spill
+ ldr r2, [r3, #8]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r3, #16]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r3, #20]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r3, #24]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [r3, #28]
+ ldr r3, [r3]
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [sp] @ 4-byte Reload
+ adds r3, r3, r12
+ adcs r6, r6, lr
+ adcs r8, r2, r8
+ ldr r2, [sp, #4] @ 4-byte Reload
+ adcs r12, r11, r9
+ adcs lr, r2, r10
+ ldr r2, [sp, #8] @ 4-byte Reload
+ adcs r7, r2, r7
+ ldr r2, [sp, #12] @ 4-byte Reload
+ adcs r4, r2, r4
+ ldr r2, [sp, #16] @ 4-byte Reload
+ stm r0, {r3, r6, r8, r12, lr}
+ str r7, [r0, #20]
+ str r4, [r0, #24]
+ adcs r5, r2, r5
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r5, [r0, #28]
+ adc r1, r2, r1
+ str r1, [r0, #32]
+.LBB138_2: @ %nocarry
+ add sp, sp, #24
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end138:
+ .size mcl_fp_sub9L, .Lfunc_end138-mcl_fp_sub9L
+ .cantunwind
+ .fnend
+
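+@ mcl_fp_subNF9L: branch-free modular subtraction: a-b and a-b+p are both
+@ computed, with movge on the sign of a-b picking the final words.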
+ .globl mcl_fp_subNF9L
+ .align 2
+ .type mcl_fp_subNF9L,%function
+mcl_fp_subNF9L: @ @mcl_fp_subNF9L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #52
+ sub sp, sp, #52
+ ldr r7, [r2, #32]
+ str r7, [sp, #48] @ 4-byte Spill
+ ldr r7, [r1, #32]
+ str r7, [sp, #44] @ 4-byte Spill
+ ldm r2, {r6, r8}
+ ldr r7, [r2, #8]
+ ldr r5, [r2, #16]
+ ldr r4, [r1, #16]
+ ldr r11, [r1, #20]
+ ldr r10, [r1, #24]
+ ldr r9, [r1, #28]
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [r2, #12]
+ str r7, [sp, #32] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ str r7, [sp, #40] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ ldr r2, [r2, #28]
+ str r7, [sp, #24] @ 4-byte Spill
+ str r2, [sp, #28] @ 4-byte Spill
+ ldm r1, {r1, r2, r12, lr}
+ subs r6, r1, r6
+ ldr r1, [sp, #36] @ 4-byte Reload
+ sbcs r7, r2, r8
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r6, [sp, #12] @ 4-byte Spill
+ str r7, [sp, #16] @ 4-byte Spill
+ sbcs r8, r12, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r8, [sp, #20] @ 4-byte Spill
+ sbcs r12, lr, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ sbcs r5, r4, r5
+ str r12, [sp, #32] @ 4-byte Spill
+ str r5, [sp, #36] @ 4-byte Spill
+ sbcs lr, r11, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ ldr r11, [r3, #16]
+ str lr, [sp, #40] @ 4-byte Spill
+ sbcs r4, r10, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ ldr r10, [r3, #20]
+ str r4, [sp, #24] @ 4-byte Spill
+ sbcs r9, r9, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ sbc r1, r2, r1
+ ldr r2, [r3, #24]
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [r3, #32]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [r3, #4]
+ str r1, [sp, #8] @ 4-byte Spill
+ ldr r1, [r3, #8]
+ str r1, [sp, #4] @ 4-byte Spill
+ ldr r1, [r3, #12]
+ str r1, [sp] @ 4-byte Spill
+ ldr r1, [r3, #28]
+ ldr r3, [r3]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adds r3, r6, r3
+ adcs r6, r7, r1
+ ldr r1, [sp, #4] @ 4-byte Reload
+ adcs r7, r8, r1
+ ldr r1, [sp] @ 4-byte Reload
+ adcs r1, r12, r1
+ adcs r12, r5, r11
+ adcs r5, lr, r10
+ ldr r10, [sp, #12] @ 4-byte Reload
+ adcs lr, r4, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ ldr r4, [sp, #48] @ 4-byte Reload
+ adcs r8, r9, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ adc r11, r4, r2
+ asr r2, r4, #31
+ cmp r2, #0
+ movge r3, r10
+ str r3, [r0]
+ ldr r3, [sp, #16] @ 4-byte Reload
+ movge r6, r3
+ ldr r3, [sp, #20] @ 4-byte Reload
+ str r6, [r0, #4]
+ movge r7, r3
+ ldr r3, [sp, #32] @ 4-byte Reload
+ cmp r2, #0
+ str r7, [r0, #8]
+ movge r1, r3
+ str r1, [r0, #12]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ movge r12, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r12, [r0, #16]
+ movge r5, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ cmp r2, #0
+ movge r8, r9
+ movge r11, r4
+ str r5, [r0, #20]
+ movge lr, r1
+ str lr, [r0, #24]
+ str r8, [r0, #28]
+ str r11, [r0, #32]
+ add sp, sp, #52
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end139:
+ .size mcl_fp_subNF9L, .Lfunc_end139-mcl_fp_subNF9L
+ .cantunwind
+ .fnend
+
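+@ mcl_fpDbl_add9L: addition of two 18-word (double-width) values; the low
+@ nine words of the sum are stored directly and the high nine words receive
+@ one conditional subtraction of p, selected with movne on the final borrow.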
+ .globl mcl_fpDbl_add9L
+ .align 2
+ .type mcl_fpDbl_add9L,%function
+mcl_fpDbl_add9L: @ @mcl_fpDbl_add9L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #88
+ sub sp, sp, #88
+ ldm r1, {r7, r9}
+ ldr r8, [r1, #8]
+ ldr lr, [r1, #12]
+ ldm r2, {r4, r5, r6, r12}
+ add r11, r2, #16
+ adds r4, r4, r7
+ ldr r7, [r2, #28]
+ str r4, [sp, #36] @ 4-byte Spill
+ ldr r4, [r2, #64]
+ str r7, [sp, #24] @ 4-byte Spill
+ str r4, [sp, #76] @ 4-byte Spill
+ ldr r4, [r2, #68]
+ str r4, [sp, #80] @ 4-byte Spill
+ adcs r4, r5, r9
+ str r4, [sp, #32] @ 4-byte Spill
+ adcs r4, r6, r8
+ str r4, [sp, #28] @ 4-byte Spill
+ ldr r4, [r2, #32]
+ adcs r9, r12, lr
+ add lr, r1, #16
+ str r4, [sp, #48] @ 4-byte Spill
+ ldr r4, [r2, #36]
+ str r4, [sp, #52] @ 4-byte Spill
+ ldr r4, [r2, #40]
+ str r4, [sp, #56] @ 4-byte Spill
+ ldr r4, [r2, #44]
+ str r4, [sp, #60] @ 4-byte Spill
+ ldr r4, [r2, #48]
+ str r4, [sp, #64] @ 4-byte Spill
+ ldr r4, [r2, #52]
+ str r4, [sp, #68] @ 4-byte Spill
+ ldr r4, [r2, #56]
+ str r4, [sp, #72] @ 4-byte Spill
+ ldr r4, [r2, #60]
+ str r4, [sp, #84] @ 4-byte Spill
+ ldm r11, {r5, r6, r11}
+ ldr r2, [r1, #64]
+ ldr r8, [r1, #32]
+ ldr r4, [r1, #36]
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [r1, #68]
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [r1, #40]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #44]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r1, #48]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #52]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [r1, #56]
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #20] @ 4-byte Spill
+ ldm lr, {r1, r2, r12, lr}
+ ldr r10, [sp, #36] @ 4-byte Reload
+ ldr r7, [sp, #32] @ 4-byte Reload
+ str r10, [r0]
+ str r7, [r0, #4]
+ ldr r7, [sp, #28] @ 4-byte Reload
+ adcs r1, r5, r1
+ adcs r2, r6, r2
+ str r7, [r0, #8]
+ str r9, [r0, #12]
+ str r1, [r0, #16]
+ str r2, [r0, #20]
+ ldr r2, [sp, #24] @ 4-byte Reload
+ adcs r1, r11, r12
+ str r1, [r0, #24]
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r2, r2, lr
+ str r2, [r0, #28]
+ ldr r2, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r8
+ str r1, [r0, #32]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r4, r2, r4
+ ldr r2, [sp] @ 4-byte Reload
+ adcs r5, r1, r2
+ ldr r1, [sp, #60] @ 4-byte Reload
+ ldr r2, [sp, #4] @ 4-byte Reload
+ str r5, [sp, #56] @ 4-byte Spill
+ adcs lr, r1, r2
+ ldr r1, [sp, #64] @ 4-byte Reload
+ ldr r2, [sp, #8] @ 4-byte Reload
+ str lr, [sp, #60] @ 4-byte Spill
+ adcs r12, r1, r2
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r12, [sp, #64] @ 4-byte Spill
+ adcs r7, r1, r2
+ ldr r1, [sp, #72] @ 4-byte Reload
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r7, [sp, #68] @ 4-byte Spill
+ adcs r8, r1, r2
+ ldr r1, [sp, #84] @ 4-byte Reload
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r8, [sp, #72] @ 4-byte Spill
+ adcs r1, r1, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r6, r1, r2
+ ldr r1, [sp, #80] @ 4-byte Reload
+ ldr r2, [sp, #44] @ 4-byte Reload
+ adcs r9, r1, r2
+ mov r2, #0
+ adc r1, r2, #0
+ str r9, [sp, #76] @ 4-byte Spill
+ str r1, [sp, #80] @ 4-byte Spill
+ ldmib r3, {r2, r11}
+ ldr r1, [r3, #12]
+ ldr r10, [r3]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [r3, #16]
+ subs r10, r4, r10
+ sbcs r2, r5, r2
+ sbcs r11, lr, r11
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [r3, #20]
+ ldr r5, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [r3, #24]
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [r3, #28]
+ ldr r3, [r3, #32]
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ sbcs r1, r12, r1
+ sbcs r12, r7, r5
+ ldr r7, [sp, #44] @ 4-byte Reload
+ ldr r5, [sp, #84] @ 4-byte Reload
+ sbcs lr, r8, r7
+ ldr r7, [sp, #48] @ 4-byte Reload
+ mov r8, r6
+ sbcs r7, r5, r7
+ ldr r5, [sp, #52] @ 4-byte Reload
+ sbcs r5, r6, r5
+ sbcs r6, r9, r3
+ ldr r3, [sp, #80] @ 4-byte Reload
+ sbc r9, r3, #0
+ ldr r3, [sp, #56] @ 4-byte Reload
+ ands r9, r9, #1
+ movne r10, r4
+ str r10, [r0, #36]
+ movne r2, r3
+ str r2, [r0, #40]
+ ldr r2, [sp, #60] @ 4-byte Reload
+ movne r11, r2
+ ldr r2, [sp, #64] @ 4-byte Reload
+ cmp r9, #0
+ str r11, [r0, #44]
+ movne r1, r2
+ str r1, [r0, #48]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ movne r12, r1
+ ldr r1, [sp, #72] @ 4-byte Reload
+ str r12, [r0, #52]
+ movne lr, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ cmp r9, #0
+ movne r5, r8
+ str lr, [r0, #56]
+ movne r7, r1
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str r7, [r0, #60]
+ str r5, [r0, #64]
+ movne r6, r1
+ str r6, [r0, #68]
+ add sp, sp, #88
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end140:
+ .size mcl_fpDbl_add9L, .Lfunc_end140-mcl_fpDbl_add9L
+ .cantunwind
+ .fnend
+
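+@ mcl_fpDbl_sub9L: subtraction of two 18-word values; the high nine words
+@ are conditionally corrected by adding p back, selected with moveq on the
+@ final borrow flag.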
+ .globl mcl_fpDbl_sub9L
+ .align 2
+ .type mcl_fpDbl_sub9L,%function
+mcl_fpDbl_sub9L: @ @mcl_fpDbl_sub9L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #80
+ sub sp, sp, #80
+ ldr r7, [r2, #64]
+ str r7, [sp, #72] @ 4-byte Spill
+ ldr r7, [r2, #68]
+ str r7, [sp, #76] @ 4-byte Spill
+ ldr r7, [r2, #32]
+ str r7, [sp, #48] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ str r7, [sp, #52] @ 4-byte Spill
+ ldr r7, [r2, #44]
+ str r7, [sp, #40] @ 4-byte Spill
+ ldr r7, [r2, #48]
+ str r7, [sp, #56] @ 4-byte Spill
+ ldr r7, [r2, #52]
+ str r7, [sp, #60] @ 4-byte Spill
+ ldr r7, [r2, #56]
+ str r7, [sp, #64] @ 4-byte Spill
+ ldr r7, [r2, #60]
+ str r7, [sp, #68] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r7, [sp, #44] @ 4-byte Spill
+ ldm r2, {r5, r6, r7, r8}
+ ldr r4, [r2, #16]
+ ldr r10, [r2, #24]
+ str r4, [sp, #20] @ 4-byte Spill
+ ldr r4, [r2, #20]
+ ldr r2, [r2, #28]
+ str r4, [sp, #24] @ 4-byte Spill
+ str r2, [sp, #32] @ 4-byte Spill
+ ldm r1, {r2, r12, lr}
+ ldr r4, [r1, #12]
+ ldr r11, [r1, #60]
+ subs r9, r2, r5
+ ldr r2, [r1, #64]
+ sbcs r5, r12, r6
+ sbcs r6, lr, r7
+ add lr, r1, #16
+ ldr r7, [r1, #36]
+ sbcs r4, r4, r8
+ ldr r8, [r1, #32]
+ str r2, [sp, #36] @ 4-byte Spill
+ ldr r2, [r1, #68]
+ str r2, [sp, #28] @ 4-byte Spill
+ ldr r2, [r1, #40]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #44]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r1, #48]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #52]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [r1, #56]
+ str r2, [sp, #16] @ 4-byte Spill
+ ldm lr, {r1, r2, r12, lr}
+ str r9, [r0]
+ stmib r0, {r5, r6}
+ str r4, [r0, #12]
+ ldr r5, [sp, #20] @ 4-byte Reload
+ ldr r4, [sp, #24] @ 4-byte Reload
+ sbcs r1, r1, r5
+ ldr r5, [sp, #16] @ 4-byte Reload
+ sbcs r2, r2, r4
+ str r1, [r0, #16]
+ str r2, [r0, #20]
+ ldr r2, [sp, #32] @ 4-byte Reload
+ sbcs r1, r12, r10
+ str r1, [r0, #24]
+ ldr r1, [sp, #48] @ 4-byte Reload
+ sbcs r2, lr, r2
+ str r2, [r0, #28]
+ ldr r2, [sp, #44] @ 4-byte Reload
+ sbcs r1, r8, r1
+ str r1, [r0, #32]
+ sbcs r1, r7, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ ldr r7, [sp] @ 4-byte Reload
+ str r1, [sp, #48] @ 4-byte Spill
+ sbcs r4, r7, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ ldr r7, [sp, #4] @ 4-byte Reload
+ sbcs r9, r7, r2
+ ldr r2, [sp, #56] @ 4-byte Reload
+ ldr r7, [sp, #8] @ 4-byte Reload
+ sbcs r12, r7, r2
+ ldr r2, [sp, #60] @ 4-byte Reload
+ ldr r7, [sp, #12] @ 4-byte Reload
+ str r12, [sp, #56] @ 4-byte Spill
+ sbcs lr, r7, r2
+ ldr r2, [sp, #64] @ 4-byte Reload
+ ldr r7, [sp, #36] @ 4-byte Reload
+ str lr, [sp, #60] @ 4-byte Spill
+ sbcs r10, r5, r2
+ ldr r2, [sp, #68] @ 4-byte Reload
+ ldr r5, [sp, #28] @ 4-byte Reload
+ str r10, [sp, #64] @ 4-byte Spill
+ sbcs r6, r11, r2
+ ldr r2, [sp, #72] @ 4-byte Reload
+ str r6, [sp, #68] @ 4-byte Spill
+ sbcs r8, r7, r2
+ ldr r2, [sp, #76] @ 4-byte Reload
+ str r8, [sp, #44] @ 4-byte Spill
+ sbcs r11, r5, r2
+ mov r2, #0
+ sbc r2, r2, #0
+ str r11, [sp, #76] @ 4-byte Spill
+ str r2, [sp, #72] @ 4-byte Spill
+ ldr r2, [r3, #32]
+ str r2, [sp, #52] @ 4-byte Spill
+ ldmib r3, {r5, r7}
+ ldr r2, [r3, #12]
+ str r2, [sp, #24] @ 4-byte Spill
+ ldr r2, [r3, #16]
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r2, [r3, #20]
+ str r2, [sp, #28] @ 4-byte Spill
+ ldr r2, [r3, #24]
+ str r2, [sp, #36] @ 4-byte Spill
+ ldr r2, [r3, #28]
+ ldr r3, [r3]
+ adds r3, r1, r3
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [sp, #24] @ 4-byte Reload
+ adcs r5, r4, r5
+ adcs r1, r9, r7
+ ldr r7, [sp, #32] @ 4-byte Reload
+ adcs r2, r12, r2
+ adcs r12, lr, r7
+ ldr r7, [sp, #28] @ 4-byte Reload
+ adcs lr, r10, r7
+ ldr r7, [sp, #36] @ 4-byte Reload
+ adcs r10, r6, r7
+ ldr r6, [sp, #40] @ 4-byte Reload
+ ldr r7, [sp, #52] @ 4-byte Reload
+ adcs r6, r8, r6
+ adc r11, r11, r7
+ ldr r7, [sp, #72] @ 4-byte Reload
+ ands r8, r7, #1
+ ldr r7, [sp, #48] @ 4-byte Reload
+ moveq r5, r4
+ moveq r1, r9
+ moveq r3, r7
+ cmp r8, #0
+ str r3, [r0, #36]
+ str r5, [r0, #40]
+ str r1, [r0, #44]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r2, [r0, #48]
+ moveq r12, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ str r12, [r0, #52]
+ moveq lr, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ cmp r8, #0
+ str lr, [r0, #56]
+ moveq r10, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r10, [r0, #60]
+ moveq r6, r1
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str r6, [r0, #64]
+ moveq r11, r1
+ str r11, [r0, #68]
+ add sp, sp, #80
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end141:
+ .size mcl_fpDbl_sub9L, .Lfunc_end141-mcl_fpDbl_sub9L
+ .cantunwind
+ .fnend
+
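+@ .LmulPv320x32: local helper that multiplies the 10-word (320-bit) operand
+@ at r1 by the single word in r2; the 11-word product is written to r0 via
+@ umull/umlal carry chains.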
+ .align 2
+ .type .LmulPv320x32,%function
+.LmulPv320x32: @ @mulPv320x32
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r11, lr}
+ ldr r12, [r1]
+ ldmib r1, {r3, lr}
+ ldr r9, [r1, #12]
+ umull r4, r8, lr, r2
+ umull lr, r6, r12, r2
+ mov r5, r4
+ mov r7, r6
+ str lr, [r0]
+ umull lr, r12, r9, r2
+ umlal r7, r5, r3, r2
+ str r5, [r0, #8]
+ str r7, [r0, #4]
+ umull r5, r7, r3, r2
+ adds r3, r6, r5
+ adcs r3, r7, r4
+ adcs r3, r8, lr
+ str r3, [r0, #12]
+ ldr r3, [r1, #16]
+ umull r7, r6, r3, r2
+ adcs r3, r12, r7
+ str r3, [r0, #16]
+ ldr r3, [r1, #20]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #20]
+ ldr r3, [r1, #24]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #24]
+ ldr r3, [r1, #28]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #28]
+ ldr r3, [r1, #32]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #32]
+ ldr r1, [r1, #36]
+ umull r3, r7, r1, r2
+ adcs r1, r6, r3
+ str r1, [r0, #36]
+ adc r1, r7, #0
+ str r1, [r0, #40]
+ pop {r4, r5, r6, r7, r8, r9, r11, lr}
+ mov pc, lr
+.Lfunc_end142:
+ .size .LmulPv320x32, .Lfunc_end142-.LmulPv320x32
+ .cantunwind
+ .fnend
+
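+@ mcl_fp_mulUnitPre10L: multiply a 10-word operand by one 32-bit word; a thin
+@ wrapper that calls .LmulPv320x32 into stack space and copies the 11-word
+@ result out to the destination held in r4.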
+ .globl mcl_fp_mulUnitPre10L
+ .align 2
+ .type mcl_fp_mulUnitPre10L,%function
+mcl_fp_mulUnitPre10L: @ @mcl_fp_mulUnitPre10L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r11, lr}
+ .pad #48
+ sub sp, sp, #48
+ mov r4, r0
+ mov r0, sp
+ bl .LmulPv320x32(PLT)
+ ldr r12, [sp, #40]
+ ldr lr, [sp, #36]
+ ldr r8, [sp, #32]
+ ldr r9, [sp, #28]
+ ldr r0, [sp, #24]
+ ldr r1, [sp, #20]
+ ldm sp, {r6, r7}
+ add r5, sp, #8
+ ldm r5, {r2, r3, r5}
+ stm r4, {r6, r7}
+ add r6, r4, #8
+ stm r6, {r2, r3, r5}
+ str r1, [r4, #20]
+ str r0, [r4, #24]
+ str r9, [r4, #28]
+ str r8, [r4, #32]
+ str lr, [r4, #36]
+ str r12, [r4, #40]
+ add sp, sp, #48
+ pop {r4, r5, r6, r7, r8, r9, r11, lr}
+ mov pc, lr
+.Lfunc_end143:
+ .size mcl_fp_mulUnitPre10L, .Lfunc_end143-mcl_fp_mulUnitPre10L
+ .cantunwind
+ .fnend
+
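+@ mcl_fpDbl_mulPre10L: 10-word x 10-word multiplication producing a 20-word
+@ product, split into three mcl_fpDbl_mulPre5L calls (low halves, high
+@ halves, and sums of the halves) plus recombination -- effectively a single
+@ Karatsuba step ("Karatsuba" is a reading of the structure, not a label
+@ taken from the source).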
+ .globl mcl_fpDbl_mulPre10L
+ .align 2
+ .type mcl_fpDbl_mulPre10L,%function
+mcl_fpDbl_mulPre10L: @ @mcl_fpDbl_mulPre10L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #156
+ sub sp, sp, #156
+ mov r6, r2
+ mov r5, r1
+ mov r4, r0
+ bl mcl_fpDbl_mulPre5L(PLT)
+ add r0, r4, #40
+ add r1, r5, #20
+ add r2, r6, #20
+ bl mcl_fpDbl_mulPre5L(PLT)
+ add r11, r6, #24
+ ldr r7, [r6, #12]
+ ldr r8, [r6, #16]
+ ldr r1, [r6, #20]
+ ldm r11, {r0, r2, r10, r11}
+ ldm r6, {r6, r9, r12}
+ adds lr, r6, r1
+ adcs r3, r9, r0
+ mov r0, #0
+ str lr, [sp, #72] @ 4-byte Spill
+ adcs r2, r12, r2
+ str r3, [sp, #68] @ 4-byte Spill
+ adcs r12, r7, r10
+ str r2, [sp, #64] @ 4-byte Spill
+ adcs r10, r8, r11
+ str r12, [sp, #60] @ 4-byte Spill
+ adc r6, r0, #0
+ ldr r0, [r5, #32]
+ str r10, [sp, #56] @ 4-byte Spill
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r5, #36]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldmib r5, {r8, r9, r11}
+ ldr r0, [r5, #16]
+ ldr r7, [r5, #20]
+ ldr r1, [r5, #28]
+ str lr, [sp, #76]
+ str r3, [sp, #80]
+ str r2, [sp, #84]
+ str r12, [sp, #88]
+ str r10, [sp, #92]
+ add r2, sp, #76
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r5, #24]
+ ldr r5, [r5]
+ adds r5, r5, r7
+ adcs r7, r8, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ str r5, [sp, #96]
+ adcs r9, r9, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r7, [sp, #100]
+ str r9, [sp, #104]
+ adcs r11, r11, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ str r11, [sp, #108]
+ adcs r8, r1, r0
+ mov r0, #0
+ add r1, sp, #96
+ adc r10, r0, #0
+ add r0, sp, #116
+ str r8, [sp, #112]
+ bl mcl_fpDbl_mulPre5L(PLT)
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #68] @ 4-byte Reload
+ cmp r6, #0
+ ldr r2, [sp, #64] @ 4-byte Reload
+ ldr r3, [sp, #60] @ 4-byte Reload
+ moveq r5, r6
+ moveq r8, r6
+ moveq r11, r6
+ moveq r9, r6
+ moveq r7, r6
+ str r5, [sp, #52] @ 4-byte Spill
+ adds r0, r5, r0
+ ldr r5, [sp, #56] @ 4-byte Reload
+ adcs r1, r7, r1
+ adcs r2, r9, r2
+ adcs r3, r11, r3
+ adcs r12, r8, r5
+ mov r5, #0
+ adc lr, r5, #0
+ cmp r10, #0
+ ldr r5, [sp, #52] @ 4-byte Reload
+ moveq r1, r7
+ ldr r7, [sp, #136]
+ moveq r3, r11
+ moveq r2, r9
+ moveq r12, r8
+ moveq lr, r10
+ cmp r10, #0
+ moveq r0, r5
+ and r5, r6, r10
+ ldr r6, [sp, #152]
+ adds r8, r0, r7
+ ldr r7, [sp, #140]
+ adcs r10, r1, r7
+ ldr r7, [sp, #144]
+ adcs r11, r2, r7
+ ldr r7, [sp, #148]
+ adcs r0, r3, r7
+ adcs r12, r12, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ adc r9, lr, r5
+ ldm r4, {r5, r6, r7, lr}
+ ldr r1, [sp, #116]
+ ldr r2, [sp, #120]
+ ldr r0, [sp, #124]
+ ldr r3, [sp, #128]
+ subs r1, r1, r5
+ sbcs r2, r2, r6
+ ldr r6, [sp, #132]
+ sbcs r0, r0, r7
+ ldr r7, [r4, #16]
+ sbcs lr, r3, lr
+ ldr r3, [r4, #20]
+ sbcs r5, r6, r7
+ ldr r6, [r4, #32]
+ ldr r7, [r4, #52]
+ str r3, [sp, #72] @ 4-byte Spill
+ sbcs r3, r8, r3
+ ldr r8, [r4, #56]
+ str r3, [sp, #44] @ 4-byte Spill
+ ldr r3, [r4, #24]
+ str r6, [sp, #28] @ 4-byte Spill
+ str r3, [sp, #68] @ 4-byte Spill
+ sbcs r3, r10, r3
+ ldr r10, [r4, #44]
+ str r3, [sp, #56] @ 4-byte Spill
+ ldr r3, [r4, #28]
+ str r3, [sp, #64] @ 4-byte Spill
+ sbcs r3, r11, r3
+ str r3, [sp, #52] @ 4-byte Spill
+ ldr r3, [sp, #60] @ 4-byte Reload
+ sbcs r3, r3, r6
+ str r3, [sp, #48] @ 4-byte Spill
+ ldr r3, [r4, #36]
+ str r3, [sp, #60] @ 4-byte Spill
+ sbcs r3, r12, r3
+ ldr r12, [r4, #64]
+ str r3, [sp, #40] @ 4-byte Spill
+ sbc r3, r9, #0
+ ldr r9, [r4, #40]
+ str r3, [sp, #36] @ 4-byte Spill
+ ldr r3, [r4, #76]
+ subs r1, r1, r9
+ sbcs r2, r2, r10
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r2, [r4, #48]
+ ldr r11, [sp, #32] @ 4-byte Reload
+ sbcs r0, r0, r2
+ str r2, [sp, #20] @ 4-byte Spill
+ ldr r2, [r4, #72]
+ str r0, [sp, #24] @ 4-byte Spill
+ sbcs r0, lr, r7
+ ldr lr, [r4, #68]
+ str r0, [sp, #16] @ 4-byte Spill
+ sbcs r0, r5, r8
+ ldr r5, [r4, #60]
+ ldr r6, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ sbcs r0, r0, r5
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ sbcs r0, r0, r12
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ sbcs r0, r0, lr
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ sbcs r0, r0, r2
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ sbcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ sbc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adds r0, r0, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r0, [r4, #20]
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r11
+ adcs r0, r0, r6
+ str r1, [r4, #24]
+ ldr r1, [sp, #28] @ 4-byte Reload
+ ldr r6, [sp, #16] @ 4-byte Reload
+ str r0, [r4, #28]
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #12] @ 4-byte Reload
+ str r1, [r4, #32]
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #8] @ 4-byte Reload
+ str r0, [r4, #36]
+ adcs r1, r9, r1
+ ldr r0, [sp, #4] @ 4-byte Reload
+ str r1, [r4, #40]
+ ldr r1, [sp, #20] @ 4-byte Reload
+ adcs r0, r10, r0
+ adcs r1, r1, r6
+ str r0, [r4, #44]
+ ldr r0, [sp, #48] @ 4-byte Reload
+ ldr r6, [sp, #52] @ 4-byte Reload
+ str r1, [r4, #48]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r0, r7, r0
+ adcs r1, r8, r1
+ adcs r5, r5, r6
+ adcs r7, r12, #0
+ add r12, r4, #52
+ adcs r6, lr, #0
+ stm r12, {r0, r1, r5, r7}
+ adcs r2, r2, #0
+ str r6, [r4, #68]
+ adc r3, r3, #0
+ str r2, [r4, #72]
+ str r3, [r4, #76]
+ add sp, sp, #156
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end144:
+ .size mcl_fpDbl_mulPre10L, .Lfunc_end144-mcl_fpDbl_mulPre10L
+ .cantunwind
+ .fnend
+
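+@ mcl_fpDbl_sqrPre10L: squaring of a 10-word operand; the same three-call
+@ mcl_fpDbl_mulPre5L split as mcl_fpDbl_mulPre10L, with both input pointers
+@ aimed at the same halves and the cross term doubled via the shift/orr
+@ sequence before the third call.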
+ .globl mcl_fpDbl_sqrPre10L
+ .align 2
+ .type mcl_fpDbl_sqrPre10L,%function
+mcl_fpDbl_sqrPre10L: @ @mcl_fpDbl_sqrPre10L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #156
+ sub sp, sp, #156
+ mov r5, r1
+ mov r4, r0
+ mov r2, r5
+ bl mcl_fpDbl_mulPre5L(PLT)
+ add r1, r5, #20
+ add r0, r4, #40
+ mov r2, r1
+ bl mcl_fpDbl_mulPre5L(PLT)
+ ldr lr, [r5, #32]
+ ldr r12, [r5, #36]
+ ldmib r5, {r2, r3, r6, r8}
+ ldr r0, [r5, #20]
+ ldr r7, [r5, #24]
+ ldr r1, [r5, #28]
+ ldr r5, [r5]
+ adds r5, r5, r0
+ adcs r0, r2, r7
+ str r5, [sp, #96]
+ str r5, [sp, #76]
+ adcs r1, r3, r1
+ add r3, sp, #80
+ str r0, [sp, #100]
+ adcs r2, r6, lr
+ str r1, [sp, #104]
+ adcs r6, r8, r12
+ str r2, [sp, #108]
+ str r6, [sp, #112]
+ stm r3, {r0, r1, r2, r6}
+ lsr r3, r2, #31
+ orr r3, r3, r6, lsl #1
+ str r3, [sp, #72] @ 4-byte Spill
+ lsr r3, r1, #31
+ lsl r1, r1, #1
+ orr r1, r1, r0, lsr #31
+ orr r2, r3, r2, lsl #1
+ str r1, [sp, #64] @ 4-byte Spill
+ lsr r1, r5, #31
+ str r2, [sp, #68] @ 4-byte Spill
+ add r2, sp, #76
+ orr r11, r1, r0, lsl #1
+ mov r0, #0
+ add r1, sp, #96
+ adc r7, r0, #0
+ add r0, sp, #116
+ bl mcl_fpDbl_mulPre5L(PLT)
+ ldr r10, [sp, #136]
+ ldr r9, [sp, #140]
+ ldr r8, [sp, #144]
+ ldr r0, [sp, #64] @ 4-byte Reload
+ ldr r2, [sp, #148]
+ ldr r1, [sp, #152]
+ adds r3, r10, r5, lsl #1
+ adcs r5, r9, r11
+ adcs r12, r8, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs lr, r2, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r11, r1, r0
+ adc r6, r7, r6, lsr #31
+ cmp r7, #0
+ moveq lr, r2
+ moveq r12, r8
+ moveq r11, r1
+ moveq r6, r7
+ moveq r5, r9
+ cmp r7, #0
+ add r7, sp, #116
+ moveq r3, r10
+ ldm r4, {r9, r10}
+ ldr r0, [r4, #8]
+ ldr r8, [r4, #12]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldm r7, {r1, r2, r7}
+ ldr r0, [sp, #128]
+ subs r1, r1, r9
+ ldr r9, [r4, #40]
+ sbcs r2, r2, r10
+ ldr r10, [r4, #44]
+ str r2, [sp, #20] @ 4-byte Spill
+ ldr r2, [sp, #72] @ 4-byte Reload
+ sbcs r7, r7, r2
+ ldr r2, [r4, #48]
+ str r7, [sp, #44] @ 4-byte Spill
+ sbcs r8, r0, r8
+ ldr r0, [r4, #16]
+ ldr r7, [sp, #132]
+ str r2, [sp, #16] @ 4-byte Spill
+ sbcs r0, r7, r0
+ ldr r7, [r4, #52]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r4, #20]
+ sbcs r3, r3, r0
+ str r3, [sp, #36] @ 4-byte Spill
+ ldr r3, [r4, #24]
+ str r3, [sp, #72] @ 4-byte Spill
+ sbcs r3, r5, r3
+ ldr r5, [r4, #60]
+ str r3, [sp, #56] @ 4-byte Spill
+ ldr r3, [r4, #28]
+ str r3, [sp, #68] @ 4-byte Spill
+ sbcs r3, r12, r3
+ ldr r12, [r4, #64]
+ str r3, [sp, #52] @ 4-byte Spill
+ ldr r3, [r4, #32]
+ str r3, [sp, #64] @ 4-byte Spill
+ sbcs r3, lr, r3
+ ldr lr, [r4, #68]
+ str r3, [sp, #48] @ 4-byte Spill
+ ldr r3, [r4, #36]
+ str r3, [sp, #60] @ 4-byte Spill
+ sbcs r3, r11, r3
+ str r3, [sp, #32] @ 4-byte Spill
+ sbc r3, r6, #0
+ subs r1, r1, r9
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r3, [sp, #28] @ 4-byte Spill
+ ldr r3, [r4, #76]
+ sbcs r1, r1, r10
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ ldr r11, [sp, #20] @ 4-byte Reload
+ sbcs r1, r1, r2
+ ldr r2, [r4, #72]
+ str r1, [sp, #44] @ 4-byte Spill
+ sbcs r1, r8, r7
+ ldr r8, [r4, #56]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ ldr r6, [sp, #44] @ 4-byte Reload
+ sbcs r1, r1, r8
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ sbcs r1, r1, r5
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ sbcs r1, r1, r12
+ str r1, [sp, #4] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ sbcs r1, r1, lr
+ str r1, [sp, #8] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ sbcs r1, r1, r2
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #32] @ 4-byte Reload
+ sbcs r1, r1, r3
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #28] @ 4-byte Reload
+ sbc r1, r1, #0
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #24] @ 4-byte Reload
+ adds r0, r0, r1
+ ldr r1, [sp, #72] @ 4-byte Reload
+ str r0, [r4, #20]
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r1, r1, r11
+ adcs r0, r0, r6
+ str r1, [r4, #24]
+ ldr r1, [sp, #64] @ 4-byte Reload
+ ldr r6, [sp, #12] @ 4-byte Reload
+ str r0, [r4, #28]
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #40] @ 4-byte Reload
+ str r1, [r4, #32]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #8] @ 4-byte Reload
+ str r0, [r4, #36]
+ adcs r1, r9, r1
+ ldr r0, [sp, #4] @ 4-byte Reload
+ str r1, [r4, #40]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r10, r0
+ adcs r1, r1, r6
+ str r0, [r4, #44]
+ ldr r0, [sp, #48] @ 4-byte Reload
+ ldr r6, [sp, #52] @ 4-byte Reload
+ str r1, [r4, #48]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r0, r7, r0
+ adcs r1, r8, r1
+ adcs r5, r5, r6
+ adcs r7, r12, #0
+ add r12, r4, #52
+ adcs r6, lr, #0
+ stm r12, {r0, r1, r5, r7}
+ adcs r2, r2, #0
+ str r6, [r4, #68]
+ adc r3, r3, #0
+ str r2, [r4, #72]
+ str r3, [r4, #76]
+ add sp, sp, #156
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end145:
+ .size mcl_fpDbl_sqrPre10L, .Lfunc_end145-mcl_fpDbl_sqrPre10L
+ .cantunwind
+ .fnend
+
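+@ mcl_fp_mont10L appears to be word-serial Montgomery multiplication of two
+@ 10-word (320-bit) operands: the word inverse (presumably -p^-1 mod 2^32) is
+@ loaded from p[-4], and each round pairs a .LmulPv320x32 of the next
+@ multiplier word with a .LmulPv320x32 of the modulus, folding the partial
+@ products with adcs chains.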
+ .globl mcl_fp_mont10L
+ .align 2
+ .type mcl_fp_mont10L,%function
+mcl_fp_mont10L: @ @mcl_fp_mont10L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #28
+ sub sp, sp, #28
+ .pad #1024
+ sub sp, sp, #1024
+ mov r7, r2
+ ldr r5, [r3, #-4]
+ str r0, [sp, #68] @ 4-byte Spill
+ add r0, sp, #1000
+ str r3, [sp, #84] @ 4-byte Spill
+ str r1, [sp, #76] @ 4-byte Spill
+ mov r4, r3
+ mov r6, r1
+ ldr r2, [r7]
+ str r7, [sp, #72] @ 4-byte Spill
+ str r5, [sp, #80] @ 4-byte Spill
+ bl .LmulPv320x32(PLT)
+ ldr r0, [sp, #1004]
+ ldr r10, [sp, #1000]
+ mov r1, r4
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1008]
+ mul r2, r10, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1012]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1040]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #1036]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #1032]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1028]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1024]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1020]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1016]
+ str r0, [sp, #28] @ 4-byte Spill
+ add r0, sp, #952
+ bl .LmulPv320x32(PLT)
+ ldr r0, [sp, #992]
+ ldr r2, [r7, #4]
+ ldr r9, [sp, #968]
+ ldr r8, [sp, #952]
+ ldr r11, [sp, #956]
+ ldr r5, [sp, #960]
+ ldr r4, [sp, #964]
+ mov r1, r6
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #988]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #984]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #980]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #976]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #972]
+ str r0, [sp, #4] @ 4-byte Spill
+ add r0, sp, #904
+ bl .LmulPv320x32(PLT)
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adds r0, r8, r10
+ ldr r2, [sp, #4] @ 4-byte Reload
+ add lr, sp, #908
+ ldr r10, [sp, #944]
+ mov r0, #0
+ adcs r1, r11, r1
+ add r11, sp, #932
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r5, r1
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r4, r1
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r1, r9, r1
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #8] @ 4-byte Reload
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ ldm r11, {r5, r6, r11}
+ ldr r4, [sp, #904]
+ adcs r8, r2, r1
+ adc r9, r0, #0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #52] @ 4-byte Reload
+ adds r4, r7, r4
+ ldr r7, [sp, #48] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r8, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ adcs r0, r9, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r4, r0
+ add r0, sp, #856
+ bl .LmulPv320x32(PLT)
+ ldr r0, [sp, #896]
+ add r11, sp, #856
+ ldr r6, [sp, #880]
+ ldr r7, [sp, #876]
+ ldr r5, [sp, #872]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #892]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #888]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #884]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [r0, #8]
+ add r0, sp, #808
+ bl .LmulPv320x32(PLT)
+ adds r0, r4, r8
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #808
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #848]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #832
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r8, r0, r1
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldm r11, {r4, r5, r6, r11}
+ adc r9, r0, #0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adds r7, r7, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r8, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ adcs r0, r9, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #760
+ bl .LmulPv320x32(PLT)
+ ldr r0, [sp, #800]
+ add r11, sp, #760
+ ldr r6, [sp, #784]
+ ldr r4, [sp, #780]
+ ldr r5, [sp, #776]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #796]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #792]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #788]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [r0, #12]
+ add r0, sp, #712
+ bl .LmulPv320x32(PLT)
+ adds r0, r7, r8
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #716
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #752]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #740
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r8, r0, r1
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldm r11, {r5, r6, r11}
+ ldr r4, [sp, #712]
+ adc r9, r0, #0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adds r4, r7, r4
+ ldr r7, [sp, #60] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r8, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ adcs r0, r9, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r4, r0
+ add r0, sp, #664
+ bl .LmulPv320x32(PLT)
+ ldr r0, [sp, #704]
+ add r11, sp, #664
+ ldr r6, [sp, #688]
+ ldr r7, [sp, #684]
+ ldr r5, [sp, #680]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #700]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #696]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #692]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [r0, #16]
+ add r0, sp, #616
+ bl .LmulPv320x32(PLT)
+ adds r0, r4, r8
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #616
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #656]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #640
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r8, r0, r1
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldm r11, {r4, r5, r6, r11}
+ adc r9, r0, #0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adds r7, r7, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r8, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ adcs r0, r9, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #568
+ bl .LmulPv320x32(PLT)
+ ldr r0, [sp, #608]
+ add r11, sp, #568
+ ldr r6, [sp, #592]
+ ldr r4, [sp, #588]
+ ldr r5, [sp, #584]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #604]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #600]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #596]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [r0, #20]
+ add r0, sp, #520
+ bl .LmulPv320x32(PLT)
+ adds r0, r7, r8
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #524
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #560]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #548
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r8, r0, r1
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldm r11, {r5, r6, r11}
+ ldr r4, [sp, #520]
+ adc r9, r0, #0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adds r4, r7, r4
+ ldr r7, [sp, #60] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r8, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ adcs r0, r9, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r4, r0
+ add r0, sp, #472
+ bl .LmulPv320x32(PLT)
+ ldr r0, [sp, #512]
+ add r11, sp, #472
+ ldr r6, [sp, #496]
+ ldr r7, [sp, #492]
+ ldr r5, [sp, #488]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #508]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #504]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #500]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [r0, #24]
+ add r0, sp, #424
+ bl .LmulPv320x32(PLT)
+ adds r0, r4, r8
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #424
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #464]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #448
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r8, r0, r1
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldm r11, {r4, r5, r6, r11}
+ adc r9, r0, #0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adds r7, r7, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r8, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ adcs r0, r9, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #376
+ bl .LmulPv320x32(PLT)
+ ldr r0, [sp, #416]
+ add r11, sp, #376
+ ldr r6, [sp, #400]
+ ldr r4, [sp, #396]
+ ldr r5, [sp, #392]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #412]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #408]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #404]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [r0, #28]
+ add r0, sp, #328
+ bl .LmulPv320x32(PLT)
+ adds r0, r7, r8
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #332
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #368]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #356
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r8, r0, r1
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldm r11, {r5, r6, r11}
+ ldr r4, [sp, #328]
+ adc r9, r0, #0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adds r4, r7, r4
+ ldr r7, [sp, #60] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r8, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ adcs r0, r9, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r4, r0
+ add r0, sp, #280
+ bl .LmulPv320x32(PLT)
+ ldr r0, [sp, #320]
+ add r11, sp, #280
+ ldr r6, [sp, #304]
+ ldr r7, [sp, #300]
+ ldr r5, [sp, #296]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #316]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #312]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #308]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [r0, #32]
+ add r0, sp, #232
+ bl .LmulPv320x32(PLT)
+ adds r0, r4, r8
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #232
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #272]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #256
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r8, r0, r1
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldm r11, {r4, r5, r6, r11}
+ adc r9, r0, #0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adds r7, r7, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r8, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ adcs r0, r9, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #184
+ bl .LmulPv320x32(PLT)
+ ldr r0, [sp, #224]
+ add r11, sp, #184
+ ldr r6, [sp, #208]
+ ldr r4, [sp, #204]
+ ldr r5, [sp, #200]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #220]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #216]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #212]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [r0, #36]
+ add r0, sp, #136
+ bl .LmulPv320x32(PLT)
+ adds r0, r7, r8
+ ldr r1, [sp, #60] @ 4-byte Reload
+ ldr r2, [sp, #8] @ 4-byte Reload
+ add lr, sp, #136
+ add r7, sp, #152
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ add r9, sp, #164
+ adcs r10, r1, r10
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r11, r1, r11
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r5
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r1, r4
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r1, r6
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #24] @ 4-byte Reload
+ adc r1, r1, #0
+ str r1, [sp, #44] @ 4-byte Spill
+ ldm lr, {r2, r6, r12, lr}
+ ldr r8, [sp, #176]
+ adds r4, r0, r2
+ ldr r0, [sp, #80] @ 4-byte Reload
+ ldm r9, {r3, r5, r9}
+ adcs r6, r10, r6
+ mul r2, r4, r0
+ ldm r7, {r0, r1, r7}
+ str r6, [sp, #40] @ 4-byte Spill
+ adcs r6, r11, r12
+ ldr r11, [sp, #84] @ 4-byte Reload
+ str r6, [sp, #36] @ 4-byte Spill
+ ldr r6, [sp, #76] @ 4-byte Reload
+ adcs r10, r6, lr
+ ldr r6, [sp, #72] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r6, r0, r1
+ ldr r0, [sp, #60] @ 4-byte Reload
+ mov r1, r11
+ adcs r0, r0, r7
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r7, r0, r8
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #88
+ bl .LmulPv320x32(PLT)
+ add r3, sp, #88
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r4, r0
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r8, r0, r1
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r4, r0, r2
+ ldr r2, [sp, #104]
+ adcs r0, r10, r3
+ str r4, [sp, #40] @ 4-byte Spill
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #108]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r5, r6, r2
+ ldr r2, [sp, #112]
+ str r5, [sp, #48] @ 4-byte Spill
+ adcs r12, r0, r2
+ ldr r2, [sp, #116]
+ ldr r0, [sp, #64] @ 4-byte Reload
+ str r12, [sp, #52] @ 4-byte Spill
+ adcs lr, r0, r2
+ ldr r2, [sp, #120]
+ ldr r0, [sp, #76] @ 4-byte Reload
+ str lr, [sp, #60] @ 4-byte Spill
+ adcs r0, r0, r2
+ ldr r2, [sp, #124]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #128]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r9, r7, r2
+ adc r0, r0, #0
+ str r9, [sp, #64] @ 4-byte Spill
+ str r0, [sp, #44] @ 4-byte Spill
+ mov r0, r11
+ ldr r2, [r0, #16]
+ ldr r10, [r0]
+ ldr r3, [r0, #4]
+ ldr r1, [r0, #8]
+ ldr r6, [r0, #12]
+ ldr r7, [r0, #24]
+ ldr r11, [r0, #32]
+ str r2, [sp, #28] @ 4-byte Spill
+ ldr r2, [r0, #20]
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r2, [r0, #28]
+ ldr r0, [r0, #36]
+ str r2, [sp, #36] @ 4-byte Spill
+ mov r2, r8
+ ldr r8, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ subs r10, r2, r10
+ sbcs r3, r4, r3
+ ldr r4, [sp, #80] @ 4-byte Reload
+ sbcs r1, r8, r1
+ sbcs r6, r4, r6
+ sbcs r4, r5, r0
+ ldr r0, [sp, #32] @ 4-byte Reload
+ sbcs r5, r12, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ sbcs r12, lr, r7
+ ldr r7, [sp, #36] @ 4-byte Reload
+ sbcs lr, r0, r7
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r7, [sp, #44] @ 4-byte Reload
+ sbcs r11, r0, r11
+ ldr r0, [sp, #84] @ 4-byte Reload
+ sbcs r0, r9, r0
+ ldr r9, [sp, #68] @ 4-byte Reload
+ sbc r7, r7, #0
+ ands r7, r7, #1
+ movne r10, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ movne r1, r8
+ str r10, [r9]
+ movne r3, r2
+ cmp r7, #0
+ str r3, [r9, #4]
+ str r1, [r9, #8]
+ ldr r1, [sp, #80] @ 4-byte Reload
+ movne r6, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r6, [r9, #12]
+ movne r4, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r4, [r9, #16]
+ movne r5, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ cmp r7, #0
+ str r5, [r9, #20]
+ movne r12, r1
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str r12, [r9, #24]
+ movne lr, r1
+ ldr r1, [sp, #72] @ 4-byte Reload
+ str lr, [r9, #28]
+ movne r11, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ cmp r7, #0
+ str r11, [r9, #32]
+ movne r0, r1
+ str r0, [r9, #36]
+ add sp, sp, #28
+ add sp, sp, #1024
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end146:
+ .size mcl_fp_mont10L, .Lfunc_end146-mcl_fp_mont10L
+ .cantunwind
+ .fnend
+
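+@ mcl_fp_montNF10L: 10-limb (320-bit) Montgomery multiplication, NF variant.
+@ (Description inferred from the symbol name and the code below, not taken from the
+@ upstream sources.) Each round loads one 32-bit limb of the multiplier from the
+@ pointer spilled at [sp, #72], calls .LmulPv320x32 with it, then calls
+@ .LmulPv320x32 again with the accumulator's low limb times the constant read from
+@ [r3, #-4] to fold in a multiple of the modulus. Unlike mcl_fp_mont10L above, the
+@ epilogue selects between the reduced and unreduced result with movlt on the sign
+@ of the trial subtraction rather than with the carry-based movne sequence.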
+ .globl mcl_fp_montNF10L
+ .align 2
+ .type mcl_fp_montNF10L,%function
+mcl_fp_montNF10L: @ @mcl_fp_montNF10L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #28
+ sub sp, sp, #28
+ .pad #1024
+ sub sp, sp, #1024
+ mov r7, r2
+ ldr r5, [r3, #-4]
+ str r0, [sp, #68] @ 4-byte Spill
+ add r0, sp, #1000
+ str r3, [sp, #84] @ 4-byte Spill
+ str r1, [sp, #76] @ 4-byte Spill
+ mov r4, r3
+ mov r6, r1
+ ldr r2, [r7]
+ str r7, [sp, #72] @ 4-byte Spill
+ str r5, [sp, #80] @ 4-byte Spill
+ bl .LmulPv320x32(PLT)
+ ldr r0, [sp, #1004]
+ ldr r10, [sp, #1000]
+ mov r1, r4
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1008]
+ mul r2, r10, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1012]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1040]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #1036]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #1032]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1028]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1024]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1020]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1016]
+ str r0, [sp, #28] @ 4-byte Spill
+ add r0, sp, #952
+ bl .LmulPv320x32(PLT)
+ ldr r0, [sp, #992]
+ ldr r2, [r7, #4]
+ ldr r9, [sp, #968]
+ ldr r8, [sp, #952]
+ ldr r11, [sp, #956]
+ ldr r5, [sp, #960]
+ ldr r4, [sp, #964]
+ mov r1, r6
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #988]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #984]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #980]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #976]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #972]
+ str r0, [sp, #4] @ 4-byte Spill
+ add r0, sp, #904
+ bl .LmulPv320x32(PLT)
+ adds r0, r8, r10
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #908
+ ldr r10, [sp, #940]
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r11, r0
+ ldr r11, [sp, #936]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r5, r0
+ ldr r5, [sp, #932]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #904]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r9, r0
+ ldr r9, [sp, #944]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r7, r1, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adc r8, r1, r0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r6, [sp, #52] @ 4-byte Reload
+ adds r4, r6, r4
+ ldr r6, [sp, #48] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r7, lr
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r8, r10
+ str r0, [sp, #32] @ 4-byte Spill
+ adc r0, r9, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r4, r0
+ add r0, sp, #856
+ bl .LmulPv320x32(PLT)
+ ldr r0, [sp, #896]
+ add r11, sp, #856
+ ldr r6, [sp, #880]
+ ldr r7, [sp, #876]
+ ldr r5, [sp, #872]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #892]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #888]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #884]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [r0, #8]
+ add r0, sp, #808
+ bl .LmulPv320x32(PLT)
+ adds r0, r4, r8
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #808
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #848]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #844]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #832
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r7, r0, r1
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldm r11, {r4, r5, r11}
+ adc r8, r0, r1
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r6, [sp, #64] @ 4-byte Reload
+ adds r6, r6, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r7, r4
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r8, r10
+ str r0, [sp, #32] @ 4-byte Spill
+ adc r0, r9, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #760
+ bl .LmulPv320x32(PLT)
+ ldr r0, [sp, #800]
+ add r11, sp, #760
+ ldr r5, [sp, #784]
+ ldr r7, [sp, #780]
+ ldr r4, [sp, #776]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #796]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #792]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #788]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [r0, #12]
+ add r0, sp, #712
+ bl .LmulPv320x32(PLT)
+ adds r0, r6, r8
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #716
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #752]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #748]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #744]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #712]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #740]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r7, r0, r1
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r8, r0, r1
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r6, [sp, #64] @ 4-byte Reload
+ adds r4, r6, r4
+ ldr r6, [sp, #60] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r7, lr
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r8, r10
+ str r0, [sp, #32] @ 4-byte Spill
+ adc r0, r9, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r4, r0
+ add r0, sp, #664
+ bl .LmulPv320x32(PLT)
+ ldr r0, [sp, #704]
+ add r11, sp, #664
+ ldr r6, [sp, #688]
+ ldr r7, [sp, #684]
+ ldr r5, [sp, #680]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #700]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #696]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #692]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [r0, #16]
+ add r0, sp, #616
+ bl .LmulPv320x32(PLT)
+ adds r0, r4, r8
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #616
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #656]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #652]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #640
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r7, r0, r1
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldm r11, {r4, r5, r11}
+ adc r8, r0, r1
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r6, [sp, #64] @ 4-byte Reload
+ adds r6, r6, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r7, r4
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r8, r10
+ str r0, [sp, #32] @ 4-byte Spill
+ adc r0, r9, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #568
+ bl .LmulPv320x32(PLT)
+ ldr r0, [sp, #608]
+ add r11, sp, #568
+ ldr r5, [sp, #592]
+ ldr r7, [sp, #588]
+ ldr r4, [sp, #584]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #604]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #600]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #596]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [r0, #20]
+ add r0, sp, #520
+ bl .LmulPv320x32(PLT)
+ adds r0, r6, r8
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #524
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #560]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #556]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #552]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #520]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #548]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r7, r0, r1
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r8, r0, r1
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r6, [sp, #64] @ 4-byte Reload
+ adds r4, r6, r4
+ ldr r6, [sp, #60] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r7, lr
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r8, r10
+ str r0, [sp, #32] @ 4-byte Spill
+ adc r0, r9, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r4, r0
+ add r0, sp, #472
+ bl .LmulPv320x32(PLT)
+ ldr r0, [sp, #512]
+ add r11, sp, #472
+ ldr r6, [sp, #496]
+ ldr r7, [sp, #492]
+ ldr r5, [sp, #488]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #508]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #504]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #500]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [r0, #24]
+ add r0, sp, #424
+ bl .LmulPv320x32(PLT)
+ adds r0, r4, r8
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #424
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #464]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #460]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #448
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r7, r0, r1
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldm r11, {r4, r5, r11}
+ adc r8, r0, r1
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r6, [sp, #64] @ 4-byte Reload
+ adds r6, r6, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r7, r4
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r8, r10
+ str r0, [sp, #32] @ 4-byte Spill
+ adc r0, r9, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #376
+ bl .LmulPv320x32(PLT)
+ ldr r0, [sp, #416]
+ add r11, sp, #376
+ ldr r5, [sp, #400]
+ ldr r7, [sp, #396]
+ ldr r4, [sp, #392]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #412]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #408]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #404]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [r0, #28]
+ add r0, sp, #328
+ bl .LmulPv320x32(PLT)
+ adds r0, r6, r8
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #332
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #368]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #364]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #360]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #328]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #356]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r7, r0, r1
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r8, r0, r1
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r6, [sp, #64] @ 4-byte Reload
+ adds r4, r6, r4
+ ldr r6, [sp, #60] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r7, lr
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r8, r10
+ str r0, [sp, #32] @ 4-byte Spill
+ adc r0, r9, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r4, r0
+ add r0, sp, #280
+ bl .LmulPv320x32(PLT)
+ ldr r0, [sp, #320]
+ add r11, sp, #280
+ ldr r6, [sp, #304]
+ ldr r7, [sp, #300]
+ ldr r5, [sp, #296]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #316]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #312]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #308]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [r0, #32]
+ add r0, sp, #232
+ bl .LmulPv320x32(PLT)
+ adds r0, r4, r8
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #232
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #272]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #268]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #256
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r7, r0, r1
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldm r11, {r4, r5, r11}
+ adc r8, r0, r1
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r6, [sp, #64] @ 4-byte Reload
+ adds r6, r6, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r7, r4
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r8, r10
+ str r0, [sp, #32] @ 4-byte Spill
+ adc r0, r9, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #184
+ bl .LmulPv320x32(PLT)
+ ldr r0, [sp, #224]
+ add r11, sp, #184
+ ldr r5, [sp, #208]
+ ldr r7, [sp, #204]
+ ldr r4, [sp, #200]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #220]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #216]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #212]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [r0, #36]
+ add r0, sp, #136
+ bl .LmulPv320x32(PLT)
+ adds r0, r6, r8
+ ldr r1, [sp, #60] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ ldr lr, [sp, #140]
+ ldr r6, [sp, #144]
+ add r8, sp, #152
+ ldr r12, [sp, #148]
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ adcs r9, r1, r10
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r10, r1, r11
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r4
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r1, r7
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r1, r5
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adc r1, r1, r2
+ ldr r2, [sp, #136]
+ str r1, [sp, #48] @ 4-byte Spill
+ adds r4, r0, r2
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r9, r9, lr
+ adcs r11, r10, r6
+ mul r1, r4, r0
+ str r1, [sp, #44] @ 4-byte Spill
+ ldm r8, {r0, r1, r2, r3, r5, r7, r8}
+ ldr r6, [sp, #76] @ 4-byte Reload
+ adcs r10, r6, r12
+ ldr r6, [sp, #72] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ mov r1, r5
+ adcs r0, r0, r7
+ str r0, [sp, #72] @ 4-byte Spill
+ add r0, sp, #88
+ adc r8, r8, #0
+ bl .LmulPv320x32(PLT)
+ add r3, sp, #88
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r4, r0
+ adcs r7, r9, r1
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r9, r11, r2
+ ldr r2, [sp, #104]
+ str r7, [sp, #48] @ 4-byte Spill
+ adcs lr, r10, r3
+ str lr, [sp, #52] @ 4-byte Spill
+ adcs r6, r0, r2
+ ldr r2, [sp, #108]
+ ldr r0, [sp, #80] @ 4-byte Reload
+ str r6, [sp, #56] @ 4-byte Spill
+ adcs r0, r0, r2
+ ldr r2, [sp, #112]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r4, r0, r2
+ ldr r2, [sp, #116]
+ ldr r0, [sp, #64] @ 4-byte Reload
+ str r4, [sp, #60] @ 4-byte Spill
+ adcs r12, r0, r2
+ ldr r2, [sp, #120]
+ ldr r0, [sp, #76] @ 4-byte Reload
+ str r12, [sp, #64] @ 4-byte Spill
+ adcs r0, r0, r2
+ ldr r2, [sp, #124]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r11, r0, r2
+ ldr r2, [sp, #128]
+ mov r0, r5
+ str r11, [sp, #72] @ 4-byte Spill
+ adc r1, r8, r2
+ str r1, [sp, #44] @ 4-byte Spill
+ ldmib r0, {r2, r8}
+ ldr r5, [r0, #16]
+ ldr r10, [r0]
+ ldr r3, [r0, #12]
+ str r5, [sp, #28] @ 4-byte Spill
+ ldr r5, [r0, #20]
+ subs r10, r7, r10
+ str r5, [sp, #32] @ 4-byte Spill
+ ldr r5, [r0, #24]
+ str r5, [sp, #36] @ 4-byte Spill
+ ldr r5, [r0, #28]
+ str r5, [sp, #40] @ 4-byte Spill
+ mov r5, r0
+ sbcs r0, r9, r2
+ sbcs r2, lr, r8
+ ldr r8, [r5, #32]
+ sbcs r7, r6, r3
+ ldr r3, [r5, #36]
+ ldr r6, [sp, #80] @ 4-byte Reload
+ ldr r5, [sp, #76] @ 4-byte Reload
+ str r3, [sp, #84] @ 4-byte Spill
+ ldr r3, [sp, #28] @ 4-byte Reload
+ sbcs r6, r6, r3
+ ldr r3, [sp, #32] @ 4-byte Reload
+ sbcs lr, r4, r3
+ ldr r3, [sp, #36] @ 4-byte Reload
+ sbcs r4, r12, r3
+ ldr r3, [sp, #40] @ 4-byte Reload
+ sbcs r12, r5, r3
+ ldr r3, [sp, #84] @ 4-byte Reload
+ ldr r5, [sp, #48] @ 4-byte Reload
+ sbcs r11, r11, r8
+ ldr r8, [sp, #68] @ 4-byte Reload
+ sbc r3, r1, r3
+ asr r1, r3, #31
+ cmp r1, #0
+ movlt r10, r5
+ movlt r0, r9
+ str r10, [r8]
+ str r0, [r8, #4]
+ ldr r0, [sp, #52] @ 4-byte Reload
+ movlt r2, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ cmp r1, #0
+ str r2, [r8, #8]
+ movlt r7, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ str r7, [r8, #12]
+ movlt r6, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ str r6, [r8, #16]
+ movlt lr, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ cmp r1, #0
+ str lr, [r8, #20]
+ movlt r4, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ str r4, [r8, #24]
+ movlt r12, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ str r12, [r8, #28]
+ movlt r11, r0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ cmp r1, #0
+ str r11, [r8, #32]
+ movlt r3, r0
+ str r3, [r8, #36]
+ add sp, sp, #28
+ add sp, sp, #1024
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end147:
+ .size mcl_fp_montNF10L, .Lfunc_end147-mcl_fp_montNF10L
+ .cantunwind
+ .fnend
+
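+@ mcl_fp_montRed10L: Montgomery reduction to 10 limbs of 32 bits. (Description
+@ inferred from the symbol name and the code below, not taken from the upstream
+@ sources.) r1 points at the wide input, whose limbs up to offset #76 are loaded,
+@ and r2 at the modulus, with the Montgomery constant stored at [r2, #-4]; each
+@ round multiplies the running value's low limb by that constant and adds the
+@ .LmulPv320x32 result so one low limb is cleared per iteration.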
+ .globl mcl_fp_montRed10L
+ .align 2
+ .type mcl_fp_montRed10L,%function
+mcl_fp_montRed10L: @ @mcl_fp_montRed10L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #612
+ sub sp, sp, #612
+ mov r5, r2
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r2, [r1, #4]
+ ldr r9, [r1]
+ ldr r11, [r1, #16]
+ ldr r0, [r5]
+ str r2, [sp, #60] @ 4-byte Spill
+ ldr r2, [r1, #8]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [r5, #4]
+ str r2, [sp, #56] @ 4-byte Spill
+ ldr r2, [r1, #12]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [r5, #8]
+ str r2, [sp, #52] @ 4-byte Spill
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [r5, #12]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [r5, #16]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [r5, #20]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [r5, #24]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [r5, #-4]
+ str r0, [sp, #124] @ 4-byte Spill
+ mul r2, r9, r0
+ ldr r0, [r5, #28]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [r5, #32]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [r5, #36]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [r1, #64]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [r1, #68]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [r1, #72]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r1, #76]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r1, #32]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [r1, #36]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r1, #40]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r1, #48]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r1, #52]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r1, #56]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r1, #60]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r1, #44]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [r1, #28]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [r1, #24]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [r1, #20]
+ mov r1, r5
+ str r0, [sp, #8] @ 4-byte Spill
+ add r0, sp, #560
+ bl .LmulPv320x32(PLT)
+ add lr, sp, #584
+ ldr r10, [sp, #600]
+ ldr r8, [sp, #596]
+ add r7, sp, #564
+ ldm lr, {r6, r12, lr}
+ ldr r4, [sp, #560]
+ ldm r7, {r0, r1, r2, r3, r7}
+ adds r4, r9, r4
+ ldr r4, [sp, #60] @ 4-byte Reload
+ adcs r4, r4, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r9, r0, r1
+ ldr r0, [sp, #52] @ 4-byte Reload
+ mov r1, r5
+ adcs r0, r0, r2
+ str r0, [sp, #60] @ 4-byte Spill
+ adcs r0, r11, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ mul r2, r4, r0
+ add r0, sp, #512
+ bl .LmulPv320x32(PLT)
+ add r6, sp, #512
+ ldr r12, [sp, #552]
+ ldr lr, [sp, #548]
+ ldr r2, [sp, #544]
+ ldr r10, [sp, #540]
+ ldr r11, [sp, #536]
+ ldr r7, [sp, #532]
+ ldr r8, [sp, #528]
+ ldm r6, {r1, r3, r6}
+ ldr r0, [sp, #524]
+ adds r1, r4, r1
+ ldr r4, [sp, #124] @ 4-byte Reload
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r9, r9, r3
+ adcs r1, r1, r6
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r9, r4
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #8] @ 4-byte Spill
+ add r0, sp, #464
+ bl .LmulPv320x32(PLT)
+ ldr r1, [sp, #464]
+ ldr r0, [sp, #504]
+ add r12, sp, #468
+ ldr r10, [sp, #500]
+ ldr r8, [sp, #496]
+ ldr lr, [sp, #492]
+ ldr r6, [sp, #488]
+ ldr r7, [sp, #484]
+ adds r1, r9, r1
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r12, {r0, r2, r3, r12}
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r11, r1, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ ldr r1, [sp, #4] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ add r0, sp, #416
+ bl .LmulPv320x32(PLT)
+ add r7, sp, #416
+ ldr r12, [sp, #456]
+ ldr lr, [sp, #452]
+ ldr r2, [sp, #448]
+ ldr r3, [sp, #444]
+ add r10, sp, #428
+ ldm r7, {r1, r6, r7}
+ ldm r10, {r0, r8, r9, r10}
+ adds r1, r11, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r11, r1, r6
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, r7
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ add r0, sp, #368
+ bl .LmulPv320x32(PLT)
+ add r10, sp, #400
+ add r12, sp, #372
+ ldm r10, {r8, r9, r10}
+ ldr r1, [sp, #368]
+ ldr lr, [sp, #396]
+ ldr r6, [sp, #392]
+ ldr r7, [sp, #388]
+ ldm r12, {r0, r2, r3, r12}
+ adds r1, r11, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r11, r1, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ mov r1, r5
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #320
+ bl .LmulPv320x32(PLT)
+ add r7, sp, #320
+ ldr r12, [sp, #360]
+ ldr lr, [sp, #356]
+ ldr r2, [sp, #352]
+ ldr r3, [sp, #348]
+ add r10, sp, #332
+ ldm r7, {r1, r6, r7}
+ ldm r10, {r0, r8, r9, r10}
+ adds r1, r11, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r11, r1, r6
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, r7
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #272
+ bl .LmulPv320x32(PLT)
+ add r10, sp, #304
+ add r12, sp, #276
+ ldm r10, {r8, r9, r10}
+ ldr r1, [sp, #272]
+ ldr lr, [sp, #300]
+ ldr r6, [sp, #296]
+ ldr r7, [sp, #292]
+ ldm r12, {r0, r2, r3, r12}
+ adds r1, r11, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r11, r1, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ mov r1, r5
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #224
+ bl .LmulPv320x32(PLT)
+ add r10, sp, #240
+ add r6, sp, #224
+ ldr r12, [sp, #264]
+ ldr lr, [sp, #260]
+ ldr r8, [sp, #256]
+ ldr r9, [sp, #252]
+ ldm r10, {r0, r7, r10}
+ ldm r6, {r1, r2, r3, r6}
+ adds r1, r11, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r4, r1, r2
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r11, r1, r3
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r6, r1, r6
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ mul r2, r4, r7
+ adcs r0, r0, r10
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r9, r0, r9
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ add r0, sp, #176
+ bl .LmulPv320x32(PLT)
+ add r12, sp, #176
+ ldm r12, {r0, r1, r3, r12}
+ ldr lr, [sp, #216]
+ adds r0, r4, r0
+ ldr r4, [sp, #76] @ 4-byte Reload
+ adcs r10, r11, r1
+ ldr r1, [sp, #192]
+ adcs r0, r6, r3
+ mul r2, r10, r7
+ ldr r7, [sp, #200]
+ ldr r6, [sp, #204]
+ ldr r3, [sp, #208]
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r12
+ ldr r12, [sp, #212]
+ str r0, [sp, #44] @ 4-byte Spill
+ adcs r8, r4, r1
+ ldr r0, [sp, #196]
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r11, r1, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ mov r1, r5
+ adcs r9, r9, r7
+ adcs r6, r0, r6
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ add r0, sp, #128
+ bl .LmulPv320x32(PLT)
+ add r3, sp, #128
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r10, r0
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r10, r0, r1
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r1, r0, r2
+ ldr r0, [sp, #144]
+ adcs r2, r8, r3
+ ldr r3, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #36] @ 4-byte Spill
+ str r2, [sp, #44] @ 4-byte Spill
+ adcs r7, r11, r0
+ ldr r0, [sp, #148]
+ str r7, [sp, #48] @ 4-byte Spill
+ adcs r12, r9, r0
+ ldr r0, [sp, #152]
+ str r12, [sp, #52] @ 4-byte Spill
+ adcs r4, r6, r0
+ ldr r0, [sp, #156]
+ str r4, [sp, #56] @ 4-byte Spill
+ adcs r5, r3, r0
+ ldr r0, [sp, #160]
+ ldr r3, [sp, #68] @ 4-byte Reload
+ str r5, [sp, #60] @ 4-byte Spill
+ adcs r6, r3, r0
+ ldr r0, [sp, #164]
+ ldr r3, [sp, #64] @ 4-byte Reload
+ str r6, [sp, #68] @ 4-byte Spill
+ adcs r8, r3, r0
+ ldr r0, [sp, #168]
+ ldr r3, [sp, #76] @ 4-byte Reload
+ str r8, [sp, #124] @ 4-byte Spill
+ adcs lr, r3, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adc r11, r0, #0
+ ldr r0, [sp, #116] @ 4-byte Reload
+ subs r3, r10, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ sbcs r0, r1, r0
+ ldr r1, [sp, #108] @ 4-byte Reload
+ sbcs r1, r2, r1
+ ldr r2, [sp, #92] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #96] @ 4-byte Reload
+ sbcs r12, r12, r7
+ ldr r7, [sp, #100] @ 4-byte Reload
+ sbcs r7, r4, r7
+ ldr r4, [sp, #104] @ 4-byte Reload
+ sbcs r4, r5, r4
+ ldr r5, [sp, #80] @ 4-byte Reload
+ sbcs r5, r6, r5
+ ldr r6, [sp, #84] @ 4-byte Reload
+ sbcs r9, r8, r6
+ ldr r6, [sp, #88] @ 4-byte Reload
+ sbcs r8, lr, r6
+ sbc r6, r11, #0
+ ands r11, r6, #1
+ ldr r6, [sp, #120] @ 4-byte Reload
+ movne r3, r10
+ str r3, [r6]
+ ldr r3, [sp, #36] @ 4-byte Reload
+ movne r0, r3
+ str r0, [r6, #4]
+ ldr r0, [sp, #44] @ 4-byte Reload
+ movne r1, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ cmp r11, #0
+ str r1, [r6, #8]
+ movne r2, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ str r2, [r6, #12]
+ movne r12, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ str r12, [r6, #16]
+ movne r7, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ cmp r11, #0
+ str r7, [r6, #20]
+ movne r4, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ str r4, [r6, #24]
+ movne r5, r0
+ ldr r0, [sp, #124] @ 4-byte Reload
+ str r5, [r6, #28]
+ movne r9, r0
+ cmp r11, #0
+ movne r8, lr
+ str r9, [r6, #32]
+ str r8, [r6, #36]
+ add sp, sp, #612
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end148:
+ .size mcl_fp_montRed10L, .Lfunc_end148-mcl_fp_montRed10L
+ .cantunwind
+ .fnend
+
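+@ note: mcl_fp_addPre10L computes z (r0) = x (r1) + y (r2) over ten 32-bit limbs
+@ with no modular reduction; the final carry is returned in r0 (adc at the end).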
+ .globl mcl_fp_addPre10L
+ .align 2
+ .type mcl_fp_addPre10L,%function
+mcl_fp_addPre10L: @ @mcl_fp_addPre10L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #16
+ sub sp, sp, #16
+ ldm r1, {r3, r8, lr}
+ ldr r9, [r1, #12]
+ ldmib r2, {r5, r6, r7, r10}
+ ldr r4, [r2, #20]
+ ldr r11, [r2]
+ str r4, [sp] @ 4-byte Spill
+ ldr r4, [r2, #24]
+ adds r12, r11, r3
+ ldr r11, [r2, #32]
+ adcs r5, r5, r8
+ ldr r8, [r1, #36]
+ adcs r6, r6, lr
+ add lr, r1, #16
+ adcs r7, r7, r9
+ str r4, [sp, #4] @ 4-byte Spill
+ ldr r4, [r2, #28]
+ ldr r2, [r2, #36]
+ str r4, [sp, #12] @ 4-byte Spill
+ str r2, [sp, #8] @ 4-byte Spill
+ ldm lr, {r1, r2, r3, r4, lr}
+ str r12, [r0]
+ stmib r0, {r5, r6}
+ str r7, [r0, #12]
+ ldr r7, [sp] @ 4-byte Reload
+ adcs r1, r10, r1
+ str r1, [r0, #16]
+ ldr r1, [sp, #4] @ 4-byte Reload
+ adcs r2, r7, r2
+ str r2, [r0, #20]
+ ldr r2, [sp, #8] @ 4-byte Reload
+ adcs r1, r1, r3
+ str r1, [r0, #24]
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r1, r1, r4
+ str r1, [r0, #28]
+ adcs r1, r11, lr
+ adcs r2, r2, r8
+ str r1, [r0, #32]
+ str r2, [r0, #36]
+ mov r0, #0
+ adc r0, r0, #0
+ add sp, sp, #16
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end149:
+ .size mcl_fp_addPre10L, .Lfunc_end149-mcl_fp_addPre10L
+ .cantunwind
+ .fnend
+
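+@ note: mcl_fp_subPre10L computes z (r0) = x (r1) - y (r2) over ten limbs with no
+@ reduction; the borrow (0 or 1) is returned in r0.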
+ .globl mcl_fp_subPre10L
+ .align 2
+ .type mcl_fp_subPre10L,%function
+mcl_fp_subPre10L: @ @mcl_fp_subPre10L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #24
+ sub sp, sp, #24
+ ldr r3, [r2, #4]
+ ldr r7, [r2]
+ ldr r11, [r1]
+ ldr r6, [r1, #4]
+ ldr r9, [r2, #8]
+ ldr r5, [r1, #8]
+ ldr lr, [r2, #12]
+ ldr r4, [r1, #12]
+ ldr r12, [r1, #16]
+ ldr r8, [r1, #20]
+ ldr r10, [r1, #24]
+ str r3, [sp] @ 4-byte Spill
+ ldr r3, [r2, #16]
+ subs r7, r11, r7
+ ldr r11, [r2, #32]
+ str r7, [r0]
+ str r3, [sp, #8] @ 4-byte Spill
+ ldr r3, [r2, #20]
+ str r3, [sp, #12] @ 4-byte Spill
+ ldr r3, [r2, #24]
+ str r3, [sp, #16] @ 4-byte Spill
+ ldr r3, [r2, #28]
+ ldr r2, [r2, #36]
+ str r3, [sp, #20] @ 4-byte Spill
+ ldr r3, [r1, #28]
+ str r3, [sp, #4] @ 4-byte Spill
+ ldr r3, [sp] @ 4-byte Reload
+ sbcs r6, r6, r3
+ sbcs r5, r5, r9
+ str r6, [r0, #4]
+ str r5, [r0, #8]
+ ldr r5, [sp, #8] @ 4-byte Reload
+ sbcs r4, r4, lr
+ ldr lr, [r1, #32]
+ ldr r1, [r1, #36]
+ str r4, [r0, #12]
+ ldr r4, [sp, #12] @ 4-byte Reload
+ sbcs r3, r12, r5
+ str r3, [r0, #16]
+ ldr r3, [sp, #16] @ 4-byte Reload
+ sbcs r7, r8, r4
+ str r7, [r0, #20]
+ ldr r7, [sp, #4] @ 4-byte Reload
+ sbcs r3, r10, r3
+ str r3, [r0, #24]
+ ldr r3, [sp, #20] @ 4-byte Reload
+ sbcs r3, r7, r3
+ str r3, [r0, #28]
+ sbcs r3, lr, r11
+ sbcs r1, r1, r2
+ str r3, [r0, #32]
+ str r1, [r0, #36]
+ mov r0, #0
+ sbc r0, r0, #0
+ and r0, r0, #1
+ add sp, sp, #24
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end150:
+ .size mcl_fp_subPre10L, .Lfunc_end150-mcl_fp_subPre10L
+ .cantunwind
+ .fnend
+
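+@ note: mcl_fp_shr1_10L computes z (r0) = x (r1) >> 1 for a 10-limb value, carrying
+@ each low bit down through the limbs via the lsrs/rrx pairs below.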
+ .globl mcl_fp_shr1_10L
+ .align 2
+ .type mcl_fp_shr1_10L,%function
+mcl_fp_shr1_10L: @ @mcl_fp_shr1_10L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r11, lr}
+ ldr lr, [r1, #32]
+ ldr r12, [r1, #36]
+ ldr r8, [r1, #28]
+ ldm r1, {r1, r2, r3, r4, r5, r6, r9}
+ lsrs r7, r2, #1
+ rrx r1, r1
+ str r1, [r0]
+ lsr r1, r2, #1
+ lsr r2, r12, #1
+ orr r1, r1, r3, lsl #31
+ str r1, [r0, #4]
+ lsrs r1, r4, #1
+ rrx r1, r3
+ str r1, [r0, #8]
+ lsr r1, r4, #1
+ orr r1, r1, r5, lsl #31
+ str r1, [r0, #12]
+ lsrs r1, r6, #1
+ rrx r1, r5
+ str r1, [r0, #16]
+ lsr r1, r6, #1
+ orr r1, r1, r9, lsl #31
+ str r1, [r0, #20]
+ lsrs r1, r8, #1
+ rrx r1, r9
+ str r1, [r0, #24]
+ lsr r1, r8, #1
+ orr r1, r1, lr, lsl #31
+ str r1, [r0, #28]
+ lsrs r1, r12, #1
+ rrx r1, lr
+ str r1, [r0, #32]
+ str r2, [r0, #36]
+ pop {r4, r5, r6, r7, r8, r9, r11, lr}
+ mov pc, lr
+.Lfunc_end151:
+ .size mcl_fp_shr1_10L, .Lfunc_end151-mcl_fp_shr1_10L
+ .cantunwind
+ .fnend
+
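+@ note: mcl_fp_add10L computes z (r0) = x (r1) + y (r2) mod p (r3): the raw sum is
+@ stored first, p is subtracted, and the reduced value overwrites z only in the
+@ no-borrow (%nocarry) case.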
+ .globl mcl_fp_add10L
+ .align 2
+ .type mcl_fp_add10L,%function
+mcl_fp_add10L: @ @mcl_fp_add10L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #20
+ sub sp, sp, #20
+ ldm r1, {r12, lr}
+ ldr r5, [r2]
+ ldr r10, [r1, #8]
+ ldr r8, [r1, #12]
+ ldmib r2, {r4, r6, r7}
+ adds r9, r5, r12
+ ldr r5, [r1, #24]
+ adcs lr, r4, lr
+ ldr r4, [r1, #20]
+ adcs r6, r6, r10
+ ldr r10, [r1, #36]
+ str lr, [sp] @ 4-byte Spill
+ str r6, [sp, #12] @ 4-byte Spill
+ adcs r12, r7, r8
+ ldr r6, [r1, #16]
+ ldr r7, [r2, #16]
+ adcs r6, r7, r6
+ ldr r7, [r2, #20]
+ str r6, [sp, #4] @ 4-byte Spill
+ adcs r8, r7, r4
+ ldr r4, [r2, #24]
+ adcs r6, r4, r5
+ ldr r4, [r1, #28]
+ ldr r5, [r2, #28]
+ str r6, [sp, #8] @ 4-byte Spill
+ adcs r7, r5, r4
+ ldr r5, [r1, #32]
+ ldr r1, [r2, #32]
+ ldr r2, [r2, #36]
+ stm r0, {r9, lr}
+ mov lr, r12
+ ldr r4, [sp, #4] @ 4-byte Reload
+ adcs r11, r1, r5
+ add r1, r0, #24
+ adcs r10, r2, r10
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r2, [r0, #8]
+ str lr, [r0, #12]
+ str r4, [r0, #16]
+ str r8, [r0, #20]
+ stm r1, {r6, r7, r11}
+ mov r1, #0
+ str r10, [r0, #36]
+ adc r1, r1, #0
+ str r1, [sp, #16] @ 4-byte Spill
+ ldm r3, {r1, r6, r12}
+ ldr r5, [r3, #12]
+ subs r9, r9, r1
+ ldr r1, [sp] @ 4-byte Reload
+ sbcs r6, r1, r6
+ sbcs r1, r2, r12
+ ldr r2, [sp, #8] @ 4-byte Reload
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [r3, #16]
+ sbcs r12, lr, r5
+ sbcs lr, r4, r1
+ ldr r1, [r3, #20]
+ ldr r4, [sp, #16] @ 4-byte Reload
+ sbcs r8, r8, r1
+ ldr r1, [r3, #24]
+ sbcs r5, r2, r1
+ ldr r2, [r3, #28]
+ sbcs r1, r7, r2
+ ldr r2, [r3, #32]
+ ldr r7, [r3, #36]
+ sbcs r3, r11, r2
+ sbcs r2, r10, r7
+ sbc r4, r4, #0
+ tst r4, #1
+ bne .LBB152_2
+@ BB#1: @ %nocarry
+ ldr r4, [sp, #12] @ 4-byte Reload
+ str r9, [r0]
+ str r6, [r0, #4]
+ str r4, [r0, #8]
+ str r12, [r0, #12]
+ str lr, [r0, #16]
+ str r8, [r0, #20]
+ str r5, [r0, #24]
+ str r1, [r0, #28]
+ str r3, [r0, #32]
+ str r2, [r0, #36]
+.LBB152_2: @ %carry
+ add sp, sp, #20
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end152:
+ .size mcl_fp_add10L, .Lfunc_end152-mcl_fp_add10L
+ .cantunwind
+ .fnend
+
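+@ note: mcl_fp_addNF10L is the branch-free addition variant: it selects between
+@ x + y and x + y - p with conditional moves (movlt) on the sign of the trial
+@ subtraction instead of the store-then-overwrite scheme of mcl_fp_add10L.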
+ .globl mcl_fp_addNF10L
+ .align 2
+ .type mcl_fp_addNF10L,%function
+mcl_fp_addNF10L: @ @mcl_fp_addNF10L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #56
+ sub sp, sp, #56
+ ldr r9, [r1]
+ ldmib r1, {r8, lr}
+ ldr r5, [r2]
+ ldr r12, [r1, #12]
+ ldmib r2, {r4, r6, r7}
+ ldr r10, [r1, #24]
+ adds r9, r5, r9
+ ldr r5, [r1, #16]
+ adcs r11, r4, r8
+ ldr r8, [r1, #20]
+ str r9, [sp, #16] @ 4-byte Spill
+ adcs r6, r6, lr
+ str r11, [sp, #20] @ 4-byte Spill
+ str r6, [sp, #32] @ 4-byte Spill
+ adcs r6, r7, r12
+ ldr r7, [r2, #16]
+ str r6, [sp, #24] @ 4-byte Spill
+ adcs r4, r7, r5
+ ldr r7, [r2, #20]
+ ldr r5, [r2, #28]
+ str r4, [sp, #28] @ 4-byte Spill
+ adcs r7, r7, r8
+ str r7, [sp, #52] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ adcs r7, r7, r10
+ str r7, [sp, #48] @ 4-byte Spill
+ ldr r7, [r1, #28]
+ adcs r7, r5, r7
+ ldr r5, [r1, #32]
+ ldr r1, [r1, #36]
+ str r7, [sp, #44] @ 4-byte Spill
+ ldr r7, [r2, #32]
+ ldr r2, [r2, #36]
+ adcs lr, r7, r5
+ adc r1, r2, r1
+ str lr, [sp, #36] @ 4-byte Spill
+ str r1, [sp, #40] @ 4-byte Spill
+ ldmib r3, {r1, r2, r12}
+ ldr r7, [r3, #20]
+ ldr r8, [r3]
+ ldr r10, [sp, #32] @ 4-byte Reload
+ ldr r5, [r3, #16]
+ str r7, [sp, #4] @ 4-byte Spill
+ ldr r7, [r3, #24]
+ subs r8, r9, r8
+ sbcs r1, r11, r1
+ ldr r11, [r3, #32]
+ sbcs r2, r10, r2
+ sbcs r12, r6, r12
+ str r7, [sp, #8] @ 4-byte Spill
+ ldr r7, [r3, #28]
+ ldr r3, [r3, #36]
+ sbcs r6, r4, r5
+ ldr r4, [sp, #4] @ 4-byte Reload
+ ldr r5, [sp, #8] @ 4-byte Reload
+ str r3, [sp] @ 4-byte Spill
+ ldr r3, [sp, #52] @ 4-byte Reload
+ str r7, [sp, #12] @ 4-byte Spill
+ ldr r7, [sp, #12] @ 4-byte Reload
+ sbcs r3, r3, r4
+ ldr r4, [sp, #48] @ 4-byte Reload
+ sbcs r4, r4, r5
+ ldr r5, [sp, #44] @ 4-byte Reload
+ sbcs r9, r5, r7
+ ldr r7, [sp, #40] @ 4-byte Reload
+ ldr r5, [sp] @ 4-byte Reload
+ sbcs r11, lr, r11
+ sbc lr, r7, r5
+ ldr r5, [sp, #16] @ 4-byte Reload
+ asr r7, lr, #31
+ cmp r7, #0
+ movlt r2, r10
+ movlt r8, r5
+ ldr r5, [sp, #20] @ 4-byte Reload
+ str r8, [r0]
+ movlt r1, r5
+ cmp r7, #0
+ str r1, [r0, #4]
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r2, [r0, #8]
+ movlt r12, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r12, [r0, #12]
+ movlt r6, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r6, [r0, #16]
+ movlt r3, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ cmp r7, #0
+ str r3, [r0, #20]
+ movlt r4, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r4, [r0, #24]
+ movlt r9, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r9, [r0, #28]
+ movlt r11, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ cmp r7, #0
+ str r11, [r0, #32]
+ movlt lr, r1
+ str lr, [r0, #36]
+ add sp, sp, #56
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end153:
+ .size mcl_fp_addNF10L, .Lfunc_end153-mcl_fp_addNF10L
+ .cantunwind
+ .fnend
+
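+@ note: mcl_fp_sub10L computes z (r0) = x (r1) - y (r2) mod p (r3): the raw
+@ difference is stored, and p is added back in the %carry block when the
+@ subtraction borrows.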
+ .globl mcl_fp_sub10L
+ .align 2
+ .type mcl_fp_sub10L,%function
+mcl_fp_sub10L: @ @mcl_fp_sub10L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #36
+ sub sp, sp, #36
+ ldm r2, {r12, lr}
+ ldr r8, [r2, #8]
+ ldr r10, [r2, #12]
+ ldm r1, {r4, r5, r6, r7}
+ subs r4, r4, r12
+ ldr r12, [r1, #36]
+ sbcs r9, r5, lr
+ ldr r5, [r2, #20]
+ str r4, [sp, #32] @ 4-byte Spill
+ ldr r4, [r2, #24]
+ sbcs lr, r6, r8
+ ldr r6, [r2, #16]
+ sbcs r8, r7, r10
+ ldr r7, [r1, #16]
+ sbcs r10, r7, r6
+ ldr r6, [r1, #20]
+ sbcs r7, r6, r5
+ ldr r5, [r1, #24]
+ ldr r6, [r1, #32]
+ str r7, [sp, #28] @ 4-byte Spill
+ sbcs r11, r5, r4
+ ldr r4, [r2, #28]
+ ldr r5, [r1, #28]
+ sbcs r5, r5, r4
+ ldr r4, [r2, #32]
+ ldr r2, [r2, #36]
+ sbcs r1, r6, r4
+ mov r6, #0
+ sbcs r2, r12, r2
+ ldr r12, [sp, #32] @ 4-byte Reload
+ sbc r6, r6, #0
+ tst r6, #1
+ str r12, [r0]
+ stmib r0, {r9, lr}
+ str r8, [r0, #12]
+ str r10, [r0, #16]
+ str r7, [r0, #20]
+ mov r7, r11
+ str r7, [r0, #24]
+ str r5, [r0, #28]
+ str r1, [r0, #32]
+ str r2, [r0, #36]
+ beq .LBB154_2
+@ BB#1: @ %carry
+ ldr r4, [r3, #32]
+ str r4, [sp, #20] @ 4-byte Spill
+ ldr r4, [r3, #36]
+ str r4, [sp, #24] @ 4-byte Spill
+ ldmib r3, {r4, r11}
+ ldr r6, [r3, #12]
+ str r6, [sp] @ 4-byte Spill
+ ldr r6, [r3, #16]
+ str r6, [sp, #4] @ 4-byte Spill
+ ldr r6, [r3, #20]
+ str r6, [sp, #8] @ 4-byte Spill
+ ldr r6, [r3, #24]
+ str r6, [sp, #12] @ 4-byte Spill
+ ldr r6, [r3, #28]
+ ldr r3, [r3]
+ adds r3, r3, r12
+ str r6, [sp, #16] @ 4-byte Spill
+ adcs r4, r4, r9
+ stm r0, {r3, r4}
+ adcs r3, r11, lr
+ str r3, [r0, #8]
+ ldr r3, [sp] @ 4-byte Reload
+ ldr r6, [sp, #8] @ 4-byte Reload
+ adcs r3, r3, r8
+ str r3, [r0, #12]
+ ldr r3, [sp, #4] @ 4-byte Reload
+ adcs r3, r3, r10
+ str r3, [r0, #16]
+ ldr r3, [sp, #28] @ 4-byte Reload
+ adcs r3, r6, r3
+ str r3, [r0, #20]
+ ldr r3, [sp, #12] @ 4-byte Reload
+ adcs r3, r3, r7
+ str r3, [r0, #24]
+ ldr r3, [sp, #16] @ 4-byte Reload
+ adcs r3, r3, r5
+ str r3, [r0, #28]
+ ldr r3, [sp, #20] @ 4-byte Reload
+ adcs r1, r3, r1
+ ldr r3, [sp, #24] @ 4-byte Reload
+ str r1, [r0, #32]
+ adc r2, r3, r2
+ str r2, [r0, #36]
+.LBB154_2: @ %nocarry
+ add sp, sp, #36
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end154:
+ .size mcl_fp_sub10L, .Lfunc_end154-mcl_fp_sub10L
+ .cantunwind
+ .fnend
+
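+@ note: mcl_fp_subNF10L is the branch-free subtraction variant: it selects between
+@ x - y and x - y + p with conditional moves (movge) on the sign of the difference.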
+ .globl mcl_fp_subNF10L
+ .align 2
+ .type mcl_fp_subNF10L,%function
+mcl_fp_subNF10L: @ @mcl_fp_subNF10L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #64
+ sub sp, sp, #64
+ mov r12, r0
+ ldr r0, [r2, #32]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [r2, #36]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [r1, #32]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r1, #36]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldm r2, {r4, r5}
+ ldr r0, [r2, #8]
+ ldr r7, [r2, #16]
+ ldr r8, [r2, #20]
+ ldr lr, [r1, #12]
+ ldr r6, [r1, #16]
+ ldr r11, [r1, #20]
+ ldr r9, [r1, #24]
+ ldr r10, [r1, #28]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r2, #12]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r2, #24]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r2, #28]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [r1, #8]
+ ldm r1, {r1, r2}
+ subs r1, r1, r4
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ sbcs r2, r2, r5
+ str r2, [sp, #16] @ 4-byte Spill
+ sbcs r4, r0, r1
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r4, [sp, #20] @ 4-byte Spill
+ sbcs r5, lr, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ sbcs r7, r6, r7
+ ldr r6, [sp, #44] @ 4-byte Reload
+ str r5, [sp, #28] @ 4-byte Spill
+ sbcs lr, r11, r8
+ str r7, [sp, #32] @ 4-byte Spill
+ str lr, [sp, #36] @ 4-byte Spill
+ sbcs r8, r9, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ str r8, [sp, #48] @ 4-byte Spill
+ sbcs r9, r10, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ str r9, [sp, #56] @ 4-byte Spill
+ sbcs r0, r1, r0
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ sbc r1, r6, r1
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [r3, #32]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [r3, #36]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldmib r3, {r1, r6}
+ ldr r11, [r3, #24]
+ ldr r10, [sp, #24] @ 4-byte Reload
+ str r6, [sp, #4] @ 4-byte Spill
+ ldr r6, [r3, #12]
+ str r6, [sp] @ 4-byte Spill
+ ldr r6, [r3, #16]
+ str r6, [sp, #8] @ 4-byte Spill
+ ldr r6, [r3, #20]
+ str r6, [sp, #12] @ 4-byte Spill
+ ldr r6, [r3, #28]
+ ldr r3, [r3]
+ adds r3, r10, r3
+ adcs r1, r2, r1
+ ldr r2, [sp, #4] @ 4-byte Reload
+ adcs r2, r4, r2
+ ldr r4, [sp] @ 4-byte Reload
+ adcs r4, r5, r4
+ ldr r5, [sp, #8] @ 4-byte Reload
+ adcs r5, r7, r5
+ ldr r7, [sp, #12] @ 4-byte Reload
+ adcs r7, lr, r7
+ adcs r11, r8, r11
+ adcs r8, r9, r6
+ ldr r6, [sp, #40] @ 4-byte Reload
+ adcs r9, r0, r6
+ ldr r0, [sp, #52] @ 4-byte Reload
+ ldr r6, [sp, #44] @ 4-byte Reload
+ asr lr, r0, #31
+ adc r6, r0, r6
+ cmp lr, #0
+ movge r3, r10
+ str r3, [r12]
+ ldr r3, [sp, #16] @ 4-byte Reload
+ movge r1, r3
+ str r1, [r12, #4]
+ ldr r1, [sp, #20] @ 4-byte Reload
+ movge r2, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ cmp lr, #0
+ str r2, [r12, #8]
+ movge r4, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r4, [r12, #12]
+ movge r5, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r5, [r12, #16]
+ movge r7, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ cmp lr, #0
+ str r7, [r12, #20]
+ movge r11, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r11, [r12, #24]
+ movge r8, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r8, [r12, #28]
+ movge r9, r1
+ cmp lr, #0
+ movge r6, r0
+ str r9, [r12, #32]
+ str r6, [r12, #36]
+ add sp, sp, #64
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end155:
+ .size mcl_fp_subNF10L, .Lfunc_end155-mcl_fp_subNF10L
+ .cantunwind
+ .fnend
+
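+@ note: mcl_fpDbl_add10L adds two 20-limb double-width values; the low 10 limbs are
+@ stored as-is and the high half is conditionally reduced by p (r3).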
+ .globl mcl_fpDbl_add10L
+ .align 2
+ .type mcl_fpDbl_add10L,%function
+mcl_fpDbl_add10L: @ @mcl_fpDbl_add10L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #104
+ sub sp, sp, #104
+ ldm r1, {r7, r9}
+ ldr r8, [r1, #8]
+ ldr r12, [r1, #12]
+ ldm r2, {r4, r5, r6, r10}
+ add lr, r1, #16
+ adds r7, r4, r7
+ ldr r4, [r2, #16]
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [r2, #64]
+ str r7, [sp, #88] @ 4-byte Spill
+ ldr r7, [r2, #68]
+ str r7, [sp, #100] @ 4-byte Spill
+ ldr r7, [r2, #72]
+ str r7, [sp, #96] @ 4-byte Spill
+ ldr r7, [r2, #76]
+ str r7, [sp, #92] @ 4-byte Spill
+ adcs r7, r5, r9
+ str r7, [sp, #28] @ 4-byte Spill
+ adcs r7, r6, r8
+ ldr r8, [r2, #20]
+ str r7, [sp, #24] @ 4-byte Spill
+ adcs r7, r10, r12
+ add r10, r1, #32
+ str r7, [sp, #32] @ 4-byte Spill
+ ldr r7, [r2, #32]
+ str r7, [sp, #60] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r7, [sp, #64] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ str r7, [sp, #68] @ 4-byte Spill
+ ldr r7, [r2, #48]
+ str r7, [sp, #72] @ 4-byte Spill
+ ldr r7, [r2, #52]
+ str r7, [sp, #76] @ 4-byte Spill
+ ldr r7, [r2, #56]
+ str r7, [sp, #80] @ 4-byte Spill
+ ldr r7, [r2, #60]
+ str r7, [sp, #84] @ 4-byte Spill
+ ldr r7, [r2, #44]
+ str r7, [sp, #56] @ 4-byte Spill
+ ldr r7, [r2, #28]
+ str r7, [sp, #20] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ ldr r2, [r1, #64]
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [r1, #68]
+ str r7, [sp] @ 4-byte Spill
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [r1, #72]
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r2, [r1, #76]
+ str r2, [sp, #52] @ 4-byte Spill
+ ldm r10, {r7, r9, r10}
+ ldr r2, [r1, #48]
+ ldr r5, [r1, #44]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r1, #52]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #56]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #16] @ 4-byte Spill
+ ldm lr, {r1, r2, r12, lr}
+ ldr r11, [sp, #36] @ 4-byte Reload
+ ldr r6, [sp, #28] @ 4-byte Reload
+ adcs r1, r4, r1
+ str r11, [r0]
+ str r6, [r0, #4]
+ ldr r6, [sp, #24] @ 4-byte Reload
+ ldr r4, [sp, #32] @ 4-byte Reload
+ adcs r2, r8, r2
+ str r6, [r0, #8]
+ str r4, [r0, #12]
+ str r1, [r0, #16]
+ ldr r1, [sp] @ 4-byte Reload
+ str r2, [r0, #20]
+ ldr r2, [sp, #20] @ 4-byte Reload
+ adcs r1, r1, r12
+ str r1, [r0, #24]
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r2, r2, lr
+ str r2, [r0, #28]
+ ldr r2, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r7
+ str r1, [r0, #32]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r2, r2, r9
+ str r2, [r0, #36]
+ ldr r2, [sp, #4] @ 4-byte Reload
+ adcs lr, r1, r10
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r4, r1, r5
+ ldr r1, [sp, #72] @ 4-byte Reload
+ str r4, [sp, #68] @ 4-byte Spill
+ adcs r12, r1, r2
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [sp, #8] @ 4-byte Reload
+ str r12, [sp, #72] @ 4-byte Spill
+ adcs r5, r1, r2
+ ldr r1, [sp, #80] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r5, [sp, #76] @ 4-byte Spill
+ adcs r7, r1, r2
+ ldr r1, [sp, #84] @ 4-byte Reload
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r7, [sp, #80] @ 4-byte Spill
+ adcs r9, r1, r2
+ ldr r1, [sp, #88] @ 4-byte Reload
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r9, [sp, #84] @ 4-byte Spill
+ adcs r10, r1, r2
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r10, [sp, #64] @ 4-byte Spill
+ adcs r1, r1, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [sp, #96] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r1, [sp, #96] @ 4-byte Spill
+ ldr r1, [sp, #92] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #92] @ 4-byte Spill
+ mov r1, #0
+ adc r1, r1, #0
+ str r1, [sp, #88] @ 4-byte Spill
+ ldmib r3, {r1, r2, r8}
+ ldr r6, [r3, #16]
+ ldr r11, [r3]
+ str r6, [sp, #48] @ 4-byte Spill
+ ldr r6, [r3, #20]
+ subs r11, lr, r11
+ sbcs r1, r4, r1
+ sbcs r2, r12, r2
+ sbcs r12, r5, r8
+ ldr r8, [r3, #32]
+ ldr r5, [r3, #36]
+ str r6, [sp, #52] @ 4-byte Spill
+ ldr r6, [r3, #24]
+ str r6, [sp, #56] @ 4-byte Spill
+ ldr r6, [r3, #28]
+ ldr r3, [sp, #48] @ 4-byte Reload
+ str r6, [sp, #60] @ 4-byte Spill
+ sbcs r6, r7, r3
+ ldr r3, [sp, #52] @ 4-byte Reload
+ ldr r4, [sp, #60] @ 4-byte Reload
+ sbcs r7, r9, r3
+ ldr r3, [sp, #56] @ 4-byte Reload
+ sbcs r9, r10, r3
+ ldr r3, [sp, #100] @ 4-byte Reload
+ sbcs r10, r3, r4
+ ldr r3, [sp, #96] @ 4-byte Reload
+ ldr r4, [sp, #68] @ 4-byte Reload
+ sbcs r8, r3, r8
+ ldr r3, [sp, #92] @ 4-byte Reload
+ sbcs r5, r3, r5
+ ldr r3, [sp, #88] @ 4-byte Reload
+ sbc r3, r3, #0
+ ands r3, r3, #1
+ movne r11, lr
+ movne r1, r4
+ str r11, [r0, #40]
+ str r1, [r0, #44]
+ ldr r1, [sp, #72] @ 4-byte Reload
+ movne r2, r1
+ ldr r1, [sp, #76] @ 4-byte Reload
+ cmp r3, #0
+ str r2, [r0, #48]
+ movne r12, r1
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r12, [r0, #52]
+ movne r6, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r6, [r0, #56]
+ movne r7, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ cmp r3, #0
+ str r7, [r0, #60]
+ movne r9, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r9, [r0, #64]
+ movne r10, r1
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r10, [r0, #68]
+ movne r8, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ cmp r3, #0
+ str r8, [r0, #72]
+ movne r5, r1
+ str r5, [r0, #76]
+ add sp, sp, #104
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end156:
+ .size mcl_fpDbl_add10L, .Lfunc_end156-mcl_fpDbl_add10L
+ .cantunwind
+ .fnend
+
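+@ note: mcl_fpDbl_sub10L subtracts two 20-limb double-width values; p (r3) is added
+@ back into the high half when the subtraction borrows.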
+ .globl mcl_fpDbl_sub10L
+ .align 2
+ .type mcl_fpDbl_sub10L,%function
+mcl_fpDbl_sub10L: @ @mcl_fpDbl_sub10L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #96
+ sub sp, sp, #96
+ ldr r7, [r2, #64]
+ str r7, [sp, #92] @ 4-byte Spill
+ ldr r7, [r2, #68]
+ str r7, [sp, #88] @ 4-byte Spill
+ ldr r7, [r2, #72]
+ str r7, [sp, #80] @ 4-byte Spill
+ ldr r7, [r2, #76]
+ str r7, [sp, #84] @ 4-byte Spill
+ ldr r7, [r2, #32]
+ str r7, [sp, #48] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r7, [sp, #56] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ str r7, [sp, #60] @ 4-byte Spill
+ ldr r7, [r2, #48]
+ str r7, [sp, #64] @ 4-byte Spill
+ ldr r7, [r2, #52]
+ str r7, [sp, #68] @ 4-byte Spill
+ ldr r7, [r2, #56]
+ str r7, [sp, #72] @ 4-byte Spill
+ ldr r7, [r2, #60]
+ str r7, [sp, #76] @ 4-byte Spill
+ ldr r7, [r2, #44]
+ str r7, [sp, #52] @ 4-byte Spill
+ ldm r2, {r6, r7, r8, r9}
+ ldm r1, {r12, lr}
+ ldr r4, [r1, #8]
+ ldr r10, [r2, #20]
+ ldr r5, [r1, #12]
+ subs r11, r12, r6
+ ldr r6, [r2, #28]
+ sbcs r7, lr, r7
+ add lr, r1, #16
+ sbcs r8, r4, r8
+ ldr r4, [r2, #16]
+ sbcs r5, r5, r9
+ ldr r9, [r1, #32]
+ str r6, [sp, #28] @ 4-byte Spill
+ ldr r6, [r2, #24]
+ ldr r2, [r1, #64]
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r2, [r1, #68]
+ str r6, [sp, #24] @ 4-byte Spill
+ ldr r6, [r1, #44]
+ str r2, [sp, #36] @ 4-byte Spill
+ ldr r2, [r1, #72]
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [r1, #76]
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [r1, #36]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #40]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r1, #48]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #52]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [r1, #56]
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #20] @ 4-byte Spill
+ ldm lr, {r1, r2, r12, lr}
+ str r11, [r0]
+ stmib r0, {r7, r8}
+ str r5, [r0, #12]
+ ldr r7, [sp] @ 4-byte Reload
+ ldr r8, [r3, #20]
+ sbcs r1, r1, r4
+ str r1, [r0, #16]
+ sbcs r2, r2, r10
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r2, [r0, #20]
+ ldr r2, [sp, #28] @ 4-byte Reload
+ sbcs r1, r12, r1
+ str r1, [r0, #24]
+ sbcs r2, lr, r2
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r2, [r0, #28]
+ ldr r2, [sp, #56] @ 4-byte Reload
+ sbcs r1, r9, r1
+ sbcs r2, r7, r2
+ str r1, [r0, #32]
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r2, [r0, #36]
+ ldr r2, [sp, #4] @ 4-byte Reload
+ sbcs r12, r2, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ ldr r2, [sp, #8] @ 4-byte Reload
+ str r12, [sp, #48] @ 4-byte Spill
+ sbcs r4, r6, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ sbcs r11, r2, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r11, [sp, #52] @ 4-byte Spill
+ sbcs r6, r2, r1
+ ldr r1, [sp, #72] @ 4-byte Reload
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r6, [sp, #64] @ 4-byte Spill
+ sbcs r7, r2, r1
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r7, [sp, #68] @ 4-byte Spill
+ sbcs r9, r2, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r9, [sp, #76] @ 4-byte Spill
+ sbcs r1, r2, r1
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #92] @ 4-byte Spill
+ ldr r1, [sp, #88] @ 4-byte Reload
+ sbcs r1, r2, r1
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ sbcs r10, r2, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r10, [sp, #80] @ 4-byte Spill
+ sbcs lr, r2, r1
+ mov r1, #0
+ ldr r2, [r3, #4]
+ sbc r1, r1, #0
+ str lr, [sp, #84] @ 4-byte Spill
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [r3, #32]
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [r3, #36]
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [r3, #8]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [r3, #12]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [r3, #16]
+ ldr r5, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [r3, #24]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [r3, #28]
+ ldr r3, [r3]
+ str r1, [sp, #44] @ 4-byte Spill
+ adds r1, r12, r3
+ ldr r3, [sp, #32] @ 4-byte Reload
+ adcs r2, r4, r2
+ adcs r3, r11, r3
+ adcs r12, r6, r5
+ ldr r6, [sp, #36] @ 4-byte Reload
+ ldr r5, [sp, #92] @ 4-byte Reload
+ adcs r6, r7, r6
+ ldr r7, [sp, #40] @ 4-byte Reload
+ adcs r8, r9, r8
+ adcs r9, r5, r7
+ ldr r5, [sp, #44] @ 4-byte Reload
+ ldr r7, [sp, #88] @ 4-byte Reload
+ adcs r7, r7, r5
+ ldr r5, [sp, #56] @ 4-byte Reload
+ adcs r11, r10, r5
+ ldr r5, [sp, #60] @ 4-byte Reload
+ adc r10, lr, r5
+ ldr r5, [sp, #72] @ 4-byte Reload
+ ands lr, r5, #1
+ ldr r5, [sp, #48] @ 4-byte Reload
+ moveq r2, r4
+ moveq r1, r5
+ str r1, [r0, #40]
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r2, [r0, #44]
+ moveq r3, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ cmp lr, #0
+ str r3, [r0, #48]
+ moveq r12, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r12, [r0, #52]
+ moveq r6, r1
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str r6, [r0, #56]
+ moveq r8, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ cmp lr, #0
+ str r8, [r0, #60]
+ moveq r9, r1
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r9, [r0, #64]
+ moveq r7, r1
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r7, [r0, #68]
+ moveq r11, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ cmp lr, #0
+ str r11, [r0, #72]
+ moveq r10, r1
+ str r10, [r0, #76]
+ add sp, sp, #96
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end157:
+ .size mcl_fpDbl_sub10L, .Lfunc_end157-mcl_fpDbl_sub10L
+ .cantunwind
+ .fnend
+
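+@ note: .LmulPv352x32 is a local helper that writes the 12-limb product of an
+@ 11-limb (352-bit) operand at r1 and a 32-bit scalar in r2 to the buffer at r0.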
+ .align 2
+ .type .LmulPv352x32,%function
+.LmulPv352x32: @ @mulPv352x32
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r11, lr}
+ ldr r12, [r1]
+ ldmib r1, {r3, lr}
+ ldr r9, [r1, #12]
+ umull r4, r8, lr, r2
+ umull lr, r6, r12, r2
+ mov r5, r4
+ mov r7, r6
+ str lr, [r0]
+ umull lr, r12, r9, r2
+ umlal r7, r5, r3, r2
+ str r5, [r0, #8]
+ str r7, [r0, #4]
+ umull r5, r7, r3, r2
+ adds r3, r6, r5
+ adcs r3, r7, r4
+ adcs r3, r8, lr
+ str r3, [r0, #12]
+ ldr r3, [r1, #16]
+ umull r7, r6, r3, r2
+ adcs r3, r12, r7
+ str r3, [r0, #16]
+ ldr r3, [r1, #20]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #20]
+ ldr r3, [r1, #24]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #24]
+ ldr r3, [r1, #28]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #28]
+ ldr r3, [r1, #32]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #32]
+ ldr r3, [r1, #36]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #36]
+ ldr r1, [r1, #40]
+ umull r3, r7, r1, r2
+ adcs r1, r5, r3
+ str r1, [r0, #40]
+ adc r1, r7, #0
+ str r1, [r0, #44]
+ pop {r4, r5, r6, r7, r8, r9, r11, lr}
+ mov pc, lr
+.Lfunc_end158:
+ .size .LmulPv352x32, .Lfunc_end158-.LmulPv352x32
+ .cantunwind
+ .fnend
+
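+@ note: mcl_fp_mulUnitPre11L computes z (r0) = x (r1) * y (r2) for an 11-limb x and
+@ a 32-bit y; it calls .LmulPv352x32 into a stack buffer and copies the 12 limbs out.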
+ .globl mcl_fp_mulUnitPre11L
+ .align 2
+ .type mcl_fp_mulUnitPre11L,%function
+mcl_fp_mulUnitPre11L: @ @mcl_fp_mulUnitPre11L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, lr}
+ .pad #48
+ sub sp, sp, #48
+ mov r4, r0
+ mov r0, sp
+ bl .LmulPv352x32(PLT)
+ ldr r12, [sp, #44]
+ ldr lr, [sp, #40]
+ ldr r8, [sp, #36]
+ ldr r9, [sp, #32]
+ ldr r10, [sp, #28]
+ ldr r1, [sp, #24]
+ ldr r5, [sp, #20]
+ ldr r6, [sp, #16]
+ ldr r7, [sp]
+ ldmib sp, {r2, r3}
+ ldr r0, [sp, #12]
+ str r7, [r4]
+ stmib r4, {r2, r3}
+ str r0, [r4, #12]
+ str r6, [r4, #16]
+ str r5, [r4, #20]
+ str r1, [r4, #24]
+ str r10, [r4, #28]
+ str r9, [r4, #32]
+ str r8, [r4, #36]
+ str lr, [r4, #40]
+ str r12, [r4, #44]
+ add sp, sp, #48
+ pop {r4, r5, r6, r7, r8, r9, r10, lr}
+ mov pc, lr
+.Lfunc_end159:
+ .size mcl_fp_mulUnitPre11L, .Lfunc_end159-mcl_fp_mulUnitPre11L
+ .cantunwind
+ .fnend
+
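+@ note: mcl_fpDbl_mulPre11L builds the full 22-limb product of two 11-limb operands
+@ by operand scanning: one .LmulPv352x32 call per limb of the second operand,
+@ accumulated into the result with adcs chains.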
+ .globl mcl_fpDbl_mulPre11L
+ .align 2
+ .type mcl_fpDbl_mulPre11L,%function
+mcl_fpDbl_mulPre11L: @ @mcl_fpDbl_mulPre11L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #604
+ sub sp, sp, #604
+ mov r3, r2
+ mov r4, r0
+ add r0, sp, #552
+ str r1, [sp, #68] @ 4-byte Spill
+ mov r5, r1
+ ldr r2, [r3]
+ str r3, [sp, #64] @ 4-byte Spill
+ str r4, [sp, #60] @ 4-byte Spill
+ mov r6, r3
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #596]
+ ldr r1, [sp, #560]
+ ldr r2, [r6, #4]
+ ldr r11, [sp, #556]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #592]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #564]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #588]
+ str r1, [sp, #20] @ 4-byte Spill
+ mov r1, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #584]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #580]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #576]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #572]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #568]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #552]
+ str r0, [r4]
+ add r0, sp, #504
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #548]
+ add r10, sp, #532
+ add r12, sp, #508
+ mov r6, r4
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r5, r8, r9, r10}
+ ldr r1, [sp, #504]
+ ldr lr, [sp, #528]
+ ldr r7, [sp, #524]
+ ldm r12, {r0, r2, r3, r12}
+ adds r1, r1, r11
+ str r1, [r4, #4]
+ ldr r1, [sp, #28] @ 4-byte Reload
+ ldr r4, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r4
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r5, r0
+ ldr r5, [sp, #64] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ ldr r2, [r5, #8]
+ adcs r0, r8, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #456
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #500]
+ add r10, sp, #484
+ add r12, sp, #460
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #496]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr lr, [sp, #480]
+ ldr r7, [sp, #476]
+ ldr r1, [sp, #456]
+ ldm r12, {r0, r2, r3, r12}
+ ldr r11, [sp, #16] @ 4-byte Reload
+ adds r1, r1, r11
+ str r1, [r6, #8]
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #12]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r4
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #408
+ bl .LmulPv352x32(PLT)
+ add r10, sp, #444
+ add lr, sp, #432
+ add r12, sp, #412
+ ldm r10, {r8, r9, r10}
+ ldm lr, {r6, r11, lr}
+ ldr r7, [sp, #428]
+ ldr r1, [sp, #408]
+ ldm r12, {r0, r2, r3, r12}
+ ldr r4, [sp, #16] @ 4-byte Reload
+ adds r1, r1, r4
+ ldr r4, [sp, #60] @ 4-byte Reload
+ str r1, [r4, #12]
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #16]
+ ldr r5, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ mov r1, r5
+ adcs r0, r3, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ adc r0, r10, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #360
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #404]
+ add r10, sp, #392
+ add r12, sp, #364
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr lr, [sp, #388]
+ ldr r6, [sp, #384]
+ ldr r7, [sp, #380]
+ ldr r1, [sp, #360]
+ ldm r12, {r0, r2, r3, r12}
+ ldr r11, [sp, #16] @ 4-byte Reload
+ adds r1, r1, r11
+ str r1, [r4, #16]
+ ldr r1, [sp, #32] @ 4-byte Reload
+ ldr r4, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r4, #20]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #312
+ bl .LmulPv352x32(PLT)
+ add r11, sp, #344
+ add r12, sp, #316
+ ldm r11, {r8, r9, r10, r11}
+ ldr lr, [sp, #340]
+ ldr r6, [sp, #336]
+ ldr r7, [sp, #332]
+ ldr r1, [sp, #312]
+ ldm r12, {r0, r2, r3, r12}
+ ldr r5, [sp, #16] @ 4-byte Reload
+ adds r1, r1, r5
+ ldr r5, [sp, #60] @ 4-byte Reload
+ str r1, [r5, #20]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r4, #24]
+ ldr r4, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ mov r1, r4
+ adcs r0, r3, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ adc r0, r11, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #264
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #308]
+ add r10, sp, #296
+ add r12, sp, #268
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr lr, [sp, #292]
+ ldr r6, [sp, #288]
+ ldr r7, [sp, #284]
+ ldr r1, [sp, #264]
+ ldm r12, {r0, r2, r3, r12}
+ ldr r11, [sp, #16] @ 4-byte Reload
+ adds r1, r1, r11
+ str r1, [r5, #24]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ ldr r5, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r4
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #28]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #216
+ bl .LmulPv352x32(PLT)
+ add r10, sp, #252
+ add lr, sp, #240
+ add r12, sp, #220
+ ldm r10, {r8, r9, r10}
+ ldm lr, {r6, r11, lr}
+ ldr r7, [sp, #236]
+ ldr r1, [sp, #216]
+ ldm r12, {r0, r2, r3, r12}
+ ldr r4, [sp, #16] @ 4-byte Reload
+ adds r1, r1, r4
+ ldr r4, [sp, #60] @ 4-byte Reload
+ str r1, [r4, #28]
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #32]
+ ldr r5, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ mov r1, r5
+ adcs r0, r3, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #20] @ 4-byte Spill
+ adc r0, r10, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ add r0, sp, #168
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #212]
+ add r10, sp, #200
+ add r12, sp, #172
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr lr, [sp, #196]
+ ldr r6, [sp, #192]
+ ldr r7, [sp, #188]
+ ldr r1, [sp, #168]
+ ldm r12, {r0, r2, r3, r12}
+ ldr r11, [sp, #12] @ 4-byte Reload
+ adds r1, r1, r11
+ ldr r11, [sp, #64] @ 4-byte Reload
+ str r1, [r4, #32]
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r11, #36]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #120
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #164]
+ add lr, sp, #152
+ add r10, sp, #140
+ add r8, sp, #128
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm lr, {r9, r12, lr}
+ ldm r10, {r0, r6, r10}
+ ldr r2, [sp, #120]
+ ldr r3, [sp, #124]
+ ldm r8, {r1, r7, r8}
+ ldr r5, [sp, #12] @ 4-byte Reload
+ adds r2, r2, r5
+ ldr r5, [sp, #56] @ 4-byte Reload
+ str r2, [r4, #36]
+ ldr r2, [r11, #40]
+ adcs r11, r3, r5
+ ldr r3, [sp, #52] @ 4-byte Reload
+ adcs r5, r1, r3
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r7, r7, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r8, r8, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r10, r10, r0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ add r0, sp, #72
+ bl .LmulPv352x32(PLT)
+ add r3, sp, #72
+ ldm r3, {r0, r1, r2, r3}
+ ldr r9, [sp, #116]
+ ldr r6, [sp, #112]
+ adds r12, r0, r11
+ add r11, sp, #88
+ adcs lr, r1, r5
+ adcs r2, r2, r7
+ adcs r3, r3, r8
+ ldr r8, [sp, #108]
+ ldm r11, {r0, r1, r5, r7, r11}
+ str r12, [r4, #40]
+ str lr, [r4, #44]
+ str r2, [r4, #48]
+ ldr r2, [sp, #40] @ 4-byte Reload
+ add r12, r4, #72
+ str r3, [r4, #52]
+ adcs r0, r0, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r0, [r4, #56]
+ adcs r1, r1, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ adcs r0, r5, r10
+ str r1, [r4, #60]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [r4, #64]
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [r4, #68]
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r11, r0
+ adcs r1, r8, r1
+ adcs r2, r6, r2
+ adc r3, r9, #0
+ stm r12, {r0, r1, r2, r3}
+ add sp, sp, #604
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end160:
+ .size mcl_fpDbl_mulPre11L, .Lfunc_end160-mcl_fpDbl_mulPre11L
+ .cantunwind
+ .fnend
+
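+@ note: mcl_fpDbl_sqrPre11L computes the 22-limb square of an 11-limb value, using
+@ the same per-limb .LmulPv352x32 accumulation as mcl_fpDbl_mulPre11L with both
+@ operand pointers referring to the same input.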
+ .globl mcl_fpDbl_sqrPre11L
+ .align 2
+ .type mcl_fpDbl_sqrPre11L,%function
+mcl_fpDbl_sqrPre11L: @ @mcl_fpDbl_sqrPre11L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #596
+ sub sp, sp, #596
+ mov r5, r1
+ mov r4, r0
+ add r0, sp, #544
+ ldr r2, [r5]
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #588]
+ ldr r1, [sp, #548]
+ ldr r2, [r5, #4]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #584]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #552]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #580]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #556]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #576]
+ str r1, [sp, #24] @ 4-byte Spill
+ mov r1, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #572]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #568]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #564]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #560]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #544]
+ str r0, [r4]
+ add r0, sp, #496
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #540]
+ add r10, sp, #520
+ add lr, sp, #496
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r6, r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #20] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #4]
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #8]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #448
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #492]
+ add r10, sp, #476
+ add lr, sp, #448
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r6, r8, r9, r10}
+ ldr r7, [sp, #472]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #20] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #8]
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #12]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #400
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #444]
+ add r10, sp, #428
+ add lr, sp, #400
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r6, r8, r9, r10}
+ ldr r7, [sp, #424]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #20] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #12]
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #16]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #352
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #396]
+ add r10, sp, #380
+ add lr, sp, #352
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r6, r8, r9, r10}
+ ldr r7, [sp, #376]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #20] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #16]
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #20]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #304
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #348]
+ add r10, sp, #332
+ add lr, sp, #304
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r6, r8, r9, r10}
+ ldr r7, [sp, #328]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #20] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #20]
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #24]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #256
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #300]
+ add r10, sp, #284
+ add lr, sp, #256
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r6, r8, r9, r10}
+ ldr r7, [sp, #280]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #20] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #24]
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #28]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #208
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #252]
+ add r10, sp, #236
+ add lr, sp, #208
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r6, r8, r9, r10}
+ ldr r7, [sp, #232]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #20] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #28]
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #32]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #160
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #204]
+ add r10, sp, #188
+ add lr, sp, #160
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r6, r8, r9, r10}
+ ldr r7, [sp, #184]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #16] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #32]
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #36]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #112
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #156]
+ add lr, sp, #140
+ add r12, sp, #124
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #152]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm lr, {r8, r11, lr}
+ ldr r9, [sp, #136]
+ ldr r2, [sp, #112]
+ ldr r7, [sp, #116]
+ ldr r6, [sp, #120]
+ ldm r12, {r0, r3, r12}
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adds r2, r2, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r2, [r4, #36]
+ ldr r2, [r5, #40]
+ adcs r7, r7, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r6, r6, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r10, r0, r1
+ ldr r0, [sp, #48] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r11, r11, r0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ add r0, sp, #64
+ bl .LmulPv352x32(PLT)
+ add r3, sp, #64
+ ldm r3, {r0, r1, r2, r3}
+ ldr r9, [sp, #108]
+ ldr r8, [sp, #104]
+ adds r12, r0, r7
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs lr, r1, r6
+ adcs r2, r2, r10
+ add r10, sp, #80
+ adcs r3, r3, r0
+ ldm r10, {r0, r1, r5, r6, r7, r10}
+ str r12, [r4, #40]
+ str lr, [r4, #44]
+ str r2, [r4, #48]
+ ldr r2, [sp, #20] @ 4-byte Reload
+ add r12, r4, #72
+ str r3, [r4, #52]
+ adcs r0, r0, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r0, [r4, #56]
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r1, [r4, #60]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [r4, #64]
+ adcs r0, r6, r11
+ str r0, [r4, #68]
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r7, r0
+ adcs r1, r10, r1
+ adcs r2, r8, r2
+ adc r3, r9, #0
+ stm r12, {r0, r1, r2, r3}
+ add sp, sp, #596
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end161:
+ .size mcl_fpDbl_sqrPre11L, .Lfunc_end161-mcl_fpDbl_sqrPre11L
+ .cantunwind
+ .fnend
+
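+@ mcl_fp_mont11L: word-serial Montgomery multiplication over 11 32-bit limbs
+@ (352-bit operands). Descriptive, hedged reading of the generated code below:
+@ r0 appears to hold the result pointer, r1/r2 the two operands, and r3 the
+@ modulus, with the Montgomery constant loaded from [r3, #-4]. Each outer
+@ iteration calls the .LmulPv352x32 helper (352-bit x 32-bit product) once for
+@ the partial product and once for the reduction step; the epilogue performs a
+@ conditional subtraction of the modulus to bring the accumulator into range.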
+ .globl mcl_fp_mont11L
+ .align 2
+ .type mcl_fp_mont11L,%function
+mcl_fp_mont11L: @ @mcl_fp_mont11L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #132
+ sub sp, sp, #132
+ .pad #1024
+ sub sp, sp, #1024
+ mov r7, r2
+ ldr r5, [r3, #-4]
+ str r0, [sp, #72] @ 4-byte Spill
+ add r0, sp, #1104
+ str r3, [sp, #92] @ 4-byte Spill
+ str r1, [sp, #84] @ 4-byte Spill
+ mov r4, r3
+ mov r6, r1
+ ldr r2, [r7]
+ str r7, [sp, #80] @ 4-byte Spill
+ str r5, [sp, #88] @ 4-byte Spill
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #1108]
+ ldr r8, [sp, #1104]
+ mov r1, r4
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1112]
+ mul r2, r8, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #1116]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1148]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #1144]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #1140]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #1136]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1132]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1128]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1124]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1120]
+ str r0, [sp, #32] @ 4-byte Spill
+ add r0, sp, #1056
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #1100]
+ ldr r2, [r7, #4]
+ ldr r11, [sp, #1072]
+ ldr r5, [sp, #1056]
+ ldr r4, [sp, #1060]
+ ldr r10, [sp, #1064]
+ ldr r9, [sp, #1068]
+ mov r1, r6
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1096]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1092]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1088]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1084]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1080]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #1076]
+ str r0, [sp, #4] @ 4-byte Spill
+ add r0, sp, #1008
+ bl .LmulPv352x32(PLT)
+ adds r0, r5, r8
+ ldr r1, [sp, #60] @ 4-byte Reload
+ ldr r2, [sp, #4] @ 4-byte Reload
+ add lr, sp, #1008
+ ldr r7, [sp, #1044]
+ ldr r6, [sp, #1040]
+ ldr r5, [sp, #1036]
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r8, r4, r0
+ mov r0, #0
+ ldr r4, [sp, #1032]
+ adcs r1, r10, r1
+ ldr r10, [sp, #1052]
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r9, r1
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r1, r11, r1
+ ldr r11, [sp, #1048]
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #8] @ 4-byte Reload
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r1, r2, r1
+ str r1, [sp, #28] @ 4-byte Spill
+ adc r9, r0, #0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r8, r8, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r9, r10
+ str r0, [sp, #32] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r8, r0
+ add r0, sp, #960
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #1004]
+ ldr r1, [sp, #84] @ 4-byte Reload
+ ldr r5, [sp, #984]
+ ldr r6, [sp, #980]
+ ldr r9, [sp, #976]
+ ldr r10, [sp, #960]
+ ldr r11, [sp, #964]
+ ldr r7, [sp, #968]
+ ldr r4, [sp, #972]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1000]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #996]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #992]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #988]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ ldr r2, [r0, #8]
+ add r0, sp, #912
+ bl .LmulPv352x32(PLT)
+ adds r0, r8, r10
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #916
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #940
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldm r11, {r5, r6, r7, r8, r11}
+ ldr r4, [sp, #912]
+ adc r10, r0, #0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r9, [sp, #76] @ 4-byte Reload
+ adds r9, r9, r4
+ ldr r4, [sp, #68] @ 4-byte Reload
+ adcs r0, r4, r0
+ mov r4, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r10, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r9, r0
+ add r0, sp, #864
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #908]
+ add r11, sp, #864
+ ldr r7, [sp, #888]
+ ldr r5, [sp, #884]
+ ldr r8, [sp, #880]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #904]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #900]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #896]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #892]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r9, r10, r11}
+ ldr r0, [sp, #80] @ 4-byte Reload
+ ldr r1, [sp, #84] @ 4-byte Reload
+ ldr r6, [sp, #876]
+ ldr r2, [r0, #12]
+ add r0, sp, #816
+ bl .LmulPv352x32(PLT)
+ adds r0, r4, r9
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #816
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #840
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r11, {r4, r5, r6, r9, r10, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #76] @ 4-byte Reload
+ adds r8, r7, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r8, r0
+ add r0, sp, #768
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #812]
+ ldr r1, [sp, #84] @ 4-byte Reload
+ ldr r5, [sp, #792]
+ ldr r6, [sp, #788]
+ ldr r9, [sp, #784]
+ ldr r10, [sp, #768]
+ ldr r11, [sp, #772]
+ ldr r7, [sp, #776]
+ ldr r4, [sp, #780]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #808]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #804]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #800]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #796]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ ldr r2, [r0, #16]
+ add r0, sp, #720
+ bl .LmulPv352x32(PLT)
+ adds r0, r8, r10
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #724
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #748
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldm r11, {r5, r6, r7, r8, r11}
+ ldr r4, [sp, #720]
+ adc r10, r0, #0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r9, [sp, #76] @ 4-byte Reload
+ adds r9, r9, r4
+ ldr r4, [sp, #68] @ 4-byte Reload
+ adcs r0, r4, r0
+ mov r4, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r10, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r9, r0
+ add r0, sp, #672
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #716]
+ add r11, sp, #672
+ ldr r7, [sp, #696]
+ ldr r5, [sp, #692]
+ ldr r8, [sp, #688]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #712]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #708]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #704]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #700]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r9, r10, r11}
+ ldr r0, [sp, #80] @ 4-byte Reload
+ ldr r1, [sp, #84] @ 4-byte Reload
+ ldr r6, [sp, #684]
+ ldr r2, [r0, #20]
+ add r0, sp, #624
+ bl .LmulPv352x32(PLT)
+ adds r0, r4, r9
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #624
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #648
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r11, {r4, r5, r6, r9, r10, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #76] @ 4-byte Reload
+ adds r8, r7, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r8, r0
+ add r0, sp, #576
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #620]
+ ldr r1, [sp, #84] @ 4-byte Reload
+ ldr r5, [sp, #600]
+ ldr r6, [sp, #596]
+ ldr r9, [sp, #592]
+ ldr r10, [sp, #576]
+ ldr r11, [sp, #580]
+ ldr r7, [sp, #584]
+ ldr r4, [sp, #588]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #616]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #612]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #608]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #604]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ ldr r2, [r0, #24]
+ add r0, sp, #528
+ bl .LmulPv352x32(PLT)
+ adds r0, r8, r10
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #532
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #556
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldm r11, {r5, r6, r7, r8, r11}
+ ldr r4, [sp, #528]
+ adc r10, r0, #0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r9, [sp, #76] @ 4-byte Reload
+ adds r9, r9, r4
+ ldr r4, [sp, #68] @ 4-byte Reload
+ adcs r0, r4, r0
+ mov r4, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r10, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r9, r0
+ add r0, sp, #480
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #524]
+ add r11, sp, #480
+ ldr r7, [sp, #504]
+ ldr r5, [sp, #500]
+ ldr r8, [sp, #496]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #520]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #516]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #512]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #508]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r9, r10, r11}
+ ldr r0, [sp, #80] @ 4-byte Reload
+ ldr r1, [sp, #84] @ 4-byte Reload
+ ldr r6, [sp, #492]
+ ldr r2, [r0, #28]
+ add r0, sp, #432
+ bl .LmulPv352x32(PLT)
+ adds r0, r4, r9
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #432
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #456
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r11, {r4, r5, r6, r9, r10, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #76] @ 4-byte Reload
+ adds r8, r7, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r8, r0
+ add r0, sp, #384
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #428]
+ ldr r1, [sp, #84] @ 4-byte Reload
+ ldr r5, [sp, #408]
+ ldr r6, [sp, #404]
+ ldr r9, [sp, #400]
+ ldr r10, [sp, #384]
+ ldr r11, [sp, #388]
+ ldr r7, [sp, #392]
+ ldr r4, [sp, #396]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #424]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #420]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #416]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #412]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ ldr r2, [r0, #32]
+ add r0, sp, #336
+ bl .LmulPv352x32(PLT)
+ adds r0, r8, r10
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #340
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #364
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldm r11, {r5, r6, r7, r8, r11}
+ ldr r4, [sp, #336]
+ adc r10, r0, #0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r9, [sp, #76] @ 4-byte Reload
+ adds r9, r9, r4
+ ldr r4, [sp, #68] @ 4-byte Reload
+ adcs r0, r4, r0
+ mov r4, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r10, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r9, r0
+ add r0, sp, #288
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #332]
+ add r11, sp, #288
+ ldr r7, [sp, #312]
+ ldr r5, [sp, #308]
+ ldr r8, [sp, #304]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #328]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #324]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #320]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #316]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r9, r10, r11}
+ ldr r0, [sp, #80] @ 4-byte Reload
+ ldr r1, [sp, #84] @ 4-byte Reload
+ ldr r6, [sp, #300]
+ ldr r2, [r0, #36]
+ add r0, sp, #240
+ bl .LmulPv352x32(PLT)
+ adds r0, r4, r9
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #240
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #264
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r11, {r4, r5, r6, r9, r10, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #76] @ 4-byte Reload
+ adds r8, r7, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r8, r0
+ add r0, sp, #192
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #236]
+ ldr r1, [sp, #84] @ 4-byte Reload
+ ldr r5, [sp, #216]
+ ldr r6, [sp, #212]
+ ldr r9, [sp, #208]
+ ldr r10, [sp, #192]
+ ldr r11, [sp, #196]
+ ldr r7, [sp, #200]
+ ldr r4, [sp, #204]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #232]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #228]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #224]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #220]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ ldr r2, [r0, #40]
+ add r0, sp, #144
+ bl .LmulPv352x32(PLT)
+ adds r0, r8, r10
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r2, [sp, #8] @ 4-byte Reload
+ add lr, sp, #144
+ add r12, sp, #160
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r11
+ adcs r10, r1, r7
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r11, r1, r4
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r9
+ add r9, sp, #180
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, r6
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r5
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adc r1, r1, #0
+ str r1, [sp, #48] @ 4-byte Spill
+ ldm lr, {r2, r6, lr}
+ ldr r5, [sp, #156]
+ adds r4, r0, r2
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r6, r10, r6
+ mul r1, r4, r0
+ str r1, [sp, #44] @ 4-byte Spill
+ ldm r9, {r7, r8, r9}
+ ldm r12, {r0, r1, r2, r3, r12}
+ str r6, [sp, #40] @ 4-byte Spill
+ adcs r6, r11, lr
+ ldr r10, [sp, #92] @ 4-byte Reload
+ str r6, [sp, #36] @ 4-byte Spill
+ ldr r6, [sp, #84] @ 4-byte Reload
+ adcs r11, r6, r5
+ ldr r6, [sp, #80] @ 4-byte Reload
+ adcs r6, r6, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r10
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r5, r0, r3
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r8, r0, r9
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ add r0, sp, #96
+ bl .LmulPv352x32(PLT)
+ add r7, sp, #96
+ ldm r7, {r0, r1, r3, r7}
+ adds r0, r4, r0
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs lr, r0, r1
+ ldr r0, [sp, #36] @ 4-byte Reload
+ str lr, [sp, #44] @ 4-byte Spill
+ adcs r1, r0, r3
+ ldr r3, [sp, #112]
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r9, r11, r7
+ str r1, [sp, #48] @ 4-byte Spill
+ adcs r6, r6, r3
+ ldr r3, [sp, #116]
+ str r6, [sp, #52] @ 4-byte Spill
+ adcs r0, r0, r3
+ ldr r3, [sp, #120]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r12, r0, r3
+ ldr r3, [sp, #124]
+ ldr r0, [sp, #88] @ 4-byte Reload
+ str r12, [sp, #56] @ 4-byte Spill
+ adcs r5, r5, r3
+ ldr r3, [sp, #128]
+ str r5, [sp, #60] @ 4-byte Spill
+ adcs r0, r0, r3
+ ldr r3, [sp, #132]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r3
+ ldr r3, [sp, #136]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r3
+ ldr r3, [sp, #140]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r8, r8, r3
+ adc r0, r0, #0
+ str r8, [sp, #68] @ 4-byte Spill
+ str r0, [sp, #64] @ 4-byte Spill
+ ldmib r10, {r3, r7}
+ ldr r4, [r10, #16]
+ ldr r11, [r10]
+ ldr r2, [r10, #12]
+ mov r0, r10
+ str r4, [sp, #28] @ 4-byte Spill
+ ldr r4, [r10, #20]
+ subs r11, lr, r11
+ ldr lr, [sp, #84] @ 4-byte Reload
+ str r4, [sp, #32] @ 4-byte Spill
+ ldr r4, [r10, #24]
+ str r4, [sp, #36] @ 4-byte Spill
+ ldr r4, [r10, #28]
+ sbcs r10, r1, r3
+ mov r3, r9
+ ldr r9, [r0, #32]
+ sbcs r1, r3, r7
+ ldr r7, [r0, #36]
+ ldr r0, [r0, #40]
+ sbcs r2, r6, r2
+ ldr r6, [sp, #36] @ 4-byte Reload
+ str r4, [sp, #40] @ 4-byte Spill
+ ldr r4, [sp, #28] @ 4-byte Reload
+ sbcs lr, lr, r4
+ ldr r4, [sp, #32] @ 4-byte Reload
+ sbcs r4, r12, r4
+ ldr r12, [sp, #88] @ 4-byte Reload
+ sbcs r5, r5, r6
+ ldr r6, [sp, #40] @ 4-byte Reload
+ sbcs r12, r12, r6
+ ldr r6, [sp, #80] @ 4-byte Reload
+ sbcs r9, r6, r9
+ ldr r6, [sp, #76] @ 4-byte Reload
+ sbcs r7, r6, r7
+ ldr r6, [sp, #64] @ 4-byte Reload
+ sbcs r0, r8, r0
+ ldr r8, [sp, #72] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ sbc r6, r6, #0
+ ands r6, r6, #1
+ movne r11, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ movne r1, r3
+ str r11, [r8]
+ movne r10, r0
+ cmp r6, #0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ str r10, [r8, #4]
+ str r1, [r8, #8]
+ ldr r1, [sp, #52] @ 4-byte Reload
+ movne r2, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r2, [r8, #12]
+ movne lr, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str lr, [r8, #16]
+ movne r4, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ cmp r6, #0
+ str r4, [r8, #20]
+ movne r5, r1
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r5, [r8, #24]
+ movne r12, r1
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r12, [r8, #28]
+ movne r9, r1
+ ldr r1, [sp, #76] @ 4-byte Reload
+ cmp r6, #0
+ str r9, [r8, #32]
+ movne r7, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r7, [r8, #36]
+ movne r0, r1
+ str r0, [r8, #40]
+ add sp, sp, #132
+ add sp, sp, #1024
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end162:
+ .size mcl_fp_mont11L, .Lfunc_end162-mcl_fp_mont11L
+ .cantunwind
+ .fnend
+
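+@ mcl_fp_montNF11L: Montgomery multiplication variant for the same 11-limb
+@ (352-bit) field elements. As far as the generated code shows, it is laid out
+@ like mcl_fp_mont11L above, driving the same .LmulPv352x32 helper once per
+@ operand word and once per reduction step.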
+ .globl mcl_fp_montNF11L
+ .align 2
+ .type mcl_fp_montNF11L,%function
+mcl_fp_montNF11L: @ @mcl_fp_montNF11L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #124
+ sub sp, sp, #124
+ .pad #1024
+ sub sp, sp, #1024
+ str r2, [sp, #72] @ 4-byte Spill
+ ldr r5, [r3, #-4]
+ ldr r2, [r2]
+ add r6, sp, #1024
+ str r0, [sp, #68] @ 4-byte Spill
+ str r3, [sp, #84] @ 4-byte Spill
+ str r1, [sp, #76] @ 4-byte Spill
+ mov r4, r3
+ add r0, r6, #72
+ str r5, [sp, #80] @ 4-byte Spill
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #1100]
+ ldr r10, [sp, #1096]
+ add r9, sp, #1024
+ mov r1, r4
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1104]
+ mul r2, r10, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1108]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1140]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #1136]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #1132]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1128]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1124]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1120]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1116]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1112]
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, r9, #24
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #1092]
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r5, [sp, #1072]
+ ldr r7, [sp, #1068]
+ ldr r8, [sp, #1064]
+ ldr r11, [sp, #1048]
+ ldr r4, [sp, #1052]
+ ldr r6, [sp, #1056]
+ ldr r9, [sp, #1060]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1088]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1084]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1080]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #1076]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r2, [r0, #4]
+ add r0, sp, #1000
+ bl .LmulPv352x32(PLT)
+ adds r0, r11, r10
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add r11, sp, #1024
+ add lr, sp, #1000
+ ldr r10, [sp, #1044]
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r4, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ ldm r11, {r4, r5, r6, r8, r11}
+ adc r9, r1, r0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #52] @ 4-byte Reload
+ adds r7, r7, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #32] @ 4-byte Spill
+ adcs r0, r9, r11
+ str r0, [sp, #28] @ 4-byte Spill
+ adc r0, r10, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #952
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #996]
+ add r11, sp, #952
+ ldr r6, [sp, #976]
+ ldr r4, [sp, #972]
+ ldr r8, [sp, #968]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #992]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #988]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #984]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #980]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r11, {r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r5, [sp, #964]
+ ldr r2, [r0, #8]
+ add r0, sp, #904
+ bl .LmulPv352x32(PLT)
+ adds r0, r7, r9
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #908
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #948]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #932
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r11, {r5, r6, r9, r11}
+ ldr r4, [sp, #904]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r8, [sp, #64] @ 4-byte Reload
+ ldr r7, [sp, #60] @ 4-byte Reload
+ adds r4, r8, r4
+ adcs r0, r7, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #28] @ 4-byte Spill
+ adc r0, r10, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r4, r0
+ add r0, sp, #856
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #900]
+ add r11, sp, #856
+ ldr r7, [sp, #880]
+ ldr r5, [sp, #876]
+ ldr r8, [sp, #872]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #896]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #892]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #888]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #884]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r11, {r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r6, [sp, #868]
+ ldr r2, [r0, #12]
+ add r0, sp, #808
+ bl .LmulPv352x32(PLT)
+ adds r0, r4, r9
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #808
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #852]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #832
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldm r11, {r4, r5, r6, r8, r11}
+ adc r9, r0, r1
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adds r7, r7, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #32] @ 4-byte Spill
+ adcs r0, r9, r11
+ str r0, [sp, #28] @ 4-byte Spill
+ adc r0, r10, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #760
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #804]
+ add r11, sp, #760
+ ldr r6, [sp, #784]
+ ldr r4, [sp, #780]
+ ldr r8, [sp, #776]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #800]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #796]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #792]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #788]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r11, {r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r5, [sp, #772]
+ ldr r2, [r0, #16]
+ add r0, sp, #712
+ bl .LmulPv352x32(PLT)
+ adds r0, r7, r9
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #716
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #756]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #740
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r11, {r5, r6, r9, r11}
+ ldr r4, [sp, #712]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r8, [sp, #64] @ 4-byte Reload
+ ldr r7, [sp, #60] @ 4-byte Reload
+ adds r4, r8, r4
+ adcs r0, r7, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #28] @ 4-byte Spill
+ adc r0, r10, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r4, r0
+ add r0, sp, #664
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #708]
+ add r11, sp, #664
+ ldr r7, [sp, #688]
+ ldr r5, [sp, #684]
+ ldr r8, [sp, #680]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #704]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #700]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #696]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #692]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r11, {r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r6, [sp, #676]
+ ldr r2, [r0, #20]
+ add r0, sp, #616
+ bl .LmulPv352x32(PLT)
+ adds r0, r4, r9
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #616
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #660]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #640
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldm r11, {r4, r5, r6, r8, r11}
+ adc r9, r0, r1
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adds r7, r7, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #32] @ 4-byte Spill
+ adcs r0, r9, r11
+ str r0, [sp, #28] @ 4-byte Spill
+ adc r0, r10, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #568
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #612]
+ add r11, sp, #568
+ ldr r6, [sp, #592]
+ ldr r4, [sp, #588]
+ ldr r8, [sp, #584]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #608]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #604]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #600]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #596]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r11, {r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r5, [sp, #580]
+ ldr r2, [r0, #24]
+ add r0, sp, #520
+ bl .LmulPv352x32(PLT)
+ adds r0, r7, r9
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #524
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #564]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #548
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r11, {r5, r6, r9, r11}
+ ldr r4, [sp, #520]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r8, [sp, #64] @ 4-byte Reload
+ ldr r7, [sp, #60] @ 4-byte Reload
+ adds r4, r8, r4
+ adcs r0, r7, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #28] @ 4-byte Spill
+ adc r0, r10, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r4, r0
+ add r0, sp, #472
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #516]
+ add r11, sp, #472
+ ldr r7, [sp, #496]
+ ldr r5, [sp, #492]
+ ldr r8, [sp, #488]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #512]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #508]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #504]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #500]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r11, {r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r6, [sp, #484]
+ ldr r2, [r0, #28]
+ add r0, sp, #424
+ bl .LmulPv352x32(PLT)
+ adds r0, r4, r9
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #424
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #468]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #448
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldm r11, {r4, r5, r6, r8, r11}
+ adc r9, r0, r1
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adds r7, r7, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #32] @ 4-byte Spill
+ adcs r0, r9, r11
+ str r0, [sp, #28] @ 4-byte Spill
+ adc r0, r10, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #376
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #420]
+ add r11, sp, #376
+ ldr r6, [sp, #400]
+ ldr r4, [sp, #396]
+ ldr r8, [sp, #392]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #416]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #412]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #408]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #404]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r11, {r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r5, [sp, #388]
+ ldr r2, [r0, #32]
+ add r0, sp, #328
+ bl .LmulPv352x32(PLT)
+ adds r0, r7, r9
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #332
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #372]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #356
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r11, {r5, r6, r9, r11}
+ ldr r4, [sp, #328]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r8, [sp, #64] @ 4-byte Reload
+ ldr r7, [sp, #60] @ 4-byte Reload
+ adds r4, r8, r4
+ adcs r0, r7, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #28] @ 4-byte Spill
+ adc r0, r10, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r4, r0
+ add r0, sp, #280
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #324]
+ add r11, sp, #280
+ ldr r7, [sp, #304]
+ ldr r5, [sp, #300]
+ ldr r8, [sp, #296]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #320]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #316]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #312]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #308]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r11, {r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r6, [sp, #292]
+ ldr r2, [r0, #36]
+ add r0, sp, #232
+ bl .LmulPv352x32(PLT)
+ adds r0, r4, r9
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #232
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #276]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #256
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldm r11, {r4, r5, r6, r8, r11}
+ adc r9, r0, r1
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adds r7, r7, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #32] @ 4-byte Spill
+ adcs r0, r9, r11
+ str r0, [sp, #28] @ 4-byte Spill
+ adc r0, r10, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #184
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #228]
+ add r11, sp, #184
+ ldr r6, [sp, #208]
+ ldr r4, [sp, #204]
+ ldr r8, [sp, #200]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #224]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #220]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #216]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #212]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r11, {r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r5, [sp, #196]
+ ldr r2, [r0, #40]
+ add r0, sp, #136
+ bl .LmulPv352x32(PLT)
+ adds r0, r7, r9
+ ldr r1, [sp, #60] @ 4-byte Reload
+ ldr r2, [sp, #4] @ 4-byte Reload
+ ldr lr, [sp, #140]
+ add r9, sp, #172
+ add r12, sp, #152
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r10
+ adcs r11, r1, r11
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r10, r1, r5
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r8
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r1, r4
+ ldr r4, [sp, #148]
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #144]
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #8] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #24] @ 4-byte Reload
+ adc r1, r1, r2
+ ldr r2, [sp, #136]
+ str r1, [sp, #44] @ 4-byte Spill
+ adds r5, r0, r2
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r11, r11, lr
+ adcs r6, r10, r6
+ mul r1, r5, r0
+ str r1, [sp, #40] @ 4-byte Spill
+ ldm r9, {r7, r8, r9}
+ ldm r12, {r0, r1, r2, r3, r12}
+ str r6, [sp, #32] @ 4-byte Spill
+ ldr r6, [sp, #76] @ 4-byte Reload
+ adcs r10, r6, r4
+ ldr r4, [sp, #72] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #88
+ adc r9, r9, #0
+ bl .LmulPv352x32(PLT)
+ add r7, sp, #88
+ ldm r7, {r0, r1, r3, r7}
+ adds r0, r5, r0
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r8, r11, r1
+ str r8, [sp, #28] @ 4-byte Spill
+ adcs r6, r0, r3
+ ldr r3, [sp, #104]
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r2, r10, r7
+ str r6, [sp, #44] @ 4-byte Spill
+ str r2, [sp, #48] @ 4-byte Spill
+ adcs r7, r0, r3
+ ldr r3, [sp, #108]
+ ldr r0, [sp, #72] @ 4-byte Reload
+ str r7, [sp, #52] @ 4-byte Spill
+ adcs r0, r0, r3
+ ldr r3, [sp, #112]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r5, r0, r3
+ ldr r3, [sp, #116]
+ ldr r0, [sp, #64] @ 4-byte Reload
+ str r5, [sp, #56] @ 4-byte Spill
+ adcs lr, r0, r3
+ ldr r3, [sp, #120]
+ ldr r0, [sp, #80] @ 4-byte Reload
+ str lr, [sp, #60] @ 4-byte Spill
+ adcs r0, r0, r3
+ ldr r3, [sp, #124]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r3
+ ldr r3, [sp, #128]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r10, r0, r3
+ ldr r3, [sp, #132]
+ str r10, [sp, #64] @ 4-byte Spill
+ adc r12, r9, r3
+ mov r3, r4
+ str r12, [sp, #40] @ 4-byte Spill
+ ldmib r3, {r0, r1, r9}
+ ldr r4, [r3, #16]
+ ldr r11, [r3]
+ str r4, [sp, #20] @ 4-byte Spill
+ ldr r4, [r3, #20]
+ subs r11, r8, r11
+ ldr r8, [r3, #36]
+ sbcs r0, r6, r0
+ sbcs r1, r2, r1
+ sbcs r2, r7, r9
+ ldr r9, [r3, #32]
+ ldr r7, [sp, #80] @ 4-byte Reload
+ str r4, [sp, #24] @ 4-byte Spill
+ ldr r4, [r3, #24]
+ str r4, [sp, #32] @ 4-byte Spill
+ ldr r4, [r3, #28]
+ ldr r3, [r3, #40]
+ str r4, [sp, #36] @ 4-byte Spill
+ str r3, [sp, #84] @ 4-byte Spill
+ ldr r3, [sp, #72] @ 4-byte Reload
+ ldr r4, [sp, #20] @ 4-byte Reload
+ ldr r6, [sp, #36] @ 4-byte Reload
+ sbcs r3, r3, r4
+ ldr r4, [sp, #24] @ 4-byte Reload
+ sbcs r4, r5, r4
+ ldr r5, [sp, #32] @ 4-byte Reload
+ sbcs r5, lr, r5
+ sbcs lr, r7, r6
+ ldr r7, [sp, #76] @ 4-byte Reload
+ ldr r6, [sp, #84] @ 4-byte Reload
+ sbcs r9, r7, r9
+ ldr r7, [sp, #28] @ 4-byte Reload
+ sbcs r10, r10, r8
+ ldr r8, [sp, #68] @ 4-byte Reload
+ sbc r12, r12, r6
+ asr r6, r12, #31
+ cmp r6, #0
+ movlt r11, r7
+ ldr r7, [sp, #44] @ 4-byte Reload
+ str r11, [r8]
+ movlt r0, r7
+ str r0, [r8, #4]
+ ldr r0, [sp, #48] @ 4-byte Reload
+ movlt r1, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ cmp r6, #0
+ str r1, [r8, #8]
+ movlt r2, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ str r2, [r8, #12]
+ movlt r3, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ str r3, [r8, #16]
+ movlt r4, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ cmp r6, #0
+ str r4, [r8, #20]
+ movlt r5, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ str r5, [r8, #24]
+ movlt lr, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ str lr, [r8, #28]
+ movlt r9, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ cmp r6, #0
+ movlt r10, r0
+ ldr r0, [sp, #40] @ 4-byte Reload
+ movlt r12, r0
+ add r0, r8, #32
+ stm r0, {r9, r10, r12}
+ add sp, sp, #124
+ add sp, sp, #1024
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end163:
+ .size mcl_fp_montNF11L, .Lfunc_end163-mcl_fp_montNF11L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montRed11L
+ .align 2
+ .type mcl_fp_montRed11L,%function
+mcl_fp_montRed11L: @ @mcl_fp_montRed11L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #676
+ sub sp, sp, #676
+ mov r10, r2
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r2, [r1, #4]
+ ldr r5, [r1]
+ ldr r0, [r10]
+ str r2, [sp, #64] @ 4-byte Spill
+ ldr r2, [r1, #8]
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [r10, #4]
+ str r2, [sp, #60] @ 4-byte Spill
+ ldr r2, [r1, #12]
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [r10, #8]
+ str r2, [sp, #56] @ 4-byte Spill
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [r10, #12]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [r10, #16]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [r10, #20]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [r10, #24]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [r10, #-4]
+ str r0, [sp, #140] @ 4-byte Spill
+ mul r2, r5, r0
+ ldr r0, [r10, #28]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [r10, #32]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [r10, #36]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [r10, #40]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [r1, #64]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [r1, #68]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r1, #72]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r1, #76]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [r1, #80]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [r1, #84]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [r1, #32]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r1, #36]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r1, #40]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r1, #44]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r1, #48]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [r1, #56]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [r1, #60]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [r1, #52]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [r1, #28]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r1, #24]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r1, #20]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [r1, #16]
+ mov r1, r10
+ str r0, [sp, #8] @ 4-byte Spill
+ add r0, sp, #624
+ bl .LmulPv352x32(PLT)
+ add r11, sp, #656
+ add lr, sp, #624
+ ldm r11, {r4, r8, r9, r11}
+ ldr r7, [sp, #652]
+ ldr r6, [sp, #648]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r5, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r5, r0, r1
+ ldr r0, [sp, #60] @ 4-byte Reload
+ mov r1, r10
+ adcs r0, r0, r2
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #140] @ 4-byte Reload
+ mul r2, r5, r0
+ add r0, sp, #576
+ bl .LmulPv352x32(PLT)
+ ldr r4, [sp, #576]
+ add r9, sp, #584
+ ldr r12, [sp, #620]
+ ldr lr, [sp, #616]
+ ldr r2, [sp, #612]
+ ldr r3, [sp, #608]
+ ldr r11, [sp, #604]
+ ldr r7, [sp, #600]
+ ldr r6, [sp, #580]
+ ldm r9, {r0, r1, r8, r9}
+ adds r4, r5, r4
+ ldr r4, [sp, #64] @ 4-byte Reload
+ adcs r5, r4, r6
+ ldr r4, [sp, #60] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r10
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ mov r9, r5
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r5, r4
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ add r0, sp, #528
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #572]
+ add r11, sp, #560
+ add lr, sp, #528
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r5, r8, r11}
+ ldr r6, [sp, #556]
+ ldr r7, [sp, #552]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r9, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r1, r0, r1
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ mov r5, r1
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r1, r4
+ mov r1, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ add r0, sp, #480
+ bl .LmulPv352x32(PLT)
+ ldr r4, [sp, #480]
+ add r9, sp, #488
+ ldr r12, [sp, #524]
+ ldr lr, [sp, #520]
+ ldr r2, [sp, #516]
+ ldr r3, [sp, #512]
+ ldr r11, [sp, #508]
+ ldr r7, [sp, #504]
+ ldr r6, [sp, #484]
+ ldm r9, {r0, r1, r8, r9}
+ adds r4, r5, r4
+ ldr r4, [sp, #64] @ 4-byte Reload
+ adcs r5, r4, r6
+ ldr r4, [sp, #60] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r10
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r5, r4
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #432
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #476]
+ add r11, sp, #460
+ add lr, sp, #432
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r11, {r6, r8, r9, r11}
+ ldr r7, [sp, #456]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r5, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r1, r0, r1
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r1, r4
+ mov r4, r1
+ mov r1, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #384
+ bl .LmulPv352x32(PLT)
+ ldr r6, [sp, #384]
+ add r9, sp, #392
+ ldr r12, [sp, #428]
+ ldr lr, [sp, #424]
+ ldr r2, [sp, #420]
+ ldr r3, [sp, #416]
+ ldr r11, [sp, #412]
+ ldr r5, [sp, #408]
+ ldr r7, [sp, #388]
+ ldm r9, {r0, r1, r8, r9}
+ adds r4, r4, r6
+ ldr r4, [sp, #64] @ 4-byte Reload
+ adcs r6, r4, r7
+ ldr r4, [sp, #60] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r10
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r5
+ mov r5, r6
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r6, r4
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ add r0, sp, #336
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #380]
+ add r11, sp, #364
+ add lr, sp, #336
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r11, {r6, r8, r9, r11}
+ ldr r7, [sp, #360]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r5, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r1, r0, r1
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r1, r4
+ mov r4, r1
+ mov r1, r10
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ add r0, sp, #288
+ bl .LmulPv352x32(PLT)
+ ldr r6, [sp, #288]
+ add r9, sp, #296
+ ldr r12, [sp, #332]
+ ldr lr, [sp, #328]
+ ldr r2, [sp, #324]
+ ldr r3, [sp, #320]
+ ldr r11, [sp, #316]
+ ldr r5, [sp, #312]
+ ldr r7, [sp, #292]
+ ldm r9, {r0, r1, r8, r9}
+ adds r4, r4, r6
+ ldr r4, [sp, #64] @ 4-byte Reload
+ adcs r6, r4, r7
+ ldr r4, [sp, #60] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r10
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r5
+ mov r5, r6
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r6, r4
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ add r0, sp, #240
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #284]
+ add r11, sp, #264
+ add lr, sp, #240
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r6, r7, r8, r9, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r5, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r5, r0, r1
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r5, r4
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r11, r0, r11
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r10
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ add r0, sp, #192
+ bl .LmulPv352x32(PLT)
+ add r6, sp, #192
+ add r7, sp, #208
+ ldm r6, {r0, r1, r3, r6}
+ ldr r12, [sp, #236]
+ ldr lr, [sp, #232]
+ adds r0, r5, r0
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r8, r0, r1
+ ldr r0, [sp, #16] @ 4-byte Reload
+ mul r2, r8, r4
+ adcs r0, r0, r3
+ ldr r3, [sp, #228]
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #224]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldm r7, {r0, r1, r4, r7}
+ ldr r5, [sp, #88] @ 4-byte Reload
+ adcs r9, r5, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r10
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r4, r0, r4
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r5, r0, r6
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r11, r11, r3
+ adcs r0, r0, lr
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r6, r0, #0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ add r0, sp, #144
+ bl .LmulPv352x32(PLT)
+ add r3, sp, #144
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r8, r0
+ ldr r0, [sp, #140] @ 4-byte Reload
+ adcs r12, r0, r1
+ ldr r0, [sp, #52] @ 4-byte Reload
+ ldr r1, [sp, #160]
+ str r12, [sp, #44] @ 4-byte Spill
+ adcs r2, r0, r2
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r3, r9, r3
+ str r2, [sp, #52] @ 4-byte Spill
+ str r3, [sp, #56] @ 4-byte Spill
+ adcs r7, r0, r1
+ ldr r1, [sp, #164]
+ ldr r0, [sp, #76] @ 4-byte Reload
+ str r7, [sp, #60] @ 4-byte Spill
+ adcs r8, r4, r1
+ ldr r1, [sp, #168]
+ str r8, [sp, #64] @ 4-byte Spill
+ adcs r4, r0, r1
+ ldr r1, [sp, #172]
+ ldr r0, [sp, #84] @ 4-byte Reload
+ str r4, [sp, #68] @ 4-byte Spill
+ adcs r5, r5, r1
+ ldr r1, [sp, #176]
+ str r5, [sp, #72] @ 4-byte Spill
+ adcs r11, r11, r1
+ ldr r1, [sp, #180]
+ str r11, [sp, #76] @ 4-byte Spill
+ adcs r9, r0, r1
+ ldr r1, [sp, #184]
+ ldr r0, [sp, #88] @ 4-byte Reload
+ str r9, [sp, #84] @ 4-byte Spill
+ adcs lr, r0, r1
+ ldr r1, [sp, #188]
+ str lr, [sp, #88] @ 4-byte Spill
+ adcs r0, r6, r1
+ ldr r1, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ ldr r6, [sp, #140] @ 4-byte Reload
+ adc r10, r0, #0
+ ldr r0, [sp, #132] @ 4-byte Reload
+ subs r0, r12, r0
+ sbcs r1, r2, r1
+ ldr r2, [sp, #124] @ 4-byte Reload
+ sbcs r2, r3, r2
+ ldr r3, [sp, #108] @ 4-byte Reload
+ sbcs r3, r7, r3
+ ldr r7, [sp, #112] @ 4-byte Reload
+ sbcs r12, r8, r7
+ ldr r7, [sp, #116] @ 4-byte Reload
+ sbcs r8, r4, r7
+ ldr r4, [sp, #120] @ 4-byte Reload
+ ldr r7, [sp, #96] @ 4-byte Reload
+ sbcs r4, r5, r4
+ ldr r5, [sp, #92] @ 4-byte Reload
+ sbcs r5, r11, r5
+ sbcs r11, r9, r7
+ ldr r7, [sp, #100] @ 4-byte Reload
+ sbcs r9, lr, r7
+ ldr r7, [sp, #104] @ 4-byte Reload
+ sbcs lr, r6, r7
+ ldr r7, [sp, #44] @ 4-byte Reload
+ sbc r6, r10, #0
+ ldr r10, [sp, #136] @ 4-byte Reload
+ ands r6, r6, #1
+ movne r0, r7
+ str r0, [r10]
+ ldr r0, [sp, #52] @ 4-byte Reload
+ movne r1, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ str r1, [r10, #4]
+ movne r2, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ cmp r6, #0
+ str r2, [r10, #8]
+ movne r3, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ str r3, [r10, #12]
+ movne r12, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ str r12, [r10, #16]
+ movne r8, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ cmp r6, #0
+ str r8, [r10, #20]
+ movne r4, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ str r4, [r10, #24]
+ movne r5, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ str r5, [r10, #28]
+ movne r11, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ cmp r6, #0
+ str r11, [r10, #32]
+ movne r9, r0
+ ldr r0, [sp, #140] @ 4-byte Reload
+ str r9, [r10, #36]
+ movne lr, r0
+ str lr, [r10, #40]
+ add sp, sp, #676
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end164:
+ .size mcl_fp_montRed11L, .Lfunc_end164-mcl_fp_montRed11L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_addPre11L
+ .align 2
+ .type mcl_fp_addPre11L,%function
+mcl_fp_addPre11L: @ @mcl_fp_addPre11L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #20
+ sub sp, sp, #20
+ ldm r1, {r3, r12}
+ ldr r8, [r1, #8]
+ ldr r9, [r1, #12]
+ ldmib r2, {r5, r6, r7, r10}
+ ldr r4, [r2, #20]
+ ldr r11, [r2]
+ str r4, [sp] @ 4-byte Spill
+ ldr r4, [r2, #24]
+ adds lr, r11, r3
+ ldr r3, [r2, #36]
+ ldr r11, [r2, #32]
+ adcs r5, r5, r12
+ add r12, r1, #16
+ adcs r6, r6, r8
+ adcs r7, r7, r9
+ add r9, r1, #32
+ str r4, [sp, #4] @ 4-byte Spill
+ ldr r4, [r2, #28]
+ ldr r2, [r2, #40]
+ str r3, [sp, #8] @ 4-byte Spill
+ str r4, [sp, #16] @ 4-byte Spill
+ str r2, [sp, #12] @ 4-byte Spill
+ ldm r9, {r4, r8, r9}
+ ldm r12, {r1, r2, r3, r12}
+ str lr, [r0]
+ stmib r0, {r5, r6}
+ str r7, [r0, #12]
+ ldr r7, [sp] @ 4-byte Reload
+ adcs r1, r10, r1
+ str r1, [r0, #16]
+ ldr r1, [sp, #4] @ 4-byte Reload
+ adcs r2, r7, r2
+ str r2, [r0, #20]
+ ldr r2, [sp, #8] @ 4-byte Reload
+ adcs r1, r1, r3
+ ldr r3, [sp, #12] @ 4-byte Reload
+ str r1, [r0, #24]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r1, r1, r12
+ str r1, [r0, #28]
+ adcs r1, r11, r4
+ add r0, r0, #32
+ adcs r2, r2, r8
+ adcs r3, r3, r9
+ stm r0, {r1, r2, r3}
+ mov r0, #0
+ adc r0, r0, #0
+ add sp, sp, #20
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end165:
+ .size mcl_fp_addPre11L, .Lfunc_end165-mcl_fp_addPre11L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_subPre11L
+ .align 2
+ .type mcl_fp_subPre11L,%function
+mcl_fp_subPre11L: @ @mcl_fp_subPre11L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #20
+ sub sp, sp, #20
+ ldmib r2, {r8, r12, lr}
+ ldr r3, [r2, #16]
+ ldr r7, [r2]
+ ldr r6, [r1]
+ ldr r5, [r1, #4]
+ ldr r4, [r1, #8]
+ ldr r11, [r2, #32]
+ ldr r10, [r2, #40]
+ ldr r9, [r1, #36]
+ str r3, [sp, #4] @ 4-byte Spill
+ ldr r3, [r2, #20]
+ subs r6, r6, r7
+ ldr r7, [r2, #36]
+ sbcs r5, r5, r8
+ ldr r8, [r1, #40]
+ sbcs r4, r4, r12
+ str r3, [sp, #8] @ 4-byte Spill
+ ldr r3, [r2, #24]
+ str r7, [sp] @ 4-byte Spill
+ ldr r7, [r1, #32]
+ str r3, [sp, #12] @ 4-byte Spill
+ ldr r3, [r2, #28]
+ str r3, [sp, #16] @ 4-byte Spill
+ ldr r3, [r1, #12]
+ sbcs r12, r3, lr
+ add lr, r1, #16
+ ldm lr, {r1, r2, r3, lr}
+ str r6, [r0]
+ str r5, [r0, #4]
+ str r4, [r0, #8]
+ ldr r4, [sp, #4] @ 4-byte Reload
+ ldr r6, [sp, #8] @ 4-byte Reload
+ str r12, [r0, #12]
+ sbcs r1, r1, r4
+ str r1, [r0, #16]
+ ldr r1, [sp, #12] @ 4-byte Reload
+ sbcs r2, r2, r6
+ str r2, [r0, #20]
+ ldr r2, [sp] @ 4-byte Reload
+ sbcs r1, r3, r1
+ str r1, [r0, #24]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ sbcs r1, lr, r1
+ str r1, [r0, #28]
+ sbcs r1, r7, r11
+ add r0, r0, #32
+ sbcs r2, r9, r2
+ sbcs r3, r8, r10
+ stm r0, {r1, r2, r3}
+ mov r0, #0
+ sbc r0, r0, #0
+ and r0, r0, #1
+ add sp, sp, #20
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end166:
+ .size mcl_fp_subPre11L, .Lfunc_end166-mcl_fp_subPre11L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_shr1_11L
+ .align 2
+ .type mcl_fp_shr1_11L,%function
+mcl_fp_shr1_11L: @ @mcl_fp_shr1_11L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ ldmib r1, {r2, r3, r12, lr}
+ add r8, r1, #20
+ add r11, r1, #32
+ ldm r8, {r4, r5, r8}
+ ldr r7, [r1]
+ ldm r11, {r9, r10, r11}
+ lsrs r1, r12, #1
+ lsr r6, r2, #1
+ rrx r1, r3
+ lsrs r2, r2, #1
+ orr r6, r6, r3, lsl #31
+ lsr r3, r11, #1
+ rrx r2, r7
+ stm r0, {r2, r6}
+ str r1, [r0, #8]
+ lsr r1, r12, #1
+ lsr r2, r10, #1
+ orr r1, r1, lr, lsl #31
+ orr r2, r2, r11, lsl #31
+ str r1, [r0, #12]
+ lsrs r1, r4, #1
+ rrx r1, lr
+ str r1, [r0, #16]
+ lsr r1, r4, #1
+ orr r1, r1, r5, lsl #31
+ str r1, [r0, #20]
+ lsrs r1, r8, #1
+ rrx r1, r5
+ str r1, [r0, #24]
+ lsr r1, r8, #1
+ orr r1, r1, r9, lsl #31
+ str r1, [r0, #28]
+ lsrs r1, r10, #1
+ add r0, r0, #32
+ rrx r1, r9
+ stm r0, {r1, r2, r3}
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end167:
+ .size mcl_fp_shr1_11L, .Lfunc_end167-mcl_fp_shr1_11L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_add11L
+ .align 2
+ .type mcl_fp_add11L,%function
+mcl_fp_add11L: @ @mcl_fp_add11L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #32
+ sub sp, sp, #32
+ ldm r1, {r12, lr}
+ ldr r5, [r2]
+ ldr r8, [r1, #8]
+ ldr r9, [r1, #12]
+ ldmib r2, {r4, r6, r7}
+ adds r5, r5, r12
+ ldr r12, [r1, #32]
+ adcs r4, r4, lr
+ str r5, [sp, #28] @ 4-byte Spill
+ ldr r5, [r1, #24]
+ ldr lr, [r1, #40]
+ adcs r6, r6, r8
+ str r4, [sp, #24] @ 4-byte Spill
+ ldr r4, [r1, #20]
+ adcs r7, r7, r9
+ str r6, [sp, #12] @ 4-byte Spill
+ ldr r6, [r1, #16]
+ str r7, [sp, #8] @ 4-byte Spill
+ ldr r7, [r2, #16]
+ adcs r9, r7, r6
+ ldr r7, [r2, #20]
+ str r9, [sp] @ 4-byte Spill
+ adcs r7, r7, r4
+ ldr r4, [r2, #24]
+ str r7, [sp, #4] @ 4-byte Spill
+ adcs r8, r4, r5
+ ldr r4, [r1, #28]
+ ldr r5, [r2, #28]
+ adcs r6, r5, r4
+ ldr r5, [r2, #32]
+ ldr r4, [r1, #36]
+ ldr r1, [r2, #36]
+ ldr r2, [r2, #40]
+ adcs r10, r5, r12
+ ldr r12, [sp, #24] @ 4-byte Reload
+ adcs r1, r1, r4
+ ldr r4, [sp, #8] @ 4-byte Reload
+ adcs r11, r2, lr
+ ldr r2, [sp, #28] @ 4-byte Reload
+ ldr lr, [sp, #12] @ 4-byte Reload
+ str r1, [sp, #20] @ 4-byte Spill
+ str r2, [r0]
+ str r12, [r0, #4]
+ str lr, [r0, #8]
+ str r4, [r0, #12]
+ str r9, [r0, #16]
+ str r7, [r0, #20]
+ str r8, [r0, #24]
+ str r6, [r0, #28]
+ str r10, [r0, #32]
+ str r1, [r0, #36]
+ mov r1, #0
+ str r11, [r0, #40]
+ mov r9, r6
+ adc r1, r1, #0
+ str r1, [sp, #16] @ 4-byte Spill
+ ldm r3, {r1, r7}
+ ldr r5, [r3, #8]
+ ldr r6, [r3, #12]
+ subs r1, r2, r1
+ ldr r2, [sp] @ 4-byte Reload
+ str r1, [sp, #28] @ 4-byte Spill
+ sbcs r1, r12, r7
+ str r1, [sp, #24] @ 4-byte Spill
+ sbcs r1, lr, r5
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [r3, #16]
+ sbcs r5, r4, r6
+ sbcs r7, r2, r1
+ ldr r1, [r3, #20]
+ ldr r2, [sp, #4] @ 4-byte Reload
+ sbcs r4, r2, r1
+ ldr r1, [r3, #24]
+ sbcs r12, r8, r1
+ ldr r1, [r3, #28]
+ add r3, r3, #32
+ sbcs lr, r9, r1
+ ldm r3, {r1, r2, r3}
+ ldr r6, [sp, #20] @ 4-byte Reload
+ sbcs r1, r10, r1
+ sbcs r2, r6, r2
+ ldr r6, [sp, #16] @ 4-byte Reload
+ sbcs r3, r11, r3
+ sbc r6, r6, #0
+ tst r6, #1
+ bne .LBB168_2
+@ BB#1: @ %nocarry
+ ldr r6, [sp, #28] @ 4-byte Reload
+ str r6, [r0]
+ ldr r6, [sp, #24] @ 4-byte Reload
+ str r6, [r0, #4]
+ ldr r6, [sp, #12] @ 4-byte Reload
+ str r6, [r0, #8]
+ str r5, [r0, #12]
+ str r7, [r0, #16]
+ str r4, [r0, #20]
+ str r12, [r0, #24]
+ str lr, [r0, #28]
+ add r0, r0, #32
+ stm r0, {r1, r2, r3}
+.LBB168_2: @ %carry
+ add sp, sp, #32
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end168:
+ .size mcl_fp_add11L, .Lfunc_end168-mcl_fp_add11L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_addNF11L
+ .align 2
+ .type mcl_fp_addNF11L,%function
+mcl_fp_addNF11L: @ @mcl_fp_addNF11L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #56
+ sub sp, sp, #56
+ ldm r1, {r5, r8, lr}
+ ldr r6, [r2]
+ ldr r12, [r1, #12]
+ ldmib r2, {r4, r7, r9}
+ ldr r11, [r1, #24]
+ adds r10, r6, r5
+ adcs r4, r4, r8
+ ldr r8, [r1, #20]
+ adcs r7, r7, lr
+ str r4, [sp, #32] @ 4-byte Spill
+ ldr r4, [r2, #16]
+ ldr lr, [r1, #36]
+ str r7, [sp, #40] @ 4-byte Spill
+ ldr r7, [r1, #16]
+ adcs r6, r9, r12
+ ldr r12, [r2, #36]
+ str r6, [sp, #16] @ 4-byte Spill
+ adcs r7, r4, r7
+ ldr r4, [r2, #28]
+ str r7, [sp, #44] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ adcs r7, r7, r8
+ str r7, [sp, #52] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ adcs r8, r7, r11
+ ldr r7, [r1, #28]
+ ldr r11, [r1, #40]
+ str r8, [sp, #20] @ 4-byte Spill
+ adcs r7, r4, r7
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [r1, #32]
+ ldr r1, [r2, #32]
+ ldr r2, [r2, #40]
+ adcs r4, r1, r7
+ adcs r1, r12, lr
+ str r4, [sp, #24] @ 4-byte Spill
+ str r1, [sp, #48] @ 4-byte Spill
+ adc r9, r2, r11
+ ldmib r3, {r1, r2, lr}
+ ldr r5, [r3, #20]
+ ldr r11, [r3]
+ ldr r7, [r3, #16]
+ ldr r12, [r3, #24]
+ str r5, [sp, #12] @ 4-byte Spill
+ ldr r5, [r3, #28]
+ subs r11, r10, r11
+ str r5, [sp, #28] @ 4-byte Spill
+ ldr r5, [sp, #32] @ 4-byte Reload
+ sbcs r1, r5, r1
+ ldr r5, [sp, #40] @ 4-byte Reload
+ sbcs r2, r5, r2
+ ldr r5, [r3, #32]
+ sbcs lr, r6, lr
+ ldr r6, [sp, #36] @ 4-byte Reload
+ str r5, [sp, #8] @ 4-byte Spill
+ ldr r5, [r3, #36]
+ ldr r3, [r3, #40]
+ str r3, [sp, #4] @ 4-byte Spill
+ ldr r3, [sp, #44] @ 4-byte Reload
+ str r5, [sp] @ 4-byte Spill
+ ldr r5, [sp, #12] @ 4-byte Reload
+ sbcs r7, r3, r7
+ ldr r3, [sp, #52] @ 4-byte Reload
+ sbcs r3, r3, r5
+ ldr r5, [sp, #28] @ 4-byte Reload
+ sbcs r12, r8, r12
+ sbcs r8, r6, r5
+ ldr r5, [sp, #8] @ 4-byte Reload
+ sbcs r4, r4, r5
+ ldr r5, [sp] @ 4-byte Reload
+ str r4, [sp, #12] @ 4-byte Spill
+ ldr r4, [sp, #48] @ 4-byte Reload
+ sbcs r4, r4, r5
+ ldr r5, [sp, #32] @ 4-byte Reload
+ str r4, [sp, #28] @ 4-byte Spill
+ ldr r4, [sp, #4] @ 4-byte Reload
+ sbc r6, r9, r4
+ asr r4, r6, #31
+ cmp r4, #0
+ movlt r11, r10
+ movlt r1, r5
+ str r11, [r0]
+ str r1, [r0, #4]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ movlt r2, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ cmp r4, #0
+ str r2, [r0, #8]
+ ldr r2, [sp, #28] @ 4-byte Reload
+ movlt lr, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str lr, [r0, #12]
+ movlt r7, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r7, [r0, #16]
+ movlt r3, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ cmp r4, #0
+ str r3, [r0, #20]
+ ldr r3, [sp, #12] @ 4-byte Reload
+ movlt r12, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r12, [r0, #24]
+ movlt r8, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r8, [r0, #28]
+ movlt r3, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ cmp r4, #0
+ movlt r6, r9
+ str r3, [r0, #32]
+ movlt r2, r1
+ str r2, [r0, #36]
+ str r6, [r0, #40]
+ add sp, sp, #56
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end169:
+ .size mcl_fp_addNF11L, .Lfunc_end169-mcl_fp_addNF11L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_sub11L
+ .align 2
+ .type mcl_fp_sub11L,%function
+mcl_fp_sub11L: @ @mcl_fp_sub11L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #48
+ sub sp, sp, #48
+ mov r10, r3
+ ldr r12, [r2]
+ ldr r9, [r2, #4]
+ ldr r8, [r2, #8]
+ ldr r3, [r2, #12]
+ ldm r1, {r4, r5, r6, r7}
+ subs r4, r4, r12
+ sbcs r5, r5, r9
+ str r4, [sp, #36] @ 4-byte Spill
+ ldr r4, [r2, #24]
+ sbcs r6, r6, r8
+ str r5, [sp, #44] @ 4-byte Spill
+ ldr r5, [r2, #20]
+ add r8, r1, #32
+ sbcs r12, r7, r3
+ str r6, [sp, #40] @ 4-byte Spill
+ ldr r6, [r2, #16]
+ ldr r7, [r1, #16]
+ ldr r3, [sp, #36] @ 4-byte Reload
+ str r12, [sp, #24] @ 4-byte Spill
+ sbcs r11, r7, r6
+ ldr r6, [r1, #20]
+ ldr r7, [r2, #40]
+ sbcs r9, r6, r5
+ ldr r5, [r1, #24]
+ sbcs r6, r5, r4
+ ldr r4, [r2, #28]
+ ldr r5, [r1, #28]
+ str r6, [sp, #28] @ 4-byte Spill
+ sbcs lr, r5, r4
+ ldr r4, [r2, #36]
+ ldr r5, [r2, #32]
+ str lr, [sp, #20] @ 4-byte Spill
+ str r4, [sp, #32] @ 4-byte Spill
+ ldm r8, {r2, r4, r8}
+ str r3, [r0]
+ sbcs r1, r2, r5
+ ldr r2, [sp, #32] @ 4-byte Reload
+ sbcs r2, r4, r2
+ mov r4, r3
+ ldr r3, [sp, #44] @ 4-byte Reload
+ sbcs r8, r8, r7
+ mov r7, #0
+ sbc r7, r7, #0
+ tst r7, #1
+ str r3, [r0, #4]
+ ldr r3, [sp, #40] @ 4-byte Reload
+ str r3, [r0, #8]
+ add r3, r0, #32
+ str r12, [r0, #12]
+ str r11, [r0, #16]
+ str r9, [r0, #20]
+ str r6, [r0, #24]
+ str lr, [r0, #28]
+ stm r3, {r1, r2, r8}
+ beq .LBB170_2
+@ BB#1: @ %carry
+ ldr r3, [r10, #32]
+ str r3, [sp, #12] @ 4-byte Spill
+ ldr r3, [r10, #36]
+ str r3, [sp, #16] @ 4-byte Spill
+ ldr r3, [r10, #40]
+ str r3, [sp, #32] @ 4-byte Spill
+ ldmib r10, {r5, lr}
+ ldr r3, [r10, #20]
+ ldr r6, [sp, #44] @ 4-byte Reload
+ ldr r7, [r10, #12]
+ ldr r12, [r10, #16]
+ str r3, [sp] @ 4-byte Spill
+ ldr r3, [r10, #24]
+ str r3, [sp, #4] @ 4-byte Spill
+ ldr r3, [r10, #28]
+ str r3, [sp, #8] @ 4-byte Spill
+ ldr r3, [r10]
+ adds r3, r3, r4
+ ldr r4, [sp, #40] @ 4-byte Reload
+ adcs r5, r5, r6
+ stm r0, {r3, r5}
+ ldr r3, [sp, #24] @ 4-byte Reload
+ adcs r4, lr, r4
+ str r4, [r0, #8]
+ adcs r3, r7, r3
+ ldr r7, [sp, #4] @ 4-byte Reload
+ str r3, [r0, #12]
+ adcs r3, r12, r11
+ str r3, [r0, #16]
+ ldr r3, [sp] @ 4-byte Reload
+ adcs r3, r3, r9
+ str r3, [r0, #20]
+ ldr r3, [sp, #28] @ 4-byte Reload
+ adcs r3, r7, r3
+ ldr r7, [sp, #8] @ 4-byte Reload
+ str r3, [r0, #24]
+ ldr r3, [sp, #20] @ 4-byte Reload
+ adcs r3, r7, r3
+ str r3, [r0, #28]
+ ldr r3, [sp, #12] @ 4-byte Reload
+ add r0, r0, #32
+ adcs r1, r3, r1
+ ldr r3, [sp, #16] @ 4-byte Reload
+ adcs r2, r3, r2
+ ldr r3, [sp, #32] @ 4-byte Reload
+ adc r3, r3, r8
+ stm r0, {r1, r2, r3}
+.LBB170_2: @ %nocarry
+ add sp, sp, #48
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end170:
+ .size mcl_fp_sub11L, .Lfunc_end170-mcl_fp_sub11L
+ .cantunwind
+ .fnend
+
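+@ mcl_fp_subNF11L(z, x, y, p): 11-limb subtraction, "NF" variant.
+@ Computes d = x - y and d + p in full, then uses the sign of the top limb
+@ (asr #31 / movge) to select d when it is non-negative and d + p otherwise.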
+ .globl mcl_fp_subNF11L
+ .align 2
+ .type mcl_fp_subNF11L,%function
+mcl_fp_subNF11L: @ @mcl_fp_subNF11L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #68
+ sub sp, sp, #68
+ mov r12, r0
+ ldr r0, [r2, #32]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [r2, #36]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r2, #40]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [r1, #32]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r1, #36]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r1, #40]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldm r2, {r8, r10}
+ ldr r0, [r2, #8]
+ ldr r5, [r2, #16]
+ ldr r11, [r2, #20]
+ ldr lr, [r1, #16]
+ ldr r6, [r1, #20]
+ ldr r9, [r1, #24]
+ ldr r7, [r1, #28]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r2, #12]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r2, #24]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [r2, #28]
+ ldr r2, [r1, #8]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [r1, #12]
+ ldm r1, {r1, r4}
+ subs r1, r1, r8
+ sbcs r8, r4, r10
+ ldr r4, [sp, #32] @ 4-byte Reload
+ str r8, [sp, #16] @ 4-byte Spill
+ sbcs r2, r2, r4
+ str r2, [sp, #24] @ 4-byte Spill
+ ldr r2, [sp, #28] @ 4-byte Reload
+ sbcs r4, r0, r2
+ ldr r0, [sp, #52] @ 4-byte Reload
+ ldr r2, [sp, #36] @ 4-byte Reload
+ sbcs r5, lr, r5
+ ldr lr, [r3, #12]
+ str r4, [sp, #20] @ 4-byte Spill
+ sbcs r11, r6, r11
+ mov r6, r1
+ str r5, [sp, #28] @ 4-byte Spill
+ str r11, [sp, #32] @ 4-byte Spill
+ sbcs r0, r9, r0
+ ldr r9, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ sbcs r0, r7, r0
+ ldr r7, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ sbcs r0, r2, r0
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ sbcs r10, r2, r0
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r10, [sp, #48] @ 4-byte Spill
+ sbc r0, r7, r2
+ ldr r2, [r3, #36]
+ ldr r7, [r3, #4]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [r3, #32]
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [r3, #40]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r3, #16]
+ str r2, [sp, #36] @ 4-byte Spill
+ ldr r2, [r3, #8]
+ str r0, [sp] @ 4-byte Spill
+ ldr r0, [r3, #20]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [r3, #24]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [r3, #28]
+ ldr r3, [r3]
+ adds r1, r6, r3
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp] @ 4-byte Reload
+ ldr r3, [sp, #8] @ 4-byte Reload
+ adcs r7, r8, r7
+ adcs r2, r9, r2
+ adcs lr, r4, lr
+ adcs r4, r5, r0
+ ldr r0, [sp, #4] @ 4-byte Reload
+ adcs r5, r11, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r8, r0, r3
+ ldr r3, [sp, #64] @ 4-byte Reload
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r11, r3, r0
+ ldr r3, [sp, #60] @ 4-byte Reload
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r3, r3, r0
+ str r3, [sp, #40] @ 4-byte Spill
+ ldr r3, [sp, #44] @ 4-byte Reload
+ adcs r0, r10, r3
+ ldr r3, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r10, r0, r3
+ asr r3, r0, #31
+ ldr r0, [sp, #16] @ 4-byte Reload
+ cmp r3, #0
+ movge r1, r6
+ movge r2, r9
+ str r1, [r12]
+ ldr r1, [sp, #60] @ 4-byte Reload
+ movge r7, r0
+ ldr r0, [sp, #20] @ 4-byte Reload
+ cmp r3, #0
+ str r7, [r12, #4]
+ str r2, [r12, #8]
+ ldr r2, [sp, #48] @ 4-byte Reload
+ movge lr, r0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ str lr, [r12, #12]
+ movge r4, r0
+ ldr r0, [sp, #32] @ 4-byte Reload
+ str r4, [r12, #16]
+ movge r5, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ cmp r3, #0
+ str r5, [r12, #20]
+ movge r8, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ str r8, [r12, #24]
+ movge r11, r0
+ ldr r0, [sp, #40] @ 4-byte Reload
+ movge r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ cmp r3, #0
+ str r11, [r12, #28]
+ movge r1, r2
+ ldr r2, [sp, #56] @ 4-byte Reload
+ movge r10, r2
+ add r2, r12, #32
+ stm r2, {r0, r1, r10}
+ add sp, sp, #68
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end171:
+ .size mcl_fp_subNF11L, .Lfunc_end171-mcl_fp_subNF11L
+ .cantunwind
+ .fnend
+
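+@ mcl_fpDbl_add11L(z, x, y, p): addition of double-width (22-limb) operands.
+@ The low 11 limbs are added and stored directly; the high half is then
+@ conditionally reduced by subtracting the modulus p (selection via movne).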
+ .globl mcl_fpDbl_add11L
+ .align 2
+ .type mcl_fpDbl_add11L,%function
+mcl_fpDbl_add11L: @ @mcl_fpDbl_add11L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #120
+ sub sp, sp, #120
+ ldm r1, {r7, r12, lr}
+ ldr r8, [r1, #12]
+ ldm r2, {r4, r5, r6, r9}
+ ldr r10, [r2, #20]
+ adds r4, r4, r7
+ adcs r7, r5, r12
+ str r4, [sp, #40] @ 4-byte Spill
+ ldr r4, [r2, #64]
+ str r7, [sp, #28] @ 4-byte Spill
+ adcs r7, r6, lr
+ add lr, r1, #16
+ str r7, [sp, #24] @ 4-byte Spill
+ adcs r7, r9, r8
+ add r8, r1, #32
+ str r7, [sp, #32] @ 4-byte Spill
+ ldr r7, [r2, #32]
+ str r4, [sp, #108] @ 4-byte Spill
+ ldr r4, [r2, #68]
+ str r7, [sp, #68] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r4, [sp, #104] @ 4-byte Spill
+ ldr r4, [r2, #72]
+ str r7, [sp, #72] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ str r4, [sp, #96] @ 4-byte Spill
+ ldr r4, [r2, #76]
+ str r7, [sp, #76] @ 4-byte Spill
+ ldr r7, [r2, #44]
+ str r4, [sp, #116] @ 4-byte Spill
+ ldr r4, [r2, #80]
+ str r7, [sp, #84] @ 4-byte Spill
+ ldr r7, [r2, #48]
+ str r4, [sp, #100] @ 4-byte Spill
+ ldr r4, [r2, #84]
+ str r7, [sp, #88] @ 4-byte Spill
+ ldr r7, [r2, #56]
+ str r4, [sp, #112] @ 4-byte Spill
+ ldr r4, [r2, #16]
+ str r7, [sp, #92] @ 4-byte Spill
+ ldr r7, [r2, #60]
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [r2, #52]
+ str r7, [sp, #80] @ 4-byte Spill
+ ldr r7, [r2, #28]
+ str r7, [sp, #20] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ ldr r2, [r1, #64]
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [r1, #68]
+ str r7, [sp, #16] @ 4-byte Spill
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r2, [r1, #72]
+ str r2, [sp, #52] @ 4-byte Spill
+ ldr r2, [r1, #76]
+ str r2, [sp, #56] @ 4-byte Spill
+ ldr r2, [r1, #80]
+ str r2, [sp, #60] @ 4-byte Spill
+ ldr r2, [r1, #84]
+ str r2, [sp, #64] @ 4-byte Spill
+ ldm r8, {r5, r6, r8}
+ ldr r2, [r1, #44]
+ ldr r11, [r1, #52]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #48]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r1, #56]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldm lr, {r1, r2, r12, lr}
+ ldr r9, [sp, #40] @ 4-byte Reload
+ ldr r7, [sp, #28] @ 4-byte Reload
+ adcs r1, r4, r1
+ str r9, [r0]
+ str r7, [r0, #4]
+ ldr r7, [sp, #24] @ 4-byte Reload
+ ldr r4, [sp, #32] @ 4-byte Reload
+ adcs r2, r10, r2
+ add r10, r3, #32
+ str r7, [r0, #8]
+ str r4, [r0, #12]
+ str r1, [r0, #16]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r2, [r0, #20]
+ ldr r2, [sp, #20] @ 4-byte Reload
+ ldr r7, [sp] @ 4-byte Reload
+ adcs r1, r1, r12
+ str r1, [r0, #24]
+ adcs r2, r2, lr
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r2, [r0, #28]
+ ldr r2, [sp, #72] @ 4-byte Reload
+ adcs r1, r1, r5
+ str r1, [r0, #32]
+ adcs r2, r2, r6
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str r2, [r0, #36]
+ ldr r2, [sp, #84] @ 4-byte Reload
+ adcs r1, r1, r8
+ adcs r6, r2, r7
+ str r1, [r0, #40]
+ ldr r1, [sp, #88] @ 4-byte Reload
+ ldr r2, [sp, #4] @ 4-byte Reload
+ ldr r7, [sp, #8] @ 4-byte Reload
+ str r6, [sp, #72] @ 4-byte Spill
+ adcs r4, r1, r2
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r4, [sp, #76] @ 4-byte Spill
+ adcs r2, r1, r11
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r2, [sp, #80] @ 4-byte Spill
+ adcs r5, r1, r7
+ ldr r1, [sp, #36] @ 4-byte Reload
+ ldr r7, [sp, #12] @ 4-byte Reload
+ str r5, [sp, #92] @ 4-byte Spill
+ adcs r8, r1, r7
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r7, [sp, #44] @ 4-byte Reload
+ str r8, [sp, #84] @ 4-byte Spill
+ adcs r1, r1, r7
+ ldr r7, [sp, #48] @ 4-byte Reload
+ str r1, [sp, #108] @ 4-byte Spill
+ ldr r1, [sp, #104] @ 4-byte Reload
+ adcs r1, r1, r7
+ ldr r7, [sp, #52] @ 4-byte Reload
+ str r1, [sp, #104] @ 4-byte Spill
+ ldr r1, [sp, #96] @ 4-byte Reload
+ adcs r12, r1, r7
+ ldr r1, [sp, #116] @ 4-byte Reload
+ ldr r7, [sp, #56] @ 4-byte Reload
+ str r12, [sp, #96] @ 4-byte Spill
+ adcs r1, r1, r7
+ ldr r7, [sp, #60] @ 4-byte Reload
+ str r1, [sp, #116] @ 4-byte Spill
+ ldr r1, [sp, #100] @ 4-byte Reload
+ adcs r1, r1, r7
+ ldr r7, [sp, #64] @ 4-byte Reload
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [sp, #112] @ 4-byte Reload
+ adcs r1, r1, r7
+ str r1, [sp, #112] @ 4-byte Spill
+ mov r1, #0
+ adc r1, r1, #0
+ str r1, [sp, #88] @ 4-byte Spill
+ ldmib r3, {r1, r9, lr}
+ ldr r7, [r3, #16]
+ ldr r11, [r3]
+ str r7, [sp, #56] @ 4-byte Spill
+ ldr r7, [r3, #20]
+ subs r11, r6, r11
+ sbcs r1, r4, r1
+ sbcs r4, r2, r9
+ sbcs r2, r5, lr
+ str r7, [sp, #60] @ 4-byte Spill
+ ldr r7, [r3, #24]
+ str r7, [sp, #64] @ 4-byte Spill
+ ldr r7, [r3, #28]
+ str r7, [sp, #68] @ 4-byte Spill
+ ldm r10, {r5, r9, r10}
+ ldr r3, [sp, #56] @ 4-byte Reload
+ ldr r6, [sp, #60] @ 4-byte Reload
+ sbcs r7, r8, r3
+ ldr r3, [sp, #108] @ 4-byte Reload
+ sbcs r8, r3, r6
+ ldr r3, [sp, #104] @ 4-byte Reload
+ ldr r6, [sp, #64] @ 4-byte Reload
+ sbcs r3, r3, r6
+ ldr r6, [sp, #68] @ 4-byte Reload
+ sbcs r12, r12, r6
+ ldr r6, [sp, #116] @ 4-byte Reload
+ sbcs lr, r6, r5
+ ldr r5, [sp, #100] @ 4-byte Reload
+ ldr r6, [sp, #112] @ 4-byte Reload
+ sbcs r9, r5, r9
+ ldr r5, [sp, #72] @ 4-byte Reload
+ sbcs r10, r6, r10
+ ldr r6, [sp, #88] @ 4-byte Reload
+ sbc r6, r6, #0
+ ands r6, r6, #1
+ movne r11, r5
+ ldr r5, [sp, #76] @ 4-byte Reload
+ str r11, [r0, #44]
+ movne r1, r5
+ str r1, [r0, #48]
+ ldr r1, [sp, #80] @ 4-byte Reload
+ movne r4, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ cmp r6, #0
+ str r4, [r0, #52]
+ movne r2, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r2, [r0, #56]
+ movne r7, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r7, [r0, #60]
+ movne r8, r1
+ ldr r1, [sp, #104] @ 4-byte Reload
+ cmp r6, #0
+ str r8, [r0, #64]
+ movne r3, r1
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r3, [r0, #68]
+ movne r12, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r12, [r0, #72]
+ movne lr, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ cmp r6, #0
+ str lr, [r0, #76]
+ movne r9, r1
+ ldr r1, [sp, #112] @ 4-byte Reload
+ str r9, [r0, #80]
+ movne r10, r1
+ str r10, [r0, #84]
+ add sp, sp, #120
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end172:
+ .size mcl_fpDbl_add11L, .Lfunc_end172-mcl_fpDbl_add11L
+ .cantunwind
+ .fnend
+
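+@ mcl_fpDbl_sub11L(z, x, y, p): subtraction of double-width (22-limb) operands.
+@ The low 11 limbs of x - y are stored directly; if the full subtraction borrows,
+@ the modulus p is added back into the high half (selection via moveq).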
+ .globl mcl_fpDbl_sub11L
+ .align 2
+ .type mcl_fpDbl_sub11L,%function
+mcl_fpDbl_sub11L: @ @mcl_fpDbl_sub11L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #120
+ sub sp, sp, #120
+ ldr r7, [r2, #64]
+ str r7, [sp, #96] @ 4-byte Spill
+ ldr r7, [r2, #68]
+ str r7, [sp, #100] @ 4-byte Spill
+ ldr r7, [r2, #72]
+ str r7, [sp, #104] @ 4-byte Spill
+ ldr r7, [r2, #76]
+ str r7, [sp, #112] @ 4-byte Spill
+ ldr r7, [r2, #80]
+ str r7, [sp, #108] @ 4-byte Spill
+ ldr r7, [r2, #84]
+ str r7, [sp, #116] @ 4-byte Spill
+ ldr r7, [r2, #60]
+ str r7, [sp, #92] @ 4-byte Spill
+ ldr r7, [r2, #56]
+ str r7, [sp, #88] @ 4-byte Spill
+ ldr r7, [r2, #52]
+ str r7, [sp, #84] @ 4-byte Spill
+ ldr r7, [r2, #48]
+ str r7, [sp, #80] @ 4-byte Spill
+ ldr r7, [r2, #44]
+ str r7, [sp, #76] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ str r7, [sp, #72] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r7, [sp, #68] @ 4-byte Spill
+ ldr r7, [r2]
+ ldmib r2, {r4, r8, r10}
+ ldm r1, {r5, r6, r12, lr}
+ ldr r9, [r2, #20]
+ subs r5, r5, r7
+ ldr r7, [r2, #24]
+ sbcs r4, r6, r4
+ str r5, [sp, #16] @ 4-byte Spill
+ ldr r5, [r2, #32]
+ str r4, [sp, #8] @ 4-byte Spill
+ ldr r4, [r2, #28]
+ sbcs r8, r12, r8
+ str r7, [sp, #32] @ 4-byte Spill
+ sbcs r7, lr, r10
+ add r10, r1, #32
+ add lr, r1, #16
+ str r5, [sp, #40] @ 4-byte Spill
+ str r7, [sp] @ 4-byte Spill
+ str r4, [sp, #36] @ 4-byte Spill
+ ldr r4, [r2, #16]
+ ldr r2, [r1, #64]
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [r1, #68]
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r2, [r1, #72]
+ str r2, [sp, #52] @ 4-byte Spill
+ ldr r2, [r1, #76]
+ str r2, [sp, #56] @ 4-byte Spill
+ ldr r2, [r1, #80]
+ str r2, [sp, #60] @ 4-byte Spill
+ ldr r2, [r1, #84]
+ str r2, [sp, #64] @ 4-byte Spill
+ ldm r10, {r5, r6, r10}
+ ldr r2, [r1, #44]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [r1, #48]
+ str r2, [sp, #20] @ 4-byte Spill
+ ldr r2, [r1, #56]
+ str r2, [sp, #24] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #28] @ 4-byte Spill
+ ldr r2, [r1, #52]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldm lr, {r1, r2, r12, lr}
+ ldr r11, [sp, #16] @ 4-byte Reload
+ ldr r7, [sp, #8] @ 4-byte Reload
+ str r11, [r0]
+ stmib r0, {r7, r8}
+ sbcs r1, r1, r4
+ mov r8, #0
+ ldr r4, [sp] @ 4-byte Reload
+ sbcs r2, r2, r9
+ ldr r7, [sp, #4] @ 4-byte Reload
+ str r4, [r0, #12]
+ str r1, [r0, #16]
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r2, [r0, #20]
+ ldr r2, [sp, #36] @ 4-byte Reload
+ sbcs r1, r12, r1
+ str r1, [r0, #24]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ sbcs r2, lr, r2
+ str r2, [r0, #28]
+ ldr r2, [sp, #68] @ 4-byte Reload
+ sbcs r1, r5, r1
+ str r1, [r0, #32]
+ ldr r1, [sp, #72] @ 4-byte Reload
+ sbcs r2, r6, r2
+ str r2, [r0, #36]
+ ldr r2, [sp, #12] @ 4-byte Reload
+ sbcs r1, r10, r1
+ str r1, [r0, #40]
+ ldr r1, [sp, #76] @ 4-byte Reload
+ sbcs r4, r2, r1
+ ldr r1, [sp, #80] @ 4-byte Reload
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r4, [sp, #40] @ 4-byte Spill
+ sbcs r2, r2, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r2, [sp, #68] @ 4-byte Spill
+ sbcs r9, r7, r1
+ ldr r1, [sp, #88] @ 4-byte Reload
+ ldr r7, [sp, #24] @ 4-byte Reload
+ sbcs r12, r7, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ ldr r7, [sp, #28] @ 4-byte Reload
+ str r12, [sp, #80] @ 4-byte Spill
+ sbcs lr, r7, r1
+ ldr r1, [sp, #96] @ 4-byte Reload
+ ldr r7, [sp, #44] @ 4-byte Reload
+ str lr, [sp, #84] @ 4-byte Spill
+ sbcs r5, r7, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r7, [sp, #48] @ 4-byte Reload
+ str r5, [sp, #96] @ 4-byte Spill
+ sbcs r6, r7, r1
+ ldr r1, [sp, #104] @ 4-byte Reload
+ ldr r7, [sp, #52] @ 4-byte Reload
+ str r6, [sp, #100] @ 4-byte Spill
+ sbcs r11, r7, r1
+ ldr r1, [sp, #112] @ 4-byte Reload
+ ldr r7, [sp, #56] @ 4-byte Reload
+ str r11, [sp, #104] @ 4-byte Spill
+ sbcs r1, r7, r1
+ ldr r7, [sp, #60] @ 4-byte Reload
+ str r1, [sp, #112] @ 4-byte Spill
+ ldr r1, [sp, #108] @ 4-byte Reload
+ sbcs r10, r7, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ ldr r7, [sp, #64] @ 4-byte Reload
+ str r10, [sp, #108] @ 4-byte Spill
+ sbcs r1, r7, r1
+ ldr r7, [r3, #4]
+ str r1, [sp, #116] @ 4-byte Spill
+ sbc r1, r8, #0
+ ldr r8, [r3, #28]
+ str r1, [sp, #92] @ 4-byte Spill
+ ldr r1, [r3, #32]
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [r3, #36]
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [r3, #40]
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [r3, #8]
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [r3, #12]
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [r3, #16]
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [r3, #20]
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [r3, #24]
+ ldr r3, [r3]
+ str r1, [sp, #64] @ 4-byte Spill
+ adds r1, r4, r3
+ ldr r3, [sp, #48] @ 4-byte Reload
+ ldr r4, [sp, #56] @ 4-byte Reload
+ adcs r7, r2, r7
+ ldr r2, [sp, #52] @ 4-byte Reload
+ adcs r2, r9, r2
+ adcs r3, r12, r3
+ adcs r12, lr, r4
+ ldr r4, [sp, #60] @ 4-byte Reload
+ adcs r4, r5, r4
+ ldr r5, [sp, #64] @ 4-byte Reload
+ adcs lr, r6, r5
+ ldr r6, [sp, #112] @ 4-byte Reload
+ ldr r5, [sp, #72] @ 4-byte Reload
+ adcs r8, r11, r8
+ adcs r11, r6, r5
+ ldr r6, [sp, #76] @ 4-byte Reload
+ ldr r5, [sp, #116] @ 4-byte Reload
+ adcs r10, r10, r6
+ ldr r6, [sp, #88] @ 4-byte Reload
+ adc r6, r5, r6
+ str r6, [sp, #88] @ 4-byte Spill
+ ldr r6, [sp, #92] @ 4-byte Reload
+ ands r5, r6, #1
+ ldr r6, [sp, #40] @ 4-byte Reload
+ moveq r2, r9
+ moveq r1, r6
+ str r1, [r0, #44]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ moveq r7, r1
+ ldr r1, [sp, #80] @ 4-byte Reload
+ cmp r5, #0
+ str r7, [r0, #48]
+ str r2, [r0, #52]
+ ldr r2, [sp, #88] @ 4-byte Reload
+ moveq r3, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r3, [r0, #56]
+ moveq r12, r1
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r12, [r0, #60]
+ moveq r4, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ cmp r5, #0
+ str r4, [r0, #64]
+ moveq lr, r1
+ ldr r1, [sp, #104] @ 4-byte Reload
+ str lr, [r0, #68]
+ moveq r8, r1
+ ldr r1, [sp, #112] @ 4-byte Reload
+ str r8, [r0, #72]
+ moveq r11, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ cmp r5, #0
+ str r11, [r0, #76]
+ moveq r10, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r10, [r0, #80]
+ moveq r2, r1
+ str r2, [r0, #84]
+ add sp, sp, #120
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end173:
+ .size mcl_fpDbl_sub11L, .Lfunc_end173-mcl_fpDbl_sub11L
+ .cantunwind
+ .fnend
+
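+@ .LmulPv384x32: local helper that multiplies the 12-limb (384-bit) value at r1
+@ by the 32-bit scalar in r2 using umull/umlal, writing the 13-limb product to r0.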
+ .align 2
+ .type .LmulPv384x32,%function
+.LmulPv384x32: @ @mulPv384x32
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r11, lr}
+ ldr r12, [r1]
+ ldmib r1, {r3, lr}
+ ldr r9, [r1, #12]
+ umull r4, r8, lr, r2
+ umull lr, r6, r12, r2
+ mov r5, r4
+ mov r7, r6
+ str lr, [r0]
+ umull lr, r12, r9, r2
+ umlal r7, r5, r3, r2
+ str r5, [r0, #8]
+ str r7, [r0, #4]
+ umull r5, r7, r3, r2
+ adds r3, r6, r5
+ adcs r3, r7, r4
+ adcs r3, r8, lr
+ str r3, [r0, #12]
+ ldr r3, [r1, #16]
+ umull r7, r6, r3, r2
+ adcs r3, r12, r7
+ str r3, [r0, #16]
+ ldr r3, [r1, #20]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #20]
+ ldr r3, [r1, #24]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #24]
+ ldr r3, [r1, #28]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #28]
+ ldr r3, [r1, #32]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #32]
+ ldr r3, [r1, #36]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #36]
+ ldr r3, [r1, #40]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #40]
+ ldr r1, [r1, #44]
+ umull r3, r7, r1, r2
+ adcs r1, r6, r3
+ str r1, [r0, #44]
+ adc r1, r7, #0
+ str r1, [r0, #48]
+ pop {r4, r5, r6, r7, r8, r9, r11, lr}
+ mov pc, lr
+.Lfunc_end174:
+ .size .LmulPv384x32, .Lfunc_end174-.LmulPv384x32
+ .cantunwind
+ .fnend
+
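+@ mcl_fp_mulUnitPre12L(z, x, y): multiplies the 12-limb value x by the 32-bit unit y
+@ via .LmulPv384x32 and copies the 13-limb result from the stack buffer into z.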
+ .globl mcl_fp_mulUnitPre12L
+ .align 2
+ .type mcl_fp_mulUnitPre12L,%function
+mcl_fp_mulUnitPre12L: @ @mcl_fp_mulUnitPre12L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #60
+ sub sp, sp, #60
+ mov r4, r0
+ mov r0, sp
+ bl .LmulPv384x32(PLT)
+ ldr r12, [sp, #48]
+ ldr lr, [sp, #44]
+ ldr r8, [sp, #40]
+ ldr r9, [sp, #36]
+ ldr r10, [sp, #32]
+ ldr r11, [sp, #28]
+ ldr r5, [sp, #24]
+ ldr r6, [sp, #20]
+ ldm sp, {r2, r3}
+ add r7, sp, #8
+ ldm r7, {r0, r1, r7}
+ stm r4, {r2, r3}
+ add r2, r4, #8
+ stm r2, {r0, r1, r7}
+ str r6, [r4, #20]
+ str r5, [r4, #24]
+ str r11, [r4, #28]
+ str r10, [r4, #32]
+ str r9, [r4, #36]
+ str r8, [r4, #40]
+ str lr, [r4, #44]
+ str r12, [r4, #48]
+ add sp, sp, #60
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end175:
+ .size mcl_fp_mulUnitPre12L, .Lfunc_end175-mcl_fp_mulUnitPre12L
+ .cantunwind
+ .fnend
+
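+@ mcl_fpDbl_mulPre12L(z, x, y): full 24-limb product of two 12-limb operands.
+@ Built Karatsuba-style from three calls to mcl_fpDbl_mulPre6L (low halves, high
+@ halves, and sums of halves), followed by add/subtract recombination of the middle term.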
+ .globl mcl_fpDbl_mulPre12L
+ .align 2
+ .type mcl_fpDbl_mulPre12L,%function
+mcl_fpDbl_mulPre12L: @ @mcl_fpDbl_mulPre12L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #196
+ sub sp, sp, #196
+ mov r6, r2
+ mov r5, r1
+ mov r4, r0
+ bl mcl_fpDbl_mulPre6L(PLT)
+ add r0, r4, #48
+ add r1, r5, #24
+ add r2, r6, #24
+ bl mcl_fpDbl_mulPre6L(PLT)
+ add lr, r6, #24
+ ldr r8, [r6, #40]
+ ldr r9, [r6, #44]
+ ldr r2, [r6, #16]
+ ldr r3, [r6, #20]
+ ldm lr, {r0, r1, r12, lr}
+ ldm r6, {r6, r7, r10, r11}
+ adds r0, r6, r0
+ adcs r1, r7, r1
+ str r0, [sp, #80] @ 4-byte Spill
+ adcs r12, r10, r12
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r10, [r5, #36]
+ adcs r0, r11, lr
+ add lr, r5, #8
+ str r12, [sp, #68] @ 4-byte Spill
+ str r0, [sp, #92] @ 4-byte Spill
+ adcs r0, r2, r8
+ str r0, [sp, #88] @ 4-byte Spill
+ adcs r0, r3, r9
+ ldr r9, [r5, #32]
+ str r0, [sp, #84] @ 4-byte Spill
+ mov r0, #0
+ adc r6, r0, #0
+ ldr r0, [r5, #40]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [r5, #44]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldm lr, {r3, r11, lr}
+ ldr r8, [r5, #20]
+ ldr r0, [r5, #24]
+ ldr r2, [r5, #28]
+ ldm r5, {r5, r7}
+ adds r0, r5, r0
+ ldr r5, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ str r0, [sp, #124]
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r7, r7, r2
+ add r2, sp, #100
+ adcs r9, r3, r9
+ str r7, [sp, #128]
+ adcs r11, r11, r10
+ str r9, [sp, #132]
+ str r5, [sp, #100]
+ str r1, [sp, #104]
+ str r12, [sp, #108]
+ add r1, sp, #124
+ str r11, [sp, #136]
+ adcs r10, lr, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ str r10, [sp, #140]
+ adcs r8, r8, r0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ str r8, [sp, #144]
+ str r0, [sp, #112]
+ ldr r0, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #116]
+ ldr r0, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #120]
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ add r0, sp, #148
+ bl mcl_fpDbl_mulPre6L(PLT)
+ cmp r6, #0
+ ldr r0, [sp, #96] @ 4-byte Reload
+ ldr r3, [sp, #92] @ 4-byte Reload
+ moveq r8, r6
+ moveq r10, r6
+ moveq r11, r6
+ moveq r9, r6
+ moveq r7, r6
+ cmp r6, #0
+ moveq r0, r6
+ adds r2, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r5, [sp, #88] @ 4-byte Reload
+ adcs r1, r7, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r12, r9, r0
+ adcs r3, r11, r3
+ adcs lr, r10, r5
+ ldr r5, [sp, #84] @ 4-byte Reload
+ adcs r0, r8, r5
+ str r0, [sp, #92] @ 4-byte Spill
+ mov r0, #0
+ adc r5, r0, #0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ cmp r0, #0
+ and r6, r6, r0
+ moveq r1, r7
+ ldr r7, [sp, #96] @ 4-byte Reload
+ moveq r12, r9
+ ldr r9, [sp, #92] @ 4-byte Reload
+ moveq lr, r10
+ moveq r3, r11
+ moveq r2, r7
+ ldr r7, [sp, #172]
+ cmp r0, #0
+ moveq r9, r8
+ moveq r5, r0
+ adds r8, r2, r7
+ ldr r7, [sp, #176]
+ adcs r10, r1, r7
+ ldr r7, [sp, #180]
+ adcs r0, r12, r7
+ ldr r7, [sp, #184]
+ str r0, [sp, #96] @ 4-byte Spill
+ adcs r0, r3, r7
+ ldr r7, [sp, #188]
+ str r0, [sp, #92] @ 4-byte Spill
+ adcs r0, lr, r7
+ ldr r7, [sp, #192]
+ str r0, [sp, #84] @ 4-byte Spill
+ adcs r0, r9, r7
+ ldr r7, [r4]
+ str r0, [sp, #80] @ 4-byte Spill
+ adc r0, r5, r6
+ str r0, [sp, #76] @ 4-byte Spill
+ ldmib r4, {r6, r9, lr}
+ ldr r0, [sp, #148]
+ ldr r5, [sp, #152]
+ ldr r1, [sp, #156]
+ ldr r2, [sp, #160]
+ ldr r11, [r4, #24]
+ subs r3, r0, r7
+ ldr r0, [r4, #16]
+ sbcs r12, r5, r6
+ ldr r5, [r4, #68]
+ sbcs r6, r1, r9
+ ldr r1, [sp, #164]
+ ldr r9, [r4, #32]
+ sbcs r2, r2, lr
+ ldr lr, [r4, #72]
+ str r5, [sp, #56] @ 4-byte Spill
+ sbcs r7, r1, r0
+ ldr r0, [r4, #20]
+ ldr r1, [sp, #168]
+ sbcs r0, r1, r0
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ sbcs r0, r8, r11
+ ldr r8, [r4, #28]
+ str r0, [sp, #60] @ 4-byte Spill
+ sbcs r0, r10, r8
+ ldr r10, [r4, #52]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ sbcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r4, #36]
+ str r0, [sp, #96] @ 4-byte Spill
+ sbcs r0, r1, r0
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r4, #40]
+ str r0, [sp, #88] @ 4-byte Spill
+ sbcs r0, r1, r0
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r4, #44]
+ str r0, [sp, #92] @ 4-byte Spill
+ sbcs r0, r1, r0
+ ldr r1, [r4, #92]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ str r1, [sp, #84] @ 4-byte Spill
+ sbc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r4, #48]
+ str r0, [sp, #80] @ 4-byte Spill
+ subs r0, r3, r0
+ ldr r3, [r4, #80]
+ str r0, [sp, #24] @ 4-byte Spill
+ sbcs r0, r12, r10
+ ldr r12, [r4, #76]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [r4, #56]
+ str r0, [sp, #76] @ 4-byte Spill
+ sbcs r0, r6, r0
+ ldr r6, [r4, #64]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [r4, #60]
+ str r6, [sp, #44] @ 4-byte Spill
+ str r0, [sp, #72] @ 4-byte Spill
+ sbcs r0, r2, r0
+ ldr r2, [r4, #84]
+ sbcs r7, r7, r6
+ ldr r6, [sp, #64] @ 4-byte Reload
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [r4, #88]
+ str r2, [sp, #68] @ 4-byte Spill
+ sbcs r6, r6, r5
+ ldr r5, [sp, #60] @ 4-byte Reload
+ sbcs r5, r5, lr
+ str r5, [sp] @ 4-byte Spill
+ ldr r5, [sp, #52] @ 4-byte Reload
+ sbcs r5, r5, r12
+ str r5, [sp, #4] @ 4-byte Spill
+ ldr r5, [sp, #48] @ 4-byte Reload
+ sbcs r5, r5, r3
+ str r5, [sp, #8] @ 4-byte Spill
+ ldr r5, [sp, #40] @ 4-byte Reload
+ sbcs r2, r5, r2
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r2, [sp, #36] @ 4-byte Reload
+ sbcs r2, r2, r0
+ str r2, [sp, #52] @ 4-byte Spill
+ mov r2, r0
+ ldr r0, [sp, #32] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ sbc r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adds r11, r11, r0
+ ldr r0, [sp, #20] @ 4-byte Reload
+ str r11, [r4, #24]
+ adcs r8, r8, r0
+ ldr r0, [sp, #16] @ 4-byte Reload
+ str r8, [r4, #28]
+ adcs r9, r9, r0
+ ldr r0, [sp, #96] @ 4-byte Reload
+ str r9, [r4, #32]
+ adcs r5, r0, r1
+ ldr r0, [sp, #88] @ 4-byte Reload
+ ldr r1, [sp] @ 4-byte Reload
+ str r5, [r4, #36]
+ ldr r5, [sp, #8] @ 4-byte Reload
+ adcs r7, r0, r7
+ ldr r0, [sp, #92] @ 4-byte Reload
+ str r7, [r4, #40]
+ adcs r6, r0, r6
+ ldr r0, [sp, #80] @ 4-byte Reload
+ str r6, [r4, #44]
+ adcs r0, r0, r1
+ ldr r1, [sp, #4] @ 4-byte Reload
+ str r0, [r4, #48]
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r1, r10, r1
+ adcs r0, r0, r5
+ str r1, [r4, #52]
+ ldr r1, [sp, #72] @ 4-byte Reload
+ ldr r5, [sp, #48] @ 4-byte Reload
+ str r0, [r4, #56]
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r1, r1, r5
+ ldr r5, [sp, #52] @ 4-byte Reload
+ str r1, [r4, #60]
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [r4, #64]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [r4, #68]
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [r4, #72]
+ adcs r0, r12, #0
+ str r0, [r4, #76]
+ adcs r0, r3, #0
+ str r0, [r4, #80]
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [r4, #84]
+ adcs r0, r2, #0
+ adc r1, r1, #0
+ str r0, [r4, #88]
+ str r1, [r4, #92]
+ add sp, sp, #196
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end176:
+ .size mcl_fpDbl_mulPre12L, .Lfunc_end176-mcl_fpDbl_mulPre12L
+ .cantunwind
+ .fnend
+
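+@ mcl_fpDbl_sqrPre12L(z, x): full 24-limb square of a 12-limb operand.
+@ Same Karatsuba-style layout as mcl_fpDbl_mulPre12L, reusing mcl_fpDbl_mulPre6L
+@ with both operand pointers equal (mov r2, r5 / mov r2, r1) for each half product.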
+ .globl mcl_fpDbl_sqrPre12L
+ .align 2
+ .type mcl_fpDbl_sqrPre12L,%function
+mcl_fpDbl_sqrPre12L: @ @mcl_fpDbl_sqrPre12L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #204
+ sub sp, sp, #204
+ mov r5, r1
+ mov r4, r0
+ mov r2, r5
+ bl mcl_fpDbl_mulPre6L(PLT)
+ add r1, r5, #24
+ add r0, r4, #48
+ mov r2, r1
+ bl mcl_fpDbl_mulPre6L(PLT)
+ ldr r10, [r5, #32]
+ ldr r9, [r5, #36]
+ ldr lr, [r5, #40]
+ ldr r12, [r5, #44]
+ ldr r3, [r5, #8]
+ ldr r2, [r5, #12]
+ ldr r1, [r5, #16]
+ ldr r11, [r5, #20]
+ ldr r6, [r5, #24]
+ ldr r0, [r5, #28]
+ ldm r5, {r5, r7}
+ adds r8, r5, r6
+ adcs r6, r7, r0
+ mov r0, #0
+ str r8, [sp, #132]
+ str r8, [sp, #108]
+ adcs r10, r3, r10
+ str r6, [sp, #136]
+ str r6, [sp, #112]
+ adcs r5, r2, r9
+ add r2, sp, #108
+ str r10, [sp, #140]
+ str r10, [sp, #116]
+ adcs r9, r1, lr
+ add r1, sp, #132
+ str r5, [sp, #144]
+ str r5, [sp, #120]
+ adcs r7, r11, r12
+ str r9, [sp, #148]
+ str r9, [sp, #124]
+ adc r11, r0, #0
+ add r0, sp, #156
+ str r7, [sp, #152]
+ str r7, [sp, #128]
+ bl mcl_fpDbl_mulPre6L(PLT)
+ adds r0, r9, r9
+ ldr lr, [sp, #192]
+ ldr r12, [sp, #196]
+ ldr r9, [sp, #200]
+ orr r0, r0, r5, lsr #31
+ str r0, [sp, #104] @ 4-byte Spill
+ adc r0, r7, r7
+ str r0, [sp, #100] @ 4-byte Spill
+ adds r0, r10, r10
+ ldr r10, [sp, #180]
+ adc r1, r5, r5
+ orr r0, r0, r6, lsr #31
+ str r1, [sp, #92] @ 4-byte Spill
+ adds r1, r8, r8
+ ldr r8, [sp, #184]
+ adc r5, r6, r6
+ ldr r6, [sp, #188]
+ adds r1, r10, r1
+ str r1, [sp, #96] @ 4-byte Spill
+ adcs r3, r8, r5
+ ldr r5, [sp, #100] @ 4-byte Reload
+ adcs r2, r6, r0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r1, lr, r0
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r12, r0
+ adcs r5, r9, r5
+ adc r7, r11, r7, lsr #31
+ cmp r11, #0
+ moveq r3, r8
+ moveq r2, r6
+ moveq r5, r9
+ moveq r0, r12
+ moveq r1, lr
+ cmp r11, #0
+ ldr r6, [sp, #96] @ 4-byte Reload
+ mov r8, r3
+ add r3, sp, #156
+ str r0, [sp, #104] @ 4-byte Spill
+ str r1, [sp, #100] @ 4-byte Spill
+ str r2, [sp, #88] @ 4-byte Spill
+ mov r9, r5
+ ldm r4, {r12, lr}
+ moveq r7, r11
+ ldr r11, [r4, #8]
+ ldr r5, [r4, #12]
+ moveq r6, r10
+ ldm r3, {r0, r1, r2, r3}
+ ldr r10, [r4, #64]
+ subs r12, r0, r12
+ ldr r0, [r4, #16]
+ sbcs lr, r1, lr
+ ldr r1, [sp, #172]
+ sbcs r2, r2, r11
+ ldr r11, [r4, #48]
+ sbcs r3, r3, r5
+ ldr r5, [r4, #68]
+ sbcs r0, r1, r0
+ ldr r1, [sp, #176]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r4, #20]
+ str r5, [sp, #60] @ 4-byte Spill
+ sbcs r0, r1, r0
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [r4, #24]
+ str r0, [sp, #96] @ 4-byte Spill
+ sbcs r0, r6, r0
+ ldr r6, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [r4, #28]
+ str r0, [sp, #72] @ 4-byte Spill
+ sbcs r0, r8, r0
+ ldr r8, [r4, #56]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [r4, #32]
+ str r0, [sp, #92] @ 4-byte Spill
+ sbcs r0, r1, r0
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [r4, #36]
+ str r0, [sp, #88] @ 4-byte Spill
+ sbcs r0, r1, r0
+ ldr r1, [r4, #40]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ str r1, [sp, #100] @ 4-byte Spill
+ sbcs r0, r0, r1
+ ldr r1, [r4, #92]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r4, #44]
+ str r1, [sp, #84] @ 4-byte Spill
+ str r0, [sp, #104] @ 4-byte Spill
+ sbcs r0, r9, r0
+ ldr r9, [r4, #60]
+ str r0, [sp, #40] @ 4-byte Spill
+ sbc r0, r7, #0
+ ldr r7, [r4, #52]
+ str r0, [sp, #36] @ 4-byte Spill
+ subs r0, r12, r11
+ ldr r12, [r4, #76]
+ str r0, [sp, #32] @ 4-byte Spill
+ sbcs r0, lr, r7
+ ldr lr, [r4, #72]
+ str r0, [sp, #28] @ 4-byte Spill
+ sbcs r0, r2, r8
+ ldr r2, [r4, #84]
+ str r0, [sp, #24] @ 4-byte Spill
+ sbcs r0, r3, r9
+ ldr r3, [r4, #80]
+ sbcs r6, r6, r10
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [r4, #88]
+ str r6, [sp, #4] @ 4-byte Spill
+ ldr r6, [sp, #68] @ 4-byte Reload
+ str r2, [sp, #80] @ 4-byte Spill
+ sbcs r5, r6, r5
+ str r5, [sp, #8] @ 4-byte Spill
+ ldr r5, [sp, #64] @ 4-byte Reload
+ sbcs r5, r5, lr
+ str r5, [sp, #12] @ 4-byte Spill
+ ldr r5, [sp, #56] @ 4-byte Reload
+ sbcs r5, r5, r12
+ str r5, [sp, #16] @ 4-byte Spill
+ ldr r5, [sp, #52] @ 4-byte Reload
+ sbcs r5, r5, r3
+ str r5, [sp, #52] @ 4-byte Spill
+ ldr r5, [sp, #48] @ 4-byte Reload
+ sbcs r2, r5, r2
+ ldr r5, [sp, #28] @ 4-byte Reload
+ str r2, [sp, #56] @ 4-byte Spill
+ ldr r2, [sp, #44] @ 4-byte Reload
+ sbcs r2, r2, r0
+ str r2, [sp, #64] @ 4-byte Spill
+ mov r2, r0
+ ldr r0, [sp, #40] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ sbc r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adds r0, r0, r1
+ ldr r1, [sp, #72] @ 4-byte Reload
+ str r0, [r4, #24]
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r6, r1, r5
+ ldr r1, [sp, #24] @ 4-byte Reload
+ ldr r5, [sp, #20] @ 4-byte Reload
+ str r6, [r4, #28]
+ adcs r0, r0, r1
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r0, [r4, #32]
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r6, r1, r5
+ ldr r1, [sp, #4] @ 4-byte Reload
+ ldr r5, [sp, #8] @ 4-byte Reload
+ str r6, [r4, #36]
+ adcs r0, r0, r1
+ ldr r1, [sp, #104] @ 4-byte Reload
+ str r0, [r4, #40]
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r5, r1, r5
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r5, [r4, #44]
+ str r0, [r4, #48]
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r1, r7, r1
+ str r1, [r4, #52]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [r4, #56]
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r1, r9, r1
+ str r1, [r4, #60]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [r4, #64]
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [r4, #68]
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [r4, #72]
+ adcs r0, r12, #0
+ str r0, [r4, #76]
+ adcs r0, r3, #0
+ str r0, [r4, #80]
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [r4, #84]
+ adcs r0, r2, #0
+ adc r1, r1, #0
+ str r0, [r4, #88]
+ str r1, [r4, #92]
+ add sp, sp, #204
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end177:
+ .size mcl_fpDbl_sqrPre12L, .Lfunc_end177-mcl_fpDbl_sqrPre12L
+ .cantunwind
+ .fnend
+
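+@ mcl_fp_mont12L(z, x, y, p): Montgomery multiplication for 12-limb operands.
+@ Word-serial loop: each word of y is multiplied in via .LmulPv384x32 and each
+@ partial sum is reduced using a multiple of p derived from the constant loaded at
+@ [r3, #-4] (taken here to be -1/p mod 2^32 per mcl's convention; inferred, not stated).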
+ .globl mcl_fp_mont12L
+ .align 2
+ .type mcl_fp_mont12L,%function
+mcl_fp_mont12L: @ @mcl_fp_mont12L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #428
+ sub sp, sp, #428
+ .pad #1024
+ sub sp, sp, #1024
+ str r2, [sp, #92] @ 4-byte Spill
+ ldr r5, [r3, #-4]
+ ldr r2, [r2]
+ str r0, [sp, #68] @ 4-byte Spill
+ add r0, sp, #1392
+ str r3, [sp, #100] @ 4-byte Spill
+ str r1, [sp, #96] @ 4-byte Spill
+ mov r4, r3
+ str r5, [sp, #88] @ 4-byte Spill
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #1396]
+ ldr r6, [sp, #1392]
+ add r11, sp, #1024
+ mov r1, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #1400]
+ mul r2, r6, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1404]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #1440]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #1436]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #1432]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #1428]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #1424]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1420]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1416]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1412]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1408]
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, r11, #312
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #1384]
+ ldr r1, [sp, #96] @ 4-byte Reload
+ ldr r5, [sp, #1360]
+ ldr r8, [sp, #1356]
+ ldr r7, [sp, #1352]
+ ldr r10, [sp, #1336]
+ ldr r9, [sp, #1340]
+ ldr r4, [sp, #1344]
+ ldr r11, [sp, #1348]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1380]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1376]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1372]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1368]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1364]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r2, [r0, #4]
+ add r0, sp, #1280
+ bl .LmulPv384x32(PLT)
+ adds r0, r10, r6
+ ldr r1, [sp, #64] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ ldr r3, [sp, #1296]
+ ldr r12, [sp, #1300]
+ ldr lr, [sp, #1304]
+ ldr r6, [sp, #1312]
+ ldr r10, [sp, #1328]
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r9, r0
+ ldr r9, [sp, #1324]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #1280]
+ adcs r1, r11, r1
+ ldr r11, [sp, #60] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ mov r0, #0
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r1, r7, r1
+ ldr r7, [sp, #1316]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r8, r1
+ ldr r8, [sp, #1320]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r5, r1
+ ldr r5, [sp, #1308]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #1292]
+ adc r0, r0, #0
+ adds r11, r11, r4
+ ldr r4, [sp, #56] @ 4-byte Reload
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #1288]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1284]
+ adcs r0, r4, r0
+ mov r4, r11
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r7
+ add r7, sp, #1024
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #36] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, r7, #200
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #1272]
+ add r9, sp, #1232
+ ldr r5, [sp, #1248]
+ ldr r8, [sp, #1244]
+ ldr r10, [sp, #1224]
+ ldr r11, [sp, #1228]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1268]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1264]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1260]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1256]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1252]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r9, {r6, r7, r9}
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r1, [sp, #96] @ 4-byte Reload
+ ldr r2, [r0, #8]
+ add r0, sp, #1168
+ bl .LmulPv384x32(PLT)
+ adds r0, r4, r10
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #1168
+ ldr r10, [sp, #1212]
+ ldr r4, [sp, #1192]
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #1216]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1200]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1208]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1204]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1196]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #84] @ 4-byte Reload
+ adds r7, r7, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ add r5, sp, #1024
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r8
+ mov r8, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, r5, #88
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #1160]
+ add r10, sp, #1120
+ ldr r6, [sp, #1136]
+ ldr r9, [sp, #1132]
+ ldr r11, [sp, #1112]
+ ldr r7, [sp, #1116]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1156]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1152]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1148]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1144]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1140]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r10, {r4, r5, r10}
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r1, [sp, #96] @ 4-byte Reload
+ ldr r2, [r0, #12]
+ add r0, sp, #1056
+ bl .LmulPv384x32(PLT)
+ adds r0, r8, r11
+ ldr r1, [sp, #8] @ 4-byte Reload
+ ldr r2, [sp, #1068]
+ ldr r3, [sp, #1072]
+ ldr r12, [sp, #1076]
+ ldr lr, [sp, #1080]
+ ldr r8, [sp, #1096]
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #1092]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ ldr r11, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1056]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1084]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1104]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1100]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1088]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1064]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r11, r11, r4
+ ldr r4, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1060]
+ adcs r0, r4, r0
+ mov r4, r11
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #36] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #1000
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #1048]
+ add r9, sp, #1008
+ ldr r5, [sp, #1024]
+ ldr r8, [sp, #1020]
+ ldr r10, [sp, #1000]
+ ldr r11, [sp, #1004]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1044]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1040]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1036]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1032]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1028]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r9, {r6, r7, r9}
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r1, [sp, #96] @ 4-byte Reload
+ ldr r2, [r0, #16]
+ add r0, sp, #944
+ bl .LmulPv384x32(PLT)
+ adds r0, r4, r10
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #944
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #968
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r11, {r4, r5, r6, r8, r9, r10, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #84] @ 4-byte Reload
+ adds r7, r7, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r8
+ mov r8, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #888
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #936]
+ add r10, sp, #896
+ ldr r6, [sp, #912]
+ ldr r9, [sp, #908]
+ ldr r11, [sp, #888]
+ ldr r7, [sp, #892]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #932]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #928]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #924]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #920]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #916]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r10, {r4, r5, r10}
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r1, [sp, #96] @ 4-byte Reload
+ ldr r2, [r0, #20]
+ add r0, sp, #832
+ bl .LmulPv384x32(PLT)
+ adds r0, r8, r11
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #836
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #860
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r5, r6, r7, r8, r9, r10}
+ ldr r4, [sp, #832]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #84] @ 4-byte Reload
+ adds r11, r11, r4
+ ldr r4, [sp, #80] @ 4-byte Reload
+ adcs r0, r4, r0
+ mov r4, r11
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #36] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #776
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #824]
+ add r9, sp, #784
+ ldr r5, [sp, #800]
+ ldr r8, [sp, #796]
+ ldr r10, [sp, #776]
+ ldr r11, [sp, #780]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #820]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #816]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #812]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #808]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #804]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r9, {r6, r7, r9}
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r1, [sp, #96] @ 4-byte Reload
+ ldr r2, [r0, #24]
+ add r0, sp, #720
+ bl .LmulPv384x32(PLT)
+ adds r0, r4, r10
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #720
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #744
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r11, {r4, r5, r6, r8, r9, r10, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #84] @ 4-byte Reload
+ adds r7, r7, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r8
+ mov r8, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #664
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #712]
+ add r10, sp, #672
+ ldr r6, [sp, #688]
+ ldr r9, [sp, #684]
+ ldr r11, [sp, #664]
+ ldr r7, [sp, #668]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #708]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #704]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #700]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #696]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #692]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r10, {r4, r5, r10}
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r1, [sp, #96] @ 4-byte Reload
+ ldr r2, [r0, #28]
+ add r0, sp, #608
+ bl .LmulPv384x32(PLT)
+ adds r0, r8, r11
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #612
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #636
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r5, r6, r7, r8, r9, r10}
+ ldr r4, [sp, #608]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #84] @ 4-byte Reload
+ adds r11, r11, r4
+ ldr r4, [sp, #80] @ 4-byte Reload
+ adcs r0, r4, r0
+ mov r4, r11
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #36] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #552
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #600]
+ add r9, sp, #560
+ ldr r5, [sp, #576]
+ ldr r8, [sp, #572]
+ ldr r10, [sp, #552]
+ ldr r11, [sp, #556]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #596]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #592]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #588]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #584]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #580]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r9, {r6, r7, r9}
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r1, [sp, #96] @ 4-byte Reload
+ ldr r2, [r0, #32]
+ add r0, sp, #496
+ bl .LmulPv384x32(PLT)
+ adds r0, r4, r10
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #496
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #520
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r11, {r4, r5, r6, r8, r9, r10, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #84] @ 4-byte Reload
+ adds r7, r7, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r8
+ mov r8, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #440
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #488]
+ add r10, sp, #448
+ ldr r6, [sp, #464]
+ ldr r9, [sp, #460]
+ ldr r11, [sp, #440]
+ ldr r7, [sp, #444]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #484]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #480]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #476]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #472]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #468]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r10, {r4, r5, r10}
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r1, [sp, #96] @ 4-byte Reload
+ ldr r2, [r0, #36]
+ add r0, sp, #384
+ bl .LmulPv384x32(PLT)
+ adds r0, r8, r11
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #388
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #412
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r5, r6, r7, r8, r9, r10}
+ ldr r4, [sp, #384]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #84] @ 4-byte Reload
+ adds r11, r11, r4
+ ldr r4, [sp, #80] @ 4-byte Reload
+ adcs r0, r4, r0
+ mov r4, r11
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ mul r2, r11, r6
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #36] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ add r0, sp, #328
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #376]
+ ldr r1, [sp, #96] @ 4-byte Reload
+ ldr r5, [sp, #348]
+ ldr r9, [sp, #344]
+ ldr r10, [sp, #328]
+ ldr r11, [sp, #332]
+ ldr r8, [sp, #336]
+ ldr r7, [sp, #340]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #372]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #368]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #364]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #360]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #356]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #352]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r2, [r0, #40]
+ add r0, sp, #272
+ bl .LmulPv384x32(PLT)
+ adds r0, r4, r10
+ ldr r2, [sp, #4] @ 4-byte Reload
+ add r12, sp, #288
+ ldr lr, [sp, #276]
+ ldr r4, [sp, #284]
+ ldr r10, [sp, #312]
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r1, r0, r11
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #316]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #320]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #280]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #272]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ adds r0, r1, r2
+ mul r11, r0, r6
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r6, [sp, #308]
+ ldm r12, {r0, r1, r2, r3, r12}
+ ldr r7, [sp, #80] @ 4-byte Reload
+ adcs r7, r7, lr
+ str r7, [sp, #80] @ 4-byte Spill
+ ldr r7, [sp, #76] @ 4-byte Reload
+ adcs r7, r7, r5
+ str r7, [sp, #76] @ 4-byte Spill
+ ldr r7, [sp, #72] @ 4-byte Reload
+ adcs r7, r7, r4
+ str r7, [sp, #72] @ 4-byte Spill
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r2
+ mov r2, r11
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #32] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ add r0, sp, #216
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #264]
+ add r10, sp, #220
+ ldr r6, [sp, #244]
+ ldr r7, [sp, #240]
+ ldr r8, [sp, #236]
+ ldr r9, [sp, #232]
+ ldr r11, [sp, #216]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #260]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #256]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #252]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #248]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r10, {r4, r5, r10}
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r1, [sp, #96] @ 4-byte Reload
+ ldr r2, [r0, #44]
+ add r0, sp, #160
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #84] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #160
+ add r12, sp, #176
+ adds r0, r0, r11
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r4, r0, r4
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r11, r0, r5
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #196
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldm lr, {r2, r7, lr}
+ ldr r0, [sp, #88] @ 4-byte Reload
+ ldr r6, [sp, #172]
+ adds r4, r4, r2
+ mul r1, r4, r0
+ adcs r7, r11, r7
+ str r1, [sp, #44] @ 4-byte Spill
+ ldm r10, {r5, r8, r9, r10}
+ ldm r12, {r0, r1, r2, r3, r12}
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [sp, #96] @ 4-byte Reload
+ adcs r11, r7, lr
+ ldr r7, [sp, #92] @ 4-byte Reload
+ adcs r7, r7, r6
+ ldr r6, [sp, #100] @ 4-byte Reload
+ str r7, [sp, #32] @ 4-byte Spill
+ ldr r7, [sp, #84] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r7, r0, r5
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r8, r0, r8
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r10, r0, r10
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ add r0, sp, #104
+ bl .LmulPv384x32(PLT)
+ add r5, sp, #104
+ mov r3, r6
+ ldm r5, {r0, r1, r2, r5}
+ adds r0, r4, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs lr, r0, r1
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r4, r11, r2
+ str lr, [sp, #44] @ 4-byte Spill
+ str r4, [sp, #48] @ 4-byte Spill
+ adcs r2, r0, r5
+ ldr r0, [sp, #120]
+ str r2, [sp, #52] @ 4-byte Spill
+ adcs r5, r1, r0
+ ldr r0, [sp, #124]
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r5, [sp, #56] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #128]
+ adcs r0, r1, r0
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #132]
+ adcs r12, r1, r0
+ ldr r0, [sp, #136]
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r12, [sp, #60] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #140]
+ adcs r0, r7, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #144]
+ adcs r0, r8, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #148]
+ adcs r0, r1, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #152]
+ adcs r0, r10, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldmib r3, {r0, r1, r7, r10}
+ ldr r11, [r3]
+ ldr r6, [r3, #24]
+ ldr r9, [r3, #20]
+ ldr r8, [r3, #36]
+ subs r11, lr, r11
+ str r6, [sp, #36] @ 4-byte Spill
+ ldr r6, [r3, #28]
+ ldr lr, [r3, #44]
+ sbcs r0, r4, r0
+ ldr r4, [sp, #72] @ 4-byte Reload
+ sbcs r1, r2, r1
+ sbcs r2, r5, r7
+ ldr r7, [r3, #32]
+ ldr r5, [r3, #40]
+ ldr r3, [sp, #80] @ 4-byte Reload
+ str r6, [sp, #40] @ 4-byte Spill
+ sbcs r10, r3, r10
+ ldr r3, [sp, #84] @ 4-byte Reload
+ sbcs r6, r3, r9
+ ldr r3, [sp, #36] @ 4-byte Reload
+ ldr r9, [sp, #40] @ 4-byte Reload
+ sbcs r3, r12, r3
+ ldr r12, [sp, #88] @ 4-byte Reload
+ sbcs r12, r12, r9
+ sbcs r7, r4, r7
+ ldr r4, [sp, #76] @ 4-byte Reload
+ str r7, [sp, #100] @ 4-byte Spill
+ ldr r7, [sp, #48] @ 4-byte Reload
+ sbcs r9, r4, r8
+ ldr r4, [sp, #96] @ 4-byte Reload
+ sbcs r8, r4, r5
+ ldr r4, [sp, #92] @ 4-byte Reload
+ ldr r5, [sp, #44] @ 4-byte Reload
+ sbcs lr, r4, lr
+ ldr r4, [sp, #64] @ 4-byte Reload
+ sbc r4, r4, #0
+ ands r4, r4, #1
+ movne r11, r5
+ ldr r5, [sp, #68] @ 4-byte Reload
+ movne r0, r7
+ str r11, [r5]
+ str r0, [r5, #4]
+ ldr r0, [sp, #52] @ 4-byte Reload
+ movne r1, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ cmp r4, #0
+ str r1, [r5, #8]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ movne r2, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ str r2, [r5, #12]
+ movne r10, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ str r10, [r5, #16]
+ movne r6, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ cmp r4, #0
+ str r6, [r5, #20]
+ movne r3, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ str r3, [r5, #24]
+ movne r12, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ str r12, [r5, #28]
+ movne r1, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ cmp r4, #0
+ str r1, [r5, #32]
+ movne r9, r0
+ ldr r0, [sp, #96] @ 4-byte Reload
+ str r9, [r5, #36]
+ movne r8, r0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ str r8, [r5, #40]
+ movne lr, r0
+ str lr, [r5, #44]
+ add sp, sp, #428
+ add sp, sp, #1024
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end178:
+ .size mcl_fp_mont12L, .Lfunc_end178-mcl_fp_mont12L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montNF12L
+ .align 2
+ .type mcl_fp_montNF12L,%function
+mcl_fp_montNF12L: @ @mcl_fp_montNF12L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #428
+ sub sp, sp, #428
+ .pad #1024
+ sub sp, sp, #1024
+ add r12, sp, #92
+ mov r4, r3
+ mov r7, r1
+ stm r12, {r1, r2, r3}
+ str r0, [sp, #68] @ 4-byte Spill
+ add r0, sp, #1392
+ ldr r5, [r3, #-4]
+ ldr r2, [r2]
+ str r5, [sp, #88] @ 4-byte Spill
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #1396]
+ ldr r8, [sp, #1392]
+ add r10, sp, #1024
+ mov r1, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #1400]
+ mul r2, r8, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #1404]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1440]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #1436]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #1432]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #1428]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #1424]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1420]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1416]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1412]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1408]
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, r10, #312
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #1384]
+ add r11, sp, #1344
+ ldr r9, [sp, #1356]
+ ldr r4, [sp, #1336]
+ ldr r6, [sp, #1340]
+ mov r1, r7
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1380]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1376]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1372]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1368]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1364]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1360]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r5, r10, r11}
+ ldr r0, [sp, #96] @ 4-byte Reload
+ ldr r2, [r0, #4]
+ add r0, sp, #1280
+ bl .LmulPv384x32(PLT)
+ adds r0, r4, r8
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #1280
+ ldr r7, [sp, #1316]
+ ldr r4, [sp, #1304]
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r8, r6, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r6, [sp, #1312]
+ adcs r0, r5, r0
+ ldr r5, [sp, #1308]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r10, r0
+ ldr r10, [sp, #1324]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r11, r0
+ ldr r11, [sp, #1328]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r9, r0
+ ldr r9, [sp, #1320]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adc r0, r1, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r8, r8, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ add r5, sp, #1024
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #40] @ 4-byte Spill
+ adc r0, r11, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r8, r0
+ add r0, r5, #200
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #1272]
+ add r10, sp, #1232
+ ldr r6, [sp, #1248]
+ ldr r9, [sp, #1244]
+ ldr r11, [sp, #1224]
+ ldr r7, [sp, #1228]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1268]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1264]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1260]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1256]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1252]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r4, r5, r10}
+ ldr r0, [sp, #96] @ 4-byte Reload
+ ldr r1, [sp, #92] @ 4-byte Reload
+ ldr r2, [r0, #8]
+ add r0, sp, #1168
+ bl .LmulPv384x32(PLT)
+ adds r0, r8, r11
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #1180]
+ ldr r3, [sp, #1184]
+ ldr r12, [sp, #1188]
+ ldr lr, [sp, #1192]
+ ldr r8, [sp, #1208]
+ ldr r11, [sp, #1216]
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #1204]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1168]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1196]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1212]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1200]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r10, r10, r4
+ ldr r4, [sp, #80] @ 4-byte Reload
+ ldr r1, [sp, #1176]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1172]
+ adcs r0, r4, r0
+ mov r4, r10
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ add r7, sp, #1024
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ adc r0, r11, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r10, r0
+ add r0, r7, #88
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #1160]
+ add r9, sp, #1120
+ ldr r5, [sp, #1136]
+ ldr r8, [sp, #1132]
+ ldr r10, [sp, #1112]
+ ldr r11, [sp, #1116]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1156]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1152]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1148]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1144]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1140]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r9, {r6, r7, r9}
+ ldr r0, [sp, #96] @ 4-byte Reload
+ ldr r1, [sp, #92] @ 4-byte Reload
+ ldr r2, [r0, #12]
+ add r0, sp, #1056
+ bl .LmulPv384x32(PLT)
+ adds r0, r4, r10
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #1056
+ ldr r10, [sp, #1100]
+ ldr r4, [sp, #1080]
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #1104]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1088]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1096]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1092]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1084]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #84] @ 4-byte Reload
+ adds r7, r7, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r8
+ mov r8, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #40] @ 4-byte Spill
+ adc r0, r11, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #1000
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #1048]
+ add r10, sp, #1008
+ ldr r6, [sp, #1024]
+ ldr r9, [sp, #1020]
+ ldr r11, [sp, #1000]
+ ldr r7, [sp, #1004]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1044]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1040]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1036]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1032]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1028]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r4, r5, r10}
+ ldr r0, [sp, #96] @ 4-byte Reload
+ ldr r1, [sp, #92] @ 4-byte Reload
+ ldr r2, [r0, #16]
+ add r0, sp, #944
+ bl .LmulPv384x32(PLT)
+ adds r0, r8, r11
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add r11, sp, #972
+ add lr, sp, #948
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r11, {r5, r6, r7, r8, r9, r11}
+ ldr r4, [sp, #944]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r10, [sp, #84] @ 4-byte Reload
+ adds r10, r10, r4
+ ldr r4, [sp, #80] @ 4-byte Reload
+ adcs r0, r4, r0
+ mov r4, r10
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ adc r0, r11, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r10, r0
+ add r0, sp, #888
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #936]
+ add r9, sp, #896
+ ldr r5, [sp, #912]
+ ldr r8, [sp, #908]
+ ldr r10, [sp, #888]
+ ldr r11, [sp, #892]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #932]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #928]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #924]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #920]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #916]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r9, {r6, r7, r9}
+ ldr r0, [sp, #96] @ 4-byte Reload
+ ldr r1, [sp, #92] @ 4-byte Reload
+ ldr r2, [r0, #20]
+ add r0, sp, #832
+ bl .LmulPv384x32(PLT)
+ adds r0, r4, r10
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #832
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #856
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r11, {r4, r5, r6, r8, r9, r10, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #84] @ 4-byte Reload
+ adds r7, r7, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r8
+ mov r8, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #40] @ 4-byte Spill
+ adc r0, r11, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #776
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #824]
+ add r10, sp, #784
+ ldr r6, [sp, #800]
+ ldr r9, [sp, #796]
+ ldr r11, [sp, #776]
+ ldr r7, [sp, #780]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #820]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #816]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #812]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #808]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #804]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r4, r5, r10}
+ ldr r0, [sp, #96] @ 4-byte Reload
+ ldr r1, [sp, #92] @ 4-byte Reload
+ ldr r2, [r0, #24]
+ add r0, sp, #720
+ bl .LmulPv384x32(PLT)
+ adds r0, r8, r11
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add r11, sp, #748
+ add lr, sp, #724
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r11, {r5, r6, r7, r8, r9, r11}
+ ldr r4, [sp, #720]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r10, [sp, #84] @ 4-byte Reload
+ adds r10, r10, r4
+ ldr r4, [sp, #80] @ 4-byte Reload
+ adcs r0, r4, r0
+ mov r4, r10
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ adc r0, r11, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r10, r0
+ add r0, sp, #664
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #712]
+ add r9, sp, #672
+ ldr r5, [sp, #688]
+ ldr r8, [sp, #684]
+ ldr r10, [sp, #664]
+ ldr r11, [sp, #668]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #708]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #704]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #700]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #696]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #692]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r9, {r6, r7, r9}
+ ldr r0, [sp, #96] @ 4-byte Reload
+ ldr r1, [sp, #92] @ 4-byte Reload
+ ldr r2, [r0, #28]
+ add r0, sp, #608
+ bl .LmulPv384x32(PLT)
+ adds r0, r4, r10
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #608
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #632
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r11, {r4, r5, r6, r8, r9, r10, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #84] @ 4-byte Reload
+ adds r7, r7, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r8
+ mov r8, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #40] @ 4-byte Spill
+ adc r0, r11, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #552
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #600]
+ add r10, sp, #560
+ ldr r6, [sp, #576]
+ ldr r9, [sp, #572]
+ ldr r11, [sp, #552]
+ ldr r7, [sp, #556]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #596]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #592]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #588]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #584]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #580]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r4, r5, r10}
+ ldr r0, [sp, #96] @ 4-byte Reload
+ ldr r1, [sp, #92] @ 4-byte Reload
+ ldr r2, [r0, #32]
+ add r0, sp, #496
+ bl .LmulPv384x32(PLT)
+ adds r0, r8, r11
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add r11, sp, #524
+ add lr, sp, #500
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r11, {r5, r6, r7, r8, r9, r11}
+ ldr r4, [sp, #496]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r10, [sp, #84] @ 4-byte Reload
+ adds r10, r10, r4
+ ldr r4, [sp, #80] @ 4-byte Reload
+ adcs r0, r4, r0
+ mov r4, r10
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ adc r0, r11, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r10, r0
+ add r0, sp, #440
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #488]
+ add r9, sp, #448
+ ldr r5, [sp, #464]
+ ldr r8, [sp, #460]
+ ldr r10, [sp, #440]
+ ldr r11, [sp, #444]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #484]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #480]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #476]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #472]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #468]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r9, {r6, r7, r9}
+ ldr r0, [sp, #96] @ 4-byte Reload
+ ldr r1, [sp, #92] @ 4-byte Reload
+ ldr r2, [r0, #36]
+ add r0, sp, #384
+ bl .LmulPv384x32(PLT)
+ adds r0, r4, r10
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #384
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #408
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r11, {r4, r5, r6, r8, r9, r10, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #84] @ 4-byte Reload
+ adds r7, r7, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ mul r2, r7, r4
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r8
+ mov r8, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #40] @ 4-byte Spill
+ adc r0, r11, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #328
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #376]
+ ldr r1, [sp, #92] @ 4-byte Reload
+ ldr r6, [sp, #348]
+ ldr r10, [sp, #344]
+ ldr r11, [sp, #328]
+ ldr r7, [sp, #332]
+ ldr r9, [sp, #336]
+ ldr r5, [sp, #340]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #372]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #368]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #364]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #360]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #356]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #352]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ ldr r2, [r0, #40]
+ add r0, sp, #272
+ bl .LmulPv384x32(PLT)
+ adds r0, r8, r11
+ ldr r1, [sp, #80] @ 4-byte Reload
+ ldr r2, [sp, #8] @ 4-byte Reload
+ ldr lr, [sp, #276]
+ add r12, sp, #288
+ ldr r8, [sp, #316]
+ ldr r11, [sp, #312]
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r7
+ adcs r7, r1, r9
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r9, [sp, #320]
+ adcs r1, r1, r5
+ ldr r5, [sp, #280]
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r1, r1, r10
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #284]
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adc r1, r1, r2
+ ldr r2, [sp, #272]
+ str r1, [sp, #36] @ 4-byte Spill
+ adds r0, r0, r2
+ adcs r7, r7, lr
+ mul r10, r0, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r4, [sp, #308]
+ ldm r12, {r0, r1, r2, r3, r12}
+ str r7, [sp, #80] @ 4-byte Spill
+ ldr r7, [sp, #76] @ 4-byte Reload
+ adcs r7, r7, r5
+ str r7, [sp, #76] @ 4-byte Spill
+ ldr r7, [sp, #72] @ 4-byte Reload
+ adcs r7, r7, r6
+ str r7, [sp, #72] @ 4-byte Spill
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r2
+ mov r2, r10
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #36] @ 4-byte Spill
+ adc r0, r9, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ add r0, sp, #216
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #264]
+ ldr r1, [sp, #92] @ 4-byte Reload
+ ldr r5, [sp, #244]
+ ldr r6, [sp, #240]
+ ldr r8, [sp, #236]
+ ldr r9, [sp, #232]
+ ldr r10, [sp, #216]
+ ldr r7, [sp, #220]
+ ldr r4, [sp, #224]
+ ldr r11, [sp, #228]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #260]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #256]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #252]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #248]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ ldr r2, [r0, #44]
+ add r0, sp, #160
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #84] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ add r12, sp, #176
+ ldr lr, [sp, #164]
+ adds r0, r0, r10
+ add r10, sp, #200
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #172]
+ adcs r1, r1, r4
+ str r1, [sp, #96] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r1, r1, r11
+ str r1, [sp, #92] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r9
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r8
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #168]
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r5
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adc r1, r1, r2
+ ldr r2, [sp, #160]
+ str r1, [sp, #48] @ 4-byte Spill
+ adds r4, r0, r2
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r1, r4, r0
+ str r1, [sp, #44] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr r11, [sp, #196]
+ ldm r12, {r0, r1, r2, r3, r12}
+ ldr r5, [sp, #96] @ 4-byte Reload
+ adcs r5, r5, lr
+ str r5, [sp, #36] @ 4-byte Spill
+ ldr r5, [sp, #92] @ 4-byte Reload
+ adcs r6, r5, r6
+ ldr r5, [sp, #100] @ 4-byte Reload
+ str r6, [sp, #32] @ 4-byte Spill
+ ldr r6, [sp, #84] @ 4-byte Reload
+ adcs r7, r6, r7
+ ldr r6, [sp, #80] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r11, r0, r11
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r8, r0, r8
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r9, r0, r9
+ adc r0, r10, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ add r0, sp, #104
+ bl .LmulPv384x32(PLT)
+ add r6, sp, #104
+ ldm r6, {r0, r1, r2, r6}
+ adds r0, r4, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs lr, r0, r1
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r10, r0, r2
+ ldr r0, [sp, #120]
+ mov r2, r5
+ adcs r3, r7, r6
+ str r10, [sp, #52] @ 4-byte Spill
+ str r3, [sp, #56] @ 4-byte Spill
+ adcs r6, r1, r0
+ ldr r0, [sp, #124]
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r6, [sp, #60] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #128]
+ adcs r0, r1, r0
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #132]
+ adcs r12, r1, r0
+ ldr r0, [sp, #136]
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r12, [sp, #64] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #140]
+ adcs r0, r11, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #144]
+ adcs r0, r8, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #148]
+ adcs r0, r9, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #152]
+ adc r0, r1, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldmib r2, {r0, r1, r7, r9}
+ ldr r4, [r2, #24]
+ ldr r8, [r2]
+ ldr r5, [r2, #20]
+ str r4, [sp, #44] @ 4-byte Spill
+ ldr r4, [r2, #28]
+ subs r8, lr, r8
+ sbcs r0, r10, r0
+ sbcs r1, r3, r1
+ sbcs r7, r6, r7
+ str r4, [sp, #48] @ 4-byte Spill
+ mov r4, r2
+ ldr r2, [r4, #44]
+ ldr r10, [r4, #32]
+ ldr r6, [r4, #36]
+ ldr r11, [r4, #40]
+ ldr r4, [sp, #48] @ 4-byte Reload
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [sp, #76] @ 4-byte Reload
+ sbcs r9, r2, r9
+ ldr r2, [sp, #80] @ 4-byte Reload
+ sbcs r5, r2, r5
+ ldr r2, [sp, #44] @ 4-byte Reload
+ sbcs r3, r12, r2
+ ldr r2, [sp, #84] @ 4-byte Reload
+ sbcs r12, r2, r4
+ ldr r2, [sp, #88] @ 4-byte Reload
+ ldr r4, [sp, #40] @ 4-byte Reload
+ sbcs r10, r2, r10
+ ldr r2, [sp, #72] @ 4-byte Reload
+ sbcs r2, r2, r6
+ ldr r6, [sp, #52] @ 4-byte Reload
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r2, [sp, #96] @ 4-byte Reload
+ sbcs r2, r2, r11
+ ldr r11, [sp, #68] @ 4-byte Reload
+ str r2, [sp, #100] @ 4-byte Spill
+ ldr r2, [sp, #92] @ 4-byte Reload
+ sbc r2, r2, r4
+ asr r4, r2, #31
+ cmp r4, #0
+ movlt r8, lr
+ movlt r0, r6
+ str r8, [r11]
+ str r0, [r11, #4]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ movlt r1, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ cmp r4, #0
+ str r1, [r11, #8]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ movlt r7, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ str r7, [r11, #12]
+ movlt r9, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ str r9, [r11, #16]
+ movlt r5, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ cmp r4, #0
+ str r5, [r11, #20]
+ movlt r3, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ str r3, [r11, #24]
+ ldr r3, [sp, #48] @ 4-byte Reload
+ movlt r12, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ str r12, [r11, #28]
+ movlt r10, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ cmp r4, #0
+ str r10, [r11, #32]
+ movlt r3, r0
+ ldr r0, [sp, #96] @ 4-byte Reload
+ str r3, [r11, #36]
+ movlt r1, r0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ str r1, [r11, #40]
+ movlt r2, r0
+ str r2, [r11, #44]
+ add sp, sp, #428
+ add sp, sp, #1024
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end179:
+ .size mcl_fp_montNF12L, .Lfunc_end179-mcl_fp_montNF12L
+ .cantunwind
+ .fnend
+
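+@ mcl_fp_montRed12L: Montgomery reduction of the 24-limb (768-bit) value at [r1] modulo the
+@ 12-limb prime at [r2]; the word at [r2, #-4] appears to supply the Montgomery factor used
+@ to form the per-limb multiplier before each .LmulPv384x32 call (inferred from the code below).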
+ .globl mcl_fp_montRed12L
+ .align 2
+ .type mcl_fp_montRed12L,%function
+mcl_fp_montRed12L: @ @mcl_fp_montRed12L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #836
+ sub sp, sp, #836
+ mov r3, r2
+ str r0, [sp, #148] @ 4-byte Spill
+ ldr r2, [r1, #4]
+ ldr r10, [r1]
+ ldr r0, [r3]
+ str r3, [sp, #152] @ 4-byte Spill
+ mov r5, r3
+ str r2, [sp, #64] @ 4-byte Spill
+ ldr r2, [r1, #8]
+ str r0, [sp, #144] @ 4-byte Spill
+ ldr r0, [r3, #4]
+ str r2, [sp, #60] @ 4-byte Spill
+ ldr r2, [r1, #12]
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [r3, #8]
+ str r2, [sp, #56] @ 4-byte Spill
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [r3, #12]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [r3, #16]
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [r3, #20]
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [r3, #24]
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [r3, #-4]
+ str r0, [sp, #156] @ 4-byte Spill
+ mul r2, r10, r0
+ ldr r0, [r3, #28]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [r3, #32]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [r3, #36]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [r3, #40]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [r3, #44]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [r1, #64]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [r1, #68]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r1, #72]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r1, #76]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [r1, #80]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [r1, #84]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [r1, #88]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [r1, #92]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [r1, #32]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r1, #36]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r1, #40]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r1, #44]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r1, #48]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [r1, #52]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [r1, #56]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [r1, #60]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [r1, #28]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r1, #24]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r1, #20]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [r1, #16]
+ mov r1, r3
+ str r0, [sp, #8] @ 4-byte Spill
+ add r0, sp, #776
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #824]
+ add r11, sp, #808
+ add lr, sp, #776
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r11, {r6, r8, r9, r11}
+ ldr r7, [sp, #804]
+ ldr r4, [sp, #800]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r10, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r10, r0, r1
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r1, [sp, #4] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #156] @ 4-byte Reload
+ mul r2, r10, r0
+ add r0, sp, #720
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #768]
+ add lr, sp, #756
+ add r9, sp, #732
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm lr, {r3, r12, lr}
+ ldr r4, [sp, #720]
+ ldr r6, [sp, #752]
+ ldr r11, [sp, #748]
+ ldr r2, [sp, #744]
+ ldr r1, [sp, #724]
+ ldr r7, [sp, #728]
+ ldm r9, {r0, r8, r9}
+ adds r4, r10, r4
+ ldr r4, [sp, #64] @ 4-byte Reload
+ adcs r10, r4, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ mov r4, r5
+ adcs r1, r1, r7
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #4] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r4
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #156] @ 4-byte Reload
+ mul r2, r10, r0
+ add r0, sp, #664
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #712]
+ add r11, sp, #696
+ add lr, sp, #664
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r6, r8, r9, r11}
+ ldr r7, [sp, #692]
+ ldr r5, [sp, #688]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r10, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r10, r0, r1
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #156] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ mul r2, r10, r5
+ adcs r0, r0, r7
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r4
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ add r0, sp, #608
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #656]
+ add lr, sp, #644
+ add r9, sp, #620
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm lr, {r3, r12, lr}
+ ldr r4, [sp, #608]
+ ldr r6, [sp, #640]
+ ldr r11, [sp, #636]
+ ldr r2, [sp, #632]
+ ldr r1, [sp, #612]
+ ldr r7, [sp, #616]
+ ldm r9, {r0, r8, r9}
+ adds r4, r10, r4
+ ldr r4, [sp, #64] @ 4-byte Reload
+ adcs r10, r4, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ ldr r4, [sp, #152] @ 4-byte Reload
+ adcs r1, r1, r7
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r4
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ mov r0, r5
+ mul r2, r10, r0
+ add r0, sp, #552
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #600]
+ add r11, sp, #584
+ add lr, sp, #552
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r11, {r6, r8, r9, r11}
+ ldr r7, [sp, #580]
+ ldr r5, [sp, #576]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r10, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r10, r0, r1
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #156] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ mul r2, r10, r5
+ adcs r0, r0, r7
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r4
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #496
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #544]
+ add lr, sp, #532
+ add r9, sp, #508
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm lr, {r3, r12, lr}
+ ldr r4, [sp, #496]
+ ldr r6, [sp, #528]
+ ldr r11, [sp, #524]
+ ldr r2, [sp, #520]
+ ldr r1, [sp, #500]
+ ldr r7, [sp, #504]
+ ldm r9, {r0, r8, r9}
+ adds r4, r10, r4
+ ldr r4, [sp, #64] @ 4-byte Reload
+ adcs r10, r4, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ ldr r4, [sp, #152] @ 4-byte Reload
+ adcs r1, r1, r7
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r10, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ add r0, sp, #440
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #488]
+ add r11, sp, #472
+ add lr, sp, #440
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r11, {r6, r8, r9, r11}
+ ldr r7, [sp, #468]
+ ldr r5, [sp, #464]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r10, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r10, r0, r1
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r1, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #156] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ mul r2, r10, r5
+ adcs r0, r0, r7
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r4
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ add r0, sp, #384
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #432]
+ add lr, sp, #420
+ add r9, sp, #396
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm lr, {r3, r12, lr}
+ ldr r4, [sp, #384]
+ ldr r6, [sp, #416]
+ ldr r11, [sp, #412]
+ ldr r2, [sp, #408]
+ ldr r1, [sp, #388]
+ ldr r7, [sp, #392]
+ ldm r9, {r0, r8, r9}
+ adds r4, r10, r4
+ ldr r4, [sp, #64] @ 4-byte Reload
+ adcs r10, r4, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ mov r4, r5
+ adcs r1, r1, r7
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r10, r4
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #152] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #328
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #376]
+ add r11, sp, #352
+ add lr, sp, #328
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #372]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r11, {r5, r7, r8, r9, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r10, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r10, r0, r1
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r10, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r5
+ mov r5, r6
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #272
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #320]
+ add lr, sp, #300
+ add r6, sp, #272
+ add r12, sp, #284
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm lr, {r4, r8, r9, r11, lr}
+ ldr r7, [sp, #296]
+ ldm r6, {r2, r3, r6}
+ ldm r12, {r0, r1, r12}
+ adds r2, r10, r2
+ ldr r2, [sp, #64] @ 4-byte Reload
+ adcs r10, r2, r3
+ ldr r2, [sp, #60] @ 4-byte Reload
+ adcs r6, r2, r6
+ ldr r2, [sp, #56] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #156] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ mul r2, r10, r4
+ adcs r0, r0, r8
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ mov r11, r5
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ add r0, sp, #216
+ bl .LmulPv384x32(PLT)
+ add r7, sp, #216
+ add lr, sp, #252
+ ldm r7, {r0, r1, r3, r7}
+ ldr r8, [sp, #264]
+ adds r0, r10, r0
+ adcs r10, r6, r1
+ mul r0, r10, r4
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #156] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ add r7, sp, #232
+ str r0, [sp, #52] @ 4-byte Spill
+ ldm lr, {r6, r12, lr}
+ ldm r7, {r0, r1, r2, r3, r7}
+ ldr r4, [sp, #96] @ 4-byte Reload
+ adcs r9, r4, r0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r11
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r4, r0, r3
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r5, r0, r7
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r6, r0, r6
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ add r0, sp, #160
+ bl .LmulPv384x32(PLT)
+ add r3, sp, #160
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r10, r0
+ ldr r0, [sp, #156] @ 4-byte Reload
+ adcs r1, r0, r1
+ ldr r0, [sp, #52] @ 4-byte Reload
+ str r1, [sp, #48] @ 4-byte Spill
+ adcs r12, r0, r2
+ ldr r2, [sp, #176]
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r3, r9, r3
+ str r12, [sp, #52] @ 4-byte Spill
+ str r3, [sp, #56] @ 4-byte Spill
+ adcs r7, r0, r2
+ ldr r2, [sp, #180]
+ ldr r0, [sp, #44] @ 4-byte Reload
+ str r7, [sp, #60] @ 4-byte Spill
+ adcs r8, r0, r2
+ ldr r2, [sp, #184]
+ ldr r0, [sp, #84] @ 4-byte Reload
+ str r8, [sp, #64] @ 4-byte Spill
+ adcs r4, r4, r2
+ ldr r2, [sp, #188]
+ str r4, [sp, #68] @ 4-byte Spill
+ adcs r5, r5, r2
+ ldr r2, [sp, #192]
+ str r5, [sp, #72] @ 4-byte Spill
+ adcs r6, r6, r2
+ ldr r2, [sp, #196]
+ str r6, [sp, #76] @ 4-byte Spill
+ adcs r9, r0, r2
+ ldr r2, [sp, #200]
+ ldr r0, [sp, #96] @ 4-byte Reload
+ str r9, [sp, #84] @ 4-byte Spill
+ adcs r10, r0, r2
+ ldr r2, [sp, #204]
+ ldr r0, [sp, #80] @ 4-byte Reload
+ str r10, [sp, #96] @ 4-byte Spill
+ adcs lr, r0, r2
+ ldr r2, [sp, #208]
+ ldr r0, [sp, #92] @ 4-byte Reload
+ str lr, [sp, #156] @ 4-byte Spill
+ adcs r11, r0, r2
+ ldr r0, [sp, #88] @ 4-byte Reload
+ ldr r2, [sp, #136] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #144] @ 4-byte Reload
+ subs r0, r1, r0
+ ldr r1, [sp, #140] @ 4-byte Reload
+ sbcs r1, r12, r1
+ sbcs r2, r3, r2
+ ldr r3, [sp, #120] @ 4-byte Reload
+ sbcs r3, r7, r3
+ ldr r7, [sp, #124] @ 4-byte Reload
+ sbcs r12, r8, r7
+ ldr r7, [sp, #128] @ 4-byte Reload
+ sbcs r7, r4, r7
+ ldr r4, [sp, #132] @ 4-byte Reload
+ sbcs r4, r5, r4
+ ldr r5, [sp, #100] @ 4-byte Reload
+ sbcs r8, r6, r5
+ ldr r6, [sp, #104] @ 4-byte Reload
+ sbcs r5, r9, r6
+ ldr r6, [sp, #108] @ 4-byte Reload
+ str r5, [sp, #144] @ 4-byte Spill
+ ldr r5, [sp, #92] @ 4-byte Reload
+ sbcs r9, r10, r6
+ ldr r6, [sp, #112] @ 4-byte Reload
+ sbcs r6, lr, r6
+ mov lr, r11
+ ldr r11, [sp, #148] @ 4-byte Reload
+ str r6, [sp, #152] @ 4-byte Spill
+ ldr r6, [sp, #116] @ 4-byte Reload
+ sbcs r10, lr, r6
+ sbc r6, r5, #0
+ ldr r5, [sp, #48] @ 4-byte Reload
+ ands r6, r6, #1
+ movne r0, r5
+ str r0, [r11]
+ ldr r0, [sp, #52] @ 4-byte Reload
+ movne r1, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ str r1, [r11, #4]
+ ldr r1, [sp, #156] @ 4-byte Reload
+ movne r2, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ cmp r6, #0
+ str r2, [r11, #8]
+ ldr r2, [sp, #144] @ 4-byte Reload
+ movne r3, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ str r3, [r11, #12]
+ movne r12, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ str r12, [r11, #16]
+ movne r7, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ cmp r6, #0
+ str r7, [r11, #20]
+ movne r4, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ str r4, [r11, #24]
+ movne r8, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ str r8, [r11, #28]
+ movne r2, r0
+ ldr r0, [sp, #96] @ 4-byte Reload
+ cmp r6, #0
+ movne r10, lr
+ str r2, [r11, #32]
+ movne r9, r0
+ ldr r0, [sp, #152] @ 4-byte Reload
+ movne r0, r1
+ str r9, [r11, #36]
+ str r0, [r11, #40]
+ str r10, [r11, #44]
+ add sp, sp, #836
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end180:
+ .size mcl_fp_montRed12L, .Lfunc_end180-mcl_fp_montRed12L
+ .cantunwind
+ .fnend
+
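+@ mcl_fp_addPre12L: plain 12-limb (384-bit) add, [r0] = [r1] + [r2]; the final carry is returned in r0.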
+ .globl mcl_fp_addPre12L
+ .align 2
+ .type mcl_fp_addPre12L,%function
+mcl_fp_addPre12L: @ @mcl_fp_addPre12L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #28
+ sub sp, sp, #28
+ ldm r1, {r3, r12, lr}
+ ldr r9, [r1, #12]
+ ldmib r2, {r5, r6, r7}
+ ldr r4, [r2, #16]
+ ldr r11, [r2]
+ str r4, [sp] @ 4-byte Spill
+ ldr r4, [r2, #20]
+ adds r8, r11, r3
+ ldr r3, [r2, #36]
+ ldr r11, [r2, #32]
+ adcs r5, r5, r12
+ add r12, r1, #16
+ adcs r6, r6, lr
+ add lr, r1, #32
+ adcs r7, r7, r9
+ str r4, [sp, #4] @ 4-byte Spill
+ ldr r4, [r2, #24]
+ str r3, [sp, #16] @ 4-byte Spill
+ ldr r3, [r2, #40]
+ str r4, [sp, #8] @ 4-byte Spill
+ ldr r4, [r2, #28]
+ ldr r2, [r2, #44]
+ str r3, [sp, #20] @ 4-byte Spill
+ str r4, [sp, #12] @ 4-byte Spill
+ str r2, [sp, #24] @ 4-byte Spill
+ ldm lr, {r4, r10, lr}
+ ldr r9, [r1, #44]
+ ldm r12, {r1, r2, r3, r12}
+ str r8, [r0]
+ stmib r0, {r5, r6}
+ str r7, [r0, #12]
+ ldr r5, [sp] @ 4-byte Reload
+ ldr r7, [sp, #24] @ 4-byte Reload
+ adcs r1, r5, r1
+ ldr r5, [sp, #4] @ 4-byte Reload
+ str r1, [r0, #16]
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r2, r5, r2
+ str r2, [r0, #20]
+ ldr r2, [sp, #12] @ 4-byte Reload
+ adcs r1, r1, r3
+ ldr r3, [sp, #20] @ 4-byte Reload
+ str r1, [r0, #24]
+ adcs r2, r2, r12
+ str r2, [r0, #28]
+ ldr r2, [sp, #16] @ 4-byte Reload
+ adcs r1, r11, r4
+ add r0, r0, #32
+ adcs r2, r2, r10
+ adcs r3, r3, lr
+ adcs r7, r7, r9
+ stm r0, {r1, r2, r3, r7}
+ mov r0, #0
+ adc r0, r0, #0
+ add sp, sp, #28
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end181:
+ .size mcl_fp_addPre12L, .Lfunc_end181-mcl_fp_addPre12L
+ .cantunwind
+ .fnend
+
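+@ mcl_fp_subPre12L: plain 12-limb (384-bit) subtract, [r0] = [r1] - [r2]; the borrow (0 or 1) is returned in r0.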
+ .globl mcl_fp_subPre12L
+ .align 2
+ .type mcl_fp_subPre12L,%function
+mcl_fp_subPre12L: @ @mcl_fp_subPre12L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #28
+ sub sp, sp, #28
+ ldmib r2, {r8, r12, lr}
+ ldr r3, [r2, #16]
+ ldr r7, [r2]
+ ldr r6, [r1]
+ ldr r5, [r1, #4]
+ ldr r4, [r1, #8]
+ ldr r11, [r2, #44]
+ ldr r9, [r1, #32]
+ ldr r10, [r1, #36]
+ str r3, [sp, #12] @ 4-byte Spill
+ ldr r3, [r2, #20]
+ subs r6, r6, r7
+ ldr r7, [r2, #32]
+ sbcs r5, r5, r8
+ ldr r8, [r1, #40]
+ sbcs r4, r4, r12
+ add r12, r1, #16
+ str r3, [sp, #16] @ 4-byte Spill
+ ldr r3, [r2, #24]
+ str r7, [sp] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r3, [sp, #20] @ 4-byte Spill
+ ldr r3, [r2, #28]
+ str r7, [sp, #4] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ str r3, [sp, #24] @ 4-byte Spill
+ ldr r3, [r1, #12]
+ str r7, [sp, #8] @ 4-byte Spill
+ ldr r7, [r1, #44]
+ sbcs lr, r3, lr
+ ldm r12, {r1, r2, r3, r12}
+ str r6, [r0]
+ str r5, [r0, #4]
+ str r4, [r0, #8]
+ ldr r4, [sp, #12] @ 4-byte Reload
+ ldr r6, [sp, #16] @ 4-byte Reload
+ str lr, [r0, #12]
+ sbcs r1, r1, r4
+ str r1, [r0, #16]
+ sbcs r2, r2, r6
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r2, [r0, #20]
+ ldr r2, [sp, #24] @ 4-byte Reload
+ sbcs r1, r3, r1
+ ldr r3, [sp, #8] @ 4-byte Reload
+ str r1, [r0, #24]
+ sbcs r2, r12, r2
+ ldr r1, [sp] @ 4-byte Reload
+ str r2, [r0, #28]
+ ldr r2, [sp, #4] @ 4-byte Reload
+ add r0, r0, #32
+ sbcs r1, r9, r1
+ sbcs r2, r10, r2
+ sbcs r3, r8, r3
+ sbcs r7, r7, r11
+ stm r0, {r1, r2, r3, r7}
+ mov r0, #0
+ sbc r0, r0, #0
+ and r0, r0, #1
+ add sp, sp, #28
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end182:
+ .size mcl_fp_subPre12L, .Lfunc_end182-mcl_fp_subPre12L
+ .cantunwind
+ .fnend
+
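+@ mcl_fp_shr1_12L: one-bit logical right shift of the 12-limb value at [r1], result stored at [r0].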
+ .globl mcl_fp_shr1_12L
+ .align 2
+ .type mcl_fp_shr1_12L,%function
+mcl_fp_shr1_12L: @ @mcl_fp_shr1_12L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #4
+ sub sp, sp, #4
+ add r6, r1, #20
+ ldr r3, [r1, #8]
+ ldr r2, [r1, #12]
+ ldr lr, [r1, #16]
+ add r11, r1, #32
+ ldm r6, {r4, r5, r6}
+ ldm r1, {r8, r12}
+ lsr r7, r12, #1
+ orr r9, r7, r3, lsl #31
+ ldm r11, {r7, r10, r11}
+ ldr r1, [r1, #44]
+ str r1, [sp] @ 4-byte Spill
+ lsr r1, r2, #1
+ lsrs r2, r2, #1
+ rrx r2, r3
+ lsrs r3, r12, #1
+ orr r1, r1, lr, lsl #31
+ rrx r3, r8
+ stm r0, {r3, r9}
+ str r2, [r0, #8]
+ str r1, [r0, #12]
+ lsrs r1, r4, #1
+ lsr r2, r10, #1
+ rrx r1, lr
+ orr r2, r2, r11, lsl #31
+ str r1, [r0, #16]
+ lsr r1, r4, #1
+ orr r1, r1, r5, lsl #31
+ str r1, [r0, #20]
+ lsrs r1, r6, #1
+ rrx r1, r5
+ str r1, [r0, #24]
+ lsr r1, r6, #1
+ orr r1, r1, r7, lsl #31
+ str r1, [r0, #28]
+ lsrs r1, r10, #1
+ add r0, r0, #32
+ rrx r1, r7
+ ldr r7, [sp] @ 4-byte Reload
+ lsrs r3, r7, #1
+ lsr r7, r7, #1
+ rrx r3, r11
+ stm r0, {r1, r2, r3, r7}
+ add sp, sp, #4
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end183:
+ .size mcl_fp_shr1_12L, .Lfunc_end183-mcl_fp_shr1_12L
+ .cantunwind
+ .fnend
+
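+@ mcl_fp_add12L: modular add, [r0] = [r1] + [r2] mod the prime at [r3]; the raw sum is stored
+@ first and replaced by sum - p on the %nocarry path, i.e. only when that subtraction does not borrow.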
+ .globl mcl_fp_add12L
+ .align 2
+ .type mcl_fp_add12L,%function
+mcl_fp_add12L: @ @mcl_fp_add12L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #44
+ sub sp, sp, #44
+ ldm r1, {r12, lr}
+ ldr r5, [r2]
+ ldr r8, [r1, #8]
+ ldr r9, [r1, #12]
+ ldmib r2, {r4, r6, r7}
+ ldr r11, [r1, #40]
+ adds r5, r5, r12
+ ldr r12, [r2, #40]
+ adcs r4, r4, lr
+ str r5, [sp, #40] @ 4-byte Spill
+ ldr r5, [r1, #24]
+ ldr lr, [r1, #32]
+ adcs r6, r6, r8
+ str r4, [sp, #36] @ 4-byte Spill
+ ldr r4, [r1, #20]
+ ldr r8, [r1, #36]
+ adcs r7, r7, r9
+ str r6, [sp, #32] @ 4-byte Spill
+ ldr r6, [r1, #16]
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r2, #16]
+ adcs r10, r7, r6
+ ldr r6, [r2, #20]
+ adcs r7, r6, r4
+ ldr r4, [r2, #24]
+ str r7, [sp, #12] @ 4-byte Spill
+ adcs r7, r4, r5
+ ldr r4, [r1, #28]
+ ldr r5, [r2, #28]
+ str r7, [sp, #4] @ 4-byte Spill
+ adcs r6, r5, r4
+ ldr r5, [r2, #32]
+ ldr r4, [r1, #44]
+ ldr r1, [r2, #36]
+ ldr r2, [r2, #44]
+ str r6, [sp, #8] @ 4-byte Spill
+ adcs r9, r5, lr
+ ldr lr, [sp, #32] @ 4-byte Reload
+ adcs r5, r1, r8
+ ldr r1, [sp, #40] @ 4-byte Reload
+ ldr r8, [sp, #12] @ 4-byte Reload
+ adcs r11, r12, r11
+ ldr r12, [sp, #36] @ 4-byte Reload
+ str r5, [sp, #28] @ 4-byte Spill
+ adcs r2, r2, r4
+ ldr r4, [sp, #16] @ 4-byte Reload
+ str r2, [sp, #24] @ 4-byte Spill
+ str r1, [r0]
+ str r12, [r0, #4]
+ str lr, [r0, #8]
+ str r4, [r0, #12]
+ str r10, [r0, #16]
+ str r8, [r0, #20]
+ str r7, [r0, #24]
+ str r6, [r0, #28]
+ str r9, [r0, #32]
+ str r5, [r0, #36]
+ str r11, [r0, #40]
+ str r2, [r0, #44]
+ mov r2, #0
+ adc r2, r2, #0
+ str r2, [sp, #20] @ 4-byte Spill
+ ldm r3, {r2, r6, r7}
+ ldr r5, [r3, #12]
+ subs r1, r1, r2
+ ldr r2, [sp, #4] @ 4-byte Reload
+ str r1, [sp, #40] @ 4-byte Spill
+ sbcs r1, r12, r6
+ str r1, [sp] @ 4-byte Spill
+ sbcs r1, lr, r7
+ str r1, [sp, #36] @ 4-byte Spill
+ sbcs r1, r4, r5
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [r3, #16]
+ sbcs r1, r10, r1
+ add r10, r3, #36
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [r3, #20]
+ sbcs r6, r8, r1
+ ldr r1, [r3, #24]
+ sbcs lr, r2, r1
+ ldr r2, [r3, #28]
+ ldr r1, [sp, #8] @ 4-byte Reload
+ sbcs r12, r1, r2
+ ldr r2, [r3, #32]
+ ldm r10, {r1, r4, r10}
+ sbcs r7, r9, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ sbcs r2, r2, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ sbcs r3, r11, r4
+ sbcs r5, r1, r10
+ ldr r1, [sp, #20] @ 4-byte Reload
+ sbc r1, r1, #0
+ tst r1, #1
+ bne .LBB184_2
+@ BB#1: @ %nocarry
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r1, [r0]
+ ldr r1, [sp] @ 4-byte Reload
+ str r1, [r0, #4]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r1, [r0, #8]
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r1, [r0, #12]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r1, [r0, #16]
+ str r6, [r0, #20]
+ str lr, [r0, #24]
+ str r12, [r0, #28]
+ str r7, [r0, #32]
+ add r0, r0, #36
+ stm r0, {r2, r3, r5}
+.LBB184_2: @ %carry
+ add sp, sp, #44
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end184:
+ .size mcl_fp_add12L, .Lfunc_end184-mcl_fp_add12L
+ .cantunwind
+ .fnend
+
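+@ mcl_fp_addNF12L: branch-free variant of the modular add; both the sum and sum - p are computed
+@ and the correct one is selected with conditional moves on the sign of the final borrow.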
+ .globl mcl_fp_addNF12L
+ .align 2
+ .type mcl_fp_addNF12L,%function
+mcl_fp_addNF12L: @ @mcl_fp_addNF12L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #60
+ sub sp, sp, #60
+ ldm r1, {r5, r8, lr}
+ ldr r6, [r2]
+ ldr r10, [r1, #12]
+ ldmib r2, {r4, r7, r9}
+ ldr r12, [r1, #20]
+ adds r6, r6, r5
+ ldr r5, [r1, #24]
+ adcs r8, r4, r8
+ ldr r4, [r2, #16]
+ str r6, [sp, #16] @ 4-byte Spill
+ adcs r7, r7, lr
+ add lr, r2, #32
+ str r7, [sp, #24] @ 4-byte Spill
+ ldr r7, [r1, #16]
+ adcs r6, r9, r10
+ str r6, [sp, #32] @ 4-byte Spill
+ ldr r6, [r1, #44]
+ adcs r7, r4, r7
+ ldr r4, [r1, #40]
+ str r7, [sp, #40] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ adcs r7, r7, r12
+ str r7, [sp, #48] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ adcs r7, r7, r5
+ ldr r5, [r2, #28]
+ str r7, [sp, #56] @ 4-byte Spill
+ ldr r7, [r1, #28]
+ adcs r7, r5, r7
+ ldr r5, [r1, #36]
+ str r7, [sp, #52] @ 4-byte Spill
+ ldr r7, [r1, #32]
+ ldm lr, {r1, r12, lr}
+ ldr r2, [r2, #44]
+ adcs r1, r1, r7
+ str r1, [sp, #20] @ 4-byte Spill
+ adcs r1, r12, r5
+ str r1, [sp, #28] @ 4-byte Spill
+ adcs r1, lr, r4
+ str r1, [sp, #36] @ 4-byte Spill
+ adc r1, r2, r6
+ str r1, [sp, #44] @ 4-byte Spill
+ ldmib r3, {r1, r2, r6, r11}
+ ldr r7, [r3, #20]
+ ldr r4, [r3, #32]
+ ldr r9, [r3]
+ ldr r5, [sp, #16] @ 4-byte Reload
+ ldr lr, [r3, #24]
+ ldr r10, [r3, #28]
+ str r7, [sp, #12] @ 4-byte Spill
+ ldr r7, [sp, #24] @ 4-byte Reload
+ str r4, [sp, #8] @ 4-byte Spill
+ ldr r4, [r3, #36]
+ subs r9, r5, r9
+ sbcs r1, r8, r1
+ sbcs r2, r7, r2
+ ldr r7, [sp, #32] @ 4-byte Reload
+ str r4, [sp] @ 4-byte Spill
+ ldr r4, [r3, #40]
+ sbcs r12, r7, r6
+ ldr r7, [r3, #44]
+ ldr r3, [sp, #40] @ 4-byte Reload
+ str r4, [sp, #4] @ 4-byte Spill
+ ldr r4, [sp, #48] @ 4-byte Reload
+ ldr r6, [sp, #12] @ 4-byte Reload
+ sbcs r3, r3, r11
+ sbcs r11, r4, r6
+ ldr r4, [sp, #56] @ 4-byte Reload
+ ldr r6, [sp, #8] @ 4-byte Reload
+ sbcs lr, r4, lr
+ ldr r4, [sp, #52] @ 4-byte Reload
+ sbcs r10, r4, r10
+ ldr r4, [sp, #20] @ 4-byte Reload
+ sbcs r4, r4, r6
+ ldr r6, [sp] @ 4-byte Reload
+ str r4, [sp, #8] @ 4-byte Spill
+ ldr r4, [sp, #28] @ 4-byte Reload
+ sbcs r4, r4, r6
+ ldr r6, [sp, #36] @ 4-byte Reload
+ str r4, [sp] @ 4-byte Spill
+ ldr r4, [sp, #4] @ 4-byte Reload
+ sbcs r6, r6, r4
+ str r6, [sp, #12] @ 4-byte Spill
+ ldr r6, [sp, #44] @ 4-byte Reload
+ sbc r6, r6, r7
+ asr r7, r6, #31
+ cmp r7, #0
+ movlt r9, r5
+ movlt r1, r8
+ str r9, [r0]
+ str r1, [r0, #4]
+ ldr r1, [sp, #24] @ 4-byte Reload
+ movlt r2, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ cmp r7, #0
+ str r2, [r0, #8]
+ ldr r2, [sp, #8] @ 4-byte Reload
+ movlt r12, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r12, [r0, #12]
+ movlt r3, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r3, [r0, #16]
+ ldr r3, [sp, #12] @ 4-byte Reload
+ movlt r11, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ cmp r7, #0
+ str r11, [r0, #20]
+ movlt lr, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str lr, [r0, #24]
+ movlt r10, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r10, [r0, #28]
+ movlt r2, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ cmp r7, #0
+ ldr r7, [sp] @ 4-byte Reload
+ str r2, [r0, #32]
+ movlt r7, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r7, [r0, #36]
+ movlt r3, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r3, [r0, #40]
+ movlt r6, r1
+ str r6, [r0, #44]
+ add sp, sp, #60
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end185:
+ .size mcl_fp_addNF12L, .Lfunc_end185-mcl_fp_addNF12L
+ .cantunwind
+ .fnend
+
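+@ mcl_fp_sub12L: modular subtract, [r0] = [r1] - [r2] mod the prime at [r3]; when the subtraction
+@ borrows, the modulus is added back on the %carry path.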
+ .globl mcl_fp_sub12L
+ .align 2
+ .type mcl_fp_sub12L,%function
+mcl_fp_sub12L: @ @mcl_fp_sub12L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #64
+ sub sp, sp, #64
+ ldr r9, [r2]
+ ldmib r2, {r8, r12, lr}
+ ldm r1, {r4, r5, r6, r7}
+ add r10, r1, #32
+ subs r4, r4, r9
+ sbcs r5, r5, r8
+ str r4, [sp, #48] @ 4-byte Spill
+ ldr r4, [r2, #24]
+ sbcs r6, r6, r12
+ str r5, [sp, #52] @ 4-byte Spill
+ ldr r5, [r2, #20]
+ sbcs r7, r7, lr
+ str r6, [sp, #56] @ 4-byte Spill
+ ldr r6, [r2, #16]
+ str r7, [sp, #60] @ 4-byte Spill
+ ldr r7, [r1, #16]
+ sbcs r11, r7, r6
+ ldr r6, [r1, #20]
+ str r11, [sp, #28] @ 4-byte Spill
+ sbcs lr, r6, r5
+ ldr r5, [r1, #24]
+ str lr, [sp, #40] @ 4-byte Spill
+ sbcs r7, r5, r4
+ ldr r4, [r2, #28]
+ ldr r5, [r1, #28]
+ str r7, [sp, #44] @ 4-byte Spill
+ add r7, r2, #32
+ sbcs r12, r5, r4
+ str r12, [sp, #36] @ 4-byte Spill
+ ldm r7, {r4, r5, r6, r7}
+ ldm r10, {r2, r8, r9, r10}
+ ldr r1, [sp, #48] @ 4-byte Reload
+ sbcs r4, r2, r4
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r1, [r0]
+ sbcs r8, r8, r5
+ str r4, [sp, #32] @ 4-byte Spill
+ sbcs r6, r9, r6
+ sbcs r7, r10, r7
+ ldr r10, [sp, #52] @ 4-byte Reload
+ str r10, [r0, #4]
+ str r2, [r0, #8]
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r2, [r0, #12]
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r11, [r0, #16]
+ str lr, [r0, #20]
+ str r2, [r0, #24]
+ str r12, [r0, #28]
+ str r4, [r0, #32]
+ mov r4, #0
+ str r8, [r0, #36]
+ str r6, [r0, #40]
+ str r7, [r0, #44]
+ sbc r4, r4, #0
+ tst r4, #1
+ beq .LBB186_2
+@ BB#1: @ %carry
+ ldr r5, [r3, #32]
+ ldr r4, [r3, #20]
+ ldr r12, [r3, #28]
+ ldr r9, [r3, #4]
+ ldr lr, [r3, #12]
+ ldr r11, [r3, #16]
+ str r5, [sp, #12] @ 4-byte Spill
+ ldr r5, [r3, #36]
+ str r4, [sp] @ 4-byte Spill
+ ldr r4, [r3, #24]
+ str r12, [sp, #8] @ 4-byte Spill
+ str r5, [sp, #16] @ 4-byte Spill
+ ldr r5, [r3, #40]
+ str r4, [sp, #4] @ 4-byte Spill
+ str r5, [sp, #20] @ 4-byte Spill
+ ldr r5, [r3, #44]
+ str r5, [sp, #24] @ 4-byte Spill
+ ldr r5, [r3, #8]
+ ldr r3, [r3]
+ adds r3, r3, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r4, r9, r10
+ adcs r5, r5, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ stm r0, {r3, r4, r5}
+ ldr r3, [sp] @ 4-byte Reload
+ adcs r1, lr, r1
+ str r1, [r0, #12]
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r1, r11, r1
+ str r1, [r0, #16]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r3, r1
+ ldr r3, [sp, #20] @ 4-byte Reload
+ str r1, [r0, #20]
+ ldr r1, [sp, #4] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #8] @ 4-byte Reload
+ str r1, [r0, #24]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r1, [r0, #28]
+ ldr r1, [sp, #32] @ 4-byte Reload
+ add r0, r0, #32
+ adcs r1, r2, r1
+ ldr r2, [sp, #16] @ 4-byte Reload
+ adcs r2, r2, r8
+ adcs r3, r3, r6
+ ldr r6, [sp, #24] @ 4-byte Reload
+ adc r7, r6, r7
+ stm r0, {r1, r2, r3, r7}
+.LBB186_2: @ %nocarry
+ add sp, sp, #64
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end186:
+ .size mcl_fp_sub12L, .Lfunc_end186-mcl_fp_sub12L
+ .cantunwind
+ .fnend
+
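+@ mcl_fp_subNF12L: branch-free variant of the modular subtract; the difference and difference + p
+@ are selected between with conditional moves on the sign of the result.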
+ .globl mcl_fp_subNF12L
+ .align 2
+ .type mcl_fp_subNF12L,%function
+mcl_fp_subNF12L: @ @mcl_fp_subNF12L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #56
+ sub sp, sp, #56
+ mov r12, r0
+ ldr r0, [r2, #32]
+ add r11, r2, #8
+ ldr r6, [r2]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [r2, #36]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r2, #40]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r2, #44]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [r1, #32]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [r1, #36]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [r1, #40]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [r1, #44]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [r2, #4]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r11, {r8, r10, r11}
+ ldr r0, [r2, #20]
+ ldr lr, [r1, #16]
+ ldr r7, [r1, #20]
+ ldr r5, [r1, #24]
+ ldr r4, [r1, #28]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r2, #24]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r2, #28]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r1, #12]
+ ldm r1, {r1, r2, r9}
+ subs r1, r1, r6
+ ldr r6, [sp, #36] @ 4-byte Reload
+ sbcs r2, r2, r6
+ sbcs r6, r9, r8
+ mov r9, r2
+ sbcs r10, r0, r10
+ str r6, [sp, #4] @ 4-byte Spill
+ sbcs r0, lr, r11
+ add r11, r3, #8
+ ldr lr, [r3, #4]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ sbcs r0, r7, r0
+ ldr r7, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ sbcs r0, r5, r0
+ ldr r5, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ sbcs r0, r4, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ sbcs r0, r7, r0
+ ldr r7, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ sbcs r0, r7, r0
+ ldr r7, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ sbcs r0, r7, r0
+ ldr r7, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ sbc r0, r5, r7
+ ldr r7, [r3, #36]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [r3, #32]
+ str r7, [sp, #12] @ 4-byte Spill
+ ldr r7, [r3, #40]
+ str r0, [sp] @ 4-byte Spill
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r3, #44]
+ str r7, [sp, #20] @ 4-byte Spill
+ ldm r11, {r7, r8, r11}
+ ldr r4, [r3, #28]
+ ldr r5, [r3, #20]
+ ldr r0, [r3, #24]
+ ldr r3, [r3]
+ str r4, [sp, #8] @ 4-byte Spill
+ mov r4, r1
+ adds r1, r4, r3
+ ldr r3, [sp, #36] @ 4-byte Reload
+ adcs r2, r9, lr
+ adcs lr, r6, r7
+ adcs r6, r10, r8
+ adcs r7, r3, r11
+ ldr r3, [sp, #40] @ 4-byte Reload
+ adcs r8, r3, r5
+ ldr r3, [sp, #44] @ 4-byte Reload
+ adcs r5, r3, r0
+ ldr r3, [sp, #48] @ 4-byte Reload
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r11, r3, r0
+ ldr r3, [sp, #52] @ 4-byte Reload
+ ldr r0, [sp] @ 4-byte Reload
+ adcs r3, r3, r0
+ ldr r0, [sp, #12] @ 4-byte Reload
+ str r3, [sp, #8] @ 4-byte Spill
+ ldr r3, [sp, #32] @ 4-byte Reload
+ adcs r3, r3, r0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ str r3, [sp, #12] @ 4-byte Spill
+ ldr r3, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r3
+ ldr r3, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r3, r0, r3
+ str r3, [sp, #20] @ 4-byte Spill
+ asr r3, r0, #31
+ ldr r0, [sp, #4] @ 4-byte Reload
+ cmp r3, #0
+ movge r1, r4
+ movge r2, r9
+ str r1, [r12]
+ str r2, [r12, #4]
+ ldr r1, [sp, #8] @ 4-byte Reload
+ ldr r2, [sp, #32] @ 4-byte Reload
+ movge lr, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ cmp r3, #0
+ movge r6, r10
+ str lr, [r12, #8]
+ str r6, [r12, #12]
+ movge r7, r0
+ ldr r0, [sp, #40] @ 4-byte Reload
+ str r7, [r12, #16]
+ ldr r7, [sp, #24] @ 4-byte Reload
+ movge r8, r0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ cmp r3, #0
+ str r8, [r12, #20]
+ movge r5, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ str r5, [r12, #24]
+ movge r11, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ str r11, [r12, #28]
+ movge r1, r0
+ cmp r3, #0
+ ldr r3, [sp, #28] @ 4-byte Reload
+ ldr r0, [sp, #12] @ 4-byte Reload
+ movge r0, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [r12, #32]
+ add r1, r12, #36
+ movge r2, r3
+ ldr r3, [sp, #20] @ 4-byte Reload
+ movge r3, r7
+ stm r1, {r0, r2, r3}
+ add sp, sp, #56
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end187:
+ .size mcl_fp_subNF12L, .Lfunc_end187-mcl_fp_subNF12L
+ .cantunwind
+ .fnend
+
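+@ mcl_fpDbl_add12L: adds two 24-limb (double-width) operands; the low 12 limbs are stored as-is
+@ and the high 12 limbs are conditionally reduced modulo the prime at [r3].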
+ .globl mcl_fpDbl_add12L
+ .align 2
+ .type mcl_fpDbl_add12L,%function
+mcl_fpDbl_add12L: @ @mcl_fpDbl_add12L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #136
+ sub sp, sp, #136
+ ldm r1, {r7, r8, r12, lr}
+ ldm r2, {r4, r5, r6, r9}
+ ldr r10, [r2, #20]
+ adds r4, r4, r7
+ str r4, [sp, #80] @ 4-byte Spill
+ ldr r4, [r2, #64]
+ str r4, [sp, #108] @ 4-byte Spill
+ ldr r4, [r2, #68]
+ str r4, [sp, #112] @ 4-byte Spill
+ ldr r4, [r2, #72]
+ str r4, [sp, #116] @ 4-byte Spill
+ ldr r4, [r2, #76]
+ str r4, [sp, #120] @ 4-byte Spill
+ ldr r4, [r2, #80]
+ str r4, [sp, #124] @ 4-byte Spill
+ ldr r4, [r2, #84]
+ str r4, [sp, #128] @ 4-byte Spill
+ ldr r4, [r2, #88]
+ str r4, [sp, #132] @ 4-byte Spill
+ ldr r4, [r2, #92]
+ str r4, [sp, #76] @ 4-byte Spill
+ adcs r4, r5, r8
+ adcs r7, r6, r12
+ ldr r6, [r2, #16]
+ str r4, [sp, #28] @ 4-byte Spill
+ str r7, [sp, #24] @ 4-byte Spill
+ adcs r7, r9, lr
+ add r9, r1, #32
+ add lr, r1, #16
+ str r7, [sp, #32] @ 4-byte Spill
+ ldr r7, [r2, #32]
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r7, [sp, #72] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ str r7, [sp, #84] @ 4-byte Spill
+ ldr r7, [r2, #44]
+ str r7, [sp, #88] @ 4-byte Spill
+ ldr r7, [r2, #48]
+ str r7, [sp, #92] @ 4-byte Spill
+ ldr r7, [r2, #52]
+ str r7, [sp, #96] @ 4-byte Spill
+ ldr r7, [r2, #56]
+ str r7, [sp, #100] @ 4-byte Spill
+ ldr r7, [r2, #60]
+ str r7, [sp, #104] @ 4-byte Spill
+ ldr r7, [r2, #28]
+ str r7, [sp, #20] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ ldr r2, [r1, #64]
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [r1, #68]
+ str r7, [sp, #16] @ 4-byte Spill
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [r1, #72]
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r2, [r1, #76]
+ str r2, [sp, #52] @ 4-byte Spill
+ ldr r2, [r1, #80]
+ str r2, [sp, #56] @ 4-byte Spill
+ ldr r2, [r1, #84]
+ str r2, [sp, #60] @ 4-byte Spill
+ ldr r2, [r1, #88]
+ str r2, [sp, #64] @ 4-byte Spill
+ ldr r2, [r1, #92]
+ str r2, [sp, #68] @ 4-byte Spill
+ ldm r9, {r4, r5, r8, r9}
+ ldr r2, [r1, #48]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #52]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r1, #56]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldm lr, {r1, r2, r12, lr}
+ ldr r11, [sp, #80] @ 4-byte Reload
+ ldr r7, [sp, #28] @ 4-byte Reload
+ adcs r1, r6, r1
+ str r11, [r0]
+ str r7, [r0, #4]
+ ldr r7, [sp, #24] @ 4-byte Reload
+ ldr r6, [sp, #32] @ 4-byte Reload
+ adcs r2, r10, r2
+ ldr r10, [r3]
+ str r7, [r0, #8]
+ str r6, [r0, #12]
+ str r1, [r0, #16]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r2, [r0, #20]
+ ldr r2, [sp, #20] @ 4-byte Reload
+ ldr r7, [sp] @ 4-byte Reload
+ adcs r1, r1, r12
+ str r1, [r0, #24]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r2, r2, lr
+ str r2, [r0, #28]
+ ldr r2, [sp, #72] @ 4-byte Reload
+ adcs r1, r1, r4
+ str r1, [r0, #32]
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r2, r2, r5
+ ldr r5, [r3, #12]
+ str r2, [r0, #36]
+ ldr r2, [sp, #88] @ 4-byte Reload
+ adcs r1, r1, r8
+ str r1, [r0, #40]
+ ldr r1, [sp, #92] @ 4-byte Reload
+ adcs r2, r2, r9
+ str r2, [r0, #44]
+ ldr r2, [sp, #4] @ 4-byte Reload
+ adcs r12, r1, r7
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r12, [sp, #80] @ 4-byte Spill
+ adcs r8, r1, r2
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r2, [sp, #8] @ 4-byte Reload
+ str r8, [sp, #88] @ 4-byte Spill
+ adcs lr, r1, r2
+ ldr r1, [sp, #104] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str lr, [sp, #92] @ 4-byte Spill
+ adcs r4, r1, r2
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r4, [sp, #104] @ 4-byte Spill
+ adcs r9, r1, r2
+ ldr r1, [sp, #112] @ 4-byte Reload
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r9, [sp, #96] @ 4-byte Spill
+ adcs r11, r1, r2
+ ldr r1, [sp, #116] @ 4-byte Reload
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r11, [sp, #108] @ 4-byte Spill
+ adcs r6, r1, r2
+ ldr r1, [sp, #120] @ 4-byte Reload
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r6, [sp, #112] @ 4-byte Spill
+ adcs r7, r1, r2
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r7, [sp, #116] @ 4-byte Spill
+ adcs r1, r1, r2
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r1, [sp, #124] @ 4-byte Spill
+ ldr r1, [sp, #128] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #64] @ 4-byte Reload
+ str r1, [sp, #120] @ 4-byte Spill
+ ldr r1, [sp, #132] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #68] @ 4-byte Reload
+ str r1, [sp, #128] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [r3, #8]
+ str r1, [sp, #132] @ 4-byte Spill
+ mov r1, #0
+ adc r1, r1, #0
+ subs r10, r12, r10
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [r3, #4]
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [r3, #16]
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [r3, #20]
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [r3, #24]
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [r3, #28]
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ sbcs r1, r8, r1
+ ldr r8, [r3, #40]
+ sbcs r2, lr, r2
+ ldr lr, [r3, #32]
+ sbcs r12, r4, r5
+ ldr r4, [r3, #36]
+ ldr r3, [r3, #44]
+ ldr r5, [sp, #72] @ 4-byte Reload
+ str r3, [sp, #64] @ 4-byte Spill
+ ldr r3, [sp, #68] @ 4-byte Reload
+ sbcs r3, r9, r3
+ sbcs r9, r11, r5
+ ldr r5, [sp, #76] @ 4-byte Reload
+ sbcs r5, r6, r5
+ ldr r6, [sp, #84] @ 4-byte Reload
+ sbcs r6, r7, r6
+ ldr r7, [sp, #124] @ 4-byte Reload
+ sbcs r11, r7, lr
+ ldr r7, [sp, #120] @ 4-byte Reload
+ sbcs lr, r7, r4
+ ldr r7, [sp, #128] @ 4-byte Reload
+ ldr r4, [sp, #64] @ 4-byte Reload
+ sbcs r8, r7, r8
+ ldr r7, [sp, #132] @ 4-byte Reload
+ sbcs r4, r7, r4
+ ldr r7, [sp, #100] @ 4-byte Reload
+ str r4, [sp, #84] @ 4-byte Spill
+ ldr r4, [sp, #80] @ 4-byte Reload
+ sbc r7, r7, #0
+ ands r7, r7, #1
+ movne r10, r4
+ ldr r4, [sp, #88] @ 4-byte Reload
+ str r10, [r0, #48]
+ movne r1, r4
+ str r1, [r0, #52]
+ ldr r1, [sp, #92] @ 4-byte Reload
+ movne r2, r1
+ ldr r1, [sp, #104] @ 4-byte Reload
+ cmp r7, #0
+ str r2, [r0, #56]
+ ldr r2, [sp, #84] @ 4-byte Reload
+ movne r12, r1
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r12, [r0, #60]
+ movne r3, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r3, [r0, #64]
+ movne r9, r1
+ ldr r1, [sp, #112] @ 4-byte Reload
+ cmp r7, #0
+ str r9, [r0, #68]
+ movne r5, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r5, [r0, #72]
+ movne r6, r1
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r6, [r0, #76]
+ movne r11, r1
+ ldr r1, [sp, #120] @ 4-byte Reload
+ cmp r7, #0
+ str r11, [r0, #80]
+ movne lr, r1
+ ldr r1, [sp, #128] @ 4-byte Reload
+ str lr, [r0, #84]
+ movne r8, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r8, [r0, #88]
+ movne r2, r1
+ str r2, [r0, #92]
+ add sp, sp, #136
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end188:
+ .size mcl_fpDbl_add12L, .Lfunc_end188-mcl_fpDbl_add12L
+ .cantunwind
+ .fnend
+
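+@ mcl_fpDbl_sub12L(z, x, y, p): double-width subtraction of two 24-word
+@ operands, z = x - y, via a subs/sbcs chain. If the subtraction borrows,
+@ the upper 12 words are corrected by adding the 12-word modulus p back
+@ (the ands/moveq selection near the end of the routine). Descriptive
+@ comment added for readability; inferred from the code, not generator output.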
+ .globl mcl_fpDbl_sub12L
+ .align 2
+ .type mcl_fpDbl_sub12L,%function
+mcl_fpDbl_sub12L: @ @mcl_fpDbl_sub12L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #136
+ sub sp, sp, #136
+ ldr r7, [r2, #64]
+ str r7, [sp, #128] @ 4-byte Spill
+ ldr r7, [r2, #68]
+ str r7, [sp, #104] @ 4-byte Spill
+ ldr r7, [r2, #72]
+ str r7, [sp, #132] @ 4-byte Spill
+ ldr r7, [r2, #76]
+ str r7, [sp, #108] @ 4-byte Spill
+ ldr r7, [r2, #80]
+ str r7, [sp, #112] @ 4-byte Spill
+ ldr r7, [r2, #84]
+ str r7, [sp, #116] @ 4-byte Spill
+ ldr r7, [r2, #88]
+ str r7, [sp, #124] @ 4-byte Spill
+ ldr r7, [r2, #92]
+ str r7, [sp, #120] @ 4-byte Spill
+ ldr r7, [r2, #60]
+ str r7, [sp, #100] @ 4-byte Spill
+ ldr r7, [r2, #56]
+ str r7, [sp, #96] @ 4-byte Spill
+ ldr r7, [r2, #52]
+ str r7, [sp, #92] @ 4-byte Spill
+ ldr r7, [r2, #48]
+ str r7, [sp, #88] @ 4-byte Spill
+ ldr r7, [r2, #44]
+ str r7, [sp, #84] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ str r7, [sp, #80] @ 4-byte Spill
+ ldr r7, [r2]
+ ldmib r2, {r6, r9}
+ ldr r5, [r1]
+ ldr r8, [r2, #12]
+ ldmib r1, {r4, lr}
+ ldr r12, [r1, #12]
+ ldr r10, [r2, #20]
+ subs r5, r5, r7
+ sbcs r4, r4, r6
+ str r5, [sp, #32] @ 4-byte Spill
+ ldr r5, [r2, #36]
+ ldr r6, [r2, #16]
+ sbcs r7, lr, r9
+ str r4, [sp, #24] @ 4-byte Spill
+ ldr r4, [r2, #32]
+ add r9, r1, #32
+ add lr, r1, #16
+ str r7, [sp, #12] @ 4-byte Spill
+ ldr r7, [r2, #28]
+ str r5, [sp, #44] @ 4-byte Spill
+ str r4, [sp, #40] @ 4-byte Spill
+ str r7, [sp, #36] @ 4-byte Spill
+ sbcs r7, r12, r8
+ str r7, [sp, #8] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ ldr r2, [r1, #64]
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r2, [r1, #68]
+ str r7, [sp, #28] @ 4-byte Spill
+ str r2, [sp, #52] @ 4-byte Spill
+ ldr r2, [r1, #72]
+ str r2, [sp, #56] @ 4-byte Spill
+ ldr r2, [r1, #76]
+ str r2, [sp, #60] @ 4-byte Spill
+ ldr r2, [r1, #80]
+ str r2, [sp, #64] @ 4-byte Spill
+ ldr r2, [r1, #84]
+ str r2, [sp, #68] @ 4-byte Spill
+ ldr r2, [r1, #88]
+ str r2, [sp, #72] @ 4-byte Spill
+ ldr r2, [r1, #92]
+ str r2, [sp, #76] @ 4-byte Spill
+ ldm r9, {r4, r5, r8, r9}
+ ldr r2, [r1, #48]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #52]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r1, #56]
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #20] @ 4-byte Spill
+ ldm lr, {r1, r2, r12, lr}
+ ldr r11, [sp, #32] @ 4-byte Reload
+ ldr r7, [sp, #24] @ 4-byte Reload
+ sbcs r1, r1, r6
+ str r11, [r0]
+ str r7, [r0, #4]
+ ldr r7, [sp, #12] @ 4-byte Reload
+ ldr r6, [sp, #8] @ 4-byte Reload
+ sbcs r2, r2, r10
+ str r7, [r0, #8]
+ str r6, [r0, #12]
+ str r1, [r0, #16]
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r2, [r0, #20]
+ ldr r2, [sp, #36] @ 4-byte Reload
+ ldr r7, [sp] @ 4-byte Reload
+ sbcs r1, r12, r1
+ str r1, [r0, #24]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ sbcs r2, lr, r2
+ str r2, [r0, #28]
+ ldr r2, [sp, #44] @ 4-byte Reload
+ sbcs r1, r4, r1
+ str r1, [r0, #32]
+ ldr r1, [sp, #80] @ 4-byte Reload
+ sbcs r2, r5, r2
+ str r2, [r0, #36]
+ ldr r2, [sp, #84] @ 4-byte Reload
+ sbcs r1, r8, r1
+ str r1, [r0, #40]
+ ldr r1, [sp, #88] @ 4-byte Reload
+ sbcs r2, r9, r2
+ str r2, [r0, #44]
+ ldr r2, [sp, #4] @ 4-byte Reload
+ sbcs r9, r7, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ ldr r7, [sp, #16] @ 4-byte Reload
+ str r9, [sp, #40] @ 4-byte Spill
+ sbcs lr, r2, r1
+ ldr r2, [sp, #96] @ 4-byte Reload
+ mov r1, #0
+ str lr, [sp, #44] @ 4-byte Spill
+ sbcs r2, r7, r2
+ ldr r7, [sp, #20] @ 4-byte Reload
+ str r2, [sp, #92] @ 4-byte Spill
+ ldr r2, [sp, #100] @ 4-byte Reload
+ sbcs r4, r7, r2
+ ldr r2, [sp, #128] @ 4-byte Reload
+ ldr r7, [sp, #48] @ 4-byte Reload
+ str r4, [sp, #88] @ 4-byte Spill
+ sbcs r2, r7, r2
+ ldr r7, [sp, #52] @ 4-byte Reload
+ str r2, [sp, #128] @ 4-byte Spill
+ ldr r2, [sp, #104] @ 4-byte Reload
+ sbcs r5, r7, r2
+ ldr r2, [sp, #132] @ 4-byte Reload
+ ldr r7, [sp, #56] @ 4-byte Reload
+ str r5, [sp, #96] @ 4-byte Spill
+ sbcs r2, r7, r2
+ ldr r7, [sp, #60] @ 4-byte Reload
+ str r2, [sp, #132] @ 4-byte Spill
+ ldr r2, [sp, #108] @ 4-byte Reload
+ sbcs r8, r7, r2
+ ldr r2, [sp, #112] @ 4-byte Reload
+ ldr r7, [sp, #64] @ 4-byte Reload
+ str r8, [sp, #104] @ 4-byte Spill
+ sbcs r10, r7, r2
+ ldr r2, [sp, #116] @ 4-byte Reload
+ ldr r7, [sp, #68] @ 4-byte Reload
+ str r10, [sp, #108] @ 4-byte Spill
+ sbcs r6, r7, r2
+ ldr r2, [sp, #124] @ 4-byte Reload
+ ldr r7, [sp, #72] @ 4-byte Reload
+ str r6, [sp, #112] @ 4-byte Spill
+ sbcs r2, r7, r2
+ ldr r7, [sp, #76] @ 4-byte Reload
+ str r2, [sp, #124] @ 4-byte Spill
+ ldr r2, [sp, #120] @ 4-byte Reload
+ sbcs r2, r7, r2
+ sbc r1, r1, #0
+ str r2, [sp, #120] @ 4-byte Spill
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [r3, #32]
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [r3, #36]
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [r3, #40]
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [r3, #44]
+ str r1, [sp, #116] @ 4-byte Spill
+ ldmib r3, {r1, r2, r12}
+ ldr r7, [r3, #16]
+ ldr r11, [r3, #20]
+ str r7, [sp, #64] @ 4-byte Spill
+ ldr r7, [r3, #24]
+ str r7, [sp, #72] @ 4-byte Spill
+ ldr r7, [r3, #28]
+ ldr r3, [r3]
+ adds r3, r9, r3
+ ldr r9, [sp, #92] @ 4-byte Reload
+ str r7, [sp, #68] @ 4-byte Spill
+ ldr r7, [sp, #72] @ 4-byte Reload
+ adcs r1, lr, r1
+ ldr lr, [sp, #128] @ 4-byte Reload
+ adcs r2, r9, r2
+ adcs r12, r4, r12
+ ldr r4, [sp, #64] @ 4-byte Reload
+ adcs lr, lr, r4
+ adcs r4, r5, r11
+ ldr r5, [sp, #132] @ 4-byte Reload
+ ldr r11, [sp, #116] @ 4-byte Reload
+ adcs r5, r5, r7
+ ldr r7, [sp, #68] @ 4-byte Reload
+ adcs r8, r8, r7
+ ldr r7, [sp, #76] @ 4-byte Reload
+ adcs r10, r10, r7
+ ldr r7, [sp, #80] @ 4-byte Reload
+ adcs r6, r6, r7
+ ldr r7, [sp, #84] @ 4-byte Reload
+ str r6, [sp, #80] @ 4-byte Spill
+ ldr r6, [sp, #124] @ 4-byte Reload
+ adcs r6, r6, r7
+ ldr r7, [sp, #40] @ 4-byte Reload
+ str r6, [sp, #84] @ 4-byte Spill
+ ldr r6, [sp, #120] @ 4-byte Reload
+ adc r6, r6, r11
+ str r6, [sp, #116] @ 4-byte Spill
+ ldr r6, [sp, #100] @ 4-byte Reload
+ ands r6, r6, #1
+ moveq r3, r7
+ moveq r2, r9
+ str r3, [r0, #48]
+ ldr r3, [sp, #44] @ 4-byte Reload
+ moveq r1, r3
+ cmp r6, #0
+ str r1, [r0, #52]
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r2, [r0, #56]
+ ldr r2, [sp, #80] @ 4-byte Reload
+ moveq r12, r1
+ ldr r1, [sp, #128] @ 4-byte Reload
+ str r12, [r0, #60]
+ moveq lr, r1
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str lr, [r0, #64]
+ moveq r4, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ cmp r6, #0
+ str r4, [r0, #68]
+ moveq r5, r1
+ ldr r1, [sp, #104] @ 4-byte Reload
+ str r5, [r0, #72]
+ moveq r8, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r8, [r0, #76]
+ moveq r10, r1
+ ldr r1, [sp, #112] @ 4-byte Reload
+ cmp r6, #0
+ str r10, [r0, #80]
+ moveq r2, r1
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r2, [r0, #84]
+ ldr r2, [sp, #84] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #120] @ 4-byte Reload
+ str r2, [r0, #88]
+ ldr r2, [sp, #116] @ 4-byte Reload
+ moveq r2, r1
+ str r2, [r0, #92]
+ add sp, sp, #136
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end189:
+ .size mcl_fpDbl_sub12L, .Lfunc_end189-mcl_fpDbl_sub12L
+ .cantunwind
+ .fnend
+
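+@ .LmulPv416x32(r0=z, r1=x, r2=y): local helper that multiplies the 13-word
+@ (416-bit) little-endian integer x by the 32-bit scalar y and stores the
+@ 14-word product at z, with z[13] receiving the final carry. Descriptive
+@ comment added for readability; inferred from the umull/umlal/adcs chain,
+@ not generator output.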
+ .align 2
+ .type .LmulPv416x32,%function
+.LmulPv416x32: @ @mulPv416x32
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r11, lr}
+ ldr r12, [r1]
+ ldmib r1, {r3, lr}
+ ldr r9, [r1, #12]
+ umull r4, r8, lr, r2
+ umull lr, r6, r12, r2
+ mov r5, r4
+ mov r7, r6
+ str lr, [r0]
+ umull lr, r12, r9, r2
+ umlal r7, r5, r3, r2
+ str r5, [r0, #8]
+ str r7, [r0, #4]
+ umull r5, r7, r3, r2
+ adds r3, r6, r5
+ adcs r3, r7, r4
+ adcs r3, r8, lr
+ str r3, [r0, #12]
+ ldr r3, [r1, #16]
+ umull r7, r6, r3, r2
+ adcs r3, r12, r7
+ str r3, [r0, #16]
+ ldr r3, [r1, #20]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #20]
+ ldr r3, [r1, #24]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #24]
+ ldr r3, [r1, #28]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #28]
+ ldr r3, [r1, #32]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #32]
+ ldr r3, [r1, #36]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #36]
+ ldr r3, [r1, #40]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #40]
+ ldr r3, [r1, #44]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #44]
+ ldr r1, [r1, #48]
+ umull r3, r7, r1, r2
+ adcs r1, r5, r3
+ str r1, [r0, #48]
+ adc r1, r7, #0
+ str r1, [r0, #52]
+ pop {r4, r5, r6, r7, r8, r9, r11, lr}
+ mov pc, lr
+.Lfunc_end190:
+ .size .LmulPv416x32, .Lfunc_end190-.LmulPv416x32
+ .cantunwind
+ .fnend
+
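+@ mcl_fp_mulUnitPre13L(z, x, y): z[0..13] = x[0..12] * y. Thin wrapper that
+@ calls .LmulPv416x32 into a stack buffer and copies the 14-word result out
+@ to z. Descriptive comment added for readability; inferred from the code.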
+ .globl mcl_fp_mulUnitPre13L
+ .align 2
+ .type mcl_fp_mulUnitPre13L,%function
+mcl_fp_mulUnitPre13L: @ @mcl_fp_mulUnitPre13L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #68
+ sub sp, sp, #68
+ mov r4, r0
+ add r0, sp, #8
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #60]
+ add r12, sp, #12
+ ldr lr, [sp, #56]
+ ldr r8, [sp, #52]
+ ldr r9, [sp, #48]
+ ldr r10, [sp, #44]
+ ldr r11, [sp, #40]
+ ldr r5, [sp, #36]
+ ldr r6, [sp, #32]
+ ldr r7, [sp, #28]
+ ldr r3, [sp, #8]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r12, {r0, r1, r2, r12}
+ str r3, [r4]
+ stmib r4, {r0, r1, r2, r12}
+ str r7, [r4, #20]
+ str r6, [r4, #24]
+ str r5, [r4, #28]
+ str r11, [r4, #32]
+ str r10, [r4, #36]
+ str r9, [r4, #40]
+ str r8, [r4, #44]
+ str lr, [r4, #48]
+ ldr r0, [sp, #4] @ 4-byte Reload
+ str r0, [r4, #52]
+ add sp, sp, #68
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end191:
+ .size mcl_fp_mulUnitPre13L, .Lfunc_end191-mcl_fp_mulUnitPre13L
+ .cantunwind
+ .fnend
+
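+@ mcl_fpDbl_mulPre13L(z, x, y): schoolbook 13x13-limb multiplication giving
+@ the full 26-word product. Each of the 13 limbs of y is handled by one call
+@ to .LmulPv416x32; the 14-word partial product is accumulated into the
+@ running sum with adcs and the lowest finished word is written to z after
+@ each pass. Descriptive comment added for readability; inferred from the code.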
+ .globl mcl_fpDbl_mulPre13L
+ .align 2
+ .type mcl_fpDbl_mulPre13L,%function
+mcl_fpDbl_mulPre13L: @ @mcl_fpDbl_mulPre13L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #820
+ sub sp, sp, #820
+ mov r7, r2
+ mov r4, r0
+ add r0, sp, #760
+ str r1, [sp, #84] @ 4-byte Spill
+ mov r5, r1
+ ldr r2, [r7]
+ str r7, [sp, #80] @ 4-byte Spill
+ str r4, [sp, #76] @ 4-byte Spill
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #812]
+ ldr r1, [sp, #764]
+ ldr r2, [r7, #4]
+ mov r6, r5
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #808]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #768]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #804]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #772]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #800]
+ str r1, [sp, #20] @ 4-byte Spill
+ mov r1, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #796]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #792]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #788]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #784]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #780]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #776]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #760]
+ str r0, [r4]
+ add r0, sp, #704
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #756]
+ add r10, sp, #728
+ add lr, sp, #704
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #752]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #748]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #744]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r10, {r5, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #24] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #4]
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r7, #8]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r6
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ add r0, sp, #648
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #700]
+ add lr, sp, #676
+ add r9, sp, #656
+ ldr r11, [sp, #692]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #696]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm lr, {r5, r7, r12, lr}
+ ldr r8, [sp, #648]
+ ldr r10, [sp, #652]
+ ldm r9, {r0, r1, r2, r3, r9}
+ ldr r6, [sp, #24] @ 4-byte Reload
+ adds r6, r8, r6
+ str r6, [r4, #8]
+ mov r6, r4
+ ldr r4, [sp, #40] @ 4-byte Reload
+ adcs r4, r10, r4
+ str r4, [sp, #24] @ 4-byte Spill
+ ldr r4, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r5, r0
+ ldr r5, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r2, [r5, #12]
+ adcs r0, r7, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r4
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ add r0, sp, #592
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #644]
+ add lr, sp, #612
+ add r7, sp, #600
+ ldr r8, [sp, #628]
+ ldr r11, [sp, #624]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #640]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #636]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #632]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm lr, {r3, r12, lr}
+ ldr r0, [sp, #592]
+ ldr r9, [sp, #596]
+ ldm r7, {r1, r2, r7}
+ ldr r10, [sp, #24] @ 4-byte Reload
+ adds r0, r0, r10
+ str r0, [r6, #12]
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r6, r9, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #16]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r4
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ add r0, sp, #536
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #588]
+ ldr r8, [sp, #536]
+ add r4, sp, #540
+ ldr r11, [sp, #580]
+ ldr r9, [sp, #576]
+ ldr lr, [sp, #572]
+ ldr r5, [sp, #568]
+ ldr r10, [sp, #564]
+ ldr r12, [sp, #560]
+ ldr r3, [sp, #556]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #584]
+ adds r6, r8, r6
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r4, {r0, r1, r2, r4}
+ ldr r7, [sp, #76] @ 4-byte Reload
+ str r6, [r7, #16]
+ ldr r6, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ ldr r2, [r4, #20]
+ adcs r0, r3, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r5, r0
+ ldr r5, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ add r0, sp, #480
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #532]
+ add r10, sp, #480
+ add r12, sp, #492
+ ldr r6, [sp, #516]
+ ldr r11, [sp, #512]
+ ldr lr, [sp, #508]
+ ldr r9, [sp, #504]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #528]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #524]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #520]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r10, {r0, r1, r10}
+ ldm r12, {r2, r3, r12}
+ ldr r8, [sp, #24] @ 4-byte Reload
+ adds r0, r0, r8
+ str r0, [r7, #20]
+ ldr r0, [sp, #44] @ 4-byte Reload
+ mov r7, r5
+ adcs r0, r1, r0
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r4, #24]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ add r0, sp, #424
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #476]
+ add r5, sp, #428
+ ldr r11, [sp, #464]
+ ldr r9, [sp, #460]
+ ldr lr, [sp, #456]
+ ldr r10, [sp, #452]
+ ldr r12, [sp, #448]
+ ldr r3, [sp, #444]
+ ldr r8, [sp, #424]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #472]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #468]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r5, {r0, r1, r2, r5}
+ ldr r4, [sp, #24] @ 4-byte Reload
+ adds r6, r8, r4
+ ldr r4, [sp, #76] @ 4-byte Reload
+ str r6, [r4, #24]
+ ldr r6, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r5, r0
+ ldr r5, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ ldr r2, [r5, #28]
+ adcs r0, r3, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r7
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ add r0, sp, #368
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #420]
+ add r12, sp, #388
+ add r10, sp, #368
+ ldr lr, [sp, #408]
+ ldr r6, [sp, #404]
+ ldr r11, [sp, #400]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #416]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #412]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r12, {r3, r9, r12}
+ ldr r7, [sp, #384]
+ ldm r10, {r0, r1, r10}
+ ldr r8, [sp, #24] @ 4-byte Reload
+ ldr r2, [sp, #380]
+ adds r0, r0, r8
+ str r0, [r4, #28]
+ ldr r0, [sp, #52] @ 4-byte Reload
+ ldr r4, [sp, #84] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #32]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r4
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ add r0, sp, #312
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #364]
+ add r11, sp, #344
+ add lr, sp, #316
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #360]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #356]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r7, r9, r11}
+ ldr r10, [sp, #340]
+ ldr r8, [sp, #312]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r5, [sp, #24] @ 4-byte Reload
+ adds r6, r8, r5
+ ldr r5, [sp, #76] @ 4-byte Reload
+ str r6, [r5, #32]
+ ldr r6, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r6, #36]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r4
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ add r0, sp, #256
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #308]
+ add lr, sp, #288
+ add r12, sp, #268
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #304]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #300]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm lr, {r7, r8, lr}
+ ldr r11, [sp, #284]
+ ldr r1, [sp, #256]
+ ldr r0, [sp, #260]
+ ldr r10, [sp, #264]
+ ldm r12, {r2, r3, r9, r12}
+ ldr r4, [sp, #24] @ 4-byte Reload
+ adds r1, r1, r4
+ str r1, [r5, #36]
+ ldr r1, [sp, #60] @ 4-byte Reload
+ ldr r5, [sp, #84] @ 4-byte Reload
+ adcs r4, r0, r1
+ ldr r0, [sp, #64] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r6, #40]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ add r0, sp, #200
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #252]
+ add r11, sp, #228
+ add lr, sp, #204
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #248]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #244]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r7, r8, r10, r11}
+ ldr r9, [sp, #200]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r6, r9, r4
+ ldr r4, [sp, #76] @ 4-byte Reload
+ str r6, [r4, #40]
+ ldr r6, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r6, #44]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ add r0, sp, #144
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #196]
+ add r11, sp, #164
+ add r12, sp, #152
+ ldr lr, [sp, #184]
+ ldr r7, [sp, #180]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #192]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #188]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r5, r8, r10, r11}
+ ldr r2, [sp, #144]
+ ldr r1, [sp, #148]
+ ldm r12, {r0, r3, r12}
+ ldr r9, [sp, #24] @ 4-byte Reload
+ adds r2, r2, r9
+ str r2, [r4, #44]
+ ldr r2, [r6, #48]
+ ldr r6, [sp, #20] @ 4-byte Reload
+ adcs r6, r1, r6
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r9, r0, r1
+ ldr r0, [sp, #68] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ add r0, sp, #88
+ bl .LmulPv416x32(PLT)
+ add r3, sp, #88
+ add r11, sp, #104
+ ldm r3, {r0, r1, r2, r3}
+ adds r12, r0, r6
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs lr, r1, r9
+ adcs r5, r2, r0
+ ldr r0, [sp, #4] @ 4-byte Reload
+ adcs r6, r3, r0
+ ldr r0, [sp, #140]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldm r11, {r0, r1, r2, r3, r7, r8, r9, r10, r11}
+ str r12, [r4, #48]
+ str lr, [r4, #52]
+ str r5, [r4, #56]
+ ldr r5, [sp, #24] @ 4-byte Reload
+ str r6, [r4, #60]
+ ldr r6, [sp, #28] @ 4-byte Reload
+ add r12, r4, #80
+ adcs r0, r0, r5
+ adcs r1, r1, r6
+ str r0, [r4, #64]
+ ldr r0, [sp, #52] @ 4-byte Reload
+ ldr r6, [sp, #84] @ 4-byte Reload
+ str r1, [r4, #68]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #72] @ 4-byte Reload
+ adcs r1, r3, r1
+ str r0, [r4, #72]
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r3, [sp, #68] @ 4-byte Reload
+ str r1, [r4, #76]
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r0, r7, r0
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adcs r1, r8, r1
+ adcs r2, r9, r2
+ adcs r3, r10, r3
+ adcs r7, r11, r7
+ adc r6, r6, #0
+ stm r12, {r0, r1, r2, r3, r7}
+ str r6, [r4, #100]
+ add sp, sp, #820
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end192:
+ .size mcl_fpDbl_mulPre13L, .Lfunc_end192-mcl_fpDbl_mulPre13L
+ .cantunwind
+ .fnend
+
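+@ mcl_fpDbl_sqrPre13L(z, x): 26-word square z = x * x. Same row-by-row
+@ structure as mcl_fpDbl_mulPre13L above, with x used as both multiplicand
+@ and multiplier (this generated code takes no dedicated squaring shortcut).
+@ Descriptive comment added for readability; inferred from the code.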
+ .globl mcl_fpDbl_sqrPre13L
+ .align 2
+ .type mcl_fpDbl_sqrPre13L,%function
+mcl_fpDbl_sqrPre13L: @ @mcl_fpDbl_sqrPre13L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #820
+ sub sp, sp, #820
+ mov r5, r1
+ mov r4, r0
+ add r0, sp, #760
+ ldr r2, [r5]
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #812]
+ ldr r1, [sp, #764]
+ ldr r2, [r5, #4]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #808]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #768]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #804]
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #772]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #800]
+ str r1, [sp, #32] @ 4-byte Spill
+ mov r1, r5
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #796]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #792]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #788]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #784]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #780]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #776]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #760]
+ str r0, [r4]
+ add r0, sp, #704
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #756]
+ add r10, sp, #728
+ add lr, sp, #704
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #752]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #748]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r10, {r6, r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #36] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #4]
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #8]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, sp, #648
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #700]
+ add lr, sp, #680
+ add r11, sp, #656
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #696]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #692]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm lr, {r6, r12, lr}
+ ldr r8, [sp, #648]
+ ldr r10, [sp, #652]
+ ldm r11, {r0, r1, r2, r3, r9, r11}
+ ldr r7, [sp, #36] @ 4-byte Reload
+ adds r7, r8, r7
+ str r7, [r4, #8]
+ ldr r7, [sp, #52] @ 4-byte Reload
+ adcs r7, r10, r7
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #12]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, sp, #592
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #644]
+ add r9, sp, #620
+ add lr, sp, #600
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #640]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #636]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r9, {r6, r7, r8, r9}
+ ldr r0, [sp, #592]
+ ldr r11, [sp, #596]
+ ldm lr, {r1, r2, r3, r12, lr}
+ ldr r10, [sp, #36] @ 4-byte Reload
+ adds r0, r0, r10
+ str r0, [r4, #12]
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #16]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, sp, #536
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #588]
+ add r12, sp, #540
+ ldr r11, [sp, #576]
+ ldr lr, [sp, #572]
+ ldr r6, [sp, #568]
+ ldr r8, [sp, #536]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #584]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #580]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r12, {r0, r1, r2, r3, r9, r10, r12}
+ ldr r7, [sp, #36] @ 4-byte Reload
+ adds r7, r8, r7
+ str r7, [r4, #16]
+ ldr r7, [sp, #52] @ 4-byte Reload
+ adcs r7, r0, r7
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #20]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, sp, #480
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #532]
+ add r10, sp, #512
+ add lr, sp, #484
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #528]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #524]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r10, {r6, r8, r10}
+ ldr r9, [sp, #480]
+ ldr r11, [sp, #508]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r7, r9, r7
+ str r7, [r4, #20]
+ ldr r7, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #24]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, sp, #424
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #476]
+ add r8, sp, #456
+ add r12, sp, #432
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #472]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #468]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r8, {r6, r7, r8}
+ ldr lr, [sp, #452]
+ ldr r10, [sp, #448]
+ ldr r0, [sp, #424]
+ ldr r11, [sp, #428]
+ ldm r12, {r1, r2, r3, r12}
+ ldr r9, [sp, #36] @ 4-byte Reload
+ adds r0, r0, r9
+ str r0, [r4, #24]
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #28]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, sp, #368
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #420]
+ add r11, sp, #400
+ add lr, sp, #372
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #416]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #412]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r11, {r6, r8, r11}
+ ldr r10, [sp, #368]
+ ldm lr, {r0, r1, r2, r3, r9, r12, lr}
+ ldr r7, [sp, #36] @ 4-byte Reload
+ adds r7, r10, r7
+ str r7, [r4, #28]
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adcs r7, r0, r7
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #32]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, sp, #312
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #364]
+ add r10, sp, #344
+ add lr, sp, #316
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #360]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #356]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r10, {r6, r8, r10}
+ ldr r9, [sp, #312]
+ ldr r11, [sp, #340]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r7, r9, r7
+ str r7, [r4, #32]
+ ldr r7, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #36]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, sp, #256
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #308]
+ add r8, sp, #288
+ add r12, sp, #264
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #304]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #300]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm r8, {r6, r7, r8}
+ ldr lr, [sp, #284]
+ ldr r10, [sp, #280]
+ ldr r0, [sp, #256]
+ ldr r11, [sp, #260]
+ ldm r12, {r1, r2, r3, r12}
+ ldr r9, [sp, #36] @ 4-byte Reload
+ adds r0, r0, r9
+ str r0, [r4, #36]
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #40]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #200
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #252]
+ add r10, sp, #228
+ add r12, sp, #200
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #248]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #244]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm r10, {r6, r7, r8, r10}
+ ldr lr, [sp, #224]
+ ldr r9, [sp, #220]
+ ldm r12, {r0, r1, r2, r3, r12}
+ ldr r11, [sp, #32] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #40]
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #44]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, sp, #144
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #196]
+ add r12, sp, #148
+ ldr r7, [sp, #180]
+ ldr r11, [sp, #176]
+ ldr r8, [sp, #172]
+ ldr lr, [sp, #168]
+ ldr r10, [sp, #164]
+ ldr r2, [sp, #144]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #192]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #188]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #184]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r12, {r0, r1, r3, r12}
+ ldr r6, [sp, #32] @ 4-byte Reload
+ adds r2, r2, r6
+ ldr r6, [sp, #84] @ 4-byte Reload
+ str r2, [r4, #44]
+ ldr r2, [r5, #48]
+ adcs r6, r0, r6
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r9, r1, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ add r0, sp, #88
+ bl .LmulPv416x32(PLT)
+ add r3, sp, #88
+ add r11, sp, #104
+ ldm r3, {r0, r1, r2, r3}
+ adds r12, r0, r6
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs lr, r1, r9
+ adcs r5, r2, r0
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r6, r3, r0
+ ldr r0, [sp, #140]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldm r11, {r0, r1, r2, r3, r7, r8, r9, r10, r11}
+ str r12, [r4, #48]
+ str lr, [r4, #52]
+ str r5, [r4, #56]
+ ldr r5, [sp, #32] @ 4-byte Reload
+ str r6, [r4, #60]
+ ldr r6, [sp, #36] @ 4-byte Reload
+ add r12, r4, #80
+ adcs r0, r0, r5
+ adcs r1, r1, r6
+ str r0, [r4, #64]
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r6, [sp, #56] @ 4-byte Reload
+ str r1, [r4, #68]
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #80] @ 4-byte Reload
+ adcs r1, r3, r1
+ str r0, [r4, #72]
+ ldr r0, [sp, #68] @ 4-byte Reload
+ ldr r3, [sp, #76] @ 4-byte Reload
+ str r1, [r4, #76]
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r0, r7, r0
+ ldr r7, [sp, #72] @ 4-byte Reload
+ adcs r1, r8, r1
+ adcs r2, r9, r2
+ adcs r3, r10, r3
+ adcs r7, r11, r7
+ adc r6, r6, #0
+ stm r12, {r0, r1, r2, r3, r7}
+ str r6, [r4, #100]
+ add sp, sp, #820
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end193:
+ .size mcl_fpDbl_sqrPre13L, .Lfunc_end193-mcl_fpDbl_sqrPre13L
+ .cantunwind
+ .fnend
+
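+@ mcl_fp_mont13L(z, x, y, p): word-serial Montgomery multiplication for
+@ 13-limb operands, z = x * y * R^-1 mod p with R = 2^416. Each iteration
+@ multiplies by one limb of y, then by q = t[0] * rp, where rp is loaded
+@ from the word just below the modulus ("ldr r5, [r3, #-4]"), and both
+@ partial products are accumulated via .LmulPv416x32. Descriptive comment;
+@ this reading of the generated code, including the p[-1] convention for
+@ the Montgomery constant, is an assumption, not generator output.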
+ .globl mcl_fp_mont13L
+ .align 2
+ .type mcl_fp_mont13L,%function
+mcl_fp_mont13L: @ @mcl_fp_mont13L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #548
+ sub sp, sp, #548
+ .pad #1024
+ sub sp, sp, #1024
+ add r12, sp, #100
+ add r6, sp, #1024
+ mov r4, r3
+ stm r12, {r1, r2, r3}
+ str r0, [sp, #68] @ 4-byte Spill
+ add r0, r6, #488
+ ldr r5, [r3, #-4]
+ ldr r2, [r2]
+ str r5, [sp, #96] @ 4-byte Spill
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #1516]
+ ldr r7, [sp, #1512]
+ mov r1, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #1520]
+ mul r2, r7, r5
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #1524]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #1564]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #1560]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #1556]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #1552]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #1548]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #1544]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1540]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1536]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1532]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1528]
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #1456
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #1508]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r5, [sp, #1480]
+ ldr r10, [sp, #1476]
+ ldr r11, [sp, #1472]
+ ldr r6, [sp, #1456]
+ ldr r9, [sp, #1460]
+ ldr r8, [sp, #1464]
+ ldr r4, [sp, #1468]
+ add lr, sp, #1024
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1504]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1500]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1496]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1492]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1488]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1484]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r2, [r0, #4]
+ add r0, lr, #376
+ bl .LmulPv416x32(PLT)
+ adds r0, r6, r7
+ ldr r1, [sp, #36] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ ldr r3, [sp, #1416]
+ ldr r12, [sp, #1420]
+ ldr lr, [sp, #1424]
+ ldr r6, [sp, #1432]
+ ldr r7, [sp, #1436]
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r9, r0
+ ldr r9, [sp, #1444]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r8, r0
+ ldr r8, [sp, #1440]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #1428]
+ adcs r1, r11, r1
+ str r0, [sp, #60] @ 4-byte Spill
+ mov r0, #0
+ ldr r11, [sp, #72] @ 4-byte Reload
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r10, r1
+ ldr r10, [sp, #1448]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r5, r1
+ ldr r5, [sp, #1400]
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #92] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #1412]
+ adc r0, r0, #0
+ adds r11, r11, r5
+ ldr r5, [sp, #64] @ 4-byte Reload
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #1408]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1452]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1404]
+ adcs r0, r5, r0
+ mov r5, r11
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #1344
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #1396]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r8, [sp, #1368]
+ ldr r9, [sp, #1364]
+ ldr r10, [sp, #1360]
+ ldr r11, [sp, #1344]
+ ldr r6, [sp, #1348]
+ ldr r7, [sp, #1352]
+ ldr r4, [sp, #1356]
+ add lr, sp, #1024
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1392]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1388]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1384]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1380]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1376]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1372]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r2, [r0, #8]
+ add r0, lr, #264
+ bl .LmulPv416x32(PLT)
+ adds r0, r5, r11
+ ldr r1, [sp, #8] @ 4-byte Reload
+ ldr r5, [sp, #1288]
+ ldr r2, [sp, #1300]
+ ldr r3, [sp, #1304]
+ ldr r12, [sp, #1308]
+ ldr lr, [sp, #1312]
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1320]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ ldr r11, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #1324]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1316]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1336]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1332]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1328]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1296]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r11, r11, r5
+ ldr r5, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1340]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1292]
+ adcs r0, r5, r0
+ mov r5, r11
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #1232
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #1284]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r8, [sp, #1256]
+ ldr r9, [sp, #1252]
+ ldr r10, [sp, #1248]
+ ldr r11, [sp, #1232]
+ ldr r6, [sp, #1236]
+ ldr r7, [sp, #1240]
+ ldr r4, [sp, #1244]
+ add lr, sp, #1024
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1280]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1276]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1272]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1268]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1264]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1260]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r2, [r0, #12]
+ add r0, lr, #152
+ bl .LmulPv416x32(PLT)
+ adds r0, r5, r11
+ ldr r1, [sp, #8] @ 4-byte Reload
+ ldr r5, [sp, #1176]
+ ldr r2, [sp, #1188]
+ ldr r3, [sp, #1192]
+ ldr r12, [sp, #1196]
+ ldr lr, [sp, #1200]
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1208]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ ldr r11, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #1212]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1204]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1224]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1220]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1216]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1184]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r11, r11, r5
+ ldr r5, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1228]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1180]
+ adcs r0, r5, r0
+ mov r5, r11
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #1120
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #1172]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r8, [sp, #1144]
+ ldr r9, [sp, #1140]
+ ldr r10, [sp, #1136]
+ ldr r11, [sp, #1120]
+ ldr r6, [sp, #1124]
+ ldr r7, [sp, #1128]
+ ldr r4, [sp, #1132]
+ add lr, sp, #1024
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1168]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1164]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1160]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1156]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1152]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1148]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r2, [r0, #16]
+ add r0, lr, #40
+ bl .LmulPv416x32(PLT)
+ adds r0, r5, r11
+ ldr r1, [sp, #8] @ 4-byte Reload
+ ldr r5, [sp, #1064]
+ ldr r2, [sp, #1076]
+ ldr r3, [sp, #1080]
+ ldr r12, [sp, #1084]
+ ldr lr, [sp, #1088]
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1096]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ ldr r11, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #1100]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1092]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1112]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1108]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1104]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1072]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r11, r11, r5
+ ldr r5, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1116]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1068]
+ adcs r0, r5, r0
+ mov r5, r11
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #1008
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #1060]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r8, [sp, #1032]
+ ldr r9, [sp, #1028]
+ ldr r10, [sp, #1024]
+ ldr r11, [sp, #1008]
+ ldr r6, [sp, #1012]
+ ldr r7, [sp, #1016]
+ ldr r4, [sp, #1020]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1056]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1052]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1048]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1044]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1040]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1036]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r2, [r0, #20]
+ add r0, sp, #952
+ bl .LmulPv416x32(PLT)
+ adds r0, r5, r11
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #956
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #980
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1004]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r4, r6, r7, r8, r9, r10}
+ ldr r5, [sp, #952]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #92] @ 4-byte Reload
+ adds r11, r11, r5
+ ldr r5, [sp, #88] @ 4-byte Reload
+ adcs r0, r5, r0
+ mov r5, r11
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #896
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #948]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r8, [sp, #920]
+ ldr r9, [sp, #916]
+ ldr r10, [sp, #912]
+ ldr r11, [sp, #896]
+ ldr r6, [sp, #900]
+ ldr r7, [sp, #904]
+ ldr r4, [sp, #908]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #944]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #940]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #936]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #932]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #928]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #924]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r2, [r0, #24]
+ add r0, sp, #840
+ bl .LmulPv416x32(PLT)
+ adds r0, r5, r11
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #844
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #868
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #892]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r4, r6, r7, r8, r9, r10}
+ ldr r5, [sp, #840]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #92] @ 4-byte Reload
+ adds r11, r11, r5
+ ldr r5, [sp, #88] @ 4-byte Reload
+ adcs r0, r5, r0
+ mov r5, r11
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #784
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #836]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r8, [sp, #808]
+ ldr r9, [sp, #804]
+ ldr r10, [sp, #800]
+ ldr r11, [sp, #784]
+ ldr r6, [sp, #788]
+ ldr r7, [sp, #792]
+ ldr r4, [sp, #796]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #832]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #828]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #824]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #820]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #816]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #812]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r2, [r0, #28]
+ add r0, sp, #728
+ bl .LmulPv416x32(PLT)
+ adds r0, r5, r11
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #732
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #756
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #780]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r4, r6, r7, r8, r9, r10}
+ ldr r5, [sp, #728]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #92] @ 4-byte Reload
+ adds r11, r11, r5
+ ldr r5, [sp, #88] @ 4-byte Reload
+ adcs r0, r5, r0
+ mov r5, r11
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #672
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #724]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r8, [sp, #696]
+ ldr r9, [sp, #692]
+ ldr r10, [sp, #688]
+ ldr r11, [sp, #672]
+ ldr r6, [sp, #676]
+ ldr r7, [sp, #680]
+ ldr r4, [sp, #684]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #720]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #716]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #712]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #708]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #704]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #700]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r2, [r0, #32]
+ add r0, sp, #616
+ bl .LmulPv416x32(PLT)
+ adds r0, r5, r11
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #620
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #644
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #668]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r4, r6, r7, r8, r9, r10}
+ ldr r5, [sp, #616]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #92] @ 4-byte Reload
+ adds r11, r11, r5
+ ldr r5, [sp, #88] @ 4-byte Reload
+ adcs r0, r5, r0
+ mov r5, r11
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #560
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #612]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r8, [sp, #584]
+ ldr r9, [sp, #580]
+ ldr r10, [sp, #576]
+ ldr r11, [sp, #560]
+ ldr r6, [sp, #564]
+ ldr r7, [sp, #568]
+ ldr r4, [sp, #572]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #608]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #604]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #600]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #596]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #592]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #588]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r2, [r0, #36]
+ add r0, sp, #504
+ bl .LmulPv416x32(PLT)
+ adds r0, r5, r11
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #508
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #532
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #556]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #552]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r10, {r4, r6, r8, r9, r10}
+ ldr r5, [sp, #504]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #92] @ 4-byte Reload
+ ldr r7, [sp, #88] @ 4-byte Reload
+ adds r5, r11, r5
+ adcs r0, r7, r0
+ str r5, [sp, #20] @ 4-byte Spill
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #96] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ mul r2, r5, r8
+ adcs r0, r0, r9
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #448
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #500]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r5, [sp, #472]
+ ldr r9, [sp, #468]
+ ldr r10, [sp, #464]
+ ldr r11, [sp, #448]
+ ldr r6, [sp, #452]
+ ldr r7, [sp, #456]
+ ldr r4, [sp, #460]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #496]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #492]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #488]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #484]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #480]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #476]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r2, [r0, #40]
+ add r0, sp, #392
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #20] @ 4-byte Reload
+ ldr r1, [sp, #88] @ 4-byte Reload
+ ldr r2, [sp, #4] @ 4-byte Reload
+ add lr, sp, #408
+ adds r0, r0, r11
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ add r6, sp, #392
+ adcs r11, r1, r7
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r1, r4
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r10
+ add r10, sp, #432
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r1, r1, r9
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r1, r1, r5
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #8] @ 4-byte Reload
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adc r1, r1, #0
+ str r1, [sp, #36] @ 4-byte Spill
+ ldm r6, {r2, r5, r6}
+ ldr r4, [sp, #404]
+ adds r0, r0, r2
+ mul r1, r0, r8
+ adcs r5, r11, r5
+ str r0, [sp, #92] @ 4-byte Spill
+ str r1, [sp, #28] @ 4-byte Spill
+ ldm r10, {r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ str r5, [sp, #88] @ 4-byte Spill
+ ldr r5, [sp, #84] @ 4-byte Reload
+ adcs r5, r5, r6
+ str r5, [sp, #84] @ 4-byte Spill
+ ldr r5, [sp, #80] @ 4-byte Reload
+ adcs r4, r5, r4
+ str r4, [sp, #80] @ 4-byte Spill
+ ldr r4, [sp, #76] @ 4-byte Reload
+ adcs r0, r4, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #36] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ add r0, sp, #336
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #388]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r6, [sp, #364]
+ ldr r8, [sp, #360]
+ ldr r9, [sp, #356]
+ ldr r10, [sp, #352]
+ ldr r7, [sp, #336]
+ ldr r4, [sp, #340]
+ ldr r11, [sp, #344]
+ ldr r5, [sp, #348]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #384]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #380]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #376]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #372]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #368]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r2, [r0, #44]
+ add r0, sp, #280
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r1, [sp, #84] @ 4-byte Reload
+ ldr r2, [sp, #8] @ 4-byte Reload
+ add lr, sp, #296
+ adds r0, r0, r7
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #292]
+ adcs r11, r1, r11
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r5
+ ldr r5, [sp, #288]
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r1, r1, r10
+ add r10, sp, #320
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r1, r1, r9
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r8
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #284]
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #280]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adc r1, r1, #0
+ str r1, [sp, #28] @ 4-byte Spill
+ adds r1, r0, r2
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r6, r11, r6
+ str r1, [sp, #92] @ 4-byte Spill
+ mul r2, r1, r0
+ str r2, [sp, #24] @ 4-byte Spill
+ ldm r10, {r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ str r6, [sp, #40] @ 4-byte Spill
+ ldr r6, [sp, #88] @ 4-byte Reload
+ adcs r5, r6, r5
+ str r5, [sp, #36] @ 4-byte Spill
+ ldr r5, [sp, #84] @ 4-byte Reload
+ adcs r4, r5, r4
+ str r4, [sp, #32] @ 4-byte Spill
+ ldr r4, [sp, #80] @ 4-byte Reload
+ adcs r0, r4, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #48] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #224
+ bl .LmulPv416x32(PLT)
+ ldr r1, [sp, #276]
+ add r11, sp, #224
+ ldr r4, [sp, #252]
+ ldr r8, [sp, #248]
+ ldr r9, [sp, #244]
+ ldr r10, [sp, #240]
+ add r0, sp, #168
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #272]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #268]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #264]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #260]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [sp, #256]
+ str r1, [sp, #8] @ 4-byte Spill
+ ldm r11, {r6, r7, r11}
+ ldr r1, [sp, #104] @ 4-byte Reload
+ ldr r5, [sp, #236]
+ ldr r2, [r1, #48]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r1, [sp, #36] @ 4-byte Reload
+ ldr r2, [sp, #8] @ 4-byte Reload
+ add lr, sp, #184
+ adds r0, r0, r6
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r7
+ add r7, sp, #168
+ adcs r1, r1, r11
+ str r1, [sp, #104] @ 4-byte Spill
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r1, r1, r5
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adcs r1, r1, r10
+ add r10, sp, #208
+ str r1, [sp, #92] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r1, r9
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r8
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r1, r1, r4
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adc r1, r1, #0
+ str r1, [sp, #48] @ 4-byte Spill
+ ldm r7, {r2, r6, r7}
+ ldr r5, [sp, #180]
+ adds r4, r0, r2
+ ldr r0, [sp, #96] @ 4-byte Reload
+ mul r1, r4, r0
+ ldr r0, [sp, #220]
+ str r1, [sp, #44] @ 4-byte Spill
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #104] @ 4-byte Reload
+ adcs r11, r11, r6
+ ldr r6, [sp, #100] @ 4-byte Reload
+ adcs r6, r6, r7
+ str r6, [sp, #36] @ 4-byte Spill
+ ldr r6, [sp, #92] @ 4-byte Reload
+ adcs r5, r6, r5
+ ldr r6, [sp, #88] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r8, r0, r8
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r9, r0, r9
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r6, r0, r1
+ mov r0, #0
+ mov r1, r10
+ adc r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ add r0, sp, #112
+ bl .LmulPv416x32(PLT)
+ add r3, sp, #112
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r4, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r7, r11, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r7, [sp, #48] @ 4-byte Spill
+ adcs lr, r0, r2
+ ldr r0, [sp, #128]
+ adcs r12, r5, r3
+ str lr, [sp, #52] @ 4-byte Spill
+ str r12, [sp, #56] @ 4-byte Spill
+ adcs r4, r1, r0
+ ldr r0, [sp, #132]
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r4, [sp, #60] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #136]
+ adcs r0, r1, r0
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #140]
+ adcs r0, r1, r0
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #144]
+ adcs r0, r1, r0
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #148]
+ adcs r0, r1, r0
+ ldr r1, [sp, #104] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #152]
+ adcs r0, r8, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #156]
+ adcs r0, r9, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #160]
+ adcs r0, r1, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #164]
+ adcs r0, r6, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ mov r0, r10
+ ldmib r0, {r1, r2, r3, r5}
+ ldr r6, [r0]
+ ldr r10, [r0, #20]
+ ldr r11, [r0, #28]
+ str r5, [sp, #40] @ 4-byte Spill
+ ldr r5, [r0, #24]
+ subs r6, r7, r6
+ sbcs r9, lr, r1
+ str r5, [sp, #44] @ 4-byte Spill
+ mov r5, r0
+ sbcs r0, r12, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ ldr r1, [r5, #48]
+ sbcs r3, r4, r3
+ ldr lr, [r5, #32]
+ ldr r12, [r5, #36]
+ ldr r8, [r5, #40]
+ ldr r4, [r5, #44]
+ ldr r5, [sp, #44] @ 4-byte Reload
+ str r1, [sp, #108] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ sbcs r1, r1, r2
+ ldr r2, [sp, #76] @ 4-byte Reload
+ sbcs r7, r2, r10
+ ldr r2, [sp, #80] @ 4-byte Reload
+ sbcs r2, r2, r5
+ ldr r5, [sp, #84] @ 4-byte Reload
+ sbcs r10, r5, r11
+ ldr r5, [sp, #88] @ 4-byte Reload
+ sbcs r11, r5, lr
+ ldr r5, [sp, #92] @ 4-byte Reload
+ sbcs r12, r5, r12
+ ldr r5, [sp, #96] @ 4-byte Reload
+ sbcs lr, r5, r8
+ ldr r5, [sp, #100] @ 4-byte Reload
+ sbcs r4, r5, r4
+ ldr r5, [sp, #104] @ 4-byte Reload
+ str r4, [sp, #44] @ 4-byte Spill
+ ldr r4, [sp, #108] @ 4-byte Reload
+ sbcs r5, r5, r4
+ str r5, [sp, #108] @ 4-byte Spill
+ ldr r5, [sp, #64] @ 4-byte Reload
+ sbc r5, r5, #0
+ ands r8, r5, #1
+ ldr r5, [sp, #48] @ 4-byte Reload
+ movne r6, r5
+ ldr r5, [sp, #68] @ 4-byte Reload
+ str r6, [r5]
+ ldr r6, [sp, #52] @ 4-byte Reload
+ movne r9, r6
+ ldr r6, [sp, #56] @ 4-byte Reload
+ str r9, [r5, #4]
+ movne r0, r6
+ cmp r8, #0
+ str r0, [r5, #8]
+ ldr r0, [sp, #60] @ 4-byte Reload
+ movne r3, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ str r3, [r5, #12]
+ movne r1, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ str r1, [r5, #16]
+ ldr r1, [sp, #44] @ 4-byte Reload
+ movne r7, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ cmp r8, #0
+ str r7, [r5, #20]
+ movne r2, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ str r2, [r5, #24]
+ movne r10, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ str r10, [r5, #28]
+ movne r11, r0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ cmp r8, #0
+ str r11, [r5, #32]
+ movne r12, r0
+ ldr r0, [sp, #96] @ 4-byte Reload
+ str r12, [r5, #36]
+ movne lr, r0
+ ldr r0, [sp, #100] @ 4-byte Reload
+ str lr, [r5, #40]
+ movne r1, r0
+ ldr r0, [sp, #104] @ 4-byte Reload
+ cmp r8, #0
+ str r1, [r5, #44]
+ ldr r1, [sp, #108] @ 4-byte Reload
+ movne r1, r0
+ str r1, [r5, #48]
+ add sp, sp, #548
+ add sp, sp, #1024
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end194:
+ .size mcl_fp_mont13L, .Lfunc_end194-mcl_fp_mont13L
+ .cantunwind
+ .fnend
+
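+@ Editorial note, not part of the generated source: the routines in this
+@ hunk (mcl_fp_mont13L above, mcl_fp_montNF13L below) appear to be
+@ compiler-generated ARM code for 13-limb (13 x 32 bits = 416 bits)
+@ Montgomery multiplication.  A rough sketch of one iteration, assuming
+@ the usual word-serial (CIOS-style) form and the stack usage visible
+@ above ([sp, #96] holding rp = -p^-1 mod 2^32, [sp, #104] the pointer to
+@ the multiplier words, [sp, #108] the modulus pointer):
+@
+@     t  = t + x * y[i]          @ first  .LmulPv416x32 call
+@     m  = (t mod 2^32) * rp     @ mul r2, r11, r0 with r0 = [sp, #96]
+@     t  = t + m * p             @ second .LmulPv416x32 call
+@     t  = t >> 32               @ folded into the adcs/adc chain
+@
+@ The "@ 4-byte Spill" / "@ 4-byte Reload" annotations mark stack spills
+@ of intermediate limbs between the two helper calls.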
+ .globl mcl_fp_montNF13L
+ .align 2
+ .type mcl_fp_montNF13L,%function
+mcl_fp_montNF13L: @ @mcl_fp_montNF13L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #548
+ sub sp, sp, #548
+ .pad #1024
+ sub sp, sp, #1024
+ add r12, sp, #100
+ add r6, sp, #1024
+ mov r4, r3
+ stm r12, {r1, r2, r3}
+ str r0, [sp, #72] @ 4-byte Spill
+ add r0, r6, #488
+ ldr r5, [r3, #-4]
+ ldr r2, [r2]
+ str r5, [sp, #96] @ 4-byte Spill
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #1516]
+ ldr r8, [sp, #1512]
+ mov r1, r4
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #1520]
+ mul r2, r8, r5
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #1524]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #1564]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #1560]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #1556]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #1552]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #1548]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #1544]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1540]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1536]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1532]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1528]
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #1456
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #1508]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r10, [sp, #1480]
+ ldr r11, [sp, #1476]
+ ldr r6, [sp, #1472]
+ ldr r7, [sp, #1456]
+ ldr r9, [sp, #1460]
+ ldr r4, [sp, #1464]
+ ldr r5, [sp, #1468]
+ add lr, sp, #1024
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1504]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1500]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1496]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1492]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1488]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1484]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r2, [r0, #4]
+ add r0, lr, #376
+ bl .LmulPv416x32(PLT)
+ adds r0, r7, r8
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #1412]
+ ldr r3, [sp, #1416]
+ ldr r12, [sp, #1420]
+ ldr lr, [sp, #1424]
+ ldr r7, [sp, #1436]
+ ldr r8, [sp, #1440]
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r9, r0
+ ldr r9, [sp, #1444]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #1400]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r5, r0
+ ldr r5, [sp, #1428]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r6, r0
+ ldr r6, [sp, #1432]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r11, r0
+ ldr r11, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r10, r0
+ ldr r10, [sp, #1448]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adc r0, r1, r0
+ adds r11, r11, r4
+ ldr r4, [sp, #68] @ 4-byte Reload
+ ldr r1, [sp, #1408]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1452]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1404]
+ adcs r0, r4, r0
+ mov r4, r11
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #1344
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #1396]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r8, [sp, #1368]
+ ldr r9, [sp, #1364]
+ ldr r10, [sp, #1360]
+ ldr r11, [sp, #1344]
+ ldr r6, [sp, #1348]
+ ldr r7, [sp, #1352]
+ ldr r5, [sp, #1356]
+ add lr, sp, #1024
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1392]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1388]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1384]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1380]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1376]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1372]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r2, [r0, #8]
+ add r0, lr, #264
+ bl .LmulPv416x32(PLT)
+ adds r0, r4, r11
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add r11, sp, #1312
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1340]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r11, {r4, r5, r6, r8, r9, r10, r11}
+ ldr r0, [sp, #1288]
+ ldr r7, [sp, #92] @ 4-byte Reload
+ ldr r1, [sp, #1292]
+ ldr r2, [sp, #1296]
+ ldr r3, [sp, #1300]
+ ldr r12, [sp, #1304]
+ ldr lr, [sp, #1308]
+ adds r7, r7, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ mov r8, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #1232
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #1284]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r9, [sp, #1256]
+ ldr r10, [sp, #1252]
+ ldr r11, [sp, #1248]
+ ldr r7, [sp, #1232]
+ ldr r5, [sp, #1236]
+ ldr r4, [sp, #1240]
+ ldr r6, [sp, #1244]
+ add lr, sp, #1024
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1280]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1276]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1272]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1268]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1264]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1260]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r2, [r0, #12]
+ add r0, lr, #152
+ bl .LmulPv416x32(PLT)
+ adds r0, r8, r7
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #1188]
+ ldr r3, [sp, #1192]
+ ldr r12, [sp, #1196]
+ ldr lr, [sp, #1200]
+ ldr r7, [sp, #1212]
+ ldr r8, [sp, #1216]
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1204]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1176]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1208]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1224]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1220]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r11, r11, r4
+ ldr r4, [sp, #88] @ 4-byte Reload
+ ldr r1, [sp, #1184]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1228]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1180]
+ adcs r0, r4, r0
+ mov r4, r11
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #1120
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #1172]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r8, [sp, #1144]
+ ldr r9, [sp, #1140]
+ ldr r10, [sp, #1136]
+ ldr r11, [sp, #1120]
+ ldr r6, [sp, #1124]
+ ldr r7, [sp, #1128]
+ ldr r5, [sp, #1132]
+ add lr, sp, #1024
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1168]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1164]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1160]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1156]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1152]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1148]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r2, [r0, #16]
+ add r0, lr, #40
+ bl .LmulPv416x32(PLT)
+ adds r0, r4, r11
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add r11, sp, #1088
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1116]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r11, {r4, r5, r6, r8, r9, r10, r11}
+ ldr r0, [sp, #1064]
+ ldr r7, [sp, #92] @ 4-byte Reload
+ ldr r1, [sp, #1068]
+ ldr r2, [sp, #1072]
+ ldr r3, [sp, #1076]
+ ldr r12, [sp, #1080]
+ ldr lr, [sp, #1084]
+ adds r7, r7, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ mov r8, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #1008
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #1060]
+ add r11, sp, #1016
+ ldr r9, [sp, #1032]
+ ldr r10, [sp, #1028]
+ ldr r7, [sp, #1008]
+ ldr r5, [sp, #1012]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1056]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1052]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1048]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1044]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1040]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1036]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r6, r11}
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r2, [r0, #20]
+ add r0, sp, #952
+ bl .LmulPv416x32(PLT)
+ adds r0, r8, r7
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #956
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #980
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1004]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r5, r6, r7, r8, r9, r10}
+ ldr r4, [sp, #952]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #92] @ 4-byte Reload
+ adds r11, r11, r4
+ ldr r4, [sp, #88] @ 4-byte Reload
+ adcs r0, r4, r0
+ mov r4, r11
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #896
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #948]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r8, [sp, #920]
+ ldr r9, [sp, #916]
+ ldr r10, [sp, #912]
+ ldr r11, [sp, #896]
+ ldr r6, [sp, #900]
+ ldr r7, [sp, #904]
+ ldr r5, [sp, #908]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #944]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #940]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #936]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #932]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #928]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #924]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r2, [r0, #24]
+ add r0, sp, #840
+ bl .LmulPv416x32(PLT)
+ adds r0, r4, r11
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add r11, sp, #864
+ add lr, sp, #840
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #892]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r11, {r4, r5, r6, r8, r9, r10, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #92] @ 4-byte Reload
+ adds r7, r7, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ mov r8, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #784
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #836]
+ add r11, sp, #792
+ ldr r9, [sp, #808]
+ ldr r10, [sp, #804]
+ ldr r7, [sp, #784]
+ ldr r5, [sp, #788]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #832]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #828]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #824]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #820]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #816]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #812]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r6, r11}
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r2, [r0, #28]
+ add r0, sp, #728
+ bl .LmulPv416x32(PLT)
+ adds r0, r8, r7
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #732
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #756
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #780]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r5, r6, r7, r8, r9, r10}
+ ldr r4, [sp, #728]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #92] @ 4-byte Reload
+ adds r11, r11, r4
+ ldr r4, [sp, #88] @ 4-byte Reload
+ adcs r0, r4, r0
+ mov r4, r11
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #672
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #724]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r8, [sp, #696]
+ ldr r9, [sp, #692]
+ ldr r10, [sp, #688]
+ ldr r11, [sp, #672]
+ ldr r6, [sp, #676]
+ ldr r7, [sp, #680]
+ ldr r5, [sp, #684]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #720]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #716]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #712]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #708]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #704]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #700]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r2, [r0, #32]
+ add r0, sp, #616
+ bl .LmulPv416x32(PLT)
+ adds r0, r4, r11
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add r11, sp, #640
+ add lr, sp, #616
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #668]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r11, {r4, r5, r6, r8, r9, r10, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #92] @ 4-byte Reload
+ adds r7, r7, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ mov r8, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #560
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #612]
+ add r11, sp, #568
+ ldr r9, [sp, #584]
+ ldr r10, [sp, #580]
+ ldr r7, [sp, #560]
+ ldr r5, [sp, #564]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #608]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #604]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #600]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #596]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #592]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #588]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r6, r11}
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r2, [r0, #36]
+ add r0, sp, #504
+ bl .LmulPv416x32(PLT)
+ adds r0, r8, r7
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #508
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #532
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #556]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r5, r6, r7, r8, r9, r10}
+ ldr r4, [sp, #504]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #92] @ 4-byte Reload
+ adds r11, r11, r4
+ ldr r4, [sp, #88] @ 4-byte Reload
+ adcs r0, r4, r0
+ mov r4, r11
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #96] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ mul r2, r11, r8
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, sp, #448
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #500]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r9, [sp, #468]
+ ldr r10, [sp, #464]
+ ldr r11, [sp, #448]
+ ldr r6, [sp, #452]
+ ldr r7, [sp, #456]
+ ldr r5, [sp, #460]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #496]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #492]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #488]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #484]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #480]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #476]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #472]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r2, [r0, #40]
+ add r0, sp, #392
+ bl .LmulPv416x32(PLT)
+ adds r0, r4, r11
+ ldr r1, [sp, #88] @ 4-byte Reload
+ ldr r2, [sp, #8] @ 4-byte Reload
+ add lr, sp, #408
+ ldr r4, [sp, #400]
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #396]
+ adcs r1, r1, r7
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r1, r5
+ ldr r5, [sp, #404]
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r10
+ add r10, sp, #432
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r1, r1, r9
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adc r1, r1, r2
+ ldr r2, [sp, #392]
+ str r1, [sp, #40] @ 4-byte Spill
+ adds r0, r0, r2
+ mul r1, r0, r8
+ str r0, [sp, #92] @ 4-byte Spill
+ str r1, [sp, #32] @ 4-byte Spill
+ ldm r10, {r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #88] @ 4-byte Reload
+ adcs r6, r11, r6
+ str r6, [sp, #88] @ 4-byte Spill
+ ldr r6, [sp, #84] @ 4-byte Reload
+ adcs r4, r6, r4
+ str r4, [sp, #84] @ 4-byte Spill
+ ldr r4, [sp, #80] @ 4-byte Reload
+ adcs r4, r4, r5
+ str r4, [sp, #80] @ 4-byte Spill
+ ldr r4, [sp, #76] @ 4-byte Reload
+ adcs r0, r4, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ adc r0, r10, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #336
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #388]
+ add r9, sp, #344
+ ldr r6, [sp, #364]
+ ldr r7, [sp, #360]
+ ldr r8, [sp, #356]
+ ldr r10, [sp, #336]
+ ldr r11, [sp, #340]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #384]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #380]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #376]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #372]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #368]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r9, {r4, r5, r9}
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r2, [r0, #44]
+ add r0, sp, #280
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r1, [sp, #84] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ add lr, sp, #296
+ adds r0, r0, r10
+ add r10, sp, #320
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ adcs r1, r1, r4
+ ldr r4, [sp, #288]
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r5
+ ldr r5, [sp, #292]
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r1, r1, r9
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r1, r1, r8
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r7
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #284]
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adc r1, r1, r2
+ ldr r2, [sp, #280]
+ str r1, [sp, #32] @ 4-byte Spill
+ adds r1, r0, r2
+ ldr r0, [sp, #96] @ 4-byte Reload
+ str r1, [sp, #92] @ 4-byte Spill
+ mul r2, r1, r0
+ str r2, [sp, #24] @ 4-byte Spill
+ ldm r10, {r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #88] @ 4-byte Reload
+ adcs r6, r11, r6
+ str r6, [sp, #44] @ 4-byte Spill
+ ldr r6, [sp, #84] @ 4-byte Reload
+ adcs r4, r6, r4
+ str r4, [sp, #40] @ 4-byte Spill
+ ldr r4, [sp, #80] @ 4-byte Reload
+ adcs r4, r4, r5
+ str r4, [sp, #36] @ 4-byte Spill
+ ldr r4, [sp, #76] @ 4-byte Reload
+ adcs r0, r4, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #52] @ 4-byte Spill
+ adc r0, r10, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ add r0, sp, #224
+ bl .LmulPv416x32(PLT)
+ ldr r1, [sp, #276]
+ add r9, sp, #232
+ ldr r6, [sp, #252]
+ ldr r7, [sp, #248]
+ ldr r8, [sp, #244]
+ ldr r10, [sp, #224]
+ ldr r11, [sp, #228]
+ add r0, sp, #168
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #272]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #268]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #264]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #260]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #256]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldm r9, {r4, r5, r9}
+ ldr r1, [sp, #104] @ 4-byte Reload
+ ldr r2, [r1, #48]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r1, [sp, #40] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ add lr, sp, #184
+ adds r0, r0, r10
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r11
+ adcs r1, r1, r4
+ str r1, [sp, #104] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r1, r1, r5
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adcs r1, r1, r9
+ str r1, [sp, #92] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r1, r8
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r7
+ add r7, sp, #168
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r1, r1, r6
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adc r1, r1, r2
+ str r1, [sp, #52] @ 4-byte Spill
+ ldm r7, {r2, r6, r7}
+ ldr r5, [sp, #180]
+ ldr r4, [sp, #216]
+ ldr r9, [sp, #212]
+ ldr r8, [sp, #208]
+ adds r10, r0, r2
+ ldr r0, [sp, #96] @ 4-byte Reload
+ mul r1, r10, r0
+ ldr r0, [sp, #220]
+ str r1, [sp, #48] @ 4-byte Spill
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #104] @ 4-byte Reload
+ adcs r11, r11, r6
+ ldr r6, [sp, #100] @ 4-byte Reload
+ adcs r7, r6, r7
+ ldr r6, [sp, #92] @ 4-byte Reload
+ adcs r5, r6, r5
+ ldr r6, [sp, #88] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r8, r0, r8
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ mov r1, r4
+ adc r6, r0, #0
+ add r0, sp, #112
+ bl .LmulPv416x32(PLT)
+ add r3, sp, #112
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r10, r0
+ adcs r12, r11, r1
+ ldr r0, [sp, #128]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r2, r7, r2
+ str r12, [sp, #52] @ 4-byte Spill
+ adcs lr, r5, r3
+ str r2, [sp, #56] @ 4-byte Spill
+ str lr, [sp, #60] @ 4-byte Spill
+ adcs r9, r1, r0
+ ldr r0, [sp, #132]
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r9, [sp, #64] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #136]
+ adcs r0, r1, r0
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #140]
+ adcs r0, r1, r0
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #144]
+ adcs r10, r1, r0
+ ldr r0, [sp, #148]
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r10, [sp, #68] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #152]
+ adcs r0, r8, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #156]
+ adcs r0, r1, r0
+ ldr r1, [sp, #104] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #160]
+ adcs r0, r1, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #164]
+ adc r0, r6, r0
+ mov r6, r4
+ str r0, [sp, #104] @ 4-byte Spill
+ ldmib r6, {r0, r1, r7}
+ ldr r5, [r6, #24]
+ ldr r4, [r6, #28]
+ ldr r3, [r6, #16]
+ ldr r11, [r6, #20]
+ str r5, [sp, #48] @ 4-byte Spill
+ ldr r5, [r6]
+ str r4, [sp, #44] @ 4-byte Spill
+ subs r5, r12, r5
+ sbcs r8, r2, r0
+ sbcs r2, lr, r1
+ sbcs lr, r9, r7
+ add r7, r6, #32
+ ldm r7, {r0, r1, r7}
+ ldr r4, [r6, #44]
+ ldr r9, [r6, #48]
+ ldr r6, [sp, #76] @ 4-byte Reload
+ sbcs r3, r6, r3
+ ldr r6, [sp, #80] @ 4-byte Reload
+ str r4, [sp, #40] @ 4-byte Spill
+ ldr r4, [sp, #48] @ 4-byte Reload
+ sbcs r12, r6, r11
+ ldr r6, [sp, #84] @ 4-byte Reload
+ sbcs r11, r6, r4
+ ldr r4, [sp, #44] @ 4-byte Reload
+ sbcs r10, r10, r4
+ ldr r4, [sp, #88] @ 4-byte Reload
+ sbcs r4, r4, r0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ sbcs r6, r0, r1
+ ldr r0, [sp, #96] @ 4-byte Reload
+ ldr r1, [sp, #40] @ 4-byte Reload
+ sbcs r7, r0, r7
+ ldr r0, [sp, #100] @ 4-byte Reload
+ sbcs r0, r0, r1
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ sbc r9, r0, r9
+ ldr r0, [sp, #52] @ 4-byte Reload
+ asr r1, r9, #31
+ cmp r1, #0
+ movlt r5, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ str r5, [r0]
+ ldr r5, [sp, #56] @ 4-byte Reload
+ movlt r8, r5
+ ldr r5, [sp, #60] @ 4-byte Reload
+ str r8, [r0, #4]
+ movlt r2, r5
+ cmp r1, #0
+ str r2, [r0, #8]
+ ldr r2, [sp, #64] @ 4-byte Reload
+ movlt lr, r2
+ ldr r2, [sp, #76] @ 4-byte Reload
+ str lr, [r0, #12]
+ movlt r3, r2
+ ldr r2, [sp, #80] @ 4-byte Reload
+ str r3, [r0, #16]
+ ldr r3, [sp, #108] @ 4-byte Reload
+ movlt r12, r2
+ ldr r2, [sp, #84] @ 4-byte Reload
+ cmp r1, #0
+ str r12, [r0, #20]
+ movlt r11, r2
+ ldr r2, [sp, #68] @ 4-byte Reload
+ str r11, [r0, #24]
+ movlt r10, r2
+ ldr r2, [sp, #88] @ 4-byte Reload
+ str r10, [r0, #28]
+ movlt r4, r2
+ ldr r2, [sp, #92] @ 4-byte Reload
+ cmp r1, #0
+ str r4, [r0, #32]
+ movlt r6, r2
+ ldr r2, [sp, #96] @ 4-byte Reload
+ str r6, [r0, #36]
+ movlt r7, r2
+ ldr r2, [sp, #100] @ 4-byte Reload
+ str r7, [r0, #40]
+ movlt r3, r2
+ cmp r1, #0
+ ldr r1, [sp, #104] @ 4-byte Reload
+ str r3, [r0, #44]
+ movlt r9, r1
+ str r9, [r0, #48]
+ add sp, sp, #548
+ add sp, sp, #1024
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end195:
+ .size mcl_fp_montNF13L, .Lfunc_end195-mcl_fp_montNF13L
+ .cantunwind
+ .fnend
+
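+@ The routine below, mcl_fp_montRed13L, appears to be the Montgomery reduction
+@ (REDC) path for 13 x 32-bit limbs (416 bits): each round reloads the word at
+@ [modulus - 4] (presumably -p^-1 mod 2^32), multiplies it by the current low
+@ limb to form the reduction factor in r2, and folds in one row of the product
+@ via .LmulPv416x32, mirroring the per-limb rounds of mcl_fp_montNF13L above.
+@ This summary is inferred from the visible structure, not stated in the source.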
+ .globl mcl_fp_montRed13L
+ .align 2
+ .type mcl_fp_montRed13L,%function
+mcl_fp_montRed13L: @ @mcl_fp_montRed13L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #908
+ sub sp, sp, #908
+ mov r3, r2
+ str r0, [sp, #164] @ 4-byte Spill
+ ldr r2, [r1, #4]
+ ldr r11, [r1]
+ ldr r0, [r3]
+ str r3, [sp, #168] @ 4-byte Spill
+ str r2, [sp, #72] @ 4-byte Spill
+ ldr r2, [r1, #8]
+ str r0, [sp, #160] @ 4-byte Spill
+ ldr r0, [r3, #4]
+ str r2, [sp, #68] @ 4-byte Spill
+ ldr r2, [r1, #12]
+ str r0, [sp, #156] @ 4-byte Spill
+ ldr r0, [r3, #8]
+ str r2, [sp, #64] @ 4-byte Spill
+ str r0, [sp, #152] @ 4-byte Spill
+ ldr r0, [r3, #12]
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [r3, #16]
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [r3, #20]
+ str r0, [sp, #144] @ 4-byte Spill
+ ldr r0, [r3, #24]
+ str r0, [sp, #148] @ 4-byte Spill
+ ldr r0, [r3, #-4]
+ str r0, [sp, #172] @ 4-byte Spill
+ mul r2, r11, r0
+ ldr r0, [r3, #28]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [r3, #32]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [r3, #36]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [r3, #40]
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [r3, #44]
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [r3, #48]
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [r1, #96]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [r1, #100]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [r1, #64]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r1, #72]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [r1, #76]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [r1, #80]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [r1, #84]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [r1, #88]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [r1, #92]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [r1, #68]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [r1, #32]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r1, #36]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r1, #40]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r1, #44]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r1, #48]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r1, #52]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [r1, #56]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [r1, #60]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [r1, #28]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r1, #24]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [r1, #20]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [r1, #16]
+ mov r1, r3
+ str r0, [sp, #12] @ 4-byte Spill
+ add r0, sp, #848
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #900]
+ add r10, sp, #872
+ add lr, sp, #848
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r10, {r4, r5, r6, r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r11, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r11, r0, r1
+ ldr r0, [sp, #68] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #168] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #108] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #172] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #792
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #844]
+ add lr, sp, #832
+ add r9, sp, #800
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm lr, {r3, r12, lr}
+ ldr r4, [sp, #792]
+ ldr r5, [sp, #828]
+ ldr r6, [sp, #824]
+ ldr r7, [sp, #820]
+ ldr r10, [sp, #816]
+ ldr r8, [sp, #812]
+ ldr r1, [sp, #796]
+ ldm r9, {r0, r2, r9}
+ adds r4, r11, r4
+ ldr r4, [sp, #72] @ 4-byte Reload
+ adcs r11, r4, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r4, [sp, #172] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #168] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ add r0, sp, #736
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #788]
+ add r10, sp, #760
+ add lr, sp, #736
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #784]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #780]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r10, {r6, r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r11, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r11, r0, r1
+ ldr r0, [sp, #68] @ 4-byte Reload
+ ldr r1, [sp, #4] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #680
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #732]
+ add lr, sp, #720
+ add r10, sp, #688
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm lr, {r3, r12, lr}
+ ldr r4, [sp, #680]
+ ldr r5, [sp, #716]
+ ldr r6, [sp, #712]
+ ldr r7, [sp, #708]
+ ldr r1, [sp, #684]
+ ldm r10, {r0, r2, r8, r9, r10}
+ adds r4, r11, r4
+ ldr r4, [sp, #72] @ 4-byte Reload
+ adcs r11, r4, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r4, [sp, #172] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #168] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #624
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #676]
+ add r10, sp, #648
+ add lr, sp, #624
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #672]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #668]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r6, r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r11, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r11, r0, r1
+ ldr r0, [sp, #68] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ add r0, sp, #568
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #620]
+ add lr, sp, #608
+ add r10, sp, #576
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm lr, {r3, r12, lr}
+ ldr r4, [sp, #568]
+ ldr r5, [sp, #604]
+ ldr r6, [sp, #600]
+ ldr r7, [sp, #596]
+ ldr r1, [sp, #572]
+ ldm r10, {r0, r2, r8, r9, r10}
+ adds r4, r11, r4
+ ldr r4, [sp, #72] @ 4-byte Reload
+ adcs r11, r4, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r4, [sp, #172] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #168] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ add r0, sp, #512
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #564]
+ add r10, sp, #536
+ add lr, sp, #512
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #560]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #556]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm r10, {r6, r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r11, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r11, r0, r1
+ ldr r0, [sp, #68] @ 4-byte Reload
+ ldr r1, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #456
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #508]
+ add lr, sp, #496
+ add r10, sp, #464
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm lr, {r3, r12, lr}
+ ldr r4, [sp, #456]
+ ldr r5, [sp, #492]
+ ldr r6, [sp, #488]
+ ldr r7, [sp, #484]
+ ldr r1, [sp, #460]
+ ldm r10, {r0, r2, r8, r9, r10}
+ adds r4, r11, r4
+ ldr r4, [sp, #72] @ 4-byte Reload
+ adcs r11, r4, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r4, [sp, #172] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #168] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, sp, #400
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #452]
+ add r10, sp, #424
+ add lr, sp, #400
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #448]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #444]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r10, {r6, r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r11, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r11, r0, r1
+ ldr r0, [sp, #68] @ 4-byte Reload
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #344
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #396]
+ add lr, sp, #384
+ add r10, sp, #352
+ str r0, [sp, #40] @ 4-byte Spill
+ ldm lr, {r3, r12, lr}
+ ldr r4, [sp, #344]
+ ldr r5, [sp, #380]
+ ldr r6, [sp, #376]
+ ldr r7, [sp, #372]
+ ldr r1, [sp, #348]
+ ldm r10, {r0, r2, r8, r9, r10}
+ adds r4, r11, r4
+ ldr r4, [sp, #72] @ 4-byte Reload
+ adcs r11, r4, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #168] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #172] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ mul r2, r11, r7
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r8
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #288
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #340]
+ add r10, sp, #312
+ add lr, sp, #288
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #336]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #332]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r10, {r4, r5, r6, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r11, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r11, r0, r1
+ ldr r0, [sp, #68] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r4
+ mov r4, r7
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ mul r2, r11, r4
+ adcs r0, r0, r5
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r9
+ mov r9, r8
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ add r0, sp, #232
+ bl .LmulPv416x32(PLT)
+ add r7, sp, #232
+ add lr, sp, #272
+ ldm r7, {r0, r1, r3, r7}
+ ldr r8, [sp, #284]
+ adds r0, r11, r0
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r11, r0, r1
+ mul r0, r11, r4
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #172] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r7
+ add r7, sp, #256
+ str r0, [sp, #60] @ 4-byte Spill
+ ldm lr, {r5, r12, lr}
+ ldr r6, [sp, #268]
+ ldm r7, {r1, r2, r7}
+ ldr r0, [sp, #248]
+ ldr r3, [sp, #108] @ 4-byte Reload
+ ldr r4, [sp, #252]
+ adcs r10, r3, r0
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r4, r0, r4
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r9
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r8, r0, r8
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ add r0, sp, #176
+ bl .LmulPv416x32(PLT)
+ add r3, sp, #176
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r11, r0
+ ldr r0, [sp, #172] @ 4-byte Reload
+ adcs r12, r0, r1
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r12, [sp, #52] @ 4-byte Spill
+ adcs r2, r0, r2
+ ldr r0, [sp, #192]
+ adcs r3, r10, r3
+ str r2, [sp, #64] @ 4-byte Spill
+ str r3, [sp, #68] @ 4-byte Spill
+ adcs r7, r4, r0
+ ldr r0, [sp, #196]
+ str r7, [sp, #72] @ 4-byte Spill
+ adcs r4, r1, r0
+ ldr r0, [sp, #200]
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r4, [sp, #76] @ 4-byte Spill
+ adcs r5, r1, r0
+ ldr r0, [sp, #204]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r5, [sp, #80] @ 4-byte Spill
+ adcs r6, r1, r0
+ ldr r0, [sp, #208]
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r6, [sp, #84] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #172] @ 4-byte Spill
+ ldr r0, [sp, #212]
+ adcs r11, r1, r0
+ ldr r0, [sp, #216]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r11, [sp, #92] @ 4-byte Spill
+ adcs r10, r1, r0
+ ldr r0, [sp, #220]
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r10, [sp, #100] @ 4-byte Spill
+ adcs r9, r1, r0
+ ldr r0, [sp, #224]
+ ldr r1, [sp, #104] @ 4-byte Reload
+ str r9, [sp, #108] @ 4-byte Spill
+ adcs r8, r8, r0
+ ldr r0, [sp, #228]
+ str r8, [sp, #168] @ 4-byte Spill
+ adcs lr, r1, r0
+ ldr r0, [sp, #96] @ 4-byte Reload
+ ldr r1, [sp, #156] @ 4-byte Reload
+ str lr, [sp, #104] @ 4-byte Spill
+ adc r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #160] @ 4-byte Reload
+ subs r0, r12, r0
+ sbcs r1, r2, r1
+ ldr r2, [sp, #152] @ 4-byte Reload
+ sbcs r2, r3, r2
+ ldr r3, [sp, #136] @ 4-byte Reload
+ sbcs r3, r7, r3
+ ldr r7, [sp, #140] @ 4-byte Reload
+ sbcs r12, r4, r7
+ ldr r4, [sp, #144] @ 4-byte Reload
+ ldr r7, [sp, #172] @ 4-byte Reload
+ sbcs r4, r5, r4
+ ldr r5, [sp, #148] @ 4-byte Reload
+ sbcs r5, r6, r5
+ ldr r6, [sp, #112] @ 4-byte Reload
+ sbcs r6, r7, r6
+ ldr r7, [sp, #116] @ 4-byte Reload
+ sbcs r7, r11, r7
+ str r7, [sp, #160] @ 4-byte Spill
+ ldr r7, [sp, #120] @ 4-byte Reload
+ sbcs r11, r10, r7
+ ldr r7, [sp, #124] @ 4-byte Reload
+ sbcs r9, r9, r7
+ ldr r7, [sp, #128] @ 4-byte Reload
+ sbcs r10, r8, r7
+ ldr r7, [sp, #132] @ 4-byte Reload
+ sbcs r8, lr, r7
+ ldr r7, [sp, #96] @ 4-byte Reload
+ sbc r7, r7, #0
+ ands lr, r7, #1
+ ldr r7, [sp, #52] @ 4-byte Reload
+ movne r0, r7
+ ldr r7, [sp, #164] @ 4-byte Reload
+ str r0, [r7]
+ ldr r0, [sp, #64] @ 4-byte Reload
+ movne r1, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ str r1, [r7, #4]
+ ldr r1, [sp, #92] @ 4-byte Reload
+ movne r2, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ cmp lr, #0
+ str r2, [r7, #8]
+ movne r3, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ str r3, [r7, #12]
+ movne r12, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ str r12, [r7, #16]
+ movne r4, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ cmp lr, #0
+ str r4, [r7, #20]
+ movne r5, r0
+ ldr r0, [sp, #172] @ 4-byte Reload
+ str r5, [r7, #24]
+ movne r6, r0
+ ldr r0, [sp, #160] @ 4-byte Reload
+ movne r0, r1
+ str r6, [r7, #28]
+ cmp lr, #0
+ str r0, [r7, #32]
+ ldr r0, [sp, #100] @ 4-byte Reload
+ movne r11, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ str r11, [r7, #36]
+ movne r9, r0
+ ldr r0, [sp, #168] @ 4-byte Reload
+ str r9, [r7, #40]
+ movne r10, r0
+ ldr r0, [sp, #104] @ 4-byte Reload
+ cmp lr, #0
+ str r10, [r7, #44]
+ movne r8, r0
+ str r8, [r7, #48]
+ add sp, sp, #908
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end196:
+ .size mcl_fp_montRed13L, .Lfunc_end196-mcl_fp_montRed13L
+ .cantunwind
+ .fnend
+
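+@ 13-limb (416-bit) add without reduction: adds the values at r1 and r2 into the buffer at r0 and returns the final carry in r0.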
+ .globl mcl_fp_addPre13L
+ .align 2
+ .type mcl_fp_addPre13L,%function
+mcl_fp_addPre13L: @ @mcl_fp_addPre13L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #36
+ sub sp, sp, #36
+ ldm r1, {r3, r12, lr}
+ ldr r9, [r1, #12]
+ ldmib r2, {r5, r6, r7}
+ ldr r11, [r2]
+ ldr r4, [r2, #16]
+ ldr r10, [r2, #32]
+ adds r8, r11, r3
+ ldr r3, [r2, #48]
+ str r4, [sp, #8] @ 4-byte Spill
+ ldr r4, [r2, #20]
+ ldr r11, [r1, #44]
+ adcs r5, r5, r12
+ add r12, r1, #16
+ adcs r6, r6, lr
+ ldr lr, [r1, #32]
+ str r3, [sp, #32] @ 4-byte Spill
+ ldr r3, [r2, #44]
+ str r4, [sp, #12] @ 4-byte Spill
+ ldr r4, [r2, #24]
+ str r3, [sp, #28] @ 4-byte Spill
+ ldr r3, [r2, #40]
+ str r4, [sp, #20] @ 4-byte Spill
+ ldr r4, [r2, #28]
+ str r3, [sp, #16] @ 4-byte Spill
+ ldr r3, [r2, #36]
+ ldr r2, [r1, #36]
+ str r4, [sp, #24] @ 4-byte Spill
+ adcs r4, r7, r9
+ ldr r7, [r1, #40]
+ ldr r9, [r1, #48]
+ str r3, [sp, #4] @ 4-byte Spill
+ str r2, [sp] @ 4-byte Spill
+ ldm r12, {r1, r2, r3, r12}
+ str r8, [r0]
+ stmib r0, {r5, r6}
+ str r4, [r0, #12]
+ ldr r5, [sp, #8] @ 4-byte Reload
+ ldr r4, [sp, #12] @ 4-byte Reload
+ ldr r6, [sp, #32] @ 4-byte Reload
+ adcs r1, r5, r1
+ str r1, [r0, #16]
+ adcs r2, r4, r2
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r2, [r0, #20]
+ ldr r2, [sp, #24] @ 4-byte Reload
+ adcs r1, r1, r3
+ ldr r3, [sp] @ 4-byte Reload
+ adcs r2, r2, r12
+ str r1, [r0, #24]
+ add r12, r0, #32
+ str r2, [r0, #28]
+ ldr r2, [sp, #4] @ 4-byte Reload
+ adcs r1, r10, lr
+ adcs r2, r2, r3
+ ldr r3, [sp, #16] @ 4-byte Reload
+ adcs r3, r3, r7
+ ldr r7, [sp, #28] @ 4-byte Reload
+ adcs r7, r7, r11
+ adcs r6, r6, r9
+ stm r12, {r1, r2, r3, r7}
+ str r6, [r0, #48]
+ mov r0, #0
+ adc r0, r0, #0
+ add sp, sp, #36
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end197:
+ .size mcl_fp_addPre13L, .Lfunc_end197-mcl_fp_addPre13L
+ .cantunwind
+ .fnend
+
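+@ 13-limb subtract without reduction: subtracts the value at r2 from the value at r1 into r0 and returns the borrow (0 or 1) in r0.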
+ .globl mcl_fp_subPre13L
+ .align 2
+ .type mcl_fp_subPre13L,%function
+mcl_fp_subPre13L: @ @mcl_fp_subPre13L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #36
+ sub sp, sp, #36
+ ldr r3, [r2, #16]
+ ldr r7, [r2]
+ ldr r6, [r1]
+ ldr r12, [r2, #4]
+ ldr r4, [r2, #8]
+ ldr r11, [r2, #12]
+ str r3, [sp, #12] @ 4-byte Spill
+ ldr r3, [r2, #20]
+ subs r7, r6, r7
+ str r3, [sp, #20] @ 4-byte Spill
+ ldr r3, [r2, #24]
+ str r3, [sp, #24] @ 4-byte Spill
+ ldr r3, [r2, #28]
+ str r3, [sp, #28] @ 4-byte Spill
+ ldmib r1, {r5, lr}
+ ldr r6, [r2, #48]
+ ldr r3, [r1, #12]
+ ldr r10, [r2, #32]
+ ldr r8, [r1, #44]
+ ldr r9, [r1, #48]
+ str r6, [sp, #32] @ 4-byte Spill
+ ldr r6, [r2, #44]
+ sbcs r5, r5, r12
+ add r12, r1, #16
+ sbcs r4, lr, r4
+ sbcs lr, r3, r11
+ ldr r3, [r2, #36]
+ ldr r11, [r1, #36]
+ str r6, [sp, #16] @ 4-byte Spill
+ ldr r6, [r2, #40]
+ ldr r2, [r1, #40]
+ str r3, [sp, #4] @ 4-byte Spill
+ str r6, [sp, #8] @ 4-byte Spill
+ ldr r6, [r1, #32]
+ str r2, [sp] @ 4-byte Spill
+ ldm r12, {r1, r2, r3, r12}
+ str r7, [r0]
+ str r5, [r0, #4]
+ str r4, [r0, #8]
+ ldr r4, [sp, #12] @ 4-byte Reload
+ ldr r7, [sp, #20] @ 4-byte Reload
+ str lr, [r0, #12]
+ sbcs r1, r1, r4
+ sbcs r2, r2, r7
+ str r1, [r0, #16]
+ ldr r1, [sp, #24] @ 4-byte Reload
+ ldr r7, [sp] @ 4-byte Reload
+ str r2, [r0, #20]
+ ldr r2, [sp, #28] @ 4-byte Reload
+ sbcs r1, r3, r1
+ ldr r3, [sp, #8] @ 4-byte Reload
+ sbcs r2, r12, r2
+ str r1, [r0, #24]
+ add r12, r0, #32
+ str r2, [r0, #28]
+ ldr r2, [sp, #4] @ 4-byte Reload
+ sbcs r1, r6, r10
+ ldr r6, [sp, #32] @ 4-byte Reload
+ sbcs r2, r11, r2
+ sbcs r3, r7, r3
+ ldr r7, [sp, #16] @ 4-byte Reload
+ sbcs r7, r8, r7
+ sbcs r6, r9, r6
+ stm r12, {r1, r2, r3, r7}
+ str r6, [r0, #48]
+ mov r0, #0
+ sbc r0, r0, #0
+ and r0, r0, #1
+ add sp, sp, #36
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end198:
+ .size mcl_fp_subPre13L, .Lfunc_end198-mcl_fp_subPre13L
+ .cantunwind
+ .fnend
+
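+@ Shifts the 13-limb value at r1 right by one bit into r0, carrying bits across limbs with lsrs/rrx and orr ... lsl #31.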
+ .globl mcl_fp_shr1_13L
+ .align 2
+ .type mcl_fp_shr1_13L,%function
+mcl_fp_shr1_13L: @ @mcl_fp_shr1_13L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #8
+ sub sp, sp, #8
+ add r9, r1, #8
+ ldm r9, {r2, r3, r4, r5, r8, r9}
+ ldm r1, {r10, lr}
+ ldr r12, [r1, #36]
+ lsr r7, lr, #1
+ lsr r6, r3, #1
+ lsrs r3, r3, #1
+ orr r11, r7, r2, lsl #31
+ ldr r7, [r1, #48]
+ rrx r2, r2
+ lsrs r3, lr, #1
+ rrx r3, r10
+ str r7, [sp, #4] @ 4-byte Spill
+ ldr r7, [r1, #44]
+ str r7, [sp] @ 4-byte Spill
+ ldr r7, [r1, #40]
+ ldr r1, [r1, #32]
+ stm r0, {r3, r11}
+ str r2, [r0, #8]
+ orr r2, r6, r4, lsl #31
+ str r2, [r0, #12]
+ lsrs r2, r5, #1
+ ldr r6, [sp] @ 4-byte Reload
+ rrx r2, r4
+ str r2, [r0, #16]
+ lsr r2, r5, #1
+ orr r2, r2, r8, lsl #31
+ str r2, [r0, #20]
+ lsrs r2, r9, #1
+ rrx r2, r8
+ str r2, [r0, #24]
+ lsr r2, r9, #1
+ orr r2, r2, r1, lsl #31
+ str r2, [r0, #28]
+ lsrs r2, r12, #1
+ lsr r2, r12, #1
+ rrx r1, r1
+ lsrs r3, r6, #1
+ add r12, r0, #32
+ orr r2, r2, r7, lsl #31
+ rrx r3, r7
+ lsr r7, r6, #1
+ ldr r6, [sp, #4] @ 4-byte Reload
+ orr r7, r7, r6, lsl #31
+ lsr r6, r6, #1
+ stm r12, {r1, r2, r3, r7}
+ str r6, [r0, #48]
+ add sp, sp, #8
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end199:
+ .size mcl_fp_shr1_13L, .Lfunc_end199-mcl_fp_shr1_13L
+ .cantunwind
+ .fnend
+
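+@ Modular add: stores a+b, then trial-subtracts the modulus at r3 and keeps the reduced limbs when the subtraction does not borrow.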
+ .globl mcl_fp_add13L
+ .align 2
+ .type mcl_fp_add13L,%function
+mcl_fp_add13L: @ @mcl_fp_add13L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #44
+ sub sp, sp, #44
+ ldr r9, [r1]
+ ldmib r1, {r8, lr}
+ ldr r12, [r1, #12]
+ ldm r2, {r4, r5, r6, r7}
+ adds r11, r4, r9
+ ldr r9, [r1, #24]
+ adcs r4, r5, r8
+ ldr r5, [r1, #20]
+ adcs r6, r6, lr
+ str r4, [sp, #32] @ 4-byte Spill
+ ldr r4, [r1, #16]
+ mov lr, r11
+ adcs r7, r7, r12
+ str r6, [sp, #28] @ 4-byte Spill
+ ldr r6, [r2, #32]
+ str lr, [r0]
+ str r7, [sp, #20] @ 4-byte Spill
+ ldr r7, [r2, #16]
+ adcs r8, r7, r4
+ ldr r4, [r2, #20]
+ adcs r7, r4, r5
+ ldr r5, [r2, #24]
+ ldr r4, [r1, #28]
+ str r7, [sp, #40] @ 4-byte Spill
+ adcs r7, r5, r9
+ ldr r5, [r2, #28]
+ str r7, [sp, #4] @ 4-byte Spill
+ ldr r11, [sp, #4] @ 4-byte Reload
+ adcs r7, r5, r4
+ ldr r5, [r1, #32]
+ ldr r4, [sp, #32] @ 4-byte Reload
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [sp, #20] @ 4-byte Reload
+ adcs r10, r6, r5
+ ldr r6, [r1, #36]
+ ldr r5, [r2, #36]
+ str r4, [r0, #4]
+ str r10, [sp, #24] @ 4-byte Spill
+ adcs r9, r5, r6
+ ldr r6, [r1, #40]
+ ldr r5, [r2, #40]
+ adcs r12, r5, r6
+ ldr r6, [r1, #44]
+ ldr r5, [r2, #44]
+ ldr r1, [r1, #48]
+ ldr r2, [r2, #48]
+ adcs r6, r5, r6
+ ldr r5, [sp, #28] @ 4-byte Reload
+ adcs r2, r2, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r6, [sp, #16] @ 4-byte Spill
+ str r2, [sp, #12] @ 4-byte Spill
+ str r5, [r0, #8]
+ str r7, [r0, #12]
+ str r8, [r0, #16]
+ str r1, [r0, #20]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r11, [r0, #24]
+ str r1, [r0, #28]
+ str r10, [r0, #32]
+ str r9, [r0, #36]
+ str r12, [r0, #40]
+ str r6, [r0, #44]
+ str r2, [r0, #48]
+ mov r2, #0
+ mov r10, r12
+ adc r1, r2, #0
+ str r1, [sp, #8] @ 4-byte Spill
+ ldm r3, {r2, r6}
+ ldr r1, [r3, #8]
+ ldr r12, [r3, #12]
+ subs r2, lr, r2
+ str r2, [sp] @ 4-byte Spill
+ sbcs r2, r4, r6
+ sbcs r1, r5, r1
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [r3, #16]
+ sbcs r7, r7, r12
+ add r12, r3, #32
+ sbcs r8, r8, r1
+ ldr r1, [r3, #20]
+ sbcs r1, r2, r1
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [r3, #24]
+ sbcs r1, r11, r1
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [r3, #28]
+ sbcs r5, r2, r1
+ ldm r12, {r1, r2, r6, r11, r12}
+ ldr r3, [sp, #24] @ 4-byte Reload
+ sbcs r3, r3, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ sbcs r4, r9, r2
+ sbcs lr, r10, r6
+ ldr r6, [sp, #8] @ 4-byte Reload
+ sbcs r2, r1, r11
+ ldr r1, [sp, #12] @ 4-byte Reload
+ sbcs r1, r1, r12
+ sbc r6, r6, #0
+ tst r6, #1
+ bne .LBB200_2
+@ BB#1: @ %nocarry
+ mov r6, r7
+ ldr r7, [sp] @ 4-byte Reload
+ add r12, r0, #32
+ str r7, [r0]
+ ldr r7, [sp, #32] @ 4-byte Reload
+ str r7, [r0, #4]
+ ldr r7, [sp, #28] @ 4-byte Reload
+ str r7, [r0, #8]
+ ldr r7, [sp, #40] @ 4-byte Reload
+ str r6, [r0, #12]
+ str r8, [r0, #16]
+ str r7, [r0, #20]
+ ldr r7, [sp, #20] @ 4-byte Reload
+ str r7, [r0, #24]
+ str r5, [r0, #28]
+ stm r12, {r3, r4, lr}
+ str r2, [r0, #44]
+ str r1, [r0, #48]
+.LBB200_2: @ %carry
+ add sp, sp, #44
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end200:
+ .size mcl_fp_add13L, .Lfunc_end200-mcl_fp_add13L
+ .cantunwind
+ .fnend
+
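+@ addNF variant: computes a+b and a+b-p, then selects limbwise with movlt on the sign of the trial subtraction (no carry word returned).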
+ .globl mcl_fp_addNF13L
+ .align 2
+ .type mcl_fp_addNF13L,%function
+mcl_fp_addNF13L: @ @mcl_fp_addNF13L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #64
+ sub sp, sp, #64
+ ldm r1, {r7, r8, lr}
+ ldr r6, [r2]
+ ldr r12, [r1, #12]
+ ldmib r2, {r4, r5, r9}
+ adds r10, r6, r7
+ ldr r7, [r2, #16]
+ ldr r6, [r1, #24]
+ adcs r4, r4, r8
+ adcs lr, r5, lr
+ ldr r5, [r1, #16]
+ str r4, [sp, #28] @ 4-byte Spill
+ ldr r4, [r1, #20]
+ adcs r9, r9, r12
+ str lr, [sp, #8] @ 4-byte Spill
+ str r9, [sp, #12] @ 4-byte Spill
+ adcs r7, r7, r5
+ ldr r5, [r2, #20]
+ str r7, [sp, #32] @ 4-byte Spill
+ adcs r7, r5, r4
+ ldr r5, [r2, #24]
+ str r7, [sp, #36] @ 4-byte Spill
+ adcs r8, r5, r6
+ ldr r6, [r1, #28]
+ ldr r5, [r2, #28]
+ str r8, [sp, #16] @ 4-byte Spill
+ adcs r7, r5, r6
+ ldr r6, [r1, #32]
+ ldr r5, [r2, #32]
+ str r7, [sp, #40] @ 4-byte Spill
+ adcs r7, r5, r6
+ ldr r6, [r1, #36]
+ ldr r5, [r2, #36]
+ str r7, [sp, #44] @ 4-byte Spill
+ adcs r7, r5, r6
+ ldr r6, [r1, #40]
+ ldr r5, [r2, #40]
+ str r7, [sp, #56] @ 4-byte Spill
+ adcs r7, r5, r6
+ ldr r6, [r1, #44]
+ ldr r5, [r2, #44]
+ ldr r1, [r1, #48]
+ ldr r2, [r2, #48]
+ str r7, [sp, #52] @ 4-byte Spill
+ adcs r7, r5, r6
+ adc r1, r2, r1
+ str r7, [sp, #48] @ 4-byte Spill
+ str r1, [sp, #60] @ 4-byte Spill
+ ldmib r3, {r1, r12}
+ ldr r2, [r3, #24]
+ ldr r7, [r3]
+ ldr r6, [r3, #12]
+ ldr r5, [r3, #16]
+ ldr r4, [r3, #20]
+ ldr r11, [r3, #28]
+ str r2, [sp, #24] @ 4-byte Spill
+ ldr r2, [sp, #28] @ 4-byte Reload
+ subs r7, r10, r7
+ sbcs r2, r2, r1
+ ldr r1, [r3, #40]
+ sbcs r12, lr, r12
+ sbcs lr, r9, r6
+ ldr r9, [r3, #32]
+ ldr r6, [r3, #36]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [r3, #44]
+ str r1, [sp] @ 4-byte Spill
+ ldr r1, [r3, #48]
+ str r1, [sp, #4] @ 4-byte Spill
+ ldr r1, [sp, #32] @ 4-byte Reload
+ sbcs r5, r1, r5
+ ldr r1, [sp, #36] @ 4-byte Reload
+ sbcs r3, r1, r4
+ ldr r1, [sp, #24] @ 4-byte Reload
+ sbcs r4, r8, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ sbcs r8, r1, r11
+ ldr r1, [sp, #44] @ 4-byte Reload
+ sbcs r9, r1, r9
+ ldr r1, [sp, #56] @ 4-byte Reload
+ sbcs r11, r1, r6
+ ldr r1, [sp, #52] @ 4-byte Reload
+ ldr r6, [sp, #20] @ 4-byte Reload
+ sbcs r1, r1, r6
+ ldr r6, [sp] @ 4-byte Reload
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ sbcs r1, r1, r6
+ ldr r6, [sp, #4] @ 4-byte Reload
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ sbc r6, r1, r6
+ asr r1, r6, #31
+ cmp r1, #0
+ movlt r7, r10
+ str r7, [r0]
+ ldr r7, [sp, #28] @ 4-byte Reload
+ movlt r2, r7
+ str r2, [r0, #4]
+ ldr r2, [sp, #8] @ 4-byte Reload
+ movlt r12, r2
+ ldr r2, [sp, #12] @ 4-byte Reload
+ cmp r1, #0
+ str r12, [r0, #8]
+ movlt lr, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str lr, [r0, #12]
+ movlt r5, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r5, [r0, #16]
+ movlt r3, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ cmp r1, #0
+ str r3, [r0, #20]
+ ldr r3, [sp, #20] @ 4-byte Reload
+ movlt r4, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r4, [r0, #24]
+ movlt r8, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r8, [r0, #28]
+ movlt r9, r2
+ ldr r2, [sp, #56] @ 4-byte Reload
+ cmp r1, #0
+ str r9, [r0, #32]
+ movlt r11, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r11, [r0, #36]
+ movlt r3, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r3, [r0, #40]
+ ldr r3, [sp, #24] @ 4-byte Reload
+ movlt r3, r2
+ cmp r1, #0
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r3, [r0, #44]
+ movlt r6, r1
+ str r6, [r0, #48]
+ add sp, sp, #64
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end201:
+ .size mcl_fp_addNF13L, .Lfunc_end201-mcl_fp_addNF13L
+ .cantunwind
+ .fnend
+
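+@ Modular subtract: stores a-b, then adds the modulus at r3 back in when the subtraction borrowed.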
+ .globl mcl_fp_sub13L
+ .align 2
+ .type mcl_fp_sub13L,%function
+mcl_fp_sub13L: @ @mcl_fp_sub13L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #56
+ sub sp, sp, #56
+ ldr r9, [r2]
+ ldmib r2, {r8, lr}
+ ldr r12, [r2, #12]
+ ldm r1, {r4, r5, r6, r7}
+ subs r11, r4, r9
+ ldr r4, [r2, #24]
+ sbcs r5, r5, r8
+ str r11, [sp, #28] @ 4-byte Spill
+ str r11, [r0]
+ sbcs r6, r6, lr
+ str r5, [sp, #52] @ 4-byte Spill
+ ldr r5, [r2, #20]
+ sbcs r7, r7, r12
+ str r6, [sp, #48] @ 4-byte Spill
+ ldr r6, [r2, #16]
+ str r7, [sp, #44] @ 4-byte Spill
+ ldr r7, [r1, #16]
+ ldr r11, [sp, #44] @ 4-byte Reload
+ sbcs r10, r7, r6
+ ldr r7, [r1, #20]
+ str r10, [sp, #36] @ 4-byte Spill
+ sbcs r12, r7, r5
+ ldr r7, [r1, #24]
+ ldr r5, [r1, #28]
+ sbcs r8, r7, r4
+ ldr r7, [r2, #28]
+ ldr r4, [r1, #36]
+ str r8, [sp, #40] @ 4-byte Spill
+ sbcs r9, r5, r7
+ ldr r7, [r2, #32]
+ ldr r5, [r1, #32]
+ sbcs r5, r5, r7
+ ldr r7, [r2, #36]
+ sbcs r6, r4, r7
+ ldr r7, [r2, #40]
+ ldr r4, [r1, #40]
+ sbcs lr, r4, r7
+ ldr r7, [r2, #44]
+ ldr r4, [r1, #44]
+ ldr r2, [r2, #48]
+ ldr r1, [r1, #48]
+ sbcs r7, r4, r7
+ ldr r4, [sp, #52] @ 4-byte Reload
+ sbcs r2, r1, r2
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r7, [sp, #32] @ 4-byte Spill
+ str r2, [sp, #24] @ 4-byte Spill
+ str r4, [r0, #4]
+ str r1, [r0, #8]
+ str r11, [r0, #12]
+ str r10, [r0, #16]
+ str r12, [r0, #20]
+ str r8, [r0, #24]
+ str r9, [r0, #28]
+ str r5, [r0, #32]
+ str r6, [r0, #36]
+ str lr, [r0, #40]
+ str r7, [r0, #44]
+ str r2, [r0, #48]
+ mov r2, #0
+ sbc r2, r2, #0
+ tst r2, #1
+ beq .LBB202_2
+@ BB#1: @ %carry
+ ldr r2, [r3, #48]
+ ldr r7, [sp, #28] @ 4-byte Reload
+ ldr r10, [r3, #4]
+ ldr r8, [r3, #8]
+ str r2, [sp, #20] @ 4-byte Spill
+ ldr r2, [r3, #12]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r3, #16]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r3, #20]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r3, #24]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [r3, #28]
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [r3]
+ adds r2, r2, r7
+ ldr r7, [r3, #44]
+ adcs r4, r10, r4
+ ldr r10, [r3, #36]
+ str r7, [sp, #28] @ 4-byte Spill
+ ldr r7, [r3, #40]
+ ldr r3, [r3, #32]
+ str r7, [sp, #52] @ 4-byte Spill
+ adcs r7, r8, r1
+ ldr r1, [sp] @ 4-byte Reload
+ stm r0, {r2, r4, r7}
+ ldr r2, [sp, #36] @ 4-byte Reload
+ ldr r7, [sp, #4] @ 4-byte Reload
+ adcs r1, r1, r11
+ str r1, [r0, #12]
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r2, r7, r2
+ str r2, [r0, #16]
+ adcs r2, r1, r12
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add r12, r0, #32
+ str r2, [r0, #20]
+ ldr r2, [sp, #40] @ 4-byte Reload
+ adcs r2, r1, r2
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r2, [r0, #24]
+ adcs r2, r1, r9
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r2, [r0, #28]
+ adcs r2, r3, r5
+ ldr r5, [sp, #20] @ 4-byte Reload
+ adcs r3, r10, r6
+ ldr r6, [sp, #28] @ 4-byte Reload
+ adcs r7, r1, lr
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r6, r6, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ stm r12, {r2, r3, r7}
+ str r6, [r0, #44]
+ adc r1, r5, r1
+ str r1, [r0, #48]
+.LBB202_2: @ %nocarry
+ add sp, sp, #56
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end202:
+ .size mcl_fp_sub13L, .Lfunc_end202-mcl_fp_sub13L
+ .cantunwind
+ .fnend
+
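+@ subNF variant: computes a-b and a-b+p, then selects limbwise with movge on the sign of a-b.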
+ .globl mcl_fp_subNF13L
+ .align 2
+ .type mcl_fp_subNF13L,%function
+mcl_fp_subNF13L: @ @mcl_fp_subNF13L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #72
+ sub sp, sp, #72
+ mov r12, r0
+ ldr r0, [r2, #32]
+ add r9, r1, #20
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r2, #36]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r2, #40]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [r2, #44]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r2, #48]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r1, #32]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [r1, #36]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [r1, #40]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [r1, #44]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [r1, #48]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r2, {r7, r11}
+ ldr r0, [r2, #8]
+ ldr r10, [r2, #12]
+ ldr r8, [r2, #16]
+ ldr lr, [r1, #16]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [r2, #20]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [r2, #24]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [r2, #28]
+ ldr r2, [r1, #8]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [r1, #12]
+ ldm r9, {r4, r5, r9}
+ ldm r1, {r1, r6}
+ subs r7, r1, r7
+ ldr r1, [sp, #52] @ 4-byte Reload
+ sbcs r6, r6, r11
+ str r7, [sp] @ 4-byte Spill
+ str r6, [sp, #4] @ 4-byte Spill
+ sbcs r1, r2, r1
+ ldr r2, [sp, #28] @ 4-byte Reload
+ sbcs r0, r0, r10
+ str r1, [sp, #8] @ 4-byte Spill
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ sbcs r0, lr, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ sbcs r0, r4, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ sbcs r0, r5, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ sbcs r0, r9, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ sbcs r11, r1, r0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r11, [sp, #20] @ 4-byte Spill
+ sbcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ sbcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ sbcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ sbc r0, r2, r1
+ ldr r1, [r3, #40]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r3, #32]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [r3, #44]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [r3, #36]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [r3, #48]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldm r3, {r2, lr}
+ ldr r1, [r3, #20]
+ ldr r5, [r3, #8]
+ ldr r10, [sp, #8] @ 4-byte Reload
+ ldr r4, [r3, #12]
+ ldr r8, [r3, #24]
+ ldr r9, [r3, #28]
+ adds r2, r7, r2
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [r3, #16]
+ adcs r3, r6, lr
+ ldr r6, [sp, #12] @ 4-byte Reload
+ adcs lr, r10, r5
+ ldr r5, [sp, #48] @ 4-byte Reload
+ adcs r4, r5, r4
+ ldr r5, [sp, #52] @ 4-byte Reload
+ adcs r5, r5, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r6, r1, r6
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r7, r1, r8
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r8, r1, r9
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r9, r11, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r11, r1, r0
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r1, r1, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r1, r0, r1
+ str r1, [sp, #32] @ 4-byte Spill
+ asr r1, r0, #31
+ ldr r0, [sp] @ 4-byte Reload
+ cmp r1, #0
+ movge lr, r10
+ movge r2, r0
+ ldr r0, [sp, #4] @ 4-byte Reload
+ str r2, [r12]
+ ldr r2, [sp, #24] @ 4-byte Reload
+ movge r3, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ cmp r1, #0
+ str r3, [r12, #4]
+ str lr, [r12, #8]
+ movge r4, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ str r4, [r12, #12]
+ movge r5, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ str r5, [r12, #16]
+ movge r6, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ cmp r1, #0
+ str r6, [r12, #20]
+ movge r7, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ str r7, [r12, #24]
+ movge r8, r0
+ ldr r0, [sp, #20] @ 4-byte Reload
+ str r8, [r12, #28]
+ movge r9, r0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ cmp r1, #0
+ str r9, [r12, #32]
+ movge r11, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ str r11, [r12, #36]
+ movge r2, r0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ str r2, [r12, #40]
+ ldr r2, [sp, #36] @ 4-byte Reload
+ movge r0, r2
+ cmp r1, #0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [r12, #44]
+ ldr r0, [sp, #32] @ 4-byte Reload
+ movge r0, r1
+ str r0, [r12, #48]
+ add sp, sp, #72
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end203:
+ .size mcl_fp_subNF13L, .Lfunc_end203-mcl_fp_subNF13L
+ .cantunwind
+ .fnend
+
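+@ Double-width (26-limb) add: the low 13 limbs are stored as-is; the high 13 limbs are conditionally reduced by the modulus at r3.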
+ .globl mcl_fpDbl_add13L
+ .align 2
+ .type mcl_fpDbl_add13L,%function
+mcl_fpDbl_add13L: @ @mcl_fpDbl_add13L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #152
+ sub sp, sp, #152
+ ldm r1, {r7, r8, lr}
+ ldr r12, [r1, #12]
+ ldm r2, {r4, r5, r6, r9}
+ add r10, r1, #32
+ adds r4, r4, r7
+ str r4, [sp, #84] @ 4-byte Spill
+ ldr r4, [r2, #96]
+ str r4, [sp, #144] @ 4-byte Spill
+ ldr r4, [r2, #100]
+ str r4, [sp, #148] @ 4-byte Spill
+ adcs r4, r5, r8
+ ldr r8, [r2, #16]
+ adcs r7, r6, lr
+ str r4, [sp, #72] @ 4-byte Spill
+ add lr, r1, #16
+ str r7, [sp, #68] @ 4-byte Spill
+ ldr r7, [r2, #64]
+ str r7, [sp, #112] @ 4-byte Spill
+ ldr r7, [r2, #68]
+ str r7, [sp, #116] @ 4-byte Spill
+ ldr r7, [r2, #72]
+ str r7, [sp, #124] @ 4-byte Spill
+ ldr r7, [r2, #76]
+ str r7, [sp, #120] @ 4-byte Spill
+ ldr r7, [r2, #80]
+ str r7, [sp, #128] @ 4-byte Spill
+ ldr r7, [r2, #84]
+ str r7, [sp, #132] @ 4-byte Spill
+ ldr r7, [r2, #88]
+ str r7, [sp, #136] @ 4-byte Spill
+ ldr r7, [r2, #92]
+ str r7, [sp, #140] @ 4-byte Spill
+ adcs r7, r9, r12
+ str r7, [sp, #28] @ 4-byte Spill
+ ldr r7, [r2, #32]
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r7, [sp, #64] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ str r7, [sp, #76] @ 4-byte Spill
+ ldr r7, [r2, #44]
+ str r7, [sp, #80] @ 4-byte Spill
+ ldr r7, [r2, #48]
+ str r7, [sp, #88] @ 4-byte Spill
+ ldr r7, [r2, #52]
+ str r7, [sp, #92] @ 4-byte Spill
+ ldr r7, [r2, #56]
+ str r7, [sp, #96] @ 4-byte Spill
+ ldr r7, [r2, #60]
+ str r7, [sp, #100] @ 4-byte Spill
+ ldr r7, [r2, #28]
+ str r7, [sp, #20] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ ldr r2, [r1, #96]
+ str r2, [sp, #104] @ 4-byte Spill
+ ldr r2, [r1, #100]
+ str r7, [sp, #12] @ 4-byte Spill
+ str r2, [sp, #108] @ 4-byte Spill
+ ldr r2, [r1, #64]
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r2, [r1, #72]
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [r1, #76]
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [r1, #80]
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r2, [r1, #84]
+ str r2, [sp, #52] @ 4-byte Spill
+ ldr r2, [r1, #88]
+ str r2, [sp, #56] @ 4-byte Spill
+ ldr r2, [r1, #92]
+ str r2, [sp, #60] @ 4-byte Spill
+ ldr r2, [r1, #68]
+ str r2, [sp, #24] @ 4-byte Spill
+ ldm r10, {r4, r5, r6, r9, r10}
+ ldr r2, [r1, #52]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #56]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldm lr, {r1, r2, r12, lr}
+ ldr r11, [sp, #84] @ 4-byte Reload
+ ldr r7, [sp, #72] @ 4-byte Reload
+ str r11, [r0]
+ str r7, [r0, #4]
+ ldr r7, [sp, #68] @ 4-byte Reload
+ adcs r1, r8, r1
+ str r7, [r0, #8]
+ ldr r7, [sp, #28] @ 4-byte Reload
+ str r7, [r0, #12]
+ ldr r7, [sp, #12] @ 4-byte Reload
+ str r1, [r0, #16]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r2, r7, r2
+ ldr r7, [sp] @ 4-byte Reload
+ str r2, [r0, #20]
+ ldr r2, [sp, #20] @ 4-byte Reload
+ adcs r1, r1, r12
+ str r1, [r0, #24]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r2, r2, lr
+ str r2, [r0, #28]
+ ldr r2, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r4
+ str r1, [r0, #32]
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r2, r2, r5
+ str r2, [r0, #36]
+ ldr r2, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r6
+ str r1, [r0, #40]
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adcs r2, r2, r9
+ str r2, [r0, #44]
+ ldr r2, [sp, #92] @ 4-byte Reload
+ adcs r1, r1, r10
+ str r1, [r0, #48]
+ ldr r1, [sp, #96] @ 4-byte Reload
+ adcs r6, r2, r7
+ ldr r2, [sp, #4] @ 4-byte Reload
+ str r6, [sp, #88] @ 4-byte Spill
+ adcs r5, r1, r2
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r2, [sp, #8] @ 4-byte Reload
+ str r5, [sp, #92] @ 4-byte Spill
+ adcs r4, r1, r2
+ ldr r1, [sp, #112] @ 4-byte Reload
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r4, [sp, #96] @ 4-byte Spill
+ adcs r7, r1, r2
+ ldr r1, [sp, #116] @ 4-byte Reload
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r7, [sp, #112] @ 4-byte Spill
+ adcs lr, r1, r2
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str lr, [sp, #100] @ 4-byte Spill
+ adcs r1, r1, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r1, [sp, #124] @ 4-byte Spill
+ ldr r1, [sp, #120] @ 4-byte Reload
+ adcs r8, r1, r2
+ ldr r1, [sp, #128] @ 4-byte Reload
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r8, [sp, #116] @ 4-byte Spill
+ adcs r1, r1, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r1, [sp, #128] @ 4-byte Spill
+ ldr r1, [sp, #132] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r1, [sp, #132] @ 4-byte Spill
+ ldr r1, [sp, #136] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r1, [sp, #136] @ 4-byte Spill
+ ldr r1, [sp, #140] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #104] @ 4-byte Reload
+ str r1, [sp, #140] @ 4-byte Spill
+ ldr r1, [sp, #144] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #108] @ 4-byte Reload
+ str r1, [sp, #144] @ 4-byte Spill
+ ldr r1, [sp, #148] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #148] @ 4-byte Spill
+ mov r1, #0
+ adc r1, r1, #0
+ str r1, [sp, #108] @ 4-byte Spill
+ ldmib r3, {r2, r9, r12}
+ ldr r1, [r3, #20]
+ ldr r11, [r3]
+ ldr r10, [r3, #16]
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [r3, #24]
+ subs r11, r6, r11
+ sbcs r2, r5, r2
+ str r1, [sp, #104] @ 4-byte Spill
+ ldr r1, [r3, #28]
+ str r1, [sp, #120] @ 4-byte Spill
+ sbcs r1, r4, r9
+ add r9, r3, #32
+ sbcs r12, r7, r12
+ ldm r9, {r5, r7, r9}
+ ldr r4, [r3, #44]
+ ldr r3, [r3, #48]
+ ldr r6, [sp, #84] @ 4-byte Reload
+ sbcs r10, lr, r10
+ str r3, [sp, #80] @ 4-byte Spill
+ ldr r3, [sp, #124] @ 4-byte Reload
+ str r4, [sp, #76] @ 4-byte Spill
+ sbcs lr, r3, r6
+ ldr r3, [sp, #104] @ 4-byte Reload
+ ldr r6, [sp, #120] @ 4-byte Reload
+ sbcs r4, r8, r3
+ ldr r3, [sp, #128] @ 4-byte Reload
+ sbcs r6, r3, r6
+ ldr r3, [sp, #132] @ 4-byte Reload
+ sbcs r5, r3, r5
+ ldr r3, [sp, #136] @ 4-byte Reload
+ sbcs r8, r3, r7
+ ldr r3, [sp, #140] @ 4-byte Reload
+ ldr r7, [sp, #76] @ 4-byte Reload
+ sbcs r9, r3, r9
+ ldr r3, [sp, #144] @ 4-byte Reload
+ sbcs r3, r3, r7
+ ldr r7, [sp, #80] @ 4-byte Reload
+ str r3, [sp, #120] @ 4-byte Spill
+ ldr r3, [sp, #148] @ 4-byte Reload
+ sbcs r3, r3, r7
+ ldr r7, [sp, #88] @ 4-byte Reload
+ str r3, [sp, #104] @ 4-byte Spill
+ ldr r3, [sp, #108] @ 4-byte Reload
+ sbc r3, r3, #0
+ ands r3, r3, #1
+ movne r11, r7
+ ldr r7, [sp, #92] @ 4-byte Reload
+ str r11, [r0, #52]
+ movne r2, r7
+ str r2, [r0, #56]
+ ldr r2, [sp, #96] @ 4-byte Reload
+ movne r1, r2
+ cmp r3, #0
+ ldr r2, [sp, #120] @ 4-byte Reload
+ str r1, [r0, #60]
+ ldr r1, [sp, #112] @ 4-byte Reload
+ movne r12, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r12, [r0, #64]
+ movne r10, r1
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r10, [r0, #68]
+ movne lr, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ cmp r3, #0
+ str lr, [r0, #72]
+ movne r4, r1
+ ldr r1, [sp, #128] @ 4-byte Reload
+ str r4, [r0, #76]
+ movne r6, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r6, [r0, #80]
+ movne r5, r1
+ ldr r1, [sp, #136] @ 4-byte Reload
+ cmp r3, #0
+ str r5, [r0, #84]
+ movne r8, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r8, [r0, #88]
+ movne r9, r1
+ ldr r1, [sp, #144] @ 4-byte Reload
+ str r9, [r0, #92]
+ movne r2, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ cmp r3, #0
+ ldr r3, [sp, #104] @ 4-byte Reload
+ str r2, [r0, #96]
+ movne r3, r1
+ str r3, [r0, #100]
+ add sp, sp, #152
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end204:
+ .size mcl_fpDbl_add13L, .Lfunc_end204-mcl_fpDbl_add13L
+ .cantunwind
+ .fnend
+
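+@ Double-width (26-limb) subtract: the low 13 limbs are stored as-is; the modulus at r3 is conditionally added back to the high 13 limbs on borrow.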
+ .globl mcl_fpDbl_sub13L
+ .align 2
+ .type mcl_fpDbl_sub13L,%function
+mcl_fpDbl_sub13L: @ @mcl_fpDbl_sub13L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #152
+ sub sp, sp, #152
+ ldr r7, [r2, #96]
+ add r10, r1, #32
+ str r7, [sp, #144] @ 4-byte Spill
+ ldr r7, [r2, #100]
+ str r7, [sp, #148] @ 4-byte Spill
+ ldr r7, [r2, #64]
+ str r7, [sp, #124] @ 4-byte Spill
+ ldr r7, [r2, #72]
+ str r7, [sp, #112] @ 4-byte Spill
+ ldr r7, [r2, #76]
+ str r7, [sp, #140] @ 4-byte Spill
+ ldr r7, [r2, #80]
+ str r7, [sp, #132] @ 4-byte Spill
+ ldr r7, [r2, #84]
+ str r7, [sp, #128] @ 4-byte Spill
+ ldr r7, [r2, #88]
+ str r7, [sp, #116] @ 4-byte Spill
+ ldr r7, [r2, #92]
+ str r7, [sp, #136] @ 4-byte Spill
+ ldr r7, [r2, #68]
+ str r7, [sp, #108] @ 4-byte Spill
+ ldr r7, [r2, #60]
+ str r7, [sp, #120] @ 4-byte Spill
+ ldr r7, [r2, #56]
+ str r7, [sp, #104] @ 4-byte Spill
+ ldr r7, [r2, #52]
+ str r7, [sp, #100] @ 4-byte Spill
+ ldr r7, [r2, #48]
+ str r7, [sp, #96] @ 4-byte Spill
+ ldr r7, [r2, #44]
+ str r7, [sp, #92] @ 4-byte Spill
+ ldm r2, {r9, lr}
+ ldr r6, [r1]
+ ldr r5, [r1, #4]
+ ldr r12, [r2, #8]
+ ldr r4, [r1, #8]
+ ldr r8, [r2, #12]
+ ldr r7, [r1, #12]
+ subs r6, r6, r9
+ str r6, [sp, #32] @ 4-byte Spill
+ ldr r6, [r2, #40]
+ str r6, [sp, #80] @ 4-byte Spill
+ sbcs r6, r5, lr
+ add lr, r1, #16
+ str r6, [sp, #28] @ 4-byte Spill
+ ldr r6, [r2, #36]
+ str r6, [sp, #48] @ 4-byte Spill
+ sbcs r6, r4, r12
+ sbcs r7, r7, r8
+ str r6, [sp, #20] @ 4-byte Spill
+ ldr r6, [r2, #32]
+ ldr r8, [r2, #16]
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r2, #28]
+ str r6, [sp, #40] @ 4-byte Spill
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ str r7, [sp, #24] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ ldr r2, [r1, #96]
+ str r2, [sp, #84] @ 4-byte Spill
+ ldr r2, [r1, #100]
+ str r7, [sp, #12] @ 4-byte Spill
+ str r2, [sp, #88] @ 4-byte Spill
+ ldr r2, [r1, #64]
+ str r2, [sp, #52] @ 4-byte Spill
+ ldr r2, [r1, #72]
+ str r2, [sp, #56] @ 4-byte Spill
+ ldr r2, [r1, #76]
+ str r2, [sp, #60] @ 4-byte Spill
+ ldr r2, [r1, #80]
+ str r2, [sp, #64] @ 4-byte Spill
+ ldr r2, [r1, #84]
+ str r2, [sp, #68] @ 4-byte Spill
+ ldr r2, [r1, #88]
+ str r2, [sp, #72] @ 4-byte Spill
+ ldr r2, [r1, #92]
+ str r2, [sp, #76] @ 4-byte Spill
+ ldr r2, [r1, #68]
+ str r2, [sp, #44] @ 4-byte Spill
+ ldm r10, {r4, r5, r6, r9, r10}
+ ldr r2, [r1, #52]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #56]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldm lr, {r1, r2, r12, lr}
+ ldr r11, [sp, #32] @ 4-byte Reload
+ ldr r7, [sp, #28] @ 4-byte Reload
+ str r11, [r0]
+ str r7, [r0, #4]
+ ldr r7, [sp, #20] @ 4-byte Reload
+ sbcs r1, r1, r8
+ str r7, [r0, #8]
+ ldr r7, [sp, #16] @ 4-byte Reload
+ str r7, [r0, #12]
+ ldr r7, [sp, #12] @ 4-byte Reload
+ str r1, [r0, #16]
+ ldr r1, [sp, #24] @ 4-byte Reload
+ sbcs r2, r2, r7
+ ldr r7, [sp] @ 4-byte Reload
+ str r2, [r0, #20]
+ ldr r2, [sp, #36] @ 4-byte Reload
+ sbcs r1, r12, r1
+ str r1, [r0, #24]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ sbcs r2, lr, r2
+ str r2, [r0, #28]
+ ldr r2, [sp, #48] @ 4-byte Reload
+ sbcs r1, r4, r1
+ str r1, [r0, #32]
+ ldr r1, [sp, #80] @ 4-byte Reload
+ sbcs r2, r5, r2
+ str r2, [r0, #36]
+ ldr r2, [sp, #92] @ 4-byte Reload
+ sbcs r1, r6, r1
+ str r1, [r0, #40]
+ ldr r1, [sp, #96] @ 4-byte Reload
+ sbcs r2, r9, r2
+ str r2, [r0, #44]
+ ldr r2, [sp, #100] @ 4-byte Reload
+ sbcs r1, r10, r1
+ add r10, r3, #16
+ str r1, [r0, #48]
+ ldr r1, [sp, #104] @ 4-byte Reload
+ sbcs r9, r7, r2
+ ldr r2, [sp, #4] @ 4-byte Reload
+ ldr r7, [sp, #52] @ 4-byte Reload
+ sbcs r11, r2, r1
+ ldr r1, [sp, #120] @ 4-byte Reload
+ ldr r2, [sp, #8] @ 4-byte Reload
+ sbcs r1, r2, r1
+ ldr r2, [sp, #124] @ 4-byte Reload
+ str r1, [sp, #120] @ 4-byte Spill
+ mov r1, #0
+ sbcs r6, r7, r2
+ ldr r2, [sp, #108] @ 4-byte Reload
+ ldr r7, [sp, #44] @ 4-byte Reload
+ str r6, [sp, #92] @ 4-byte Spill
+ sbcs r2, r7, r2
+ ldr r7, [sp, #56] @ 4-byte Reload
+ str r2, [sp, #124] @ 4-byte Spill
+ ldr r2, [sp, #112] @ 4-byte Reload
+ sbcs r8, r7, r2
+ ldr r2, [sp, #140] @ 4-byte Reload
+ ldr r7, [sp, #60] @ 4-byte Reload
+ str r8, [sp, #96] @ 4-byte Spill
+ sbcs r2, r7, r2
+ ldr r7, [sp, #64] @ 4-byte Reload
+ str r2, [sp, #140] @ 4-byte Spill
+ ldr r2, [sp, #132] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #68] @ 4-byte Reload
+ str r2, [sp, #132] @ 4-byte Spill
+ ldr r2, [sp, #128] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #72] @ 4-byte Reload
+ str r2, [sp, #128] @ 4-byte Spill
+ ldr r2, [sp, #116] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #76] @ 4-byte Reload
+ str r2, [sp, #116] @ 4-byte Spill
+ ldr r2, [sp, #136] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #84] @ 4-byte Reload
+ str r2, [sp, #136] @ 4-byte Spill
+ ldr r2, [sp, #144] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #88] @ 4-byte Reload
+ str r2, [sp, #144] @ 4-byte Spill
+ ldr r2, [sp, #148] @ 4-byte Reload
+ sbcs r2, r7, r2
+ mov r7, r9
+ mov r9, r11
+ sbc r1, r1, #0
+ str r2, [sp, #148] @ 4-byte Spill
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [r3, #32]
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [r3, #36]
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [r3, #40]
+ str r1, [sp, #104] @ 4-byte Spill
+ ldr r1, [r3, #44]
+ str r1, [sp, #108] @ 4-byte Spill
+ ldr r1, [r3, #48]
+ str r1, [sp, #112] @ 4-byte Spill
+ ldm r3, {r1, r2, r12, lr}
+ ldm r10, {r3, r4, r5, r10}
+ ldr r11, [sp, #120] @ 4-byte Reload
+ adds r1, r7, r1
+ adcs r2, r9, r2
+ adcs r12, r11, r12
+ ldr r11, [sp, #112] @ 4-byte Reload
+ adcs lr, r6, lr
+ ldr r6, [sp, #124] @ 4-byte Reload
+ adcs r3, r6, r3
+ ldr r6, [sp, #140] @ 4-byte Reload
+ adcs r4, r8, r4
+ adcs r8, r6, r5
+ ldr r5, [sp, #132] @ 4-byte Reload
+ ldr r6, [sp, #84] @ 4-byte Reload
+ adcs r10, r5, r10
+ ldr r5, [sp, #128] @ 4-byte Reload
+ adcs r5, r5, r6
+ ldr r6, [sp, #88] @ 4-byte Reload
+ str r5, [sp, #84] @ 4-byte Spill
+ ldr r5, [sp, #116] @ 4-byte Reload
+ adcs r5, r5, r6
+ ldr r6, [sp, #104] @ 4-byte Reload
+ str r5, [sp, #88] @ 4-byte Spill
+ ldr r5, [sp, #136] @ 4-byte Reload
+ adcs r5, r5, r6
+ ldr r6, [sp, #108] @ 4-byte Reload
+ str r5, [sp, #104] @ 4-byte Spill
+ ldr r5, [sp, #144] @ 4-byte Reload
+ adcs r5, r5, r6
+ str r5, [sp, #108] @ 4-byte Spill
+ ldr r5, [sp, #148] @ 4-byte Reload
+ adc r5, r5, r11
+ str r5, [sp, #112] @ 4-byte Spill
+ ldr r5, [sp, #100] @ 4-byte Reload
+ ands r5, r5, #1
+ moveq r1, r7
+ moveq r2, r9
+ str r1, [r0, #52]
+ ldr r1, [sp, #120] @ 4-byte Reload
+ str r2, [r0, #56]
+ ldr r2, [sp, #84] @ 4-byte Reload
+ moveq r12, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ cmp r5, #0
+ str r12, [r0, #60]
+ moveq lr, r1
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str lr, [r0, #64]
+ moveq r3, r1
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r3, [r0, #68]
+ ldr r3, [sp, #112] @ 4-byte Reload
+ moveq r4, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ cmp r5, #0
+ str r4, [r0, #72]
+ moveq r8, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r8, [r0, #76]
+ moveq r10, r1
+ ldr r1, [sp, #128] @ 4-byte Reload
+ str r10, [r0, #80]
+ moveq r2, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ cmp r5, #0
+ str r2, [r0, #84]
+ ldr r2, [sp, #88] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #136] @ 4-byte Reload
+ str r2, [r0, #88]
+ ldr r2, [sp, #104] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #144] @ 4-byte Reload
+ str r2, [r0, #92]
+ ldr r2, [sp, #108] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ cmp r5, #0
+ str r2, [r0, #96]
+ moveq r3, r1
+ str r3, [r0, #100]
+ add sp, sp, #152
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end205:
+ .size mcl_fpDbl_sub13L, .Lfunc_end205-mcl_fpDbl_sub13L
+ .cantunwind
+ .fnend
+
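+@ Local helper: multiplies the 14-limb (448-bit) value at r1 by the 32-bit word in r2, writing the 15-limb product to r0.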
+ .align 2
+ .type .LmulPv448x32,%function
+.LmulPv448x32: @ @mulPv448x32
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r11, lr}
+ ldr r12, [r1]
+ ldmib r1, {r3, lr}
+ ldr r9, [r1, #12]
+ umull r4, r8, lr, r2
+ umull lr, r6, r12, r2
+ mov r5, r4
+ mov r7, r6
+ str lr, [r0]
+ umull lr, r12, r9, r2
+ umlal r7, r5, r3, r2
+ str r5, [r0, #8]
+ str r7, [r0, #4]
+ umull r5, r7, r3, r2
+ adds r3, r6, r5
+ adcs r3, r7, r4
+ adcs r3, r8, lr
+ str r3, [r0, #12]
+ ldr r3, [r1, #16]
+ umull r7, r6, r3, r2
+ adcs r3, r12, r7
+ str r3, [r0, #16]
+ ldr r3, [r1, #20]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #20]
+ ldr r3, [r1, #24]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #24]
+ ldr r3, [r1, #28]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #28]
+ ldr r3, [r1, #32]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #32]
+ ldr r3, [r1, #36]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #36]
+ ldr r3, [r1, #40]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #40]
+ ldr r3, [r1, #44]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #44]
+ ldr r3, [r1, #48]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #48]
+ ldr r1, [r1, #52]
+ umull r3, r7, r1, r2
+ adcs r1, r6, r3
+ str r1, [r0, #52]
+ adc r1, r7, #0
+ str r1, [r0, #56]
+ pop {r4, r5, r6, r7, r8, r9, r11, lr}
+ mov pc, lr
+.Lfunc_end206:
+ .size .LmulPv448x32, .Lfunc_end206-.LmulPv448x32
+ .cantunwind
+ .fnend
+
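+@ Multiplies a 14-limb value by a 32-bit word via .LmulPv448x32 into a stack buffer, then copies the 15-limb result out through r4 (the saved output pointer).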
+ .globl mcl_fp_mulUnitPre14L
+ .align 2
+ .type mcl_fp_mulUnitPre14L,%function
+mcl_fp_mulUnitPre14L: @ @mcl_fp_mulUnitPre14L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #76
+ sub sp, sp, #76
+ mov r4, r0
+ add r0, sp, #8
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #64]
+ add lr, sp, #8
+ ldr r8, [sp, #56]
+ ldr r9, [sp, #52]
+ ldr r10, [sp, #48]
+ ldr r11, [sp, #44]
+ ldr r5, [sp, #40]
+ ldr r6, [sp, #36]
+ ldr r7, [sp, #32]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #60]
+ str r0, [sp] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ stm r4, {r0, r1, r2, r3, r12, lr}
+ str r7, [r4, #24]
+ str r6, [r4, #28]
+ str r5, [r4, #32]
+ str r11, [r4, #36]
+ str r10, [r4, #40]
+ str r9, [r4, #44]
+ str r8, [r4, #48]
+ ldr r0, [sp] @ 4-byte Reload
+ str r0, [r4, #52]
+ ldr r0, [sp, #4] @ 4-byte Reload
+ str r0, [r4, #56]
+ add sp, sp, #76
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end207:
+ .size mcl_fp_mulUnitPre14L, .Lfunc_end207-mcl_fp_mulUnitPre14L
+ .cantunwind
+ .fnend
+
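+@ Full 28-limb product of two 14-limb operands: three mcl_fpDbl_mulPre7L calls on the 7-limb halves (Karatsuba-style), with the middle term assembled on the stack.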
+ .globl mcl_fpDbl_mulPre14L
+ .align 2
+ .type mcl_fpDbl_mulPre14L,%function
+mcl_fpDbl_mulPre14L: @ @mcl_fpDbl_mulPre14L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #228
+ sub sp, sp, #228
+ mov r6, r2
+ mov r5, r1
+ mov r4, r0
+ bl mcl_fpDbl_mulPre7L(PLT)
+ add r0, r4, #56
+ add r1, r5, #28
+ add r2, r6, #28
+ bl mcl_fpDbl_mulPre7L(PLT)
+ ldr r0, [r6, #32]
+ add r11, r6, #36
+ str r0, [sp, #104] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [r6, #52]
+ ldr r12, [r6]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldmib r6, {r1, r2, r3, r7}
+ ldr r0, [r6, #28]
+ ldr lr, [r6, #24]
+ ldr r6, [r6, #20]
+ adds r0, r12, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r1, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ adcs r0, r2, r8
+ str r0, [sp, #100] @ 4-byte Spill
+ adcs r0, r3, r9
+ str r0, [sp, #96] @ 4-byte Spill
+ adcs r0, r7, r10
+ str r0, [sp, #92] @ 4-byte Spill
+ adcs r0, r6, r11
+ add r11, r5, #32
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, lr, r0
+ add lr, r5, #12
+ str r0, [sp, #84] @ 4-byte Spill
+ mov r0, #0
+ ldm r11, {r8, r10, r11}
+ ldr r7, [r5]
+ ldr r3, [r5, #4]
+ ldr r2, [r5, #8]
+ adc r6, r0, #0
+ ldr r0, [r5, #44]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r5, #48]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r5, #52]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [r5, #28]
+ ldm lr, {r1, r9, r12, lr}
+ adds r0, r7, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ str r0, [sp, #144]
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r7, r3, r8
+ adcs r10, r2, r10
+ add r2, sp, #116
+ str r7, [sp, #148]
+ adcs r11, r1, r11
+ add r1, sp, #144
+ str r10, [sp, #152]
+ str r11, [sp, #156]
+ adcs r5, r9, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ str r5, [sp, #160]
+ adcs r9, r12, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ str r9, [sp, #164]
+ adcs r8, lr, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ str r8, [sp, #168]
+ str r0, [sp, #116]
+ ldr r0, [sp, #104] @ 4-byte Reload
+ str r0, [sp, #120]
+ ldr r0, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #124]
+ ldr r0, [sp, #96] @ 4-byte Reload
+ str r0, [sp, #128]
+ ldr r0, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #132]
+ ldr r0, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #136]
+ ldr r0, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #140]
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ add r0, sp, #172
+ bl mcl_fpDbl_mulPre7L(PLT)
+ ldr r0, [sp, #108] @ 4-byte Reload
+ cmp r6, #0
+ ldr r2, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #100] @ 4-byte Reload
+ moveq r8, r6
+ moveq r9, r6
+ moveq r5, r6
+ moveq r11, r6
+ moveq r10, r6
+ cmp r6, #0
+ moveq r2, r6
+ moveq r7, r6
+ str r2, [sp, #112] @ 4-byte Spill
+ str r7, [sp, #76] @ 4-byte Spill
+ adds r3, r2, r0
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r2, [sp, #92] @ 4-byte Reload
+ adcs r0, r7, r0
+ ldr r7, [sp, #88] @ 4-byte Reload
+ adcs lr, r10, r1
+ ldr r1, [sp, #96] @ 4-byte Reload
+ adcs r1, r11, r1
+ adcs r2, r5, r2
+ adcs r12, r9, r7
+ ldr r7, [sp, #84] @ 4-byte Reload
+ adcs r7, r8, r7
+ str r7, [sp, #104] @ 4-byte Spill
+ mov r7, #0
+ adc r7, r7, #0
+ str r7, [sp, #108] @ 4-byte Spill
+ ldr r7, [sp, #80] @ 4-byte Reload
+ cmp r7, #0
+ moveq r2, r5
+ ldr r5, [sp, #76] @ 4-byte Reload
+ moveq r1, r11
+ moveq lr, r10
+ ldr r11, [sp, #104] @ 4-byte Reload
+ moveq r0, r5
+ ldr r5, [sp, #112] @ 4-byte Reload
+ moveq r3, r5
+ cmp r7, #0
+ ldr r5, [sp, #108] @ 4-byte Reload
+ moveq r5, r7
+ and r7, r6, r7
+ ldr r6, [sp, #200]
+ moveq r12, r9
+ moveq r11, r8
+ adds r10, r3, r6
+ ldr r3, [sp, #204]
+ adcs r8, r0, r3
+ ldr r0, [sp, #208]
+ add r3, sp, #172
+ adcs r9, lr, r0
+ ldr r0, [sp, #212]
+ ldr lr, [r4]
+ adcs r0, r1, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #216]
+ adcs r0, r2, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #220]
+ adcs r0, r12, r0
+ ldr r12, [r4, #4]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #224]
+ adcs r0, r11, r0
+ ldr r11, [r4, #12]
+ str r0, [sp, #92] @ 4-byte Spill
+ adc r0, r5, r7
+ ldr r5, [r4, #8]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldm r3, {r0, r1, r2, r3}
+ subs lr, r0, lr
+ sbcs r12, r1, r12
+ ldr r1, [sp, #188]
+ sbcs r5, r2, r5
+ ldr r2, [r4, #36]
+ sbcs r0, r3, r11
+ ldr r3, [sp, #104] @ 4-byte Reload
+ ldr r11, [r4, #60]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [r4, #16]
+ str r2, [sp, #112] @ 4-byte Spill
+ sbcs r0, r1, r0
+ ldr r1, [sp, #192]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r4, #20]
+ sbcs r0, r1, r0
+ ldr r1, [sp, #196]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [r4, #24]
+ sbcs r6, r1, r0
+ ldr r0, [r4, #28]
+ sbcs r7, r10, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r4, #32]
+ ldr r10, [r4, #56]
+ sbcs r8, r8, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ sbcs r9, r9, r2
+ ldr r2, [r4, #40]
+ sbcs r0, r3, r2
+ str r2, [sp, #108] @ 4-byte Spill
+ ldr r2, [r4, #44]
+ ldr r3, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ sbcs r0, r3, r2
+ str r2, [sp, #104] @ 4-byte Spill
+ ldr r2, [r4, #48]
+ ldr r3, [sp, #96] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ sbcs r0, r3, r2
+ str r2, [sp, #100] @ 4-byte Spill
+ ldr r2, [r4, #52]
+ ldr r3, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ sbcs r0, r3, r2
+ str r2, [sp, #96] @ 4-byte Spill
+ ldr r2, [sp, #88] @ 4-byte Reload
+ ldr r3, [r4, #68]
+ str r0, [sp, #56] @ 4-byte Spill
+ sbc r0, r2, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ subs r0, lr, r10
+ ldr lr, [r4, #76]
+ str r0, [sp, #48] @ 4-byte Spill
+ sbcs r0, r12, r11
+ ldr r12, [r4, #72]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r4, #64]
+ str r0, [sp, #36] @ 4-byte Spill
+ sbcs r0, r5, r0
+ ldr r5, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ sbcs r0, r5, r3
+ ldr r5, [r4, #80]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ sbcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ sbcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ sbcs r0, r6, r5
+ ldr r6, [r4, #84]
+ str r0, [sp, #24] @ 4-byte Spill
+ sbcs r0, r7, r6
+ str r6, [sp, #92] @ 4-byte Spill
+ ldr r6, [r4, #88]
+ str r0, [sp, #20] @ 4-byte Spill
+ sbcs r0, r8, r6
+ str r6, [sp, #88] @ 4-byte Spill
+ ldr r6, [r4, #92]
+ str r0, [sp, #16] @ 4-byte Spill
+ sbcs r0, r9, r6
+ add r9, r4, #96
+ str r6, [sp, #84] @ 4-byte Spill
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r9, {r6, r7, r8, r9}
+ ldr r0, [sp, #80] @ 4-byte Reload
+ ldr r1, [sp, #48] @ 4-byte Reload
+ ldr r2, [sp, #40] @ 4-byte Reload
+ sbcs r0, r0, r6
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ sbcs r0, r0, r7
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ sbcs r0, r0, r8
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ sbcs r0, r0, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ sbc r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adds r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [r4, #28]
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [r4, #32]
+ ldr r1, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r0, [r4, #36]
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #72] @ 4-byte Reload
+ str r1, [r4, #40]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #64] @ 4-byte Reload
+ str r0, [r4, #44]
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [r4, #48]
+ ldr r1, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r0, [r4, #52]
+ adcs r1, r10, r1
+ ldr r0, [sp, #16] @ 4-byte Reload
+ str r1, [r4, #56]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [r4, #60]
+ adcs r1, r1, r2
+ ldr r0, [sp, #4] @ 4-byte Reload
+ str r1, [r4, #64]
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r0, r3, r0
+ adcs r1, r12, r1
+ str r0, [r4, #68]
+ ldr r0, [sp, #60] @ 4-byte Reload
+ add r12, r4, #92
+ str r1, [r4, #72]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r0, lr, r0
+ adcs r1, r5, r1
+ str r0, [r4, #76]
+ ldr r0, [sp, #92] @ 4-byte Reload
+ str r1, [r4, #80]
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [r4, #84]
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [r4, #88]
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ adcs r1, r6, #0
+ adcs r2, r7, #0
+ adcs r3, r8, #0
+ adc r7, r9, #0
+ stm r12, {r0, r1, r2, r3, r7}
+ add sp, sp, #228
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end208:
+ .size mcl_fpDbl_mulPre14L, .Lfunc_end208-mcl_fpDbl_mulPre14L
+ .cantunwind
+ .fnend
+
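+@ NOTE (annotation, assumed): mcl_fpDbl_sqrPre14L looks like the squaring
+@ variant of the routine above; it reuses the same 7-limb split but passes
+@ the single 14-limb input as both multiplicands (r2 = r1) before each
+@ mcl_fpDbl_mulPre7L call.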
+ .globl mcl_fpDbl_sqrPre14L
+ .align 2
+ .type mcl_fpDbl_sqrPre14L,%function
+mcl_fpDbl_sqrPre14L: @ @mcl_fpDbl_sqrPre14L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #220
+ sub sp, sp, #220
+ mov r5, r1
+ mov r4, r0
+ mov r2, r5
+ bl mcl_fpDbl_mulPre7L(PLT)
+ add r1, r5, #28
+ add r0, r4, #56
+ mov r2, r1
+ bl mcl_fpDbl_mulPre7L(PLT)
+ ldr r0, [r5, #44]
+ ldr r11, [r5, #32]
+ ldr r10, [r5, #36]
+ ldr r8, [r5, #40]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [r5, #48]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [r5, #52]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldm r5, {r6, r7}
+ ldr r0, [r5, #28]
+ ldr r3, [r5, #8]
+ ldr r2, [r5, #12]
+ ldr r12, [r5, #16]
+ ldr lr, [r5, #24]
+ ldr r1, [r5, #20]
+ ldr r5, [sp, #96] @ 4-byte Reload
+ adds r9, r6, r0
+ adcs r0, r7, r11
+ ldr r7, [sp, #100] @ 4-byte Reload
+ str r9, [sp, #136]
+ str r9, [sp, #108]
+ adcs r3, r3, r10
+ str r0, [sp, #140]
+ str r0, [sp, #112]
+ adcs r2, r2, r8
+ str r3, [sp, #144]
+ str r3, [sp, #116]
+ adcs r6, r12, r5
+ str r2, [sp, #148]
+ str r2, [sp, #120]
+ adcs r1, r1, r7
+ ldr r7, [sp, #104] @ 4-byte Reload
+ str r6, [sp, #152]
+ str r6, [sp, #124]
+ lsr r5, r1, #31
+ str r1, [sp, #156]
+ str r1, [sp, #128]
+ adcs r8, lr, r7
+ orr r5, r5, r8, lsl #1
+ str r8, [sp, #160]
+ str r8, [sp, #132]
+ str r5, [sp, #104] @ 4-byte Spill
+ lsr r5, r6, #31
+ orr r1, r5, r1, lsl #1
+ str r1, [sp, #100] @ 4-byte Spill
+ lsr r1, r2, #31
+ orr r1, r1, r6, lsl #1
+ str r1, [sp, #96] @ 4-byte Spill
+ lsr r1, r3, #31
+ orr r1, r1, r2, lsl #1
+ add r2, sp, #108
+ str r1, [sp, #92] @ 4-byte Spill
+ lsr r1, r0, #31
+ orr r1, r1, r3, lsl #1
+ str r1, [sp, #84] @ 4-byte Spill
+ lsr r1, r9, #31
+ orr r0, r1, r0, lsl #1
+ add r1, sp, #136
+ str r0, [sp, #76] @ 4-byte Spill
+ mov r0, #0
+ adc r6, r0, #0
+ add r0, sp, #164
+ bl mcl_fpDbl_mulPre7L(PLT)
+ add lr, sp, #204
+ add r7, sp, #192
+ ldm lr, {r5, r10, r11, lr}
+ ldm r7, {r0, r1, r7}
+ ldr r2, [sp, #100] @ 4-byte Reload
+ ldr r3, [sp, #104] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ adds r0, r0, r9, lsl #1
+ mov r9, r1
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r12, r7, r0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r1, r5, r0
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r10, r0
+ adcs r2, r11, r2
+ adcs r3, lr, r3
+ adc r8, r6, r8, lsr #31
+ cmp r6, #0
+ moveq r0, r10
+ moveq r1, r5
+ moveq r3, lr
+ moveq r2, r11
+ moveq r12, r7
+ cmp r6, #0
+ ldr lr, [r4]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ moveq r8, r6
+ str r2, [sp, #100] @ 4-byte Spill
+ mov r5, r3
+ ldr r3, [sp, #76] @ 4-byte Reload
+ ldr r2, [sp, #80] @ 4-byte Reload
+ str r1, [sp, #96] @ 4-byte Spill
+ mov r7, r8
+ add r8, sp, #164
+ moveq r3, r9
+ ldmib r4, {r9, r10, r11}
+ moveq r2, r0
+ ldm r8, {r0, r1, r8}
+ ldr r6, [sp, #176]
+ subs lr, r0, lr
+ sbcs r0, r1, r9
+ ldr r1, [sp, #180]
+ str r0, [sp, #60] @ 4-byte Spill
+ sbcs r0, r8, r10
+ ldr r10, [r4, #56]
+ str r0, [sp, #76] @ 4-byte Spill
+ sbcs r0, r6, r11
+ ldr r11, [r4, #60]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [r4, #16]
+ sbcs r0, r1, r0
+ ldr r1, [sp, #184]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r4, #20]
+ sbcs r0, r1, r0
+ ldr r1, [sp, #188]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [r4, #24]
+ sbcs r6, r1, r0
+ ldr r1, [r4, #28]
+ ldr r0, [r4, #32]
+ sbcs r9, r2, r1
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r2, [sp, #96] @ 4-byte Reload
+ sbcs r8, r3, r0
+ ldr r0, [r4, #36]
+ ldr r3, [r4, #68]
+ str r0, [sp, #88] @ 4-byte Spill
+ sbcs r0, r12, r0
+ ldr r12, [r4, #72]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r4, #40]
+ str r0, [sp, #84] @ 4-byte Spill
+ sbcs r0, r2, r0
+ ldr r2, [r4, #44]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ str r2, [sp, #96] @ 4-byte Spill
+ sbcs r0, r0, r2
+ ldr r2, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [r4, #48]
+ str r0, [sp, #104] @ 4-byte Spill
+ sbcs r0, r2, r0
+ ldr r2, [r4, #64]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [r4, #52]
+ str r2, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #100] @ 4-byte Spill
+ sbcs r0, r5, r0
+ ldr r5, [r4, #80]
+ str r0, [sp, #44] @ 4-byte Spill
+ sbc r0, r7, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ subs r0, lr, r10
+ ldr lr, [r4, #76]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ sbcs r0, r0, r11
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ sbcs r0, r0, r2
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ sbcs r0, r0, r3
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ sbcs r0, r0, r12
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ sbcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ sbcs r0, r6, r5
+ ldr r6, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [r4, #84]
+ str r0, [sp, #80] @ 4-byte Spill
+ sbcs r0, r9, r0
+ add r9, r4, #96
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [r4, #88]
+ str r0, [sp, #76] @ 4-byte Spill
+ sbcs r0, r8, r0
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [r4, #92]
+ str r0, [sp, #72] @ 4-byte Spill
+ sbcs r0, r6, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldm r9, {r6, r7, r8, r9}
+ ldr r0, [sp, #68] @ 4-byte Reload
+ ldr r2, [sp, #60] @ 4-byte Reload
+ sbcs r0, r0, r6
+ str r0, [sp] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ sbcs r0, r0, r7
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ sbcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ sbcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ sbc r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adds r0, r1, r0
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [r4, #28]
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [r4, #32]
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r0, [r4, #36]
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [r4, #40]
+ ldr r1, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r0, [r4, #44]
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [r4, #48]
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r0, [r4, #52]
+ adcs r1, r10, r1
+ ldr r0, [sp, #8] @ 4-byte Reload
+ str r1, [r4, #56]
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [r4, #60]
+ adcs r1, r1, r2
+ ldr r0, [sp] @ 4-byte Reload
+ str r1, [r4, #64]
+ ldr r1, [sp, #4] @ 4-byte Reload
+ adcs r0, r3, r0
+ adcs r1, r12, r1
+ str r0, [r4, #68]
+ ldr r0, [sp, #52] @ 4-byte Reload
+ add r12, r4, #92
+ str r1, [r4, #72]
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r0, lr, r0
+ adcs r1, r5, r1
+ str r0, [r4, #76]
+ ldr r0, [sp, #80] @ 4-byte Reload
+ str r1, [r4, #80]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [r4, #84]
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [r4, #88]
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ adcs r1, r6, #0
+ adcs r2, r7, #0
+ adcs r3, r8, #0
+ adc r7, r9, #0
+ stm r12, {r0, r1, r2, r3, r7}
+ add sp, sp, #220
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end209:
+ .size mcl_fpDbl_sqrPre14L, .Lfunc_end209-mcl_fpDbl_sqrPre14L
+ .cantunwind
+ .fnend
+
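+@ NOTE (annotation, assumed): mcl_fp_mont14L appears to implement Montgomery
+@ multiplication for 14-limb (448-bit) operands. Each 32-bit limb of the
+@ multiplier is folded in through .LmulPv448x32, and each round is reduced
+@ using the per-modulus constant loaded from [r3, #-4] (used in
+@ "mul r2, r5, r6" and the later "mul r2, r6, r0" steps).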
+ .globl mcl_fp_mont14L
+ .align 2
+ .type mcl_fp_mont14L,%function
+mcl_fp_mont14L: @ @mcl_fp_mont14L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #892
+ sub sp, sp, #892
+ .pad #1024
+ sub sp, sp, #1024
+ add r12, sp, #108
+ add r7, sp, #1024
+ mov r4, r3
+ stm r12, {r1, r2, r3}
+ str r0, [sp, #72] @ 4-byte Spill
+ add r0, r7, #824
+ ldr r6, [r3, #-4]
+ ldr r2, [r2]
+ str r6, [sp, #104] @ 4-byte Spill
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #1852]
+ ldr r5, [sp, #1848]
+ add r8, sp, #1024
+ mov r1, r4
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #1856]
+ mul r2, r5, r6
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #1860]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #1904]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #1900]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #1896]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #1892]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #1888]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #1884]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #1880]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #1876]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1872]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1868]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1864]
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, r8, #760
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #1840]
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r10, [sp, #1808]
+ ldr r11, [sp, #1804]
+ ldr r7, [sp, #1800]
+ ldr r9, [sp, #1784]
+ ldr r4, [sp, #1788]
+ ldr r6, [sp, #1792]
+ ldr r8, [sp, #1796]
+ add lr, sp, #1024
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1836]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1832]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1828]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1824]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1820]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1816]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1812]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r2, [r0, #4]
+ add r0, lr, #696
+ bl .LmulPv448x32(PLT)
+ adds r0, r9, r5
+ ldr r1, [sp, #48] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ ldr r3, [sp, #1736]
+ ldr r12, [sp, #1740]
+ ldr lr, [sp, #1744]
+ ldr r5, [sp, #1752]
+ ldr r9, [sp, #1760]
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #1748]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r6, r0
+ ldr r6, [sp, #1720]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r8, r0
+ ldr r8, [sp, #1756]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r7, r0
+ ldr r7, [sp, #76] @ 4-byte Reload
+ adcs r1, r11, r1
+ str r0, [sp, #36] @ 4-byte Spill
+ mov r0, #0
+ ldr r11, [sp, #80] @ 4-byte Reload
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r10, r1
+ ldr r10, [sp, #1764]
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #92] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #96] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #100] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #1732]
+ adc r0, r0, #0
+ adds r6, r11, r6
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #1728]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1776]
+ str r6, [sp, #20] @ 4-byte Spill
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1772]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #1768]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #1724]
+ adcs r0, r7, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #4] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mul r2, r6, r0
+ add r6, sp, #1024
+ add r0, r6, #632
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #1712]
+ add r11, sp, #1664
+ ldr r8, [sp, #1684]
+ ldr r9, [sp, #1680]
+ ldr r10, [sp, #1676]
+ ldr r4, [sp, #1656]
+ ldr r7, [sp, #1660]
+ add lr, sp, #1024
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1708]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1704]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1700]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1696]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1692]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1688]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r5, r6, r11}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [r0, #8]
+ add r0, lr, #568
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #20] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ ldr r2, [sp, #1604]
+ ldr r3, [sp, #1608]
+ ldr r12, [sp, #1612]
+ ldr lr, [sp, #1616]
+ adds r0, r0, r4
+ ldr r4, [sp, #1620]
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1624]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r7, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1592]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1636]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1632]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1628]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1600]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r6, r11, r6
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1648]
+ str r6, [sp, #20] @ 4-byte Spill
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1644]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1640]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1596]
+ adcs r0, r7, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mul r2, r6, r0
+ add r6, sp, #1024
+ add r0, r6, #504
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #1584]
+ add r11, sp, #1536
+ ldr r8, [sp, #1556]
+ ldr r9, [sp, #1552]
+ ldr r10, [sp, #1548]
+ ldr r4, [sp, #1528]
+ ldr r7, [sp, #1532]
+ add lr, sp, #1024
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1580]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1576]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1572]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1568]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1564]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1560]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r5, r6, r11}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [r0, #12]
+ add r0, lr, #440
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #20] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ ldr r2, [sp, #1476]
+ ldr r3, [sp, #1480]
+ ldr r12, [sp, #1484]
+ ldr lr, [sp, #1488]
+ adds r0, r0, r4
+ ldr r4, [sp, #1492]
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1496]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r7, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1464]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1508]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1504]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1500]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1472]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r6, r11, r6
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1520]
+ str r6, [sp, #20] @ 4-byte Spill
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1516]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1512]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1468]
+ adcs r0, r7, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mul r2, r6, r0
+ add r6, sp, #1024
+ add r0, r6, #376
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #1456]
+ add r11, sp, #1408
+ ldr r8, [sp, #1428]
+ ldr r9, [sp, #1424]
+ ldr r10, [sp, #1420]
+ ldr r4, [sp, #1400]
+ ldr r7, [sp, #1404]
+ add lr, sp, #1024
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1452]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1448]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1444]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1440]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1436]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1432]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r5, r6, r11}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [r0, #16]
+ add r0, lr, #312
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #20] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ ldr r2, [sp, #1348]
+ ldr r3, [sp, #1352]
+ ldr r12, [sp, #1356]
+ ldr lr, [sp, #1360]
+ adds r0, r0, r4
+ ldr r4, [sp, #1364]
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1368]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r7, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1336]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1380]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1376]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1372]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1344]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r6, r11, r6
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1392]
+ str r6, [sp, #20] @ 4-byte Spill
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1388]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1384]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1340]
+ adcs r0, r7, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mul r2, r6, r0
+ add r6, sp, #1024
+ add r0, r6, #248
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #1328]
+ add r11, sp, #1280
+ ldr r8, [sp, #1300]
+ ldr r9, [sp, #1296]
+ ldr r10, [sp, #1292]
+ ldr r4, [sp, #1272]
+ ldr r7, [sp, #1276]
+ add lr, sp, #1024
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1324]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1320]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1316]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1312]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1308]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1304]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r5, r6, r11}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [r0, #20]
+ add r0, lr, #184
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #20] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ ldr r2, [sp, #1220]
+ ldr r3, [sp, #1224]
+ ldr r12, [sp, #1228]
+ ldr lr, [sp, #1232]
+ adds r0, r0, r4
+ ldr r4, [sp, #1236]
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1240]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r7, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1208]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1252]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1248]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1244]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1216]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r6, r11, r6
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1264]
+ str r6, [sp, #20] @ 4-byte Spill
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1260]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1256]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1212]
+ adcs r0, r7, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mul r2, r6, r0
+ add r6, sp, #1024
+ add r0, r6, #120
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #1200]
+ add r11, sp, #1152
+ ldr r8, [sp, #1172]
+ ldr r9, [sp, #1168]
+ ldr r10, [sp, #1164]
+ ldr r4, [sp, #1144]
+ ldr r7, [sp, #1148]
+ add lr, sp, #1024
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1196]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1192]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1188]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1184]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1180]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1176]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r5, r6, r11}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [r0, #24]
+ add r0, lr, #56
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #20] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ ldr r2, [sp, #1092]
+ ldr r3, [sp, #1096]
+ ldr r12, [sp, #1100]
+ ldr lr, [sp, #1104]
+ adds r0, r0, r4
+ ldr r4, [sp, #1108]
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1112]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r7, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1080]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1124]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1120]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1116]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1088]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r6, r11, r6
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1136]
+ str r6, [sp, #20] @ 4-byte Spill
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1132]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1128]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1084]
+ adcs r0, r7, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #1016
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #1072]
+ add r11, sp, #1024
+ ldr r8, [sp, #1044]
+ ldr r9, [sp, #1040]
+ ldr r10, [sp, #1036]
+ ldr r4, [sp, #1016]
+ ldr r7, [sp, #1020]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1068]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1064]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1060]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1056]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1052]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1048]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r5, r6, r11}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [r0, #28]
+ add r0, sp, #952
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #20] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #956
+ adds r0, r0, r4
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #980
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1008]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1004]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1000]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r6, [sp, #952]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #100] @ 4-byte Reload
+ ldr r7, [sp, #96] @ 4-byte Reload
+ adds r6, r11, r6
+ adcs r0, r7, r0
+ str r6, [sp, #20] @ 4-byte Spill
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #888
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #944]
+ add r11, sp, #896
+ ldr r8, [sp, #916]
+ ldr r9, [sp, #912]
+ ldr r10, [sp, #908]
+ ldr r4, [sp, #888]
+ ldr r7, [sp, #892]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #940]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #936]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #932]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #928]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #924]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #920]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r5, r6, r11}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [r0, #32]
+ add r0, sp, #824
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #20] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #828
+ adds r0, r0, r4
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #852
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #880]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #876]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #872]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r6, [sp, #824]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #100] @ 4-byte Reload
+ ldr r7, [sp, #96] @ 4-byte Reload
+ adds r6, r11, r6
+ adcs r0, r7, r0
+ str r6, [sp, #20] @ 4-byte Spill
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #760
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #816]
+ add r11, sp, #768
+ ldr r8, [sp, #788]
+ ldr r9, [sp, #784]
+ ldr r10, [sp, #780]
+ ldr r4, [sp, #760]
+ ldr r7, [sp, #764]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #812]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #808]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #804]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #800]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #796]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #792]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r5, r6, r11}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [r0, #36]
+ add r0, sp, #696
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #20] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #700
+ adds r0, r0, r4
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #724
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #752]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #748]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #744]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r6, [sp, #696]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #100] @ 4-byte Reload
+ ldr r7, [sp, #96] @ 4-byte Reload
+ adds r6, r11, r6
+ adcs r0, r7, r0
+ str r6, [sp, #20] @ 4-byte Spill
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #104] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ mul r2, r6, r5
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, sp, #632
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #688]
+ add r11, sp, #632
+ ldr r6, [sp, #656]
+ ldr r4, [sp, #652]
+ ldr r7, [sp, #648]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #684]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #680]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #676]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #672]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #668]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #664]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #660]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [r0, #40]
+ add r0, sp, #568
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #20] @ 4-byte Reload
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #584
+ adds r0, r0, r8
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r2, r0, r9
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #608
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #568
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldm r11, {r4, r6, r7, r11}
+ adds r0, r2, r4
+ mul r1, r0, r5
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #624]
+ str r1, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r4, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r5, [sp, #96] @ 4-byte Reload
+ adcs r6, r5, r6
+ ldr r5, [sp, #92] @ 4-byte Reload
+ str r6, [sp, #96] @ 4-byte Spill
+ adcs r6, r5, r7
+ ldr r5, [sp, #88] @ 4-byte Reload
+ str r6, [sp, #92] @ 4-byte Spill
+ adcs r6, r5, r11
+ ldr r5, [sp, #84] @ 4-byte Reload
+ str r6, [sp, #88] @ 4-byte Spill
+ adcs r0, r5, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #504
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #560]
+ add r10, sp, #504
+ ldr r11, [sp, #532]
+ ldr r4, [sp, #528]
+ ldr r6, [sp, #524]
+ ldr r7, [sp, #520]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #556]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #552]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #548]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #544]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #540]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #536]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r10, {r5, r8, r9, r10}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [r0, #44]
+ add r0, sp, #440
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #100] @ 4-byte Reload
+ ldr r1, [sp, #92] @ 4-byte Reload
+ ldr r2, [sp, #8] @ 4-byte Reload
+ add lr, sp, #456
+ adds r0, r0, r5
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r8
+ adcs r1, r1, r9
+ str r1, [sp, #96] @ 4-byte Spill
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adcs r1, r1, r10
+ add r10, sp, #480
+ str r1, [sp, #92] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r1, r7
+ add r7, sp, #440
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r6
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r1, r1, r4
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r1, r1, r11
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adc r1, r1, #0
+ str r1, [sp, #24] @ 4-byte Spill
+ ldm r7, {r4, r6, r7}
+ ldr r5, [sp, #452]
+ adds r1, r0, r4
+ ldr r0, [sp, #104] @ 4-byte Reload
+ str r1, [sp, #100] @ 4-byte Spill
+ mul r2, r1, r0
+ ldr r0, [sp, #496]
+ str r2, [sp, #20] @ 4-byte Spill
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r4, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #96] @ 4-byte Reload
+ adcs r6, r11, r6
+ str r6, [sp, #48] @ 4-byte Spill
+ ldr r6, [sp, #92] @ 4-byte Reload
+ adcs r6, r6, r7
+ str r6, [sp, #44] @ 4-byte Spill
+ ldr r6, [sp, #88] @ 4-byte Reload
+ adcs r5, r6, r5
+ str r5, [sp, #40] @ 4-byte Spill
+ ldr r5, [sp, #84] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ add r0, sp, #376
+ bl .LmulPv448x32(PLT)
+ ldr r1, [sp, #432]
+ ldr r8, [sp, #404]
+ ldr r9, [sp, #400]
+ ldr r10, [sp, #396]
+ ldr r11, [sp, #392]
+ ldr r6, [sp, #376]
+ ldr r5, [sp, #380]
+ ldr r7, [sp, #384]
+ ldr r4, [sp, #388]
+ add r0, sp, #312
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #428]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #424]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #420]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #416]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #412]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [sp, #408]
+ str r1, [sp, #8] @ 4-byte Spill
+ ldr r1, [sp, #112] @ 4-byte Reload
+ ldr r2, [r1, #48]
+ ldr r1, [sp, #108] @ 4-byte Reload
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #100] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #316
+ adds r0, r0, r6
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #340
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #368]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #364]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r10, {r4, r6, r7, r8, r9, r10}
+ ldr r5, [sp, #312]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #100] @ 4-byte Reload
+ adds r11, r11, r5
+ ldr r5, [sp, #48] @ 4-byte Reload
+ adcs r0, r5, r0
+ mov r5, r11
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #248
+ bl .LmulPv448x32(PLT)
+ ldr r1, [sp, #304]
+ ldr r10, [sp, #272]
+ ldr r11, [sp, #268]
+ ldr r8, [sp, #264]
+ ldr r6, [sp, #248]
+ ldr r7, [sp, #252]
+ ldr r4, [sp, #256]
+ ldr r9, [sp, #260]
+ add r0, sp, #184
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #300]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #296]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #292]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #288]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #284]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #280]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [sp, #276]
+ str r1, [sp, #8] @ 4-byte Spill
+ ldr r1, [sp, #112] @ 4-byte Reload
+ ldr r2, [r1, #52]
+ ldr r1, [sp, #108] @ 4-byte Reload
+ bl .LmulPv448x32(PLT)
+ adds r0, r5, r6
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #200
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r3, r0, r7
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r8
+ add r8, sp, #184
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #224
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldm r8, {r4, r7, r8}
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r5, [sp, #196]
+ adds r4, r3, r4
+ mul r1, r4, r0
+ ldr r0, [sp, #240]
+ str r1, [sp, #48] @ 4-byte Spill
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #236]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r6, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #112] @ 4-byte Reload
+ adcs r11, r11, r7
+ ldr r7, [sp, #108] @ 4-byte Reload
+ adcs r8, r7, r8
+ ldr r7, [sp, #52] @ 4-byte Reload
+ adcs r5, r7, r5
+ ldr r7, [sp, #100] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r9, r0, r9
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r10
+ str r0, [sp, #68] @ 4-byte Spill
+ mov r0, #0
+ adc r7, r0, #0
+ add r0, sp, #120
+ bl .LmulPv448x32(PLT)
+ add r3, sp, #120
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r4, r0
+ adcs r4, r11, r1
+ ldr r0, [sp, #136]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r6, r8, r2
+ str r4, [sp, #36] @ 4-byte Spill
+ adcs r12, r5, r3
+ str r6, [sp, #48] @ 4-byte Spill
+ str r12, [sp, #56] @ 4-byte Spill
+ adcs r8, r1, r0
+ ldr r0, [sp, #140]
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r8, [sp, #64] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #144]
+ adcs r0, r1, r0
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #148]
+ adcs r0, r1, r0
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #152]
+ adcs r0, r1, r0
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #156]
+ adcs r0, r1, r0
+ ldr r1, [sp, #104] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #160]
+ adcs r0, r1, r0
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #164]
+ adcs r0, r9, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #168]
+ adcs r0, r1, r0
+ ldr r1, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #172]
+ adcs r0, r1, r0
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #176]
+ adcs r0, r1, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ adc r0, r7, #0
+ mov r7, r10
+ str r0, [sp, #60] @ 4-byte Spill
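+@ Conditional final subtraction (descriptive comment; the code below is the generated sequence):
+@ the limbs of the modulus, loaded through the pointer spilled at [sp, #116], are subtracted from
+@ the accumulated result, and the ands/movne chain keeps the subtracted limbs only when no borrow
+@ occurred; otherwise the original limbs are stored to the result pointer reloaded from [sp, #72],
+@ i.e. the usual "subtract p once if needed" step of Montgomery multiplication.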
+ ldmib r7, {r1, r2, r3, r10, r11, lr}
+ ldr r5, [r7]
+ ldr r0, [r7, #28]
+ ldr r9, [r7, #44]
+ subs r5, r4, r5
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [r7, #40]
+ sbcs r6, r6, r1
+ ldr r1, [r7, #32]
+ ldr r4, [sp, #68] @ 4-byte Reload
+ sbcs r2, r12, r2
+ sbcs r12, r8, r3
+ ldr r3, [r7, #48]
+ ldr r8, [r7, #36]
+ str r3, [sp, #52] @ 4-byte Spill
+ ldr r3, [r7, #52]
+ ldr r7, [sp, #84] @ 4-byte Reload
+ str r3, [sp, #116] @ 4-byte Spill
+ ldr r3, [sp, #80] @ 4-byte Reload
+ sbcs r10, r3, r10
+ ldr r3, [sp, #76] @ 4-byte Reload
+ sbcs r3, r3, r11
+ sbcs lr, r7, lr
+ ldr r7, [sp, #88] @ 4-byte Reload
+ sbcs r4, r7, r4
+ ldr r7, [sp, #92] @ 4-byte Reload
+ sbcs r7, r7, r1
+ ldr r1, [sp, #96] @ 4-byte Reload
+ sbcs r8, r1, r8
+ ldr r1, [sp, #100] @ 4-byte Reload
+ sbcs r11, r1, r0
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r1, [sp, #52] @ 4-byte Reload
+ sbcs r9, r0, r9
+ ldr r0, [sp, #108] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ sbcs r0, r0, r1
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ sbc r0, r0, #0
+ ands r1, r0, #1
+ ldr r0, [sp, #36] @ 4-byte Reload
+ movne r5, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ str r5, [r0]
+ ldr r5, [sp, #48] @ 4-byte Reload
+ movne r6, r5
+ ldr r5, [sp, #56] @ 4-byte Reload
+ str r6, [r0, #4]
+ movne r2, r5
+ cmp r1, #0
+ str r2, [r0, #8]
+ ldr r2, [sp, #64] @ 4-byte Reload
+ movne r12, r2
+ ldr r2, [sp, #80] @ 4-byte Reload
+ str r12, [r0, #12]
+ movne r10, r2
+ ldr r2, [sp, #76] @ 4-byte Reload
+ str r10, [r0, #16]
+ movne r3, r2
+ ldr r2, [sp, #84] @ 4-byte Reload
+ cmp r1, #0
+ str r3, [r0, #20]
+ movne lr, r2
+ ldr r2, [sp, #88] @ 4-byte Reload
+ str lr, [r0, #24]
+ movne r4, r2
+ ldr r2, [sp, #92] @ 4-byte Reload
+ str r4, [r0, #28]
+ movne r7, r2
+ ldr r2, [sp, #96] @ 4-byte Reload
+ cmp r1, #0
+ str r7, [r0, #32]
+ movne r8, r2
+ ldr r2, [sp, #100] @ 4-byte Reload
+ str r8, [r0, #36]
+ movne r11, r2
+ ldr r2, [sp, #104] @ 4-byte Reload
+ str r11, [r0, #40]
+ movne r9, r2
+ cmp r1, #0
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [sp, #68] @ 4-byte Reload
+ str r9, [r0, #44]
+ movne r2, r1
+ ldr r1, [sp, #112] @ 4-byte Reload
+ str r2, [r0, #48]
+ ldr r2, [sp, #116] @ 4-byte Reload
+ movne r2, r1
+ str r2, [r0, #52]
+ add sp, sp, #892
+ add sp, sp, #1024
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end210:
+ .size mcl_fp_mont14L, .Lfunc_end210-mcl_fp_mont14L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montNF14L
+ .align 2
+ .type mcl_fp_montNF14L,%function
+mcl_fp_montNF14L: @ @mcl_fp_montNF14L
+ .fnstart
+@ BB#0:
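+@ mcl_fp_montNF14L: 448-bit (14 x 32-bit limb) Montgomery multiplication, the "NF" variant emitted
+@ by the same mcl code generator as mcl_fp_mont14L above. The arguments appear to follow the other
+@ mcl_fp_mont* routines (result, x, y, modulus); each word of y drives one .LmulPv448x32 call for
+@ x*y[i] and one for the reduction by the modulus.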
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #892
+ sub sp, sp, #892
+ .pad #1024
+ sub sp, sp, #1024
+ add r12, sp, #108
+ add r6, sp, #1024
+ mov r4, r3
+ stm r12, {r1, r2, r3}
+ str r0, [sp, #76] @ 4-byte Spill
+ add r0, r6, #824
+ ldr r5, [r3, #-4]
+ ldr r2, [r2]
+ str r5, [sp, #104] @ 4-byte Spill
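+@ r5, read from the word just before the modulus (p[-4 bytes]) and spilled to [sp, #104], appears to
+@ be the precomputed constant -p^{-1} mod 2^32; it is multiplied with the low limb of each partial
+@ sum (e.g. "mul r2, r8, r5" below) to form the per-iteration Montgomery reduction factor.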
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #1852]
+ ldr r8, [sp, #1848]
+ add r10, sp, #1024
+ mov r1, r4
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #1856]
+ mul r2, r8, r5
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #1860]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #1904]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #1900]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #1896]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #1892]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #1888]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #1884]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #1880]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #1876]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1872]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1868]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1864]
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, r10, #760
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #1840]
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r11, [sp, #1808]
+ ldr r6, [sp, #1804]
+ ldr r7, [sp, #1800]
+ ldr r5, [sp, #1784]
+ ldr r9, [sp, #1788]
+ ldr r10, [sp, #1792]
+ ldr r4, [sp, #1796]
+ add lr, sp, #1024
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1836]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1832]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1828]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1824]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1820]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1816]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1812]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r2, [r0, #4]
+ add r0, lr, #696
+ bl .LmulPv448x32(PLT)
+ adds r0, r5, r8
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r5, [sp, #1720]
+ ldr r2, [sp, #1732]
+ ldr r3, [sp, #1736]
+ ldr r12, [sp, #1740]
+ ldr lr, [sp, #1744]
+ ldr r8, [sp, #1760]
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r9, r0
+ ldr r9, [sp, #1764]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r10, r0
+ ldr r10, [sp, #1768]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #1748]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r7, r0
+ ldr r7, [sp, #1756]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r6, r0
+ ldr r6, [sp, #1752]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r11, r0
+ ldr r11, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adc r0, r1, r0
+ adds r11, r11, r5
+ ldr r5, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #1728]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1776]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1772]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1724]
+ adcs r0, r5, r0
+ mov r5, r11
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r8
+ add r8, sp, #1024
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, r8, #632
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #1712]
+ add r11, sp, #1664
+ ldr r9, [sp, #1680]
+ ldr r10, [sp, #1676]
+ ldr r6, [sp, #1656]
+ ldr r7, [sp, #1660]
+ add lr, sp, #1024
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1708]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1704]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1700]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1696]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1692]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1688]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1684]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r8, r11}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [r0, #8]
+ add r0, lr, #568
+ bl .LmulPv448x32(PLT)
+ adds r0, r5, r6
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r5, [sp, #1592]
+ ldr r2, [sp, #1604]
+ ldr r3, [sp, #1608]
+ ldr r12, [sp, #1612]
+ ldr lr, [sp, #1616]
+ ldr r6, [sp, #1624]
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #1628]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1620]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1632]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1640]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1636]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r11, r11, r5
+ ldr r5, [sp, #96] @ 4-byte Reload
+ ldr r1, [sp, #1600]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1648]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1644]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1596]
+ adcs r0, r5, r0
+ mov r5, r11
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r8
+ add r8, sp, #1024
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, r8, #504
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #1584]
+ add r11, sp, #1536
+ ldr r9, [sp, #1552]
+ ldr r10, [sp, #1548]
+ ldr r6, [sp, #1528]
+ ldr r7, [sp, #1532]
+ add lr, sp, #1024
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1580]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1576]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1572]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1568]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1564]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1560]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1556]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r8, r11}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [r0, #12]
+ add r0, lr, #440
+ bl .LmulPv448x32(PLT)
+ adds r0, r5, r6
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r5, [sp, #1464]
+ ldr r2, [sp, #1476]
+ ldr r3, [sp, #1480]
+ ldr r12, [sp, #1484]
+ ldr lr, [sp, #1488]
+ ldr r6, [sp, #1496]
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #1500]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1492]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1504]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1512]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1508]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r11, r11, r5
+ ldr r5, [sp, #96] @ 4-byte Reload
+ ldr r1, [sp, #1472]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1520]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1516]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1468]
+ adcs r0, r5, r0
+ mov r5, r11
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r8
+ add r8, sp, #1024
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, r8, #376
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #1456]
+ add r11, sp, #1408
+ ldr r9, [sp, #1424]
+ ldr r10, [sp, #1420]
+ ldr r6, [sp, #1400]
+ ldr r7, [sp, #1404]
+ add lr, sp, #1024
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1452]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1448]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1444]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1440]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1436]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1432]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1428]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r8, r11}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [r0, #16]
+ add r0, lr, #312
+ bl .LmulPv448x32(PLT)
+ adds r0, r5, r6
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r5, [sp, #1336]
+ ldr r2, [sp, #1348]
+ ldr r3, [sp, #1352]
+ ldr r12, [sp, #1356]
+ ldr lr, [sp, #1360]
+ ldr r6, [sp, #1368]
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #1372]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1364]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1376]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1384]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1380]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r11, r11, r5
+ ldr r5, [sp, #96] @ 4-byte Reload
+ ldr r1, [sp, #1344]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1392]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1388]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1340]
+ adcs r0, r5, r0
+ mov r5, r11
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r8
+ add r8, sp, #1024
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, r8, #248
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #1328]
+ add r11, sp, #1280
+ ldr r9, [sp, #1296]
+ ldr r10, [sp, #1292]
+ ldr r6, [sp, #1272]
+ ldr r7, [sp, #1276]
+ add lr, sp, #1024
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1324]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1320]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1316]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1312]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1308]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1304]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1300]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r8, r11}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [r0, #20]
+ add r0, lr, #184
+ bl .LmulPv448x32(PLT)
+ adds r0, r5, r6
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r5, [sp, #1208]
+ ldr r2, [sp, #1220]
+ ldr r3, [sp, #1224]
+ ldr r12, [sp, #1228]
+ ldr lr, [sp, #1232]
+ ldr r6, [sp, #1240]
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #1244]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1236]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1248]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1256]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1252]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r11, r11, r5
+ ldr r5, [sp, #96] @ 4-byte Reload
+ ldr r1, [sp, #1216]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1264]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1260]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1212]
+ adcs r0, r5, r0
+ mov r5, r11
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r8
+ add r8, sp, #1024
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, r8, #120
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #1200]
+ add r11, sp, #1152
+ ldr r9, [sp, #1168]
+ ldr r10, [sp, #1164]
+ ldr r6, [sp, #1144]
+ ldr r7, [sp, #1148]
+ add lr, sp, #1024
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1196]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1192]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1188]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1184]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1180]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1176]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1172]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r8, r11}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [r0, #24]
+ add r0, lr, #56
+ bl .LmulPv448x32(PLT)
+ adds r0, r5, r6
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r5, [sp, #1080]
+ ldr r2, [sp, #1092]
+ ldr r3, [sp, #1096]
+ ldr r12, [sp, #1100]
+ ldr lr, [sp, #1104]
+ ldr r6, [sp, #1112]
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #1116]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1108]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1120]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1128]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1124]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r11, r11, r5
+ ldr r5, [sp, #96] @ 4-byte Reload
+ ldr r1, [sp, #1088]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1136]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1132]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1084]
+ adcs r0, r5, r0
+ mov r5, r11
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #1016
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #1072]
+ add r11, sp, #1024
+ ldr r9, [sp, #1040]
+ ldr r10, [sp, #1036]
+ ldr r6, [sp, #1016]
+ ldr r7, [sp, #1020]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1068]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1064]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1060]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1056]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1052]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1048]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1044]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r8, r11}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [r0, #28]
+ add r0, sp, #952
+ bl .LmulPv448x32(PLT)
+ adds r0, r5, r6
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #956
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #980
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1008]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1004]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r4, r6, r7, r8, r9, r10}
+ ldr r5, [sp, #952]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #100] @ 4-byte Reload
+ adds r11, r11, r5
+ ldr r5, [sp, #96] @ 4-byte Reload
+ adcs r0, r5, r0
+ mov r5, r11
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #888
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #944]
+ add r11, sp, #896
+ ldr r9, [sp, #912]
+ ldr r10, [sp, #908]
+ ldr r6, [sp, #888]
+ ldr r7, [sp, #892]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #940]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #936]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #932]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #928]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #924]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #920]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #916]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r8, r11}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [r0, #32]
+ add r0, sp, #824
+ bl .LmulPv448x32(PLT)
+ adds r0, r5, r6
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #828
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #852
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #880]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #876]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r4, r6, r7, r8, r9, r10}
+ ldr r5, [sp, #824]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #100] @ 4-byte Reload
+ adds r11, r11, r5
+ ldr r5, [sp, #96] @ 4-byte Reload
+ adcs r0, r5, r0
+ mov r5, r11
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #760
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #816]
+ add r11, sp, #768
+ ldr r9, [sp, #784]
+ ldr r10, [sp, #780]
+ ldr r6, [sp, #760]
+ ldr r7, [sp, #764]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #812]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #808]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #804]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #800]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #796]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #792]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #788]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r8, r11}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [r0, #36]
+ add r0, sp, #696
+ bl .LmulPv448x32(PLT)
+ adds r0, r5, r6
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #700
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #724
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #752]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #748]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #744]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r4, r6, r8, r9, r10}
+ ldr r5, [sp, #696]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #100] @ 4-byte Reload
+ ldr r7, [sp, #96] @ 4-byte Reload
+ adds r5, r11, r5
+ adcs r0, r7, r0
+ str r5, [sp, #24] @ 4-byte Spill
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #104] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ mul r2, r5, r9
+ adcs r0, r0, r10
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #632
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #688]
+ add r11, sp, #640
+ ldr r5, [sp, #656]
+ ldr r10, [sp, #652]
+ ldr r6, [sp, #632]
+ ldr r7, [sp, #636]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #684]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #680]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #676]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #672]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #668]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #664]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #660]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r4, r8, r11}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [r0, #40]
+ add r0, sp, #568
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldr r1, [sp, #96] @ 4-byte Reload
+ ldr r2, [sp, #8] @ 4-byte Reload
+ add lr, sp, #584
+ adds r0, r0, r6
+ ldr r6, [sp, #580]
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #572]
+ adcs r1, r1, r4
+ str r1, [sp, #96] @ 4-byte Spill
+ ldr r1, [sp, #92] @ 4-byte Reload
+ adcs r1, r1, r8
+ str r1, [sp, #92] @ 4-byte Spill
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adcs r1, r1, r11
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r1, r10
+ add r10, sp, #608
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r5
+ ldr r5, [sp, #576]
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adc r1, r1, r2
+ ldr r2, [sp, #568]
+ str r1, [sp, #44] @ 4-byte Spill
+ adds r0, r0, r2
+ mul r1, r0, r9
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #624]
+ str r1, [sp, #36] @ 4-byte Spill
+ str r0, [sp, #40] @ 4-byte Spill
+ ldm r10, {r4, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #96] @ 4-byte Reload
+ adcs r7, r11, r7
+ str r7, [sp, #96] @ 4-byte Spill
+ ldr r7, [sp, #92] @ 4-byte Reload
+ adcs r5, r7, r5
+ str r5, [sp, #92] @ 4-byte Spill
+ ldr r5, [sp, #88] @ 4-byte Reload
+ adcs r5, r5, r6
+ str r5, [sp, #88] @ 4-byte Spill
+ ldr r5, [sp, #84] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, sp, #504
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #560]
+ add r10, sp, #508
+ ldr r7, [sp, #532]
+ ldr r8, [sp, #528]
+ ldr r9, [sp, #524]
+ ldr r11, [sp, #504]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #556]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #552]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #548]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #544]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #540]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #536]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r4, r5, r6, r10}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [r0, #44]
+ add r0, sp, #440
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #100] @ 4-byte Reload
+ ldr r1, [sp, #92] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ add lr, sp, #456
+ adds r0, r0, r11
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ adcs r1, r1, r5
+ ldr r5, [sp, #448]
+ str r1, [sp, #96] @ 4-byte Spill
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #452]
+ str r1, [sp, #92] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r1, r10
+ add r10, sp, #480
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r9
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r1, r1, r8
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r1, r1, r7
+ ldr r7, [sp, #444]
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adc r1, r1, r2
+ ldr r2, [sp, #440]
+ str r1, [sp, #36] @ 4-byte Spill
+ adds r1, r0, r2
+ ldr r0, [sp, #104] @ 4-byte Reload
+ str r1, [sp, #100] @ 4-byte Spill
+ mul r2, r1, r0
+ ldr r0, [sp, #496]
+ str r2, [sp, #24] @ 4-byte Spill
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm r10, {r4, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #96] @ 4-byte Reload
+ adcs r7, r11, r7
+ str r7, [sp, #52] @ 4-byte Spill
+ ldr r7, [sp, #92] @ 4-byte Reload
+ adcs r5, r7, r5
+ str r5, [sp, #48] @ 4-byte Spill
+ ldr r5, [sp, #88] @ 4-byte Reload
+ adcs r5, r5, r6
+ str r5, [sp, #44] @ 4-byte Spill
+ ldr r5, [sp, #84] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ add r0, sp, #376
+ bl .LmulPv448x32(PLT)
+ ldr r1, [sp, #432]
+ add r10, sp, #380
+ ldr r7, [sp, #404]
+ ldr r8, [sp, #400]
+ ldr r9, [sp, #396]
+ ldr r11, [sp, #376]
+ add r0, sp, #312
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #428]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #424]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #420]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #416]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #412]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #408]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldm r10, {r4, r5, r6, r10}
+ ldr r1, [sp, #112] @ 4-byte Reload
+ ldr r2, [r1, #48]
+ ldr r1, [sp, #108] @ 4-byte Reload
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #100] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #316
+ adds r0, r0, r11
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #340
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #368]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #364]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r4, r6, r7, r8, r9, r10}
+ ldr r5, [sp, #312]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #100] @ 4-byte Reload
+ adds r11, r11, r5
+ ldr r5, [sp, #52] @ 4-byte Reload
+ adcs r0, r5, r0
+ mov r5, r11
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #248
+ bl .LmulPv448x32(PLT)
+ ldr r1, [sp, #304]
+ ldr r10, [sp, #272]
+ ldr r11, [sp, #268]
+ ldr r8, [sp, #264]
+ ldr r6, [sp, #248]
+ ldr r7, [sp, #252]
+ ldr r4, [sp, #256]
+ ldr r9, [sp, #260]
+ add r0, sp, #184
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #300]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #296]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #292]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #288]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #284]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #280]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #276]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [sp, #112] @ 4-byte Reload
+ ldr r2, [r1, #52]
+ ldr r1, [sp, #108] @ 4-byte Reload
+ bl .LmulPv448x32(PLT)
+ adds r0, r5, r6
+ ldr r1, [sp, #52] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ add lr, sp, #200
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r7
+ adcs r1, r1, r4
+ str r1, [sp, #112] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r1, r9
+ str r1, [sp, #108] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r1, r8
+ add r8, sp, #184
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #100] @ 4-byte Reload
+ adcs r1, r1, r11
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [sp, #96] @ 4-byte Reload
+ adcs r1, r1, r10
+ add r10, sp, #224
+ str r1, [sp, #96] @ 4-byte Spill
+ ldr r1, [sp, #92] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #92] @ 4-byte Spill
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adc r1, r1, r2
+ str r1, [sp, #60] @ 4-byte Spill
+ ldm r8, {r2, r7, r8}
+ ldr r6, [sp, #196]
+ adds r4, r0, r2
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mul r1, r4, r0
+ ldr r0, [sp, #240]
+ str r1, [sp, #52] @ 4-byte Spill
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #236]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r5, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #112] @ 4-byte Reload
+ adcs r11, r11, r7
+ ldr r7, [sp, #108] @ 4-byte Reload
+ adcs r8, r7, r8
+ ldr r7, [sp, #56] @ 4-byte Reload
+ adcs r6, r7, r6
+ ldr r7, [sp, #100] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r7, r0, r5
+ ldr r0, [sp, #68] @ 4-byte Reload
+ ldr r5, [sp, #116] @ 4-byte Reload
+ adcs r9, r0, r9
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ add r0, sp, #120
+ bl .LmulPv448x32(PLT)
+ add r3, sp, #120
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r4, r0
+ mov r4, r5
+ adcs r11, r11, r1
+ ldr r0, [sp, #136]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r2, r8, r2
+ str r11, [sp, #44] @ 4-byte Spill
+ adcs lr, r6, r3
+ str r2, [sp, #52] @ 4-byte Spill
+ str lr, [sp, #60] @ 4-byte Spill
+ adcs r8, r1, r0
+ ldr r0, [sp, #140]
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r8, [sp, #64] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #144]
+ adcs r0, r1, r0
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #148]
+ adcs r0, r1, r0
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #152]
+ adcs r0, r1, r0
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #156]
+ adcs r10, r1, r0
+ ldr r0, [sp, #160]
+ ldr r1, [sp, #104] @ 4-byte Reload
+ str r10, [sp, #68] @ 4-byte Spill
+ adcs r0, r7, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #164]
+ adcs r0, r9, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #168]
+ adcs r0, r1, r0
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #172]
+ adcs r0, r1, r0
+ ldr r1, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #176]
+ adc r0, r1, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldmib r4, {r0, r1, r7, r9, r12}
+ ldr r6, [r4]
+ ldr r3, [r4, #24]
+ ldr r5, [r4, #28]
+ subs r6, r11, r6
+ str r3, [sp, #72] @ 4-byte Spill
+ add r11, r4, #32
+ sbcs r3, r2, r0
+ sbcs r2, lr, r1
+ ldm r11, {r0, r1, r11}
+ sbcs lr, r8, r7
+ ldr r7, [r4, #44]
+ ldr r8, [r4, #52]
+ str r7, [sp, #48] @ 4-byte Spill
+ ldr r7, [r4, #48]
+ ldr r4, [sp, #80] @ 4-byte Reload
+ sbcs r9, r4, r9
+ ldr r4, [sp, #84] @ 4-byte Reload
+ str r7, [sp, #56] @ 4-byte Spill
+ ldr r7, [sp, #72] @ 4-byte Reload
+ sbcs r12, r4, r12
+ ldr r4, [sp, #88] @ 4-byte Reload
+ sbcs r4, r4, r7
+ ldr r7, [sp, #92] @ 4-byte Reload
+ sbcs r5, r7, r5
+ sbcs r7, r10, r0
+ ldr r0, [sp, #96] @ 4-byte Reload
+ sbcs r10, r0, r1
+ ldr r0, [sp, #100] @ 4-byte Reload
+ ldr r1, [sp, #48] @ 4-byte Reload
+ sbcs r11, r0, r11
+ ldr r0, [sp, #104] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ sbcs r0, r0, r1
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ sbc r8, r0, r8
+ ldr r0, [sp, #44] @ 4-byte Reload
+ asr r1, r8, #31
+ cmp r1, #0
+ movlt r6, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ str r6, [r0]
+ ldr r6, [sp, #52] @ 4-byte Reload
+ movlt r3, r6
+ str r3, [r0, #4]
+ ldr r3, [sp, #60] @ 4-byte Reload
+ movlt r2, r3
+ cmp r1, #0
+ ldr r3, [sp, #72] @ 4-byte Reload
+ str r2, [r0, #8]
+ ldr r2, [sp, #64] @ 4-byte Reload
+ movlt lr, r2
+ ldr r2, [sp, #80] @ 4-byte Reload
+ str lr, [r0, #12]
+ movlt r9, r2
+ ldr r2, [sp, #84] @ 4-byte Reload
+ str r9, [r0, #16]
+ movlt r12, r2
+ ldr r2, [sp, #88] @ 4-byte Reload
+ cmp r1, #0
+ str r12, [r0, #20]
+ movlt r4, r2
+ ldr r2, [sp, #92] @ 4-byte Reload
+ str r4, [r0, #24]
+ movlt r5, r2
+ ldr r2, [sp, #68] @ 4-byte Reload
+ str r5, [r0, #28]
+ movlt r7, r2
+ ldr r2, [sp, #96] @ 4-byte Reload
+ cmp r1, #0
+ str r7, [r0, #32]
+ movlt r10, r2
+ ldr r2, [sp, #100] @ 4-byte Reload
+ str r10, [r0, #36]
+ movlt r11, r2
+ ldr r2, [sp, #104] @ 4-byte Reload
+ str r11, [r0, #40]
+ movlt r3, r2
+ cmp r1, #0
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [sp, #116] @ 4-byte Reload
+ str r3, [r0, #44]
+ movlt r2, r1
+ ldr r1, [sp, #112] @ 4-byte Reload
+ str r2, [r0, #48]
+ movlt r8, r1
+ str r8, [r0, #52]
+ add sp, sp, #892
+ add sp, sp, #1024
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end211:
+ .size mcl_fp_montNF14L, .Lfunc_end211-mcl_fp_montNF14L
+ .cantunwind
+ .fnend
+
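For reference, the routine that ends above, mcl_fp_montNF14L, computes a 448-bit Montgomery multiplication over fourteen 32-bit limbs (the .LmulPv448x32 helper supplies each word-by-scalar product). A minimal C sketch of the generic word-serial (CIOS) form is given below as an illustration of the arithmetic only; it is not derived from the mcl sources, and the names mont_mul14, rp, and the limb-count macro N are assumptions made for this sketch. The final conditional subtraction corresponds to the subs/sbcs followed by movlt selection visible in the epilogue above.

#include <stdint.h>

#define N 14  /* 14 x 32-bit limbs = 448 bits (assumed layout) */

/* z = x*y*R^-1 mod p, with R = 2^(32*N) and rp = -p[0]^-1 mod 2^32. */
static void mont_mul14(uint32_t z[N], const uint32_t x[N],
                       const uint32_t y[N], const uint32_t p[N],
                       uint32_t rp)
{
    uint32_t t[N + 2] = {0};
    for (int i = 0; i < N; i++) {
        /* t += x * y[i] */
        uint64_t c = 0;
        for (int j = 0; j < N; j++) {
            uint64_t v = (uint64_t)x[j] * y[i] + t[j] + c;
            t[j] = (uint32_t)v;
            c = v >> 32;
        }
        uint64_t v = (uint64_t)t[N] + c;
        t[N] = (uint32_t)v;
        t[N + 1] = (uint32_t)(v >> 32);

        /* pick q so that the low limb of t + q*p becomes zero */
        uint32_t q = t[0] * rp;
        c = 0;
        for (int j = 0; j < N; j++) {
            uint64_t w = (uint64_t)q * p[j] + t[j] + c;
            t[j] = (uint32_t)w;
            c = w >> 32;
        }
        v = (uint64_t)t[N] + c;
        t[N] = (uint32_t)v;
        t[N + 1] += (uint32_t)(v >> 32);

        /* divide by 2^32: drop the (now zero) low limb */
        for (int j = 0; j <= N; j++)
            t[j] = t[j + 1];
        t[N + 1] = 0;
    }
    /* one conditional subtraction at the end, as in the assembly epilogue */
    uint32_t s[N], borrow = 0;
    for (int j = 0; j < N; j++) {
        uint64_t d = (uint64_t)t[j] - p[j] - borrow;
        s[j] = (uint32_t)d;
        borrow = (uint32_t)((d >> 32) & 1);
    }
    for (int j = 0; j < N; j++)
        z[j] = (t[N] != 0 || borrow == 0) ? s[j] : t[j];
}

The heavy 4-byte spill/reload traffic in the assembly is simply this loop's working set: the (N+2)-word accumulator plus the partial products do not fit in the ARM integer register file, so intermediate limbs live on the stack between .LmulPv448x32 calls.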
+ .globl mcl_fp_montRed14L
+ .align 2
+ .type mcl_fp_montRed14L,%function
+mcl_fp_montRed14L: @ @mcl_fp_montRed14L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #68
+ sub sp, sp, #68
+ .pad #1024
+ sub sp, sp, #1024
+ mov r3, r2
+ str r0, [sp, #180] @ 4-byte Spill
+ ldr r2, [r1, #4]
+ ldr r6, [r1]
+ ldr r0, [r3]
+ str r3, [sp, #184] @ 4-byte Spill
+ str r2, [sp, #88] @ 4-byte Spill
+ ldr r2, [r1, #8]
+ str r0, [sp, #176] @ 4-byte Spill
+ ldr r0, [r3, #4]
+ str r2, [sp, #84] @ 4-byte Spill
+ ldr r2, [r1, #12]
+ str r0, [sp, #172] @ 4-byte Spill
+ ldr r0, [r3, #8]
+ str r2, [sp, #80] @ 4-byte Spill
+ str r0, [sp, #168] @ 4-byte Spill
+ ldr r0, [r3, #12]
+ str r0, [sp, #152] @ 4-byte Spill
+ ldr r0, [r3, #16]
+ str r0, [sp, #156] @ 4-byte Spill
+ ldr r0, [r3, #20]
+ str r0, [sp, #160] @ 4-byte Spill
+ ldr r0, [r3, #24]
+ str r0, [sp, #164] @ 4-byte Spill
+ ldr r0, [r3, #-4]
+ str r0, [sp, #188] @ 4-byte Spill
+ mul r2, r6, r0
+ ldr r0, [r3, #28]
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [r3, #32]
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [r3, #36]
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [r3, #40]
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [r3, #44]
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [r3, #48]
+ str r0, [sp, #144] @ 4-byte Spill
+ ldr r0, [r3, #52]
+ str r0, [sp, #148] @ 4-byte Spill
+ ldr r0, [r1, #96]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [r1, #100]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [r1, #104]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [r1, #108]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [r1, #64]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [r1, #68]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r1, #72]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r1, #80]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [r1, #84]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [r1, #88]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [r1, #92]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [r1, #76]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [r1, #32]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r1, #36]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r1, #40]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r1, #44]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r1, #48]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r1, #52]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r1, #56]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [r1, #60]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [r1, #28]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [r1, #24]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [r1, #20]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [r1, #16]
+ mov r1, r3
+ str r0, [sp, #12] @ 4-byte Spill
+ add r0, sp, #1024
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #1080]
+ ldr r8, [sp, #1024]
+ ldr r1, [sp, #1032]
+ ldr r2, [sp, #1036]
+ ldr r3, [sp, #1040]
+ ldr r12, [sp, #1044]
+ ldr lr, [sp, #1048]
+ ldr r4, [sp, #1052]
+ ldr r5, [sp, #1056]
+ ldr r7, [sp, #1060]
+ ldr r9, [sp, #1064]
+ ldr r10, [sp, #1068]
+ ldr r11, [sp, #1072]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1076]
+ adds r6, r6, r8
+ ldr r6, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #1028]
+ adcs r8, r6, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #184] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #188] @ 4-byte Reload
+ mul r2, r8, r0
+ add r0, sp, #960
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #1016]
+ add lr, sp, #996
+ add r10, sp, #964
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1012]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm lr, {r4, r5, r12, lr}
+ ldr r6, [sp, #960]
+ ldr r7, [sp, #992]
+ ldr r11, [sp, #988]
+ ldr r3, [sp, #984]
+ ldm r10, {r0, r1, r2, r9, r10}
+ adds r6, r8, r6
+ ldr r6, [sp, #88] @ 4-byte Reload
+ adcs r8, r6, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ ldr r6, [sp, #188] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r8, r6
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r11
+ mov r11, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #184] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #896
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #952]
+ add r10, sp, #924
+ add lr, sp, #900
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #948]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #944]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #940]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r10, {r7, r8, r9, r10}
+ ldr r4, [sp, #896]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r4, r11, r4
+ ldr r4, [sp, #88] @ 4-byte Reload
+ adcs r4, r4, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #4] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r4, r6
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #832
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #888]
+ add lr, sp, #872
+ add r11, sp, #832
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #884]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm lr, {r5, r12, lr}
+ ldr r6, [sp, #868]
+ ldr r7, [sp, #864]
+ ldm r11, {r0, r1, r2, r3, r8, r9, r10, r11}
+ adds r0, r4, r0
+ ldr r4, [sp, #188] @ 4-byte Reload
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r1, r0, r1
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r11
+ mov r11, r1
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #184] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r1, r4
+ mov r1, r5
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ add r0, sp, #768
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #824]
+ add r10, sp, #796
+ add lr, sp, #784
+ add r9, sp, #768
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #820]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #816]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #812]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r6, r7, r8, r10}
+ ldm lr, {r3, r12, lr}
+ ldm r9, {r0, r1, r2, r9}
+ adds r0, r11, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r1, r0, r1
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r10
+ mov r10, r1
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r1, r4
+ mov r1, r5
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ add r0, sp, #704
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #760]
+ add lr, sp, #744
+ add r9, sp, #708
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #756]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm lr, {r5, r12, lr}
+ ldr r4, [sp, #704]
+ ldr r6, [sp, #740]
+ ldr r7, [sp, #736]
+ ldr r11, [sp, #732]
+ ldr r3, [sp, #728]
+ ldm r9, {r0, r1, r2, r8, r9}
+ adds r4, r10, r4
+ ldr r4, [sp, #88] @ 4-byte Reload
+ adcs r4, r4, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r11
+ mov r11, r4
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #188] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ mul r2, r4, r5
+ ldr r4, [sp, #184] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #640
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #696]
+ add r10, sp, #664
+ add lr, sp, #640
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #692]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #688]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #684]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm r10, {r6, r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r11, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r1, r0, r1
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r10
+ mov r10, r1
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r1, r5
+ mov r1, r4
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, sp, #576
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #632]
+ add lr, sp, #616
+ add r9, sp, #580
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #628]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm lr, {r5, r12, lr}
+ ldr r4, [sp, #576]
+ ldr r6, [sp, #612]
+ ldr r7, [sp, #608]
+ ldr r11, [sp, #604]
+ ldr r3, [sp, #600]
+ ldm r9, {r0, r1, r2, r8, r9}
+ adds r4, r10, r4
+ ldr r4, [sp, #88] @ 4-byte Reload
+ adcs r10, r4, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ ldr r4, [sp, #188] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r10, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #184] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r9
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #512
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #568]
+ add r11, sp, #536
+ add lr, sp, #512
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #564]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #560]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #556]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r11, {r5, r6, r7, r8, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r10, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r10, r0, r1
+ ldr r0, [sp, #84] @ 4-byte Reload
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r10, r4
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ mov r5, r9
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ add r0, sp, #448
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #504]
+ add lr, sp, #484
+ add r9, sp, #452
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #500]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #496]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm lr, {r6, r12, lr}
+ ldr r4, [sp, #448]
+ ldr r7, [sp, #480]
+ ldr r11, [sp, #476]
+ ldr r3, [sp, #472]
+ ldm r9, {r0, r1, r2, r8, r9}
+ adds r4, r10, r4
+ ldr r4, [sp, #88] @ 4-byte Reload
+ adcs r10, r4, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ ldr r4, [sp, #188] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r10, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ add r0, sp, #384
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #440]
+ add r11, sp, #408
+ add lr, sp, #384
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #436]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #432]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldm r11, {r5, r6, r7, r8, r9, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r10, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r10, r0, r1
+ ldr r0, [sp, #84] @ 4-byte Reload
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r10, r4
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #184] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r7
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ add r0, sp, #320
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #376]
+ add r9, sp, #348
+ ldr r11, [sp, #364]
+ ldr r8, [sp, #360]
+ add lr, sp, #328
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #372]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #368]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r9, {r4, r6, r9}
+ ldr r3, [sp, #320]
+ ldr r5, [sp, #324]
+ ldm lr, {r0, r1, r2, r12, lr}
+ adds r3, r10, r3
+ ldr r3, [sp, #88] @ 4-byte Reload
+ adcs r5, r3, r5
+ ldr r3, [sp, #84] @ 4-byte Reload
+ adcs r10, r3, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #188] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ mul r2, r5, r6
+ adcs r0, r0, r9
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r11
+ mov r11, r7
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r11
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ add r0, sp, #256
+ bl .LmulPv448x32(PLT)
+ add r7, sp, #256
+ add r12, sp, #272
+ ldm r7, {r0, r1, r3, r7}
+ ldr r9, [sp, #312]
+ ldr r8, [sp, #308]
+ ldr lr, [sp, #304]
+ adds r0, r5, r0
+ ldr r5, [sp, #300]
+ adcs r10, r10, r1
+ mul r0, r10, r6
+ ldr r6, [sp, #296]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #188] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #292]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldm r12, {r0, r1, r2, r3, r12}
+ ldr r4, [sp, #120] @ 4-byte Reload
+ adcs r0, r4, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r11
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r4, r0, r2
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r2, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r8, r0, r9
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ add r0, sp, #192
+ bl .LmulPv448x32(PLT)
+ add r3, sp, #192
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r10, r0
+ ldr r0, [sp, #188] @ 4-byte Reload
+ adcs lr, r0, r1
+ ldr r0, [sp, #68] @ 4-byte Reload
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str lr, [sp, #72] @ 4-byte Spill
+ adcs r2, r0, r2
+ ldr r0, [sp, #44] @ 4-byte Reload
+ str r2, [sp, #76] @ 4-byte Spill
+ adcs r3, r0, r3
+ ldr r0, [sp, #208]
+ str r3, [sp, #80] @ 4-byte Spill
+ adcs r7, r1, r0
+ ldr r0, [sp, #212]
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r7, [sp, #84] @ 4-byte Spill
+ adcs r4, r4, r0
+ ldr r0, [sp, #216]
+ str r4, [sp, #88] @ 4-byte Spill
+ adcs r5, r1, r0
+ ldr r0, [sp, #220]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r5, [sp, #92] @ 4-byte Spill
+ adcs r6, r1, r0
+ ldr r0, [sp, #224]
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r6, [sp, #96] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #184] @ 4-byte Spill
+ ldr r0, [sp, #228]
+ adcs r11, r1, r0
+ ldr r0, [sp, #232]
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r11, [sp, #100] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #188] @ 4-byte Spill
+ ldr r0, [sp, #236]
+ adcs r10, r1, r0
+ ldr r0, [sp, #240]
+ ldr r1, [sp, #120] @ 4-byte Reload
+ str r10, [sp, #108] @ 4-byte Spill
+ adcs r9, r1, r0
+ ldr r0, [sp, #244]
+ ldr r1, [sp, #112] @ 4-byte Reload
+ str r9, [sp, #116] @ 4-byte Spill
+ adcs r8, r8, r0
+ ldr r0, [sp, #248]
+ str r8, [sp, #120] @ 4-byte Spill
+ adcs r12, r1, r0
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r1, [sp, #172] @ 4-byte Reload
+ str r12, [sp, #112] @ 4-byte Spill
+ adc r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #176] @ 4-byte Reload
+ subs r0, lr, r0
+ sbcs r1, r2, r1
+ ldr r2, [sp, #168] @ 4-byte Reload
+ sbcs r2, r3, r2
+ ldr r3, [sp, #152] @ 4-byte Reload
+ sbcs r3, r7, r3
+ ldr r7, [sp, #156] @ 4-byte Reload
+ sbcs lr, r4, r7
+ ldr r4, [sp, #160] @ 4-byte Reload
+ ldr r7, [sp, #184] @ 4-byte Reload
+ sbcs r4, r5, r4
+ ldr r5, [sp, #164] @ 4-byte Reload
+ sbcs r5, r6, r5
+ ldr r6, [sp, #124] @ 4-byte Reload
+ sbcs r6, r7, r6
+ ldr r7, [sp, #128] @ 4-byte Reload
+ sbcs r7, r11, r7
+ ldr r11, [sp, #188] @ 4-byte Reload
+ str r7, [sp, #172] @ 4-byte Spill
+ ldr r7, [sp, #132] @ 4-byte Reload
+ sbcs r11, r11, r7
+ ldr r7, [sp, #136] @ 4-byte Reload
+ sbcs r7, r10, r7
+ str r7, [sp, #176] @ 4-byte Spill
+ ldr r7, [sp, #140] @ 4-byte Reload
+ sbcs r9, r9, r7
+ ldr r7, [sp, #144] @ 4-byte Reload
+ sbcs r10, r8, r7
+ ldr r7, [sp, #148] @ 4-byte Reload
+ sbcs r8, r12, r7
+ ldr r7, [sp, #104] @ 4-byte Reload
+ sbc r7, r7, #0
+ ands r12, r7, #1
+ ldr r7, [sp, #72] @ 4-byte Reload
+ movne r0, r7
+ ldr r7, [sp, #180] @ 4-byte Reload
+ str r0, [r7]
+ ldr r0, [sp, #76] @ 4-byte Reload
+ movne r1, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ str r1, [r7, #4]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ movne r2, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ cmp r12, #0
+ str r2, [r7, #8]
+ movne r3, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ str r3, [r7, #12]
+ movne lr, r0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ str lr, [r7, #16]
+ movne r4, r0
+ ldr r0, [sp, #96] @ 4-byte Reload
+ cmp r12, #0
+ str r4, [r7, #20]
+ movne r5, r0
+ ldr r0, [sp, #184] @ 4-byte Reload
+ str r5, [r7, #24]
+ movne r6, r0
+ ldr r0, [sp, #172] @ 4-byte Reload
+ movne r0, r1
+ str r6, [r7, #28]
+ cmp r12, #0
+ str r0, [r7, #32]
+ ldr r0, [sp, #188] @ 4-byte Reload
+ movne r11, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ str r11, [r7, #36]
+ ldr r11, [sp, #176] @ 4-byte Reload
+ movne r11, r0
+ ldr r0, [sp, #116] @ 4-byte Reload
+ str r11, [r7, #40]
+ movne r9, r0
+ ldr r0, [sp, #120] @ 4-byte Reload
+ cmp r12, #0
+ str r9, [r7, #44]
+ movne r10, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ str r10, [r7, #48]
+ movne r8, r0
+ str r8, [r7, #52]
+ add sp, sp, #68
+ add sp, sp, #1024
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end212:
+ .size mcl_fp_montRed14L, .Lfunc_end212-mcl_fp_montRed14L
+ .cantunwind
+ .fnend
+
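+@ Annotation (added; not part of the generated output): per the mcl naming
+@ convention, mcl_fp_addPre14L appears to compute z = x + y over 14 32-bit
+@ limbs (448 bits) without modular reduction, with r0=z, r1=x, r2=y and the
+@ final carry returned in r0.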
+ .globl mcl_fp_addPre14L
+ .align 2
+ .type mcl_fp_addPre14L,%function
+mcl_fp_addPre14L: @ @mcl_fp_addPre14L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #44
+ sub sp, sp, #44
+ ldm r1, {r3, r12, lr}
+ ldr r9, [r1, #12]
+ ldmib r2, {r5, r6, r7}
+ ldr r11, [r2]
+ ldr r4, [r2, #16]
+ ldr r10, [r1, #44]
+ adds r8, r11, r3
+ ldr r3, [r2, #32]
+ str r4, [sp, #4] @ 4-byte Spill
+ ldr r4, [r2, #20]
+ ldr r11, [r1, #48]
+ adcs r5, r5, r12
+ add r12, r1, #16
+ adcs r6, r6, lr
+ ldr lr, [r1, #40]
+ adcs r7, r7, r9
+ ldr r9, [r1, #52]
+ str r3, [sp, #16] @ 4-byte Spill
+ ldr r3, [r2, #36]
+ str r4, [sp, #8] @ 4-byte Spill
+ ldr r4, [r2, #24]
+ str r3, [sp, #24] @ 4-byte Spill
+ ldr r3, [r2, #40]
+ str r4, [sp, #12] @ 4-byte Spill
+ ldr r4, [r2, #28]
+ str r3, [sp, #28] @ 4-byte Spill
+ ldr r3, [r2, #44]
+ str r4, [sp, #20] @ 4-byte Spill
+ ldr r4, [r1, #32]
+ str r3, [sp, #32] @ 4-byte Spill
+ ldr r3, [r2, #48]
+ ldr r2, [r2, #52]
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [r1, #36]
+ str r3, [sp, #36] @ 4-byte Spill
+ str r2, [sp] @ 4-byte Spill
+ ldm r12, {r1, r2, r3, r12}
+ str r8, [r0]
+ stmib r0, {r5, r6}
+ str r7, [r0, #12]
+ ldr r5, [sp, #4] @ 4-byte Reload
+ ldr r7, [sp, #32] @ 4-byte Reload
+ ldr r6, [sp, #36] @ 4-byte Reload
+ adcs r1, r5, r1
+ ldr r5, [sp, #8] @ 4-byte Reload
+ str r1, [r0, #16]
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r2, r5, r2
+ ldr r5, [sp, #40] @ 4-byte Reload
+ str r2, [r0, #20]
+ ldr r2, [sp, #20] @ 4-byte Reload
+ adcs r1, r1, r3
+ ldr r3, [sp] @ 4-byte Reload
+ str r1, [r0, #24]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r2, r2, r12
+ add r12, r0, #32
+ str r2, [r0, #28]
+ ldr r2, [sp, #24] @ 4-byte Reload
+ adcs r1, r1, r4
+ adcs r2, r2, r3
+ ldr r3, [sp, #28] @ 4-byte Reload
+ adcs r3, r3, lr
+ adcs r7, r7, r10
+ adcs r6, r6, r11
+ stm r12, {r1, r2, r3, r7}
+ adcs r5, r5, r9
+ str r6, [r0, #48]
+ str r5, [r0, #52]
+ mov r0, #0
+ adc r0, r0, #0
+ add sp, sp, #44
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end213:
+ .size mcl_fp_addPre14L, .Lfunc_end213-mcl_fp_addPre14L
+ .cantunwind
+ .fnend
+
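+@ Annotation (added): mcl_fp_subPre14L appears to compute z = x - y over 14
+@ 32-bit limbs without modular reduction (r0=z, r1=x, r2=y), returning the
+@ borrow (0 or 1) in r0.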
+ .globl mcl_fp_subPre14L
+ .align 2
+ .type mcl_fp_subPre14L,%function
+mcl_fp_subPre14L: @ @mcl_fp_subPre14L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #44
+ sub sp, sp, #44
+ ldmib r2, {r10, r11}
+ ldr r3, [r2, #16]
+ ldr r7, [r1]
+ ldr r6, [r2, #12]
+ str r3, [sp, #28] @ 4-byte Spill
+ ldr r3, [r2, #20]
+ str r3, [sp, #32] @ 4-byte Spill
+ ldr r3, [r2, #24]
+ str r3, [sp, #36] @ 4-byte Spill
+ ldr r3, [r2, #28]
+ str r3, [sp, #40] @ 4-byte Spill
+ ldr r3, [r2]
+ ldmib r1, {r4, r5, r12}
+ subs lr, r7, r3
+ ldr r3, [r2, #32]
+ sbcs r4, r4, r10
+ sbcs r5, r5, r11
+ add r11, r1, #32
+ sbcs r6, r12, r6
+ add r12, r1, #16
+ str r3, [sp, #4] @ 4-byte Spill
+ ldr r3, [r2, #36]
+ str r3, [sp, #8] @ 4-byte Spill
+ ldr r3, [r2, #40]
+ str r3, [sp, #12] @ 4-byte Spill
+ ldr r3, [r2, #44]
+ str r3, [sp, #16] @ 4-byte Spill
+ ldr r3, [r2, #48]
+ ldr r2, [r2, #52]
+ str r3, [sp, #20] @ 4-byte Spill
+ str r2, [sp, #24] @ 4-byte Spill
+ ldm r11, {r7, r10, r11}
+ ldr r2, [r1, #52]
+ ldr r8, [r1, #44]
+ ldr r9, [r1, #48]
+ str r2, [sp] @ 4-byte Spill
+ ldm r12, {r1, r2, r3, r12}
+ str lr, [r0]
+ stmib r0, {r4, r5}
+ str r6, [r0, #12]
+ ldr r5, [sp, #28] @ 4-byte Reload
+ ldr r6, [sp, #32] @ 4-byte Reload
+ ldr r4, [sp] @ 4-byte Reload
+ sbcs r1, r1, r5
+ ldr r5, [sp, #24] @ 4-byte Reload
+ str r1, [r0, #16]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ sbcs r2, r2, r6
+ ldr r6, [sp, #20] @ 4-byte Reload
+ str r2, [r0, #20]
+ ldr r2, [sp, #40] @ 4-byte Reload
+ sbcs r1, r3, r1
+ ldr r3, [sp, #12] @ 4-byte Reload
+ str r1, [r0, #24]
+ ldr r1, [sp, #4] @ 4-byte Reload
+ sbcs r2, r12, r2
+ add r12, r0, #32
+ str r2, [r0, #28]
+ ldr r2, [sp, #8] @ 4-byte Reload
+ sbcs r1, r7, r1
+ ldr r7, [sp, #16] @ 4-byte Reload
+ sbcs r2, r10, r2
+ sbcs r3, r11, r3
+ sbcs r7, r8, r7
+ sbcs r6, r9, r6
+ stm r12, {r1, r2, r3, r7}
+ sbcs r5, r4, r5
+ str r6, [r0, #48]
+ str r5, [r0, #52]
+ mov r0, #0
+ sbc r0, r0, #0
+ and r0, r0, #1
+ add sp, sp, #44
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end214:
+ .size mcl_fp_subPre14L, .Lfunc_end214-mcl_fp_subPre14L
+ .cantunwind
+ .fnend
+
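+@ Annotation (added): mcl_fp_shr1_14L appears to perform a one-bit logical
+@ right shift of a 14-limb value (r0=z, r1=x).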
+ .globl mcl_fp_shr1_14L
+ .align 2
+ .type mcl_fp_shr1_14L,%function
+mcl_fp_shr1_14L: @ @mcl_fp_shr1_14L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #16
+ sub sp, sp, #16
+ add r9, r1, #8
+ add r12, r1, #32
+ ldm r9, {r2, r3, r4, r5, r6, r9}
+ ldm r1, {r7, lr}
+ str r7, [sp, #4] @ 4-byte Spill
+ lsr r7, lr, #1
+ orr r7, r7, r2, lsl #31
+ str r7, [sp] @ 4-byte Spill
+ ldm r12, {r7, r11, r12}
+ ldr r10, [r1, #48]
+ ldr r8, [r1, #44]
+ ldr r1, [r1, #52]
+ str r1, [sp, #12] @ 4-byte Spill
+ lsr r1, r3, #1
+ lsrs r3, r3, #1
+ str r10, [sp, #8] @ 4-byte Spill
+ rrx r2, r2
+ lsrs r3, lr, #1
+ orr r1, r1, r4, lsl #31
+ ldr r3, [sp, #4] @ 4-byte Reload
+ rrx r3, r3
+ str r3, [r0]
+ ldr r3, [sp] @ 4-byte Reload
+ str r3, [r0, #4]
+ str r2, [r0, #8]
+ str r1, [r0, #12]
+ lsrs r1, r5, #1
+ lsr r2, r11, #1
+ rrx r1, r4
+ ldr r4, [sp, #8] @ 4-byte Reload
+ orr r2, r2, r12, lsl #31
+ str r1, [r0, #16]
+ lsr r1, r5, #1
+ ldr r5, [sp, #12] @ 4-byte Reload
+ orr r1, r1, r6, lsl #31
+ str r1, [r0, #20]
+ lsrs r1, r9, #1
+ rrx r1, r6
+ str r1, [r0, #24]
+ lsr r1, r9, #1
+ orr r1, r1, r7, lsl #31
+ str r1, [r0, #28]
+ lsrs r1, r11, #1
+ rrx r1, r7
+ lsrs r3, r8, #1
+ lsr r7, r8, #1
+ rrx r3, r12
+ lsrs r6, r5, #1
+ orr r7, r7, r4, lsl #31
+ add r12, r0, #32
+ lsr r5, r5, #1
+ rrx r6, r4
+ stm r12, {r1, r2, r3, r7}
+ str r6, [r0, #48]
+ str r5, [r0, #52]
+ add sp, sp, #16
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end215:
+ .size mcl_fp_shr1_14L, .Lfunc_end215-mcl_fp_shr1_14L
+ .cantunwind
+ .fnend
+
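+@ Annotation (added): mcl_fp_add14L appears to compute z = (x + y) mod p for
+@ 14-limb operands (r0=z, r1=x, r2=y, r3=p): the raw sum is stored first and
+@ then replaced by sum - p when that subtraction does not borrow.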
+ .globl mcl_fp_add14L
+ .align 2
+ .type mcl_fp_add14L,%function
+mcl_fp_add14L: @ @mcl_fp_add14L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #52
+ sub sp, sp, #52
+ ldr r9, [r1]
+ ldmib r1, {r8, lr}
+ ldr r12, [r1, #12]
+ ldm r2, {r4, r5, r6, r7}
+ adds r9, r4, r9
+ ldr r4, [r1, #24]
+ adcs r10, r5, r8
+ ldr r5, [r1, #20]
+ str r9, [r0]
+ adcs r6, r6, lr
+ mov lr, r10
+ adcs r7, r7, r12
+ str r6, [sp, #32] @ 4-byte Spill
+ ldr r6, [r1, #16]
+ str lr, [r0, #4]
+ str r7, [sp, #28] @ 4-byte Spill
+ ldr r7, [r2, #16]
+ adcs r7, r7, r6
+ ldr r6, [r2, #44]
+ str r7, [sp, #48] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ adcs r7, r7, r5
+ ldr r5, [r2, #28]
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ ldr r10, [sp, #16] @ 4-byte Reload
+ adcs r7, r7, r4
+ ldr r4, [sp, #32] @ 4-byte Reload
+ str r7, [sp, #44] @ 4-byte Spill
+ ldr r7, [r1, #28]
+ str r4, [r0, #8]
+ adcs r7, r5, r7
+ ldr r5, [r2, #32]
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [r1, #32]
+ adcs r7, r5, r7
+ ldr r5, [r2, #36]
+ str r7, [sp, #40] @ 4-byte Spill
+ ldr r7, [r1, #36]
+ adcs r11, r5, r7
+ ldr r7, [r1, #40]
+ ldr r5, [r2, #40]
+ str r11, [sp, #24] @ 4-byte Spill
+ adcs r8, r5, r7
+ ldr r7, [r1, #44]
+ ldr r5, [sp, #28] @ 4-byte Reload
+ str r8, [sp, #12] @ 4-byte Spill
+ adcs r12, r6, r7
+ ldr r7, [r1, #48]
+ ldr r6, [r2, #48]
+ ldr r1, [r1, #52]
+ ldr r2, [r2, #52]
+ str r5, [r0, #12]
+ str r12, [sp, #8] @ 4-byte Spill
+ adcs r6, r6, r7
+ adcs r2, r2, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r1, [r0, #16]
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r10, [r0, #20]
+ str r1, [r0, #24]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r1, [r0, #28]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r1, [r0, #32]
+ str r11, [r0, #36]
+ str r8, [r0, #40]
+ str r12, [r0, #44]
+ str r6, [r0, #48]
+ str r2, [r0, #52]
+ mov r8, r2
+ mov r2, #0
+ mov r12, r6
+ add r11, r3, #32
+ adc r1, r2, #0
+ str r1, [sp, #20] @ 4-byte Spill
+ ldm r3, {r6, r7}
+ ldr r1, [r3, #8]
+ ldr r2, [r3, #12]
+ subs r6, r9, r6
+ sbcs r7, lr, r7
+ str r6, [sp, #4] @ 4-byte Spill
+ sbcs r1, r4, r1
+ str r7, [sp] @ 4-byte Spill
+ str r1, [sp, #32] @ 4-byte Spill
+ sbcs r1, r5, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [r3, #16]
+ sbcs r1, r2, r1
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [r3, #20]
+ sbcs r10, r10, r1
+ ldr r1, [r3, #24]
+ sbcs r1, r2, r1
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [r3, #28]
+ sbcs r5, r2, r1
+ ldm r11, {r1, r2, r6, r7, r11}
+ ldr r9, [r3, #52]
+ ldr r3, [sp, #40] @ 4-byte Reload
+ sbcs r3, r3, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ sbcs lr, r1, r2
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #20] @ 4-byte Reload
+ sbcs r4, r1, r6
+ ldr r1, [sp, #8] @ 4-byte Reload
+ sbcs r7, r1, r7
+ sbcs r6, r12, r11
+ sbcs r1, r8, r9
+ sbc r2, r2, #0
+ tst r2, #1
+ bne .LBB216_2
+@ BB#1: @ %nocarry
+ ldr r2, [sp, #4] @ 4-byte Reload
+ str r2, [r0]
+ ldr r2, [sp] @ 4-byte Reload
+ str r2, [r0, #4]
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r2, [r0, #8]
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r2, [r0, #12]
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r2, [r0, #16]
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r10, [r0, #20]
+ str r2, [r0, #24]
+ str r5, [r0, #28]
+ str r3, [r0, #32]
+ str lr, [r0, #36]
+ str r4, [r0, #40]
+ str r7, [r0, #44]
+ str r6, [r0, #48]
+ str r1, [r0, #52]
+.LBB216_2: @ %carry
+ add sp, sp, #52
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end216:
+ .size mcl_fp_add14L, .Lfunc_end216-mcl_fp_add14L
+ .cantunwind
+ .fnend
+
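+@ Annotation (added): mcl_fp_addNF14L appears to be the "NF" variant of
+@ modular addition: it adds x and y, subtracts p, and selects between the two
+@ results with conditional moves based on the sign of the top limb.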
+ .globl mcl_fp_addNF14L
+ .align 2
+ .type mcl_fp_addNF14L,%function
+mcl_fp_addNF14L: @ @mcl_fp_addNF14L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #80
+ sub sp, sp, #80
+ ldm r1, {r7, r8, lr}
+ ldr r12, [r1, #12]
+ ldm r2, {r4, r5, r6, r10}
+ adds r4, r4, r7
+ ldr r7, [r2, #16]
+ adcs r5, r5, r8
+ str r4, [sp, #36] @ 4-byte Spill
+ ldr r4, [r1, #24]
+ adcs lr, r6, lr
+ ldr r6, [r1, #16]
+ str r5, [sp, #40] @ 4-byte Spill
+ ldr r5, [r1, #20]
+ adcs r9, r10, r12
+ str lr, [sp, #12] @ 4-byte Spill
+ str r9, [sp, #16] @ 4-byte Spill
+ adcs r7, r7, r6
+ ldr r6, [r2, #20]
+ str r7, [sp, #44] @ 4-byte Spill
+ adcs r7, r6, r5
+ ldr r6, [r2, #24]
+ ldr r5, [r2, #28]
+ str r7, [sp, #48] @ 4-byte Spill
+ adcs r8, r6, r4
+ ldr r6, [r1, #28]
+ str r8, [sp, #20] @ 4-byte Spill
+ adcs r7, r5, r6
+ ldr r6, [r1, #32]
+ ldr r5, [r2, #32]
+ str r7, [sp, #52] @ 4-byte Spill
+ adcs r7, r5, r6
+ ldr r6, [r1, #36]
+ ldr r5, [r2, #36]
+ str r7, [sp, #56] @ 4-byte Spill
+ adcs r7, r5, r6
+ ldr r6, [r1, #40]
+ ldr r5, [r2, #40]
+ str r7, [sp, #68] @ 4-byte Spill
+ adcs r7, r5, r6
+ ldr r6, [r1, #44]
+ ldr r5, [r2, #44]
+ str r7, [sp, #64] @ 4-byte Spill
+ adcs r7, r5, r6
+ ldr r6, [r1, #48]
+ ldr r5, [r2, #48]
+ ldr r1, [r1, #52]
+ ldr r2, [r2, #52]
+ str r7, [sp, #60] @ 4-byte Spill
+ adcs r7, r5, r6
+ adc r1, r2, r1
+ str r7, [sp, #76] @ 4-byte Spill
+ str r1, [sp, #72] @ 4-byte Spill
+ ldmib r3, {r1, r4, r6}
+ ldr r2, [r3, #24]
+ ldr r7, [r3]
+ ldr r5, [r3, #16]
+ ldr r11, [r3, #20]
+ ldr r10, [r3, #40]
+ str r2, [sp, #28] @ 4-byte Spill
+ ldr r2, [r3, #28]
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r2, [sp, #36] @ 4-byte Reload
+ subs r7, r2, r7
+ ldr r2, [sp, #40] @ 4-byte Reload
+ sbcs r2, r2, r1
+ ldr r1, [r3, #36]
+ sbcs r12, lr, r4
+ sbcs lr, r9, r6
+ ldr r9, [r3, #32]
+ ldr r6, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [r3, #44]
+ str r1, [sp] @ 4-byte Spill
+ ldr r1, [r3, #48]
+ str r1, [sp, #4] @ 4-byte Spill
+ ldr r1, [r3, #52]
+ str r1, [sp, #8] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ sbcs r5, r1, r5
+ ldr r1, [sp, #48] @ 4-byte Reload
+ sbcs r3, r1, r11
+ ldr r1, [sp, #28] @ 4-byte Reload
+ sbcs r4, r8, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ sbcs r8, r1, r6
+ ldr r1, [sp, #56] @ 4-byte Reload
+ ldr r6, [sp, #24] @ 4-byte Reload
+ sbcs r11, r1, r9
+ ldr r1, [sp, #68] @ 4-byte Reload
+ sbcs r9, r1, r6
+ ldr r1, [sp, #64] @ 4-byte Reload
+ ldr r6, [sp] @ 4-byte Reload
+ sbcs r1, r1, r10
+ ldr r10, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ sbcs r1, r1, r6
+ ldr r6, [sp, #4] @ 4-byte Reload
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ sbcs r1, r1, r6
+ ldr r6, [sp, #8] @ 4-byte Reload
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ sbc r6, r1, r6
+ asr r1, r6, #31
+ cmp r1, #0
+ movlt r7, r10
+ str r7, [r0]
+ ldr r7, [sp, #40] @ 4-byte Reload
+ movlt r2, r7
+ str r2, [r0, #4]
+ ldr r2, [sp, #12] @ 4-byte Reload
+ movlt r12, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ cmp r1, #0
+ str r12, [r0, #8]
+ movlt lr, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str lr, [r0, #12]
+ movlt r5, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r5, [r0, #16]
+ movlt r3, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ cmp r1, #0
+ str r3, [r0, #20]
+ ldr r3, [sp, #24] @ 4-byte Reload
+ movlt r4, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r4, [r0, #24]
+ movlt r8, r2
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r8, [r0, #28]
+ movlt r11, r2
+ ldr r2, [sp, #68] @ 4-byte Reload
+ cmp r1, #0
+ str r11, [r0, #32]
+ movlt r9, r2
+ ldr r2, [sp, #64] @ 4-byte Reload
+ str r9, [r0, #36]
+ movlt r3, r2
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r3, [r0, #40]
+ ldr r3, [sp, #28] @ 4-byte Reload
+ movlt r3, r2
+ cmp r1, #0
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r3, [r0, #44]
+ movlt r2, r1
+ ldr r1, [sp, #72] @ 4-byte Reload
+ str r2, [r0, #48]
+ movlt r6, r1
+ str r6, [r0, #52]
+ add sp, sp, #80
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end217:
+ .size mcl_fp_addNF14L, .Lfunc_end217-mcl_fp_addNF14L
+ .cantunwind
+ .fnend
+
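+@ Annotation (added): mcl_fp_sub14L appears to compute z = (x - y) mod p for
+@ 14-limb operands: the raw difference is stored, and p (r3) is added back
+@ when the subtraction borrowed.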
+ .globl mcl_fp_sub14L
+ .align 2
+ .type mcl_fp_sub14L,%function
+mcl_fp_sub14L: @ @mcl_fp_sub14L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #60
+ sub sp, sp, #60
+ ldr r9, [r2]
+ ldmib r2, {r8, lr}
+ ldr r5, [r1]
+ ldr r12, [r2, #12]
+ ldmib r1, {r4, r6, r7}
+ subs r5, r5, r9
+ sbcs r4, r4, r8
+ str r5, [sp, #52] @ 4-byte Spill
+ ldr r5, [r2, #24]
+ sbcs r6, r6, lr
+ str r4, [sp, #48] @ 4-byte Spill
+ ldr r4, [r2, #20]
+ sbcs r7, r7, r12
+ str r6, [sp, #56] @ 4-byte Spill
+ ldr r6, [r2, #16]
+ str r7, [sp, #44] @ 4-byte Spill
+ ldr r7, [r1, #16]
+ sbcs r8, r7, r6
+ ldr r7, [r1, #20]
+ ldr r6, [r1, #28]
+ str r8, [sp, #40] @ 4-byte Spill
+ sbcs r10, r7, r4
+ ldr r7, [r1, #24]
+ ldr r4, [r1, #40]
+ str r10, [sp, #36] @ 4-byte Spill
+ sbcs r9, r7, r5
+ ldr r7, [r2, #28]
+ sbcs r11, r6, r7
+ ldr r7, [r2, #32]
+ ldr r6, [r1, #32]
+ str r11, [sp, #32] @ 4-byte Spill
+ sbcs r12, r6, r7
+ ldr r7, [r2, #36]
+ ldr r6, [r1, #36]
+ str r12, [sp, #28] @ 4-byte Spill
+ sbcs r6, r6, r7
+ ldr r7, [r2, #40]
+ sbcs r5, r4, r7
+ ldr r7, [r2, #44]
+ ldr r4, [r1, #44]
+ str r5, [sp, #24] @ 4-byte Spill
+ sbcs lr, r4, r7
+ ldr r4, [r2, #48]
+ ldr r7, [r1, #48]
+ ldr r2, [r2, #52]
+ ldr r1, [r1, #52]
+ sbcs r7, r7, r4
+ ldr r4, [sp, #44] @ 4-byte Reload
+ sbcs r2, r1, r2
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r1, [r0]
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r1, [r0, #4]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r1, [r0, #8]
+ str r4, [r0, #12]
+ str r8, [r0, #16]
+ mov r1, lr
+ add r8, r0, #24
+ str r10, [r0, #20]
+ stm r8, {r9, r11, r12}
+ str r6, [r0, #36]
+ str r5, [r0, #40]
+ str r1, [r0, #44]
+ str r7, [r0, #48]
+ mov r8, r2
+ str r2, [r0, #52]
+ mov r2, #0
+ sbc r2, r2, #0
+ tst r2, #1
+ beq .LBB218_2
+@ BB#1: @ %carry
+ ldr r2, [r3, #52]
+ ldr r5, [r3, #48]
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [sp, #52] @ 4-byte Reload
+ ldr lr, [r3, #4]
+ ldr r12, [r3, #8]
+ ldr r10, [r3, #12]
+ ldr r11, [r3, #40]
+ str r2, [sp, #20] @ 4-byte Spill
+ ldr r2, [r3, #16]
+ str r5, [sp, #52] @ 4-byte Spill
+ ldr r5, [sp, #48] @ 4-byte Reload
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r3, #20]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r3, #24]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r3, #28]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [r3]
+ adds r2, r2, r7
+ ldr r7, [sp, #56] @ 4-byte Reload
+ adcs lr, lr, r5
+ ldr r5, [r3, #44]
+ adcs r7, r12, r7
+ add r12, r0, #32
+ str r5, [sp, #48] @ 4-byte Spill
+ adcs r5, r10, r4
+ ldr r10, [r3, #36]
+ ldr r3, [r3, #32]
+ stm r0, {r2, lr}
+ str r7, [r0, #8]
+ ldr r2, [sp, #40] @ 4-byte Reload
+ ldr r7, [sp] @ 4-byte Reload
+ ldr r4, [sp, #36] @ 4-byte Reload
+ str r5, [r0, #12]
+ ldr r5, [sp, #52] @ 4-byte Reload
+ adcs r2, r7, r2
+ ldr r7, [sp, #4] @ 4-byte Reload
+ str r2, [r0, #16]
+ ldr r2, [sp, #8] @ 4-byte Reload
+ adcs r4, r7, r4
+ ldr r7, [sp, #12] @ 4-byte Reload
+ adcs r2, r2, r9
+ str r4, [r0, #20]
+ str r2, [r0, #24]
+ ldr r2, [sp, #32] @ 4-byte Reload
+ adcs r2, r7, r2
+ ldr r7, [sp, #24] @ 4-byte Reload
+ str r2, [r0, #28]
+ ldr r2, [sp, #28] @ 4-byte Reload
+ adcs r2, r3, r2
+ adcs r3, r10, r6
+ ldr r6, [sp, #48] @ 4-byte Reload
+ adcs r7, r11, r7
+ adcs r6, r6, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r5, r5, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ stm r12, {r2, r3, r7}
+ str r6, [r0, #44]
+ str r5, [r0, #48]
+ adc r1, r1, r8
+ str r1, [r0, #52]
+.LBB218_2: @ %nocarry
+ add sp, sp, #60
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end218:
+ .size mcl_fp_sub14L, .Lfunc_end218-mcl_fp_sub14L
+ .cantunwind
+ .fnend
+
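+@ Annotation (added): mcl_fp_subNF14L appears to be the "NF" variant of
+@ modular subtraction: it computes x - y and x - y + p, then selects per limb
+@ with conditional moves based on the sign of the difference.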
+ .globl mcl_fp_subNF14L
+ .align 2
+ .type mcl_fp_subNF14L,%function
+mcl_fp_subNF14L: @ @mcl_fp_subNF14L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #80
+ sub sp, sp, #80
+ mov r12, r0
+ ldr r0, [r2, #32]
+ add r7, r1, #16
+ ldr r9, [r2]
+ ldr r11, [r2, #20]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r2, #36]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r2, #40]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [r2, #44]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r2, #48]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r2, #52]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r1, #52]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r1, #48]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r1, #44]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [r1, #40]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [r1, #36]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [r2, #4]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [r2, #8]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [r2, #12]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [r2, #16]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [r2, #24]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [r2, #28]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldmib r1, {r2, r8, lr}
+ ldm r7, {r4, r5, r6, r7}
+ ldr r10, [r1]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ ldr r1, [r1, #32]
+ subs r10, r10, r9
+ sbcs r9, r2, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ str r10, [sp] @ 4-byte Spill
+ str r9, [sp, #4] @ 4-byte Spill
+ sbcs r0, r8, r0
+ add r8, r3, #20
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ sbcs r0, lr, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ sbcs r0, r4, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ sbcs r0, r5, r11
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ sbcs r0, r6, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ sbcs r0, r7, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ sbcs r11, r1, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r11, [sp, #20] @ 4-byte Spill
+ sbcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ sbcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ sbcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ sbcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ sbc r0, r1, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r3, #32]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [r3, #36]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [r3, #40]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [r3, #44]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r3, #48]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r3, #52]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r3, {r2, r4, r6}
+ ldr r5, [r3, #12]
+ ldr lr, [r3, #16]
+ ldm r8, {r0, r7, r8}
+ ldr r3, [sp, #56] @ 4-byte Reload
+ adds r1, r10, r2
+ ldr r10, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #52] @ 4-byte Reload
+ adcs r4, r9, r4
+ adcs r6, r10, r6
+ adcs r2, r2, r5
+ ldr r5, [sp, #60] @ 4-byte Reload
+ adcs r3, r3, lr
+ adcs lr, r5, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r5, r0, r7
+ ldr r0, [sp, #44] @ 4-byte Reload
+ ldr r7, [sp, #16] @ 4-byte Reload
+ adcs r8, r0, r8
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r9, r11, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r11, r0, r7
+ ldr r0, [sp, #68] @ 4-byte Reload
+ ldr r7, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r7, r0, r7
+ str r7, [sp, #36] @ 4-byte Spill
+ asr r7, r0, #31
+ ldr r0, [sp] @ 4-byte Reload
+ cmp r7, #0
+ movge r6, r10
+ movge r1, r0
+ ldr r0, [sp, #4] @ 4-byte Reload
+ str r1, [r12]
+ ldr r1, [sp, #24] @ 4-byte Reload
+ movge r4, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ cmp r7, #0
+ str r4, [r12, #4]
+ str r6, [r12, #8]
+ movge r2, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ str r2, [r12, #12]
+ movge r3, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ str r3, [r12, #16]
+ movge lr, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ cmp r7, #0
+ str lr, [r12, #20]
+ movge r5, r0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ str r5, [r12, #24]
+ movge r8, r0
+ ldr r0, [sp, #20] @ 4-byte Reload
+ str r8, [r12, #28]
+ movge r9, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ cmp r7, #0
+ str r9, [r12, #32]
+ movge r11, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ str r11, [r12, #36]
+ movge r1, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ str r1, [r12, #40]
+ ldr r1, [sp, #28] @ 4-byte Reload
+ movge r1, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ cmp r7, #0
+ str r1, [r12, #44]
+ ldr r1, [sp, #32] @ 4-byte Reload
+ movge r1, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ str r1, [r12, #48]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ movge r0, r1
+ str r0, [r12, #52]
+ add sp, sp, #80
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end219:
+ .size mcl_fp_subNF14L, .Lfunc_end219-mcl_fp_subNF14L
+ .cantunwind
+ .fnend
+
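+@ Annotation (added): mcl_fpDbl_add14L appears to add two double-width
+@ (28-limb) values; the low 14 limbs are stored as-is and the high half is
+@ conditionally reduced by subtracting p (r3).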
+ .globl mcl_fpDbl_add14L
+ .align 2
+ .type mcl_fpDbl_add14L,%function
+mcl_fpDbl_add14L: @ @mcl_fpDbl_add14L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #168
+ sub sp, sp, #168
+ ldr r7, [r1]
+ ldmib r1, {r6, lr}
+ ldr r12, [r1, #12]
+ ldm r2, {r4, r5, r8, r9}
+ add r10, r1, #32
+ adds r4, r4, r7
+ str r4, [sp, #92] @ 4-byte Spill
+ ldr r4, [r2, #96]
+ str r4, [sp, #152] @ 4-byte Spill
+ ldr r4, [r2, #100]
+ str r4, [sp, #156] @ 4-byte Spill
+ ldr r4, [r2, #104]
+ str r4, [sp, #160] @ 4-byte Spill
+ ldr r4, [r2, #108]
+ str r4, [sp, #164] @ 4-byte Spill
+ adcs r4, r5, r6
+ adcs r7, r8, lr
+ str r4, [sp, #68] @ 4-byte Spill
+ add lr, r1, #16
+ str r7, [sp, #64] @ 4-byte Spill
+ adcs r7, r9, r12
+ str r7, [sp, #76] @ 4-byte Spill
+ ldr r7, [r2, #64]
+ str r7, [sp, #124] @ 4-byte Spill
+ ldr r7, [r2, #68]
+ str r7, [sp, #128] @ 4-byte Spill
+ ldr r7, [r2, #72]
+ str r7, [sp, #136] @ 4-byte Spill
+ ldr r7, [r2, #80]
+ str r7, [sp, #140] @ 4-byte Spill
+ ldr r7, [r2, #84]
+ str r7, [sp, #144] @ 4-byte Spill
+ ldr r7, [r2, #88]
+ str r7, [sp, #132] @ 4-byte Spill
+ ldr r7, [r2, #92]
+ str r7, [sp, #148] @ 4-byte Spill
+ ldr r7, [r2, #76]
+ str r7, [sp, #120] @ 4-byte Spill
+ ldr r7, [r2, #32]
+ str r7, [sp, #56] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r7, [sp, #60] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ str r7, [sp, #72] @ 4-byte Spill
+ ldr r7, [r2, #44]
+ str r7, [sp, #80] @ 4-byte Spill
+ ldr r7, [r2, #48]
+ str r7, [sp, #84] @ 4-byte Spill
+ ldr r7, [r2, #52]
+ str r7, [sp, #88] @ 4-byte Spill
+ ldr r7, [r2, #56]
+ str r7, [sp, #96] @ 4-byte Spill
+ ldr r7, [r2, #60]
+ str r7, [sp, #100] @ 4-byte Spill
+ ldr r7, [r2, #28]
+ str r7, [sp, #20] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ ldr r2, [r2, #16]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #96]
+ str r7, [sp, #12] @ 4-byte Spill
+ str r2, [sp, #104] @ 4-byte Spill
+ ldr r2, [r1, #100]
+ str r2, [sp, #108] @ 4-byte Spill
+ ldr r2, [r1, #104]
+ str r2, [sp, #112] @ 4-byte Spill
+ ldr r2, [r1, #108]
+ str r2, [sp, #116] @ 4-byte Spill
+ ldr r2, [r1, #64]
+ str r2, [sp, #28] @ 4-byte Spill
+ ldr r2, [r1, #68]
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r2, [r1, #72]
+ str r2, [sp, #36] @ 4-byte Spill
+ ldr r2, [r1, #80]
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [r1, #84]
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [r1, #88]
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r2, [r1, #92]
+ str r2, [sp, #52] @ 4-byte Spill
+ ldr r2, [r1, #76]
+ str r2, [sp, #24] @ 4-byte Spill
+ ldm r10, {r4, r5, r6, r10}
+ ldr r2, [r1, #56]
+ ldr r8, [r1, #48]
+ ldr r9, [r1, #52]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldm lr, {r1, r2, r12, lr}
+ ldr r11, [sp, #92] @ 4-byte Reload
+ ldr r7, [sp, #68] @ 4-byte Reload
+ str r11, [r0]
+ str r7, [r0, #4]
+ ldr r7, [sp, #64] @ 4-byte Reload
+ str r7, [r0, #8]
+ ldr r7, [sp] @ 4-byte Reload
+ adcs r1, r7, r1
+ ldr r7, [sp, #76] @ 4-byte Reload
+ str r7, [r0, #12]
+ ldr r7, [sp, #12] @ 4-byte Reload
+ str r1, [r0, #16]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r2, r7, r2
+ ldr r7, [sp, #4] @ 4-byte Reload
+ str r2, [r0, #20]
+ adcs r1, r1, r12
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [r0, #24]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r2, r2, lr
+ str r2, [r0, #28]
+ adcs r1, r1, r4
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r1, [r0, #32]
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r2, r2, r5
+ str r2, [r0, #36]
+ adcs r1, r1, r6
+ ldr r2, [sp, #80] @ 4-byte Reload
+ str r1, [r0, #40]
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r2, r2, r10
+ str r2, [r0, #44]
+ adcs r1, r1, r8
+ ldr r2, [sp, #88] @ 4-byte Reload
+ str r1, [r0, #48]
+ ldr r1, [sp, #96] @ 4-byte Reload
+ adcs r2, r2, r9
+ adcs r6, r1, r7
+ str r2, [r0, #52]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r2, [sp, #8] @ 4-byte Reload
+ str r6, [sp, #84] @ 4-byte Spill
+ adcs r5, r1, r2
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r5, [sp, #88] @ 4-byte Spill
+ adcs r4, r1, r2
+ ldr r1, [sp, #128] @ 4-byte Reload
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r4, [sp, #96] @ 4-byte Spill
+ adcs r7, r1, r2
+ ldr r1, [sp, #136] @ 4-byte Reload
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r7, [sp, #100] @ 4-byte Spill
+ adcs lr, r1, r2
+ ldr r1, [sp, #120] @ 4-byte Reload
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str lr, [sp, #92] @ 4-byte Spill
+ adcs r1, r1, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #136] @ 4-byte Spill
+ ldr r1, [sp, #140] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r1, [sp, #140] @ 4-byte Spill
+ ldr r1, [sp, #144] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r1, [sp, #144] @ 4-byte Spill
+ ldr r1, [sp, #132] @ 4-byte Reload
+ adcs r8, r1, r2
+ ldr r1, [sp, #148] @ 4-byte Reload
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r8, [sp, #124] @ 4-byte Spill
+ adcs r1, r1, r2
+ ldr r2, [sp, #104] @ 4-byte Reload
+ str r1, [sp, #148] @ 4-byte Spill
+ ldr r1, [sp, #152] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #108] @ 4-byte Reload
+ str r1, [sp, #152] @ 4-byte Spill
+ ldr r1, [sp, #156] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #112] @ 4-byte Reload
+ str r1, [sp, #156] @ 4-byte Spill
+ ldr r1, [sp, #160] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #116] @ 4-byte Reload
+ str r1, [sp, #160] @ 4-byte Spill
+ ldr r1, [sp, #164] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #164] @ 4-byte Spill
+ mov r1, #0
+ adc r1, r1, #0
+ str r1, [sp, #120] @ 4-byte Spill
+ ldmib r3, {r2, r12}
+ ldr r1, [r3, #16]
+ ldr r11, [r3]
+ ldr r9, [r3, #12]
+ ldr r10, [r3, #36]
+ str r1, [sp, #112] @ 4-byte Spill
+ ldr r1, [r3, #20]
+ subs r11, r6, r11
+ sbcs r2, r5, r2
+ sbcs r12, r4, r12
+ sbcs r4, r7, r9
+ ldr r7, [r3, #32]
+ str r1, [sp, #116] @ 4-byte Spill
+ ldr r1, [r3, #24]
+ ldr r6, [sp, #116] @ 4-byte Reload
+ str r1, [sp, #128] @ 4-byte Spill
+ ldr r1, [r3, #28]
+ ldr r5, [sp, #128] @ 4-byte Reload
+ str r1, [sp, #132] @ 4-byte Spill
+ ldr r1, [r3, #40]
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [r3, #44]
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [r3, #48]
+ str r1, [sp, #104] @ 4-byte Spill
+ ldr r1, [r3, #52]
+ str r1, [sp, #108] @ 4-byte Spill
+ ldr r1, [sp, #112] @ 4-byte Reload
+ sbcs r3, lr, r1
+ ldr r1, [sp, #136] @ 4-byte Reload
+ sbcs lr, r1, r6
+ ldr r1, [sp, #140] @ 4-byte Reload
+ ldr r6, [sp, #132] @ 4-byte Reload
+ sbcs r5, r1, r5
+ ldr r1, [sp, #144] @ 4-byte Reload
+ sbcs r6, r1, r6
+ ldr r1, [sp, #148] @ 4-byte Reload
+ sbcs r8, r8, r7
+ ldr r7, [sp, #76] @ 4-byte Reload
+ sbcs r9, r1, r10
+ ldr r1, [sp, #152] @ 4-byte Reload
+ sbcs r10, r1, r7
+ ldr r1, [sp, #156] @ 4-byte Reload
+ ldr r7, [sp, #80] @ 4-byte Reload
+ sbcs r1, r1, r7
+ ldr r7, [sp, #104] @ 4-byte Reload
+ str r1, [sp, #128] @ 4-byte Spill
+ ldr r1, [sp, #160] @ 4-byte Reload
+ sbcs r1, r1, r7
+ ldr r7, [sp, #108] @ 4-byte Reload
+ str r1, [sp, #132] @ 4-byte Spill
+ ldr r1, [sp, #164] @ 4-byte Reload
+ sbcs r1, r1, r7
+ ldr r7, [sp, #84] @ 4-byte Reload
+ str r1, [sp, #116] @ 4-byte Spill
+ ldr r1, [sp, #120] @ 4-byte Reload
+ sbc r1, r1, #0
+ ands r1, r1, #1
+ movne r11, r7
+ ldr r7, [sp, #88] @ 4-byte Reload
+ str r11, [r0, #56]
+ movne r2, r7
+ ldr r7, [sp, #116] @ 4-byte Reload
+ str r2, [r0, #60]
+ ldr r2, [sp, #96] @ 4-byte Reload
+ movne r12, r2
+ ldr r2, [sp, #100] @ 4-byte Reload
+ cmp r1, #0
+ str r12, [r0, #64]
+ movne r4, r2
+ ldr r2, [sp, #92] @ 4-byte Reload
+ str r4, [r0, #68]
+ movne r3, r2
+ ldr r2, [sp, #136] @ 4-byte Reload
+ str r3, [r0, #72]
+ ldr r3, [sp, #128] @ 4-byte Reload
+ movne lr, r2
+ ldr r2, [sp, #140] @ 4-byte Reload
+ cmp r1, #0
+ str lr, [r0, #76]
+ movne r5, r2
+ ldr r2, [sp, #144] @ 4-byte Reload
+ str r5, [r0, #80]
+ movne r6, r2
+ ldr r2, [sp, #124] @ 4-byte Reload
+ str r6, [r0, #84]
+ movne r8, r2
+ ldr r2, [sp, #148] @ 4-byte Reload
+ cmp r1, #0
+ str r8, [r0, #88]
+ movne r9, r2
+ ldr r2, [sp, #152] @ 4-byte Reload
+ str r9, [r0, #92]
+ movne r10, r2
+ ldr r2, [sp, #156] @ 4-byte Reload
+ str r10, [r0, #96]
+ movne r3, r2
+ cmp r1, #0
+ ldr r1, [sp, #160] @ 4-byte Reload
+ ldr r2, [sp, #132] @ 4-byte Reload
+ str r3, [r0, #100]
+ movne r2, r1
+ ldr r1, [sp, #164] @ 4-byte Reload
+ str r2, [r0, #104]
+ movne r7, r1
+ str r7, [r0, #108]
+ add sp, sp, #168
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end220:
+ .size mcl_fpDbl_add14L, .Lfunc_end220-mcl_fpDbl_add14L
+ .cantunwind
+ .fnend
+
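+@ Annotation (added): mcl_fpDbl_sub14L appears to subtract two double-width
+@ (28-limb) values; the low 14 limbs are stored as-is and p (r3) is
+@ conditionally added back to the high half when the subtraction borrowed.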
+ .globl mcl_fpDbl_sub14L
+ .align 2
+ .type mcl_fpDbl_sub14L,%function
+mcl_fpDbl_sub14L: @ @mcl_fpDbl_sub14L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #168
+ sub sp, sp, #168
+ ldr r7, [r2, #96]
+ add r9, r1, #32
+ str r7, [sp, #160] @ 4-byte Spill
+ ldr r7, [r2, #100]
+ str r7, [sp, #156] @ 4-byte Spill
+ ldr r7, [r2, #104]
+ str r7, [sp, #140] @ 4-byte Spill
+ ldr r7, [r2, #108]
+ str r7, [sp, #164] @ 4-byte Spill
+ ldr r7, [r2, #64]
+ str r7, [sp, #128] @ 4-byte Spill
+ ldr r7, [r2, #68]
+ str r7, [sp, #136] @ 4-byte Spill
+ ldr r7, [r2, #72]
+ str r7, [sp, #144] @ 4-byte Spill
+ ldr r7, [r2, #80]
+ str r7, [sp, #148] @ 4-byte Spill
+ ldr r7, [r2, #84]
+ str r7, [sp, #152] @ 4-byte Spill
+ ldr r7, [r2, #88]
+ str r7, [sp, #124] @ 4-byte Spill
+ ldr r7, [r2, #92]
+ str r7, [sp, #132] @ 4-byte Spill
+ ldr r7, [r2, #76]
+ str r7, [sp, #120] @ 4-byte Spill
+ ldr r7, [r2, #60]
+ str r7, [sp, #116] @ 4-byte Spill
+ ldr r7, [r2, #56]
+ str r7, [sp, #112] @ 4-byte Spill
+ ldr r7, [r2, #52]
+ str r7, [sp, #108] @ 4-byte Spill
+ ldr r7, [r2, #48]
+ str r7, [sp, #104] @ 4-byte Spill
+ ldm r2, {r5, r8, r12, lr}
+ ldr r6, [r1]
+ ldmib r1, {r4, r7, r10}
+ subs r5, r6, r5
+ sbcs r4, r4, r8
+ str r5, [sp, #32] @ 4-byte Spill
+ ldr r5, [r2, #44]
+ sbcs r7, r7, r12
+ str r4, [sp, #28] @ 4-byte Spill
+ ldr r4, [r2, #40]
+ str r7, [sp, #24] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r5, [sp, #84] @ 4-byte Spill
+ str r4, [sp, #80] @ 4-byte Spill
+ str r7, [sp, #48] @ 4-byte Spill
+ sbcs r7, r10, lr
+ ldr r10, [r2, #16]
+ add lr, r1, #16
+ str r7, [sp, #20] @ 4-byte Spill
+ ldr r7, [r2, #32]
+ str r7, [sp, #40] @ 4-byte Spill
+ ldr r7, [r2, #28]
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ ldr r2, [r1, #96]
+ str r2, [sp, #88] @ 4-byte Spill
+ ldr r2, [r1, #100]
+ str r7, [sp, #12] @ 4-byte Spill
+ str r2, [sp, #92] @ 4-byte Spill
+ ldr r2, [r1, #104]
+ str r2, [sp, #96] @ 4-byte Spill
+ ldr r2, [r1, #108]
+ str r2, [sp, #100] @ 4-byte Spill
+ ldr r2, [r1, #64]
+ str r2, [sp, #52] @ 4-byte Spill
+ ldr r2, [r1, #68]
+ str r2, [sp, #56] @ 4-byte Spill
+ ldr r2, [r1, #72]
+ str r2, [sp, #60] @ 4-byte Spill
+ ldr r2, [r1, #80]
+ str r2, [sp, #64] @ 4-byte Spill
+ ldr r2, [r1, #84]
+ str r2, [sp, #68] @ 4-byte Spill
+ ldr r2, [r1, #88]
+ str r2, [sp, #72] @ 4-byte Spill
+ ldr r2, [r1, #92]
+ str r2, [sp, #76] @ 4-byte Spill
+ ldr r2, [r1, #76]
+ str r2, [sp, #44] @ 4-byte Spill
+ ldm r9, {r4, r5, r6, r8, r9}
+ ldr r2, [r1, #52]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #56]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldm lr, {r1, r2, r12, lr}
+ ldr r11, [sp, #32] @ 4-byte Reload
+ ldr r7, [sp, #28] @ 4-byte Reload
+ str r11, [r0]
+ str r7, [r0, #4]
+ ldr r7, [sp, #24] @ 4-byte Reload
+ sbcs r1, r1, r10
+ str r7, [r0, #8]
+ ldr r7, [sp, #20] @ 4-byte Reload
+ str r7, [r0, #12]
+ ldr r7, [sp, #12] @ 4-byte Reload
+ str r1, [r0, #16]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ sbcs r2, r2, r7
+ ldr r7, [sp] @ 4-byte Reload
+ str r2, [r0, #20]
+ ldr r2, [sp, #36] @ 4-byte Reload
+ sbcs r1, r12, r1
+ str r1, [r0, #24]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ sbcs r2, lr, r2
+ add lr, r3, #8
+ str r2, [r0, #28]
+ ldr r2, [sp, #48] @ 4-byte Reload
+ sbcs r1, r4, r1
+ str r1, [r0, #32]
+ ldr r1, [sp, #80] @ 4-byte Reload
+ sbcs r2, r5, r2
+ str r2, [r0, #36]
+ ldr r2, [sp, #84] @ 4-byte Reload
+ sbcs r1, r6, r1
+ str r1, [r0, #40]
+ ldr r1, [sp, #104] @ 4-byte Reload
+ sbcs r2, r8, r2
+ str r2, [r0, #44]
+ ldr r2, [sp, #108] @ 4-byte Reload
+ sbcs r1, r9, r1
+ str r1, [r0, #48]
+ ldr r1, [sp, #112] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #4] @ 4-byte Reload
+ str r2, [r0, #52]
+ ldr r2, [sp, #8] @ 4-byte Reload
+ sbcs r10, r7, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ ldr r7, [sp, #60] @ 4-byte Reload
+ str r10, [sp, #80] @ 4-byte Spill
+ sbcs r11, r2, r1
+ ldr r1, [sp, #128] @ 4-byte Reload
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r11, [sp, #84] @ 4-byte Spill
+ sbcs r1, r2, r1
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r1, [sp, #104] @ 4-byte Spill
+ ldr r1, [sp, #136] @ 4-byte Reload
+ sbcs r1, r2, r1
+ ldr r2, [sp, #144] @ 4-byte Reload
+ str r1, [sp, #136] @ 4-byte Spill
+ mov r1, #0
+ sbcs r2, r7, r2
+ ldr r7, [sp, #44] @ 4-byte Reload
+ str r2, [sp, #128] @ 4-byte Spill
+ ldr r2, [sp, #120] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #64] @ 4-byte Reload
+ str r2, [sp, #144] @ 4-byte Spill
+ ldr r2, [sp, #148] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #68] @ 4-byte Reload
+ str r2, [sp, #148] @ 4-byte Spill
+ ldr r2, [sp, #152] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #72] @ 4-byte Reload
+ str r2, [sp, #152] @ 4-byte Spill
+ ldr r2, [sp, #124] @ 4-byte Reload
+ sbcs r9, r7, r2
+ ldr r2, [sp, #132] @ 4-byte Reload
+ ldr r7, [sp, #76] @ 4-byte Reload
+ str r9, [sp, #108] @ 4-byte Spill
+ sbcs r2, r7, r2
+ ldr r7, [sp, #88] @ 4-byte Reload
+ str r2, [sp, #132] @ 4-byte Spill
+ ldr r2, [sp, #160] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #92] @ 4-byte Reload
+ str r2, [sp, #160] @ 4-byte Spill
+ ldr r2, [sp, #156] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #96] @ 4-byte Reload
+ str r2, [sp, #156] @ 4-byte Spill
+ ldr r2, [sp, #140] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #100] @ 4-byte Reload
+ str r2, [sp, #140] @ 4-byte Spill
+ ldr r2, [sp, #164] @ 4-byte Reload
+ sbcs r2, r7, r2
+ sbc r1, r1, #0
+ str r2, [sp, #164] @ 4-byte Spill
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [r3, #32]
+ str r1, [sp, #92] @ 4-byte Spill
+ ldr r1, [r3, #36]
+ str r1, [sp, #96] @ 4-byte Spill
+ ldr r1, [r3, #40]
+ str r1, [sp, #112] @ 4-byte Spill
+ ldr r1, [r3, #44]
+ str r1, [sp, #116] @ 4-byte Spill
+ ldr r1, [r3, #48]
+ str r1, [sp, #120] @ 4-byte Spill
+ ldr r1, [r3, #52]
+ str r1, [sp, #124] @ 4-byte Spill
+ ldm r3, {r2, r5}
+ ldm lr, {r4, r6, lr}
+ ldr r7, [r3, #24]
+ ldr r8, [r3, #28]
+ ldr r12, [r3, #20]
+ ldr r3, [sp, #128] @ 4-byte Reload
+ adds r1, r10, r2
+ ldr r10, [sp, #104] @ 4-byte Reload
+ ldr r2, [sp, #136] @ 4-byte Reload
+ adcs r5, r11, r5
+ ldr r11, [sp, #124] @ 4-byte Reload
+ adcs r4, r10, r4
+ adcs r2, r2, r6
+ ldr r6, [sp, #144] @ 4-byte Reload
+ adcs r3, r3, lr
+ adcs r12, r6, r12
+ ldr r6, [sp, #148] @ 4-byte Reload
+ adcs lr, r6, r7
+ ldr r6, [sp, #152] @ 4-byte Reload
+ ldr r7, [sp, #132] @ 4-byte Reload
+ adcs r8, r6, r8
+ ldr r6, [sp, #92] @ 4-byte Reload
+ adcs r9, r9, r6
+ ldr r6, [sp, #96] @ 4-byte Reload
+ adcs r6, r7, r6
+ ldr r7, [sp, #160] @ 4-byte Reload
+ str r6, [sp, #96] @ 4-byte Spill
+ ldr r6, [sp, #112] @ 4-byte Reload
+ adcs r7, r7, r6
+ ldr r6, [sp, #116] @ 4-byte Reload
+ str r7, [sp, #112] @ 4-byte Spill
+ ldr r7, [sp, #156] @ 4-byte Reload
+ adcs r7, r7, r6
+ ldr r6, [sp, #120] @ 4-byte Reload
+ str r7, [sp, #116] @ 4-byte Spill
+ ldr r7, [sp, #140] @ 4-byte Reload
+ adcs r7, r7, r6
+ ldr r6, [sp, #80] @ 4-byte Reload
+ str r7, [sp, #120] @ 4-byte Spill
+ ldr r7, [sp, #164] @ 4-byte Reload
+ adc r7, r7, r11
+ str r7, [sp, #124] @ 4-byte Spill
+ ldr r7, [sp, #100] @ 4-byte Reload
+ ands r7, r7, #1
+ moveq r1, r6
+ moveq r4, r10
+ ldr r6, [sp, #124] @ 4-byte Reload
+ str r1, [r0, #56]
+ ldr r1, [sp, #84] @ 4-byte Reload
+ moveq r5, r1
+ ldr r1, [sp, #136] @ 4-byte Reload
+ cmp r7, #0
+ str r5, [r0, #60]
+ str r4, [r0, #64]
+ moveq r2, r1
+ ldr r1, [sp, #128] @ 4-byte Reload
+ str r2, [r0, #68]
+ ldr r2, [sp, #96] @ 4-byte Reload
+ moveq r3, r1
+ ldr r1, [sp, #144] @ 4-byte Reload
+ str r3, [r0, #72]
+ ldr r3, [sp, #116] @ 4-byte Reload
+ moveq r12, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ cmp r7, #0
+ str r12, [r0, #76]
+ moveq lr, r1
+ ldr r1, [sp, #152] @ 4-byte Reload
+ str lr, [r0, #80]
+ moveq r8, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r8, [r0, #84]
+ moveq r9, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ cmp r7, #0
+ str r9, [r0, #88]
+ moveq r2, r1
+ ldr r1, [sp, #160] @ 4-byte Reload
+ str r2, [r0, #92]
+ ldr r2, [sp, #112] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #156] @ 4-byte Reload
+ moveq r3, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ cmp r7, #0
+ ldr r7, [sp, #120] @ 4-byte Reload
+ moveq r7, r1
+ ldr r1, [sp, #164] @ 4-byte Reload
+ moveq r6, r1
+ add r1, r0, #96
+ stm r1, {r2, r3, r7}
+ str r6, [r0, #108]
+ add sp, sp, #168
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end221:
+ .size mcl_fpDbl_sub14L, .Lfunc_end221-mcl_fpDbl_sub14L
+ .cantunwind
+ .fnend
+
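+@ Annotation (added): .LmulPv480x32 appears to multiply the 15-limb (480-bit)
+@ integer at r1 by the 32-bit scalar in r2, writing the 16-limb product to r0.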
+ .align 2
+ .type .LmulPv480x32,%function
+.LmulPv480x32: @ @mulPv480x32
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r11, lr}
+ ldr r12, [r1]
+ ldmib r1, {r3, lr}
+ ldr r9, [r1, #12]
+ umull r4, r8, lr, r2
+ umull lr, r6, r12, r2
+ mov r5, r4
+ mov r7, r6
+ str lr, [r0]
+ umull lr, r12, r9, r2
+ umlal r7, r5, r3, r2
+ str r5, [r0, #8]
+ str r7, [r0, #4]
+ umull r5, r7, r3, r2
+ adds r3, r6, r5
+ adcs r3, r7, r4
+ adcs r3, r8, lr
+ str r3, [r0, #12]
+ ldr r3, [r1, #16]
+ umull r7, r6, r3, r2
+ adcs r3, r12, r7
+ str r3, [r0, #16]
+ ldr r3, [r1, #20]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #20]
+ ldr r3, [r1, #24]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #24]
+ ldr r3, [r1, #28]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #28]
+ ldr r3, [r1, #32]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #32]
+ ldr r3, [r1, #36]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #36]
+ ldr r3, [r1, #40]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #40]
+ ldr r3, [r1, #44]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #44]
+ ldr r3, [r1, #48]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #48]
+ ldr r3, [r1, #52]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #52]
+ ldr r1, [r1, #56]
+ umull r3, r7, r1, r2
+ adcs r1, r5, r3
+ str r1, [r0, #56]
+ adc r1, r7, #0
+ str r1, [r0, #60]
+ pop {r4, r5, r6, r7, r8, r9, r11, lr}
+ mov pc, lr
+.Lfunc_end222:
+ .size .LmulPv480x32, .Lfunc_end222-.LmulPv480x32
+ .cantunwind
+ .fnend
+
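+@ Annotation (added): mcl_fp_mulUnitPre15L appears to compute z = x * y for a
+@ 15-limb x and a single 32-bit unit y by calling .LmulPv480x32 into a stack
+@ buffer and copying the 16-limb result to z.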
+ .globl mcl_fp_mulUnitPre15L
+ .align 2
+ .type mcl_fp_mulUnitPre15L,%function
+mcl_fp_mulUnitPre15L: @ @mcl_fp_mulUnitPre15L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #84
+ sub sp, sp, #84
+ mov r4, r0
+ add r0, sp, #16
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #76]
+ add r11, sp, #48
+ add lr, sp, #20
+ ldr r9, [sp, #64]
+ ldr r10, [sp, #60]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #72]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #68]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r11, {r6, r8, r11}
+ ldr r7, [sp, #44]
+ ldr r5, [sp, #40]
+ ldr r1, [sp, #16]
+ ldm lr, {r0, r2, r3, r12, lr}
+ str r1, [r4]
+ stmib r4, {r0, r2, r3, r12, lr}
+ add r0, r4, #32
+ str r5, [r4, #24]
+ str r7, [r4, #28]
+ stm r0, {r6, r8, r11}
+ str r10, [r4, #44]
+ str r9, [r4, #48]
+ ldr r0, [sp, #4] @ 4-byte Reload
+ str r0, [r4, #52]
+ ldr r0, [sp, #8] @ 4-byte Reload
+ str r0, [r4, #56]
+ ldr r0, [sp, #12] @ 4-byte Reload
+ str r0, [r4, #60]
+ add sp, sp, #84
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end223:
+ .size mcl_fp_mulUnitPre15L, .Lfunc_end223-mcl_fp_mulUnitPre15L
+ .cantunwind
+ .fnend
+
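+@ Annotation (added): mcl_fpDbl_mulPre15L appears to compute the full 30-limb
+@ product of two 15-limb operands, issuing one .LmulPv480x32 call per limb of
+@ the second operand and accumulating the partial products with carries.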
+ .globl mcl_fpDbl_mulPre15L
+ .align 2
+ .type mcl_fpDbl_mulPre15L,%function
+mcl_fpDbl_mulPre15L: @ @mcl_fpDbl_mulPre15L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #44
+ sub sp, sp, #44
+ .pad #1024
+ sub sp, sp, #1024
+ mov r3, r2
+ mov r4, r0
+ add r0, sp, #1000
+ str r1, [sp, #96] @ 4-byte Spill
+ mov r8, r1
+ ldr r2, [r3]
+ str r3, [sp, #92] @ 4-byte Spill
+ str r4, [sp, #100] @ 4-byte Spill
+ mov r6, r3
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1060]
+ ldr r1, [sp, #1004]
+ ldr r2, [r6, #4]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #1056]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #1008]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #1052]
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #1012]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #1048]
+ str r1, [sp, #28] @ 4-byte Spill
+ mov r1, r8
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #1044]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #1040]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #1036]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #1032]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #1028]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1024]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1020]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1016]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1000]
+ str r0, [r4]
+ add r0, sp, #936
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #996]
+ add r10, sp, #960
+ add lr, sp, #936
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #992]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #988]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #984]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #980]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r5, r6, r7, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #24] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #4]
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r4, r1, r0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r6, r0
+ ldr r6, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ ldr r2, [r6, #8]
+ adcs r0, r7, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r8
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #872
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #932]
+ ldr r8, [sp, #872]
+ add r12, sp, #880
+ ldr lr, [sp, #912]
+ ldr r7, [sp, #908]
+ ldr r11, [sp, #904]
+ ldr r9, [sp, #900]
+ ldr r10, [sp, #876]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #928]
+ adds r4, r8, r4
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #924]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #920]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #916]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r12, {r0, r1, r2, r3, r12}
+ ldr r5, [sp, #100] @ 4-byte Reload
+ str r4, [r5, #8]
+ ldr r4, [sp, #52] @ 4-byte Reload
+ adcs r4, r10, r4
+ str r4, [sp, #32] @ 4-byte Spill
+ ldr r4, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #96] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r6, #12]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r4
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #808
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #868]
+ add r9, sp, #836
+ add lr, sp, #816
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #864]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #860]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #856]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #852]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r9, {r6, r7, r8, r9}
+ ldr r0, [sp, #808]
+ ldr r11, [sp, #812]
+ ldm lr, {r1, r2, r3, r12, lr}
+ ldr r10, [sp, #32] @ 4-byte Reload
+ adds r0, r0, r10
+ str r0, [r5, #12]
+ ldr r0, [sp, #52] @ 4-byte Reload
+ ldr r5, [sp, #92] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #16]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r6, r0
+ mov r6, r4
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r4
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #744
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #804]
+ add lr, sp, #768
+ add r12, sp, #748
+ ldr r11, [sp, #780]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #800]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #796]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #792]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #788]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #784]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm lr, {r9, r10, lr}
+ ldr r8, [sp, #744]
+ ldm r12, {r0, r1, r2, r3, r12}
+ ldr r4, [sp, #32] @ 4-byte Reload
+ ldr r7, [sp, #100] @ 4-byte Reload
+ adds r4, r8, r4
+ str r4, [r7, #16]
+ ldr r4, [sp, #52] @ 4-byte Reload
+ mov r7, r5
+ adcs r4, r0, r4
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #20]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r6
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #680
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #740]
+ ldr r9, [sp, #680]
+ add lr, sp, #684
+ ldr r10, [sp, #720]
+ ldr r8, [sp, #716]
+ ldr r11, [sp, #712]
+ ldr r6, [sp, #708]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #736]
+ adds r4, r9, r4
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #732]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #728]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #724]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r5, [sp, #100] @ 4-byte Reload
+ str r4, [r5, #20]
+ ldr r4, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #96] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r7, #24]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r4
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #616
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #676]
+ add r8, sp, #648
+ add lr, sp, #624
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #672]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #668]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #664]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #660]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r8, {r6, r7, r8}
+ ldr r10, [sp, #644]
+ ldr r0, [sp, #616]
+ ldr r11, [sp, #620]
+ ldm lr, {r1, r2, r3, r12, lr}
+ ldr r9, [sp, #32] @ 4-byte Reload
+ adds r0, r0, r9
+ str r0, [r5, #24]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ ldr r5, [sp, #92] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #28]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r4
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #552
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #612]
+ add r11, sp, #584
+ add r12, sp, #556
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #608]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #604]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #600]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #596]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r6, r7, r11}
+ ldr lr, [sp, #580]
+ ldr r9, [sp, #576]
+ ldr r10, [sp, #552]
+ ldm r12, {r0, r1, r2, r3, r12}
+ ldr r4, [sp, #32] @ 4-byte Reload
+ ldr r8, [sp, #100] @ 4-byte Reload
+ adds r4, r10, r4
+ str r4, [r8, #28]
+ ldr r4, [sp, #60] @ 4-byte Reload
+ adcs r4, r0, r4
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #32]
+ ldr r5, [sp, #96] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #488
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #548]
+ ldr r9, [sp, #488]
+ add lr, sp, #492
+ mov r6, r8
+ ldr r10, [sp, #524]
+ ldr r11, [sp, #520]
+ ldr r7, [sp, #516]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #544]
+ adds r4, r9, r4
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #540]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #536]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #532]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #528]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ str r4, [r6, #32]
+ ldr r4, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r4, #36]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #424
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #484]
+ add r8, sp, #456
+ add lr, sp, #432
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #480]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #476]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #472]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #468]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r8, {r5, r7, r8}
+ ldr r10, [sp, #452]
+ ldr r0, [sp, #424]
+ ldr r11, [sp, #428]
+ ldm lr, {r1, r2, r3, r12, lr}
+ ldr r9, [sp, #32] @ 4-byte Reload
+ adds r0, r0, r9
+ str r0, [r6, #36]
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r4, #40]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r5, r0
+ ldr r5, [sp, #96] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r7, r0
+ mov r7, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #360
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #420]
+ add r12, sp, #364
+ ldr r11, [sp, #396]
+ ldr r6, [sp, #392]
+ ldr lr, [sp, #388]
+ ldr r9, [sp, #384]
+ ldr r10, [sp, #360]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #416]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #412]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #408]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #404]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #400]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r12, {r0, r1, r2, r3, r12}
+ ldr r4, [sp, #32] @ 4-byte Reload
+ ldr r8, [sp, #100] @ 4-byte Reload
+ adds r4, r10, r4
+ str r4, [r8, #40]
+ ldr r4, [sp, #72] @ 4-byte Reload
+ adcs r4, r0, r4
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r7, #44]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ add r0, sp, #296
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #356]
+ ldr r9, [sp, #296]
+ add lr, sp, #300
+ mov r5, r8
+ ldr r10, [sp, #336]
+ ldr r7, [sp, #332]
+ ldr r11, [sp, #328]
+ ldr r6, [sp, #324]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #352]
+ adds r4, r9, r4
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #348]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #344]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #340]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ str r4, [r5, #44]
+ ldr r4, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r4, #48]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r6, r0
+ ldr r6, [sp, #96] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r6
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #232
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #292]
+ add lr, sp, #240
+ ldr r8, [sp, #268]
+ ldr r7, [sp, #264]
+ ldr r10, [sp, #260]
+ ldr r3, [sp, #232]
+ ldr r11, [sp, #236]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #288]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #284]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #280]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #276]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #272]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r12, lr}
+ ldr r9, [sp, #28] @ 4-byte Reload
+ adds r3, r3, r9
+ add r9, sp, #168
+ str r3, [r5, #48]
+ ldr r3, [r4, #52]
+ ldr r4, [sp, #88] @ 4-byte Reload
+ adcs r4, r11, r4
+ str r4, [sp, #32] @ 4-byte Spill
+ ldr r4, [sp, #84] @ 4-byte Reload
+ adcs r11, r0, r4
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #4] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r2, r0
+ mov r2, r3
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r6
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ mov r0, r9
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #228]
+ add r12, sp, #172
+ ldr r6, [sp, #204]
+ ldr r4, [sp, #200]
+ ldr lr, [sp, #196]
+ ldr r8, [sp, #192]
+ ldr r9, [sp, #188]
+ ldr r2, [sp, #168]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #224]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #220]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #216]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #212]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #208]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r12, {r0, r1, r3, r12}
+ ldr r7, [sp, #32] @ 4-byte Reload
+ adds r2, r2, r7
+ str r2, [r5, #52]
+ adcs r5, r0, r11
+ ldr r0, [sp, #88] @ 4-byte Reload
+ ldr r2, [sp, #92] @ 4-byte Reload
+ adcs r7, r1, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ ldr r1, [sp, #4] @ 4-byte Reload
+ ldr r2, [r2, #56]
+ adcs r10, r3, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r11, r12, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r4, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ add r0, sp, #104
+ bl .LmulPv480x32(PLT)
+ add r3, sp, #104
+ add r12, sp, #120
+ ldm r3, {r0, r1, r2, r3}
+ adds r6, r0, r5
+ ldr r0, [sp, #164]
+ adcs lr, r1, r7
+ adcs r4, r2, r10
+ adcs r7, r3, r11
+ add r11, sp, #136
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #160]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #156]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldm r11, {r5, r8, r9, r10, r11}
+ ldm r12, {r1, r2, r3, r12}
+ ldr r0, [sp, #100] @ 4-byte Reload
+ str r6, [r0, #56]
+ ldr r6, [sp, #28] @ 4-byte Reload
+ str lr, [r0, #60]
+ str r4, [r0, #64]
+ str r7, [r0, #68]
+ ldr r7, [sp, #80] @ 4-byte Reload
+ ldr r4, [sp, #56] @ 4-byte Reload
+ adcs r6, r1, r6
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r6, [r0, #72]
+ ldr r6, [sp, #76] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r1, [r0, #76]
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r2, r3, r2
+ ldr r3, [sp, #84] @ 4-byte Reload
+ str r2, [r0, #80]
+ ldr r2, [sp, #68] @ 4-byte Reload
+ adcs r1, r12, r1
+ str r1, [r0, #84]
+ ldr r1, [sp, #92] @ 4-byte Reload
+ adcs r12, r5, r2
+ ldr r2, [sp, #88] @ 4-byte Reload
+ ldr r5, [sp, #52] @ 4-byte Reload
+ adcs r1, r8, r1
+ str r12, [r0, #88]
+ add r12, r0, #92
+ adcs r2, r9, r2
+ adcs r3, r10, r3
+ adcs r7, r11, r7
+ adcs r6, r5, r6
+ ldr r5, [sp, #72] @ 4-byte Reload
+ adcs r5, r4, r5
+ ldr r4, [sp, #96] @ 4-byte Reload
+ stm r12, {r1, r2, r3, r7}
+ str r6, [r0, #108]
+ str r5, [r0, #112]
+ adc r4, r4, #0
+ str r4, [r0, #116]
+ add sp, sp, #44
+ add sp, sp, #1024
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end224:
+ .size mcl_fpDbl_mulPre15L, .Lfunc_end224-mcl_fpDbl_mulPre15L
+ .cantunwind
+ .fnend
+
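+@ mcl_fpDbl_sqrPre15L: presumably the double-width (non-reduced) squaring of a
+@ 15-limb (15 x 32-bit = 480-bit) operand. Like mcl_fpDbl_mulPre15L above, it
+@ feeds successive 32-bit limbs of the input (ldr r2, [r5, #k]) into
+@ .LmulPv480x32 and folds the partial products together with adcs chains,
+@ writing the 30-limb (960-bit) result through the output pointer spilled at
+@ [sp, #100].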
+ .globl mcl_fpDbl_sqrPre15L
+ .align 2
+ .type mcl_fpDbl_sqrPre15L,%function
+mcl_fpDbl_sqrPre15L: @ @mcl_fpDbl_sqrPre15L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #44
+ sub sp, sp, #44
+ .pad #1024
+ sub sp, sp, #1024
+ mov r5, r1
+ mov r4, r0
+ add r0, sp, #1000
+ ldr r2, [r5]
+ str r4, [sp, #100] @ 4-byte Spill
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1060]
+ ldr r1, [sp, #1004]
+ ldr r2, [r5, #4]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #1056]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #1008]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #1052]
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #1012]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #1048]
+ str r1, [sp, #36] @ 4-byte Spill
+ mov r1, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #1044]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #1040]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #1036]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #1032]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #1028]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #1024]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1020]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1016]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1000]
+ str r0, [r4]
+ add r0, sp, #936
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #996]
+ add r10, sp, #960
+ add lr, sp, #936
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #992]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #988]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #984]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #980]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm r10, {r6, r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #32] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #4]
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r4, r1, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r1, [sp, #20] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #8]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #872
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #932]
+ add r12, sp, #896
+ ldr lr, [sp, #912]
+ ldr r6, [sp, #908]
+ add r10, sp, #876
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #928]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #924]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #920]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #916]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r12, {r9, r11, r12}
+ ldr r8, [sp, #872]
+ ldm r10, {r0, r1, r2, r3, r10}
+ ldr r7, [sp, #100] @ 4-byte Reload
+ adds r4, r8, r4
+ str r4, [r7, #8]
+ ldr r4, [sp, #60] @ 4-byte Reload
+ adcs r4, r0, r4
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #12]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #808
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #868]
+ add r10, sp, #836
+ add lr, sp, #812
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #864]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #860]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #856]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #852]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm r10, {r6, r8, r9, r10}
+ ldr r11, [sp, #808]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r7, r11, r4
+ ldr r4, [sp, #100] @ 4-byte Reload
+ str r7, [r4, #12]
+ ldr r7, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #16]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #744
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #804]
+ add r8, sp, #776
+ add lr, sp, #764
+ add r12, sp, #744
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #800]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #796]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #792]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #788]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm r8, {r6, r7, r8}
+ ldm lr, {r9, r10, lr}
+ ldm r12, {r0, r1, r2, r3, r12}
+ ldr r11, [sp, #40] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #16]
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #20]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #680
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #740]
+ add r8, sp, #712
+ add lr, sp, #684
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #736]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #732]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #728]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #724]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm r8, {r4, r6, r8}
+ ldr r11, [sp, #708]
+ ldr r10, [sp, #680]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #40] @ 4-byte Reload
+ ldr r9, [sp, #100] @ 4-byte Reload
+ adds r7, r10, r7
+ str r7, [r9, #20]
+ ldr r7, [sp, #60] @ 4-byte Reload
+ adcs r7, r0, r7
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #24]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r4, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #616
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #676]
+ add r10, sp, #644
+ add lr, sp, #620
+ mov r4, r9
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #672]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #668]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #664]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #660]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #656]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r6, r8, r10}
+ ldr r11, [sp, #616]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r7, r11, r7
+ str r7, [r4, #24]
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #28]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #552
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #612]
+ add r8, sp, #584
+ add lr, sp, #572
+ add r12, sp, #552
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #608]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #604]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #600]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #596]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm r8, {r6, r7, r8}
+ ldm lr, {r9, r10, lr}
+ ldm r12, {r0, r1, r2, r3, r12}
+ ldr r11, [sp, #40] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #28]
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #32]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #488
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #548]
+ add r8, sp, #520
+ add lr, sp, #492
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #544]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #540]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #536]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #532]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm r8, {r4, r6, r8}
+ ldr r11, [sp, #516]
+ ldr r10, [sp, #488]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #40] @ 4-byte Reload
+ ldr r9, [sp, #100] @ 4-byte Reload
+ adds r7, r10, r7
+ str r7, [r9, #32]
+ ldr r7, [sp, #72] @ 4-byte Reload
+ adcs r7, r0, r7
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #36]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r4, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #424
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #484]
+ add r10, sp, #452
+ add lr, sp, #428
+ mov r4, r9
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #480]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #476]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #472]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #468]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #464]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r6, r8, r10}
+ ldr r11, [sp, #424]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r7, r11, r7
+ str r7, [r4, #36]
+ ldr r7, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #40]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #360
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #420]
+ add r8, sp, #392
+ add lr, sp, #380
+ add r12, sp, #360
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #416]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #412]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #408]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #404]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r8, {r6, r7, r8}
+ ldm lr, {r9, r10, lr}
+ ldm r12, {r0, r1, r2, r3, r12}
+ ldr r11, [sp, #40] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #40]
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #44]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, sp, #296
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #356]
+ add r9, sp, #328
+ add lr, sp, #300
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #352]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #348]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #344]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #340]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r9, {r6, r8, r9}
+ ldr r11, [sp, #324]
+ ldr r10, [sp, #296]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #36] @ 4-byte Reload
+ adds r7, r10, r7
+ str r7, [r4, #44]
+ ldr r7, [sp, #84] @ 4-byte Reload
+ adcs r7, r0, r7
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #48]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #232
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #292]
+ add r11, sp, #256
+ add lr, sp, #236
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #288]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #284]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #280]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #276]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r11, {r6, r8, r9, r10, r11}
+ ldr r12, [sp, #232]
+ ldm lr, {r0, r1, r2, r3, lr}
+ adds r7, r12, r7
+ ldr r12, [r5, #52]
+ str r7, [r4, #48]
+ ldr r7, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r7, r1, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r2, r0
+ mov r2, r12
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #168
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #228]
+ add lr, sp, #196
+ add r12, sp, #172
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #224]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #220]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #216]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #212]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #208]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm lr, {r8, r11, lr}
+ ldr r9, [sp, #192]
+ ldr r10, [sp, #188]
+ ldr r2, [sp, #168]
+ ldm r12, {r0, r1, r3, r12}
+ ldr r6, [sp, #40] @ 4-byte Reload
+ adds r2, r2, r6
+ add r6, sp, #104
+ str r2, [r4, #52]
+ adcs r4, r0, r7
+ ldr r0, [sp, #96] @ 4-byte Reload
+ ldr r2, [r5, #56]
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r7, r3, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ mov r0, r6
+ bl .LmulPv480x32(PLT)
+ add r3, sp, #104
+ add r11, sp, #136
+ add r12, sp, #120
+ ldm r3, {r0, r1, r2, r3}
+ adds r6, r0, r4
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs lr, r1, r0
+ ldr r0, [sp, #4] @ 4-byte Reload
+ adcs r4, r2, r7
+ adcs r7, r3, r0
+ ldr r0, [sp, #164]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #160]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #156]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldm r11, {r5, r8, r9, r10, r11}
+ ldm r12, {r1, r2, r3, r12}
+ ldr r0, [sp, #100] @ 4-byte Reload
+ str r6, [r0, #56]
+ ldr r6, [sp, #36] @ 4-byte Reload
+ str lr, [r0, #60]
+ str r4, [r0, #64]
+ str r7, [r0, #68]
+ ldr r7, [sp, #84] @ 4-byte Reload
+ ldr r4, [sp, #56] @ 4-byte Reload
+ adcs r6, r1, r6
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r6, [r0, #72]
+ ldr r6, [sp, #80] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #64] @ 4-byte Reload
+ str r1, [r0, #76]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r2, r3, r2
+ ldr r3, [sp, #88] @ 4-byte Reload
+ str r2, [r0, #80]
+ ldr r2, [sp, #72] @ 4-byte Reload
+ adcs r1, r12, r1
+ str r1, [r0, #84]
+ ldr r1, [sp, #96] @ 4-byte Reload
+ adcs r12, r5, r2
+ ldr r2, [sp, #92] @ 4-byte Reload
+ ldr r5, [sp, #52] @ 4-byte Reload
+ adcs r1, r8, r1
+ str r12, [r0, #88]
+ add r12, r0, #92
+ adcs r2, r9, r2
+ adcs r3, r10, r3
+ adcs r7, r11, r7
+ adcs r6, r5, r6
+ ldr r5, [sp, #76] @ 4-byte Reload
+ adcs r5, r4, r5
+ ldr r4, [sp, #60] @ 4-byte Reload
+ stm r12, {r1, r2, r3, r7}
+ str r6, [r0, #108]
+ str r5, [r0, #112]
+ adc r4, r4, #0
+ str r4, [r0, #116]
+ add sp, sp, #44
+ add sp, sp, #1024
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end225:
+ .size mcl_fpDbl_sqrPre15L, .Lfunc_end225-mcl_fpDbl_sqrPre15L
+ .cantunwind
+ .fnend
+
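+@ mcl_fp_mont15L: presumably Montgomery multiplication for 15-limb (480-bit)
+@ operands. Each round below multiplies the running sum's low limb by the
+@ constant loaded from [r3, #-4] (mul r2, r5, r6), which in Montgomery
+@ reduction would be -p^-1 mod 2^32, calls .LmulPv480x32 against the modulus
+@ to clear that low limb, then calls it again with the next limb of the
+@ multiplier and accumulates the results with adcs chains.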
+ .globl mcl_fp_mont15L
+ .align 2
+ .type mcl_fp_mont15L,%function
+mcl_fp_mont15L: @ @mcl_fp_mont15L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #12
+ sub sp, sp, #12
+ .pad #2048
+ sub sp, sp, #2048
+ add r12, sp, #124
+ add r7, sp, #1024
+ mov r4, r3
+ stm r12, {r1, r2, r3}
+ str r0, [sp, #88] @ 4-byte Spill
+ add r0, r7, #968
+ ldr r6, [r3, #-4]
+ ldr r2, [r2]
+ str r6, [sp, #120] @ 4-byte Spill
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1996]
+ ldr r5, [sp, #1992]
+ add r7, sp, #1024
+ mov r1, r4
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #2000]
+ mul r2, r5, r6
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #2004]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #2052]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #2048]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #2044]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #2040]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #2036]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #2032]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #2028]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #2024]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #2020]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #2016]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #2012]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #2008]
+ str r0, [sp, #48] @ 4-byte Spill
+ add r0, r7, #904
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1988]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r9, [sp, #1952]
+ ldr r6, [sp, #1948]
+ ldr r8, [sp, #1944]
+ ldr r4, [sp, #1928]
+ ldr r10, [sp, #1932]
+ ldr r11, [sp, #1936]
+ ldr r7, [sp, #1940]
+ add lr, sp, #1024
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #1984]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1980]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1976]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1972]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1968]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1964]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1960]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1956]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r2, [r0, #4]
+ add r0, lr, #840
+ bl .LmulPv480x32(PLT)
+ adds r0, r4, r5
+ ldr r1, [sp, #64] @ 4-byte Reload
+ ldr r2, [sp, #20] @ 4-byte Reload
+ ldr r3, [sp, #1880]
+ ldr r12, [sp, #1884]
+ ldr lr, [sp, #1888]
+ ldr r4, [sp, #1892]
+ ldr r5, [sp, #1896]
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r10, r0
+ ldr r10, [sp, #1908]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r11, r0
+ ldr r11, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r7, r0
+ ldr r7, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r8, r0
+ ldr r8, [sp, #1900]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r6, r0
+ ldr r6, [sp, #1864]
+ adcs r1, r9, r1
+ str r0, [sp, #56] @ 4-byte Spill
+ mov r0, #0
+ ldr r9, [sp, #1904]
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #96] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #100] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #104] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #108] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #112] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #116] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #1876]
+ adc r0, r0, #0
+ adds r6, r11, r6
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #1872]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1924]
+ str r6, [sp, #24] @ 4-byte Spill
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1920]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1916]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1912]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #1868]
+ adcs r0, r7, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #1024
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, r10, #776
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1860]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r6, [sp, #1828]
+ ldr r11, [sp, #1824]
+ ldr r8, [sp, #1820]
+ ldr r4, [sp, #1816]
+ ldr r5, [sp, #1800]
+ ldr r7, [sp, #1804]
+ ldr r9, [sp, #1808]
+ ldr r10, [sp, #1812]
+ add lr, sp, #1024
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1856]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1852]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1848]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1844]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1840]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1836]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1832]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r2, [r0, #8]
+ add r0, lr, #712
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #1748]
+ ldr r3, [sp, #1752]
+ ldr r12, [sp, #1756]
+ ldr lr, [sp, #1760]
+ adds r0, r0, r5
+ ldr r5, [sp, #1768]
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1776]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r7, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1780]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1764]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1772]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1736]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1744]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r6, r11, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1796]
+ str r6, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1792]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1788]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1784]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1740]
+ adcs r0, r7, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #1024
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, r10, #648
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1732]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r6, [sp, #1700]
+ ldr r11, [sp, #1696]
+ ldr r8, [sp, #1692]
+ ldr r4, [sp, #1688]
+ ldr r5, [sp, #1672]
+ ldr r7, [sp, #1676]
+ ldr r9, [sp, #1680]
+ ldr r10, [sp, #1684]
+ add lr, sp, #1024
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1728]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1724]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1720]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1716]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1712]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1708]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1704]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r2, [r0, #12]
+ add r0, lr, #584
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #1620]
+ ldr r3, [sp, #1624]
+ ldr r12, [sp, #1628]
+ ldr lr, [sp, #1632]
+ adds r0, r0, r5
+ ldr r5, [sp, #1640]
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1648]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r7, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1652]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1636]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1644]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1608]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1616]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r6, r11, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1668]
+ str r6, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1664]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1660]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1656]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1612]
+ adcs r0, r7, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #1024
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, r10, #520
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1604]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r6, [sp, #1572]
+ ldr r11, [sp, #1568]
+ ldr r8, [sp, #1564]
+ ldr r4, [sp, #1560]
+ ldr r5, [sp, #1544]
+ ldr r7, [sp, #1548]
+ ldr r9, [sp, #1552]
+ ldr r10, [sp, #1556]
+ add lr, sp, #1024
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1600]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1596]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1592]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1588]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1584]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1580]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1576]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r2, [r0, #16]
+ add r0, lr, #456
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #1492]
+ ldr r3, [sp, #1496]
+ ldr r12, [sp, #1500]
+ ldr lr, [sp, #1504]
+ adds r0, r0, r5
+ ldr r5, [sp, #1512]
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1520]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r7, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1524]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1508]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1516]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1480]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1488]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r6, r11, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1540]
+ str r6, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1536]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1532]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1528]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1484]
+ adcs r0, r7, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #1024
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, r10, #392
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1476]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r6, [sp, #1444]
+ ldr r11, [sp, #1440]
+ ldr r8, [sp, #1436]
+ ldr r4, [sp, #1432]
+ ldr r5, [sp, #1416]
+ ldr r7, [sp, #1420]
+ ldr r9, [sp, #1424]
+ ldr r10, [sp, #1428]
+ add lr, sp, #1024
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1472]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1468]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1464]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1460]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1456]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1452]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1448]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r2, [r0, #20]
+ add r0, lr, #328
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #1364]
+ ldr r3, [sp, #1368]
+ ldr r12, [sp, #1372]
+ ldr lr, [sp, #1376]
+ adds r0, r0, r5
+ ldr r5, [sp, #1384]
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1392]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r7, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1396]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1380]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1388]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1352]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1360]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r6, r11, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1412]
+ str r6, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1408]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1404]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1400]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1356]
+ adcs r0, r7, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #1024
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, r10, #264
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1348]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r6, [sp, #1316]
+ ldr r11, [sp, #1312]
+ ldr r8, [sp, #1308]
+ ldr r4, [sp, #1304]
+ ldr r5, [sp, #1288]
+ ldr r7, [sp, #1292]
+ ldr r9, [sp, #1296]
+ ldr r10, [sp, #1300]
+ add lr, sp, #1024
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1344]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1340]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1336]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1332]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1328]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1324]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1320]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r2, [r0, #24]
+ add r0, lr, #200
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #1236]
+ ldr r3, [sp, #1240]
+ ldr r12, [sp, #1244]
+ ldr lr, [sp, #1248]
+ adds r0, r0, r5
+ ldr r5, [sp, #1256]
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1264]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r7, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1268]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1252]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1260]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1224]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1232]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r6, r11, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1284]
+ str r6, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1280]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1276]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1272]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1228]
+ adcs r0, r7, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #1024
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, r10, #136
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1220]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r6, [sp, #1188]
+ ldr r11, [sp, #1184]
+ ldr r8, [sp, #1180]
+ ldr r4, [sp, #1176]
+ ldr r5, [sp, #1160]
+ ldr r7, [sp, #1164]
+ ldr r9, [sp, #1168]
+ ldr r10, [sp, #1172]
+ add lr, sp, #1024
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1216]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1212]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1208]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1204]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1200]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1196]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1192]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r2, [r0, #28]
+ add r0, lr, #72
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #1108]
+ ldr r3, [sp, #1112]
+ ldr r12, [sp, #1116]
+ ldr lr, [sp, #1120]
+ adds r0, r0, r5
+ ldr r5, [sp, #1128]
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1136]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r7, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1140]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1124]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1132]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1096]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1104]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r6, r11, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1156]
+ str r6, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1152]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1148]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1144]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1100]
+ adcs r0, r7, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #1024
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, r10, #8
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1092]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r6, [sp, #1060]
+ ldr r11, [sp, #1056]
+ ldr r8, [sp, #1052]
+ ldr r4, [sp, #1048]
+ ldr r5, [sp, #1032]
+ ldr r7, [sp, #1036]
+ ldr r9, [sp, #1040]
+ ldr r10, [sp, #1044]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1088]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1084]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1080]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1076]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1072]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1068]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1064]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r2, [r0, #32]
+ add r0, sp, #968
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ add lr, sp, #972
+ adds r0, r0, r5
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #996
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1028]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1024]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1020]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1016]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r6, [sp, #968]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #116] @ 4-byte Reload
+ ldr r7, [sp, #112] @ 4-byte Reload
+ adds r6, r11, r6
+ adcs r0, r7, r0
+ str r6, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #904
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #964]
+ add r11, sp, #920
+ add r10, sp, #904
+ ldr r6, [sp, #932]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #960]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #956]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #952]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #948]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #944]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #940]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #936]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r11, {r4, r8, r11}
+ ldm r10, {r5, r7, r9, r10}
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r2, [r0, #36]
+ add r0, sp, #840
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ add lr, sp, #844
+ adds r0, r0, r5
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #880
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #868
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #900]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #896]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #892]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldm r11, {r4, r5, r11}
+ ldr r6, [sp, #840]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #116] @ 4-byte Reload
+ adds r6, r7, r6
+ ldr r7, [sp, #112] @ 4-byte Reload
+ str r6, [sp, #32] @ 4-byte Spill
+ adcs r0, r7, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ mul r2, r6, r11
+ adcs r0, r0, r8
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ add r0, sp, #776
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #836]
+ add r10, sp, #776
+ ldr r4, [sp, #800]
+ ldr r5, [sp, #796]
+ ldr r6, [sp, #792]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #832]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #828]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #824]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #820]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #816]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #812]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #808]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #804]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r7, r8, r9, r10}
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r2, [r0, #40]
+ add r0, sp, #712
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #112] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ add lr, sp, #728
+ adds r0, r0, r7
+ ldr r7, [sp, #724]
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r8
+ adcs r1, r1, r9
+ str r1, [sp, #112] @ 4-byte Spill
+ ldr r1, [sp, #108] @ 4-byte Reload
+ adcs r1, r1, r10
+ add r10, sp, #752
+ str r1, [sp, #108] @ 4-byte Spill
+ ldr r1, [sp, #104] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #716]
+ str r1, [sp, #104] @ 4-byte Spill
+ ldr r1, [sp, #100] @ 4-byte Reload
+ adcs r1, r1, r5
+ ldr r5, [sp, #720]
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [sp, #96] @ 4-byte Reload
+ adcs r1, r1, r4
+ ldr r4, [sp, #712]
+ str r1, [sp, #96] @ 4-byte Spill
+ ldr r1, [sp, #92] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #92] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adc r1, r1, #0
+ adds r0, r0, r4
+ str r1, [sp, #52] @ 4-byte Spill
+ mul r1, r0, r11
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #772]
+ str r1, [sp, #44] @ 4-byte Spill
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #768]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldm r10, {r4, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #112] @ 4-byte Reload
+ adcs r6, r11, r6
+ str r6, [sp, #112] @ 4-byte Spill
+ ldr r6, [sp, #108] @ 4-byte Reload
+ adcs r5, r6, r5
+ str r5, [sp, #108] @ 4-byte Spill
+ ldr r5, [sp, #104] @ 4-byte Reload
+ adcs r5, r5, r7
+ str r5, [sp, #104] @ 4-byte Spill
+ ldr r5, [sp, #100] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ add r0, sp, #648
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #708]
+ add r10, sp, #648
+ ldr r11, [sp, #676]
+ ldr r4, [sp, #672]
+ ldr r6, [sp, #668]
+ ldr r5, [sp, #664]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #704]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #700]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #696]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #692]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #688]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #684]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #680]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r7, r8, r9, r10}
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r2, [r0, #44]
+ add r0, sp, #584
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #116] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [sp, #16] @ 4-byte Reload
+ add lr, sp, #600
+ adds r0, r0, r7
+ add r7, sp, #584
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r8
+ adcs r1, r1, r9
+ str r1, [sp, #112] @ 4-byte Spill
+ ldr r1, [sp, #104] @ 4-byte Reload
+ adcs r1, r1, r10
+ add r10, sp, #624
+ str r1, [sp, #108] @ 4-byte Spill
+ ldr r1, [sp, #100] @ 4-byte Reload
+ adcs r1, r1, r5
+ str r1, [sp, #104] @ 4-byte Spill
+ ldr r1, [sp, #96] @ 4-byte Reload
+ adcs r1, r1, r6
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [sp, #92] @ 4-byte Reload
+ adcs r1, r1, r4
+ str r1, [sp, #96] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r1, r11
+ str r1, [sp, #92] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adc r1, r1, #0
+ str r1, [sp, #32] @ 4-byte Spill
+ ldm r7, {r4, r5, r6, r7}
+ adds r1, r0, r4
+ ldr r0, [sp, #120] @ 4-byte Reload
+ str r1, [sp, #116] @ 4-byte Spill
+ mul r2, r1, r0
+ ldr r0, [sp, #644]
+ str r2, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #640]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm r10, {r4, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #112] @ 4-byte Reload
+ adcs r5, r11, r5
+ str r5, [sp, #64] @ 4-byte Spill
+ ldr r5, [sp, #108] @ 4-byte Reload
+ adcs r5, r5, r6
+ str r5, [sp, #60] @ 4-byte Spill
+ ldr r5, [sp, #104] @ 4-byte Reload
+ adcs r5, r5, r7
+ str r5, [sp, #56] @ 4-byte Spill
+ ldr r5, [sp, #100] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ add r0, sp, #520
+ bl .LmulPv480x32(PLT)
+ ldr r1, [sp, #580]
+ add r11, sp, #524
+ ldr r10, [sp, #548]
+ ldr r5, [sp, #544]
+ ldr r6, [sp, #540]
+ ldr r7, [sp, #520]
+ add r0, sp, #456
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #576]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #572]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #568]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #564]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #560]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #556]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #552]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldm r11, {r4, r8, r9, r11}
+ ldr r1, [sp, #128] @ 4-byte Reload
+ ldr r2, [r1, #48]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #116] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ add lr, sp, #460
+ adds r0, r0, r7
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #484
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #516]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #512]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #508]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #504]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r6, [sp, #456]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #116] @ 4-byte Reload
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adds r6, r11, r6
+ adcs r0, r7, r0
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #392
+ bl .LmulPv480x32(PLT)
+ ldr r1, [sp, #452]
+ ldr r6, [sp, #420]
+ ldr r7, [sp, #416]
+ ldr r9, [sp, #412]
+ ldr r4, [sp, #408]
+ ldr r10, [sp, #392]
+ ldr r11, [sp, #396]
+ ldr r8, [sp, #400]
+ ldr r5, [sp, #404]
+ add r0, sp, #328
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #448]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #444]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #440]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #436]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #432]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #428]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #424]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #128] @ 4-byte Reload
+ ldr r2, [r1, #52]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ add lr, sp, #332
+ adds r0, r0, r10
+ add r10, sp, #356
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #388]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #384]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #380]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #376]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r6, [sp, #328]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #68] @ 4-byte Reload
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adds r6, r11, r6
+ adcs r0, r7, r0
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #264
+ bl .LmulPv480x32(PLT)
+ ldr r1, [sp, #324]
+ add r9, sp, #276
+ ldr r6, [sp, #292]
+ ldr r7, [sp, #288]
+ ldr r10, [sp, #264]
+ ldr r11, [sp, #268]
+ ldr r5, [sp, #272]
+ add r0, sp, #200
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #320]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #316]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #312]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #308]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #304]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #300]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #296]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldm r9, {r4, r8, r9}
+ ldr r1, [sp, #128] @ 4-byte Reload
+ ldr r2, [r1, #56]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #64] @ 4-byte Reload
+ ldr r2, [sp, #16] @ 4-byte Reload
+ add lr, sp, #216
+ adds r0, r0, r10
+ ldr r10, [sp, #212]
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r11
+ adcs r1, r1, r5
+ ldr r5, [sp, #208]
+ str r1, [sp, #128] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r4
+ ldr r4, [sp, #200]
+ str r1, [sp, #124] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, r8
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r9
+ add r9, sp, #240
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #116] @ 4-byte Reload
+ adcs r1, r1, r7
+ str r1, [sp, #116] @ 4-byte Spill
+ ldr r1, [sp, #112] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #204]
+ str r1, [sp, #112] @ 4-byte Spill
+ ldr r1, [sp, #108] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #108] @ 4-byte Spill
+ ldr r1, [sp, #104] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #104] @ 4-byte Spill
+ ldr r1, [sp, #100] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [sp, #96] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #96] @ 4-byte Spill
+ ldr r1, [sp, #92] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #92] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adc r1, r1, #0
+ adds r7, r0, r4
+ ldr r0, [sp, #120] @ 4-byte Reload
+ str r1, [sp, #72] @ 4-byte Spill
+ mul r1, r7, r0
+ ldr r0, [sp, #260]
+ str r1, [sp, #60] @ 4-byte Spill
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #256]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #252]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldm r9, {r4, r8, r9}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #128] @ 4-byte Reload
+ adcs r11, r11, r6
+ ldr r6, [sp, #124] @ 4-byte Reload
+ adcs r5, r6, r5
+ ldr r6, [sp, #68] @ 4-byte Reload
+ adcs r10, r6, r10
+ ldr r6, [sp, #64] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r8, r0, r8
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r6, r0, r9
+ ldr r0, [sp, #80] @ 4-byte Reload
+ ldr r9, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r9
+ str r0, [sp, #128] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ add r0, sp, #136
+ bl .LmulPv480x32(PLT)
+ add r3, sp, #136
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r7, r0
+ adcs r11, r11, r1
+ ldr r0, [sp, #152]
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs lr, r5, r2
+ mov r5, r9
+ str r11, [sp, #44] @ 4-byte Spill
+ adcs r10, r10, r3
+ str lr, [sp, #52] @ 4-byte Spill
+ str r10, [sp, #60] @ 4-byte Spill
+ adcs r4, r1, r0
+ ldr r0, [sp, #156]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r4, [sp, #76] @ 4-byte Spill
+ adcs r12, r1, r0
+ ldr r0, [sp, #160]
+ ldr r1, [sp, #64] @ 4-byte Reload
+ str r12, [sp, #56] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #164]
+ adcs r0, r1, r0
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #168]
+ adcs r0, r1, r0
+ ldr r1, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #172]
+ adcs r0, r1, r0
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #176]
+ adcs r0, r1, r0
+ ldr r1, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #180]
+ adcs r0, r8, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #184]
+ adcs r0, r6, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #188]
+ adcs r0, r1, r0
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #192]
+ adcs r0, r1, r0
+ ldr r1, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #196]
+ adcs r0, r1, r0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldmib r5, {r1, r2}
+ ldr r3, [r5, #16]
+ ldr r7, [r5]
+ ldr r0, [r5, #12]
+ ldr r6, [r5, #20]
+ ldr r9, [r5, #24]
+ ldr r8, [r5, #32]
+ str r3, [sp, #80] @ 4-byte Spill
+ ldr r3, [r5, #28]
+ subs r7, r11, r7
+ add r11, r5, #36
+ str r3, [sp, #84] @ 4-byte Spill
+ sbcs r3, lr, r1
+ sbcs lr, r10, r2
+ ldm r11, {r1, r10, r11}
+ sbcs r4, r4, r0
+ ldr r0, [r5, #48]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [r5, #52]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [r5, #56]
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ sbcs r2, r12, r0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ sbcs r12, r0, r6
+ ldr r0, [sp, #96] @ 4-byte Reload
+ ldr r6, [sp, #84] @ 4-byte Reload
+ sbcs r5, r0, r9
+ ldr r0, [sp, #100] @ 4-byte Reload
+ sbcs r6, r0, r6
+ ldr r0, [sp, #104] @ 4-byte Reload
+ sbcs r8, r0, r8
+ ldr r0, [sp, #108] @ 4-byte Reload
+ sbcs r9, r0, r1
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #64] @ 4-byte Reload
+ sbcs r10, r0, r10
+ ldr r0, [sp, #116] @ 4-byte Reload
+ sbcs r11, r0, r11
+ ldr r0, [sp, #120] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ sbcs r0, r0, r1
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ sbc r0, r0, #0
+ ands r1, r0, #1
+ ldr r0, [sp, #44] @ 4-byte Reload
+ movne r7, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ str r7, [r0]
+ ldr r7, [sp, #52] @ 4-byte Reload
+ movne r3, r7
+ str r3, [r0, #4]
+ ldr r3, [sp, #60] @ 4-byte Reload
+ movne lr, r3
+ ldr r3, [sp, #76] @ 4-byte Reload
+ cmp r1, #0
+ str lr, [r0, #8]
+ movne r4, r3
+ ldr r3, [sp, #56] @ 4-byte Reload
+ str r4, [r0, #12]
+ movne r2, r3
+ str r2, [r0, #16]
+ ldr r2, [sp, #92] @ 4-byte Reload
+ movne r12, r2
+ ldr r2, [sp, #96] @ 4-byte Reload
+ cmp r1, #0
+ str r12, [r0, #20]
+ movne r5, r2
+ ldr r2, [sp, #100] @ 4-byte Reload
+ str r5, [r0, #24]
+ movne r6, r2
+ ldr r2, [sp, #104] @ 4-byte Reload
+ str r6, [r0, #28]
+ movne r8, r2
+ ldr r2, [sp, #108] @ 4-byte Reload
+ cmp r1, #0
+ str r8, [r0, #32]
+ movne r9, r2
+ ldr r2, [sp, #112] @ 4-byte Reload
+ str r9, [r0, #36]
+ movne r10, r2
+ ldr r2, [sp, #116] @ 4-byte Reload
+ str r10, [r0, #40]
+ movne r11, r2
+ cmp r1, #0
+ ldr r1, [sp, #120] @ 4-byte Reload
+ ldr r2, [sp, #80] @ 4-byte Reload
+ str r11, [r0, #44]
+ movne r2, r1
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r2, [r0, #48]
+ ldr r2, [sp, #84] @ 4-byte Reload
+ movne r2, r1
+ ldr r1, [sp, #128] @ 4-byte Reload
+ str r2, [r0, #52]
+ ldr r2, [sp, #132] @ 4-byte Reload
+ movne r2, r1
+ str r2, [r0, #56]
+ add sp, sp, #12
+ add sp, sp, #2048
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end226:
+ .size mcl_fp_mont15L, .Lfunc_end226-mcl_fp_mont15L
+ .cantunwind
+ .fnend
+
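+@ Editorial annotation (not part of the generated output): mcl_fp_montNF15L below
+@ appears to be the NF variant of Montgomery multiplication for a 15-limb
+@ (15 x 32-bit = 480-bit) modulus, emitted in the same word-serial style as
+@ mcl_fp_mont15L above: each outer step multiplies by one 32-bit word of the
+@ second operand via .LmulPv480x32 and folds the 16-word partial product into
+@ the running accumulator with adcs chains.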
+ .globl mcl_fp_montNF15L
+ .align 2
+ .type mcl_fp_montNF15L,%function
+mcl_fp_montNF15L: @ @mcl_fp_montNF15L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #4
+ sub sp, sp, #4
+ .pad #2048
+ sub sp, sp, #2048
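+@ Editorial annotation (assumption from the visible layout): the 4 + 2048 byte
+@ frame reserved above holds the spilled accumulator limbs plus the 16-word
+@ outputs that each .LmulPv480x32 call writes (e.g. sp+1984..sp+2044) before
+@ they are added into the result.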
+ add r12, sp, #116
+ mov r4, r3
+ stm r12, {r1, r2, r3}
+ str r0, [sp, #76] @ 4-byte Spill
+ add r0, sp, #1984
+ ldr r5, [r3, #-4]
+ ldr r2, [r2]
+ str r5, [sp, #112] @ 4-byte Spill
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1984]
+ ldr r1, [sp, #1988]
+ str r0, [sp, #60] @ 4-byte Spill
+ mul r2, r0, r5
+ ldr r0, [sp, #2044]
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [sp, #1992]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #2040]
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #1996]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #2036]
+ str r1, [sp, #80] @ 4-byte Spill
+ mov r1, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #2032]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #2028]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #2024]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #2020]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #2016]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #2012]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #2008]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2004]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2000]
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #1920
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1980]
+ add r7, sp, #1936
+ add r11, sp, #1920
+ ldr r6, [sp, #1948]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1976]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1972]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1968]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1964]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1960]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1956]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1952]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r7, {r4, r5, r7}
+ ldm r11, {r9, r10, r11}
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r1, [sp, #116] @ 4-byte Reload
+ ldr r8, [sp, #1932]
+ ldr r2, [r0, #4]
+ add r0, sp, #1856
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #1868]
+ ldr r3, [sp, #1872]
+ ldr r12, [sp, #1876]
+ ldr lr, [sp, #1880]
+ adds r0, r9, r0
+ ldr r9, [sp, #1896]
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r10, r0
+ ldr r10, [sp, #1900]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r11, r0
+ ldr r11, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r8, r0
+ ldr r8, [sp, #1892]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #1884]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r5, r0
+ ldr r5, [sp, #1888]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r7, r0
+ ldr r7, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r6, r0
+ ldr r6, [sp, #1856]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adc r0, r1, r0
+ adds r6, r11, r6
+ ldr r1, [sp, #1864]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1916]
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1912]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1908]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1904]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #1860]
+ adcs r0, r7, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #1792
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1852]
+ add r11, sp, #1808
+ add r10, sp, #1792
+ ldr r6, [sp, #1820]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1848]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1844]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1840]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1836]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1832]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1828]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1824]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r8, r11}
+ ldm r10, {r5, r7, r9, r10}
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r1, [sp, #116] @ 4-byte Reload
+ ldr r2, [r0, #8]
+ add r0, sp, #1728
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #1740]
+ ldr r3, [sp, #1744]
+ ldr r12, [sp, #1748]
+ ldr lr, [sp, #1752]
+ adds r0, r0, r5
+ ldr r5, [sp, #1760]
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1768]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ ldr r7, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1772]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1756]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1764]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1728]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r6, r11, r6
+ ldr r1, [sp, #1736]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1788]
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1784]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1780]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1776]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1732]
+ adcs r0, r7, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #1664
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1724]
+ add r11, sp, #1680
+ add r10, sp, #1664
+ ldr r6, [sp, #1692]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1720]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1716]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1712]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1708]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1704]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1700]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1696]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r8, r11}
+ ldm r10, {r5, r7, r9, r10}
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r1, [sp, #116] @ 4-byte Reload
+ ldr r2, [r0, #12]
+ add r0, sp, #1600
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #1612]
+ ldr r3, [sp, #1616]
+ ldr r12, [sp, #1620]
+ ldr lr, [sp, #1624]
+ adds r0, r0, r5
+ ldr r5, [sp, #1632]
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1640]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ ldr r7, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1644]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1628]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1636]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1600]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r6, r11, r6
+ ldr r1, [sp, #1608]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1660]
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1656]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1652]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1648]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1604]
+ adcs r0, r7, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #1536
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1596]
+ add r11, sp, #1552
+ add r10, sp, #1536
+ ldr r6, [sp, #1564]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1592]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1588]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1584]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1580]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1576]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1572]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1568]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r8, r11}
+ ldm r10, {r5, r7, r9, r10}
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r1, [sp, #116] @ 4-byte Reload
+ ldr r2, [r0, #16]
+ add r0, sp, #1472
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #1484]
+ ldr r3, [sp, #1488]
+ ldr r12, [sp, #1492]
+ ldr lr, [sp, #1496]
+ adds r0, r0, r5
+ ldr r5, [sp, #1504]
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1512]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ ldr r7, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1516]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1500]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1508]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1472]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r6, r11, r6
+ ldr r1, [sp, #1480]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1532]
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1528]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1524]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1520]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1476]
+ adcs r0, r7, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #1408
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1468]
+ add r11, sp, #1424
+ add r10, sp, #1408
+ ldr r6, [sp, #1436]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1464]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1460]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1456]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1452]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1448]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1444]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1440]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r8, r11}
+ ldm r10, {r5, r7, r9, r10}
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r1, [sp, #116] @ 4-byte Reload
+ ldr r2, [r0, #20]
+ add r0, sp, #1344
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #1356]
+ ldr r3, [sp, #1360]
+ ldr r12, [sp, #1364]
+ ldr lr, [sp, #1368]
+ adds r0, r0, r5
+ ldr r5, [sp, #1376]
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1384]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ ldr r7, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1388]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1372]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1380]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1344]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r6, r11, r6
+ ldr r1, [sp, #1352]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1404]
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1400]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1396]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1392]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1348]
+ adcs r0, r7, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #1280
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1340]
+ add r11, sp, #1296
+ add r10, sp, #1280
+ ldr r6, [sp, #1308]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1336]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1332]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1328]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1324]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1320]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1316]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1312]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r8, r11}
+ ldm r10, {r5, r7, r9, r10}
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r1, [sp, #116] @ 4-byte Reload
+ ldr r2, [r0, #24]
+ add r0, sp, #1216
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #1228]
+ ldr r3, [sp, #1232]
+ ldr r12, [sp, #1236]
+ ldr lr, [sp, #1240]
+ adds r0, r0, r5
+ ldr r5, [sp, #1248]
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1256]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ ldr r7, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1260]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1244]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1252]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1216]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r6, r11, r6
+ ldr r1, [sp, #1224]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1276]
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1272]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1268]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1264]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1220]
+ adcs r0, r7, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #1152
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1212]
+ add r11, sp, #1168
+ add r10, sp, #1152
+ ldr r6, [sp, #1180]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1208]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1204]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1200]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1196]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1192]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1188]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1184]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r8, r11}
+ ldm r10, {r5, r7, r9, r10}
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r1, [sp, #116] @ 4-byte Reload
+ ldr r2, [r0, #28]
+ add r0, sp, #1088
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #1100]
+ ldr r3, [sp, #1104]
+ ldr r12, [sp, #1108]
+ ldr lr, [sp, #1112]
+ adds r0, r0, r5
+ ldr r5, [sp, #1120]
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1128]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ ldr r7, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1132]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1116]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1124]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1088]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r6, r11, r6
+ ldr r1, [sp, #1096]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1148]
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1144]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1140]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1136]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1092]
+ adcs r0, r7, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #1024
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1084]
+ add r11, sp, #1040
+ add r10, sp, #1024
+ ldr r6, [sp, #1052]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1080]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1076]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1072]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1068]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1064]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1060]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1056]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r8, r11}
+ ldm r10, {r5, r7, r9, r10}
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r1, [sp, #116] @ 4-byte Reload
+ ldr r2, [r0, #32]
+ add r0, sp, #960
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #964
+ adds r0, r0, r5
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #988
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1020]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1016]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1012]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1008]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r6, [sp, #960]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #108] @ 4-byte Reload
+ ldr r7, [sp, #104] @ 4-byte Reload
+ adds r6, r11, r6
+ adcs r0, r7, r0
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #896
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #956]
+ add r11, sp, #912
+ add r10, sp, #896
+ ldr r6, [sp, #924]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #952]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #948]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #944]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #940]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #936]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #932]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #928]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r8, r11}
+ ldm r10, {r5, r7, r9, r10}
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r1, [sp, #116] @ 4-byte Reload
+ ldr r2, [r0, #36]
+ add r0, sp, #832
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #836
+ adds r0, r0, r5
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #860
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #892]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #888]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #884]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #880]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r6, [sp, #832]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #108] @ 4-byte Reload
+ ldr r7, [sp, #104] @ 4-byte Reload
+ adds r6, r11, r6
+ adcs r0, r7, r0
+ ldr r7, [sp, #112] @ 4-byte Reload
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r6, r7
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ add r0, sp, #768
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #828]
+ add r11, sp, #768
+ ldr r6, [sp, #792]
+ ldr r5, [sp, #788]
+ ldr r8, [sp, #784]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #824]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #820]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #816]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #812]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #808]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #804]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #800]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #796]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r9, r10, r11}
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r1, [sp, #116] @ 4-byte Reload
+ ldr r4, [sp, #780]
+ ldr r2, [r0, #40]
+ add r0, sp, #704
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #720
+ adds r0, r0, r9
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r2, r0, r10
+ ldr r0, [sp, #104] @ 4-byte Reload
+ add r10, sp, #744
+ adcs r0, r0, r11
+ ldr r11, [sp, #708]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #716]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #704]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #712]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #48] @ 4-byte Spill
+ adds r0, r2, r5
+ mul r1, r0, r7
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #764]
+ str r1, [sp, #40] @ 4-byte Spill
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #760]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r4, r5, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #104] @ 4-byte Reload
+ adcs r7, r7, r11
+ str r7, [sp, #104] @ 4-byte Spill
+ ldr r7, [sp, #100] @ 4-byte Reload
+ adcs r6, r7, r6
+ str r6, [sp, #100] @ 4-byte Spill
+ ldr r6, [sp, #96] @ 4-byte Reload
+ adcs r6, r6, r8
+ str r6, [sp, #96] @ 4-byte Spill
+ ldr r6, [sp, #92] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #640
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #700]
+ add r7, sp, #656
+ add r11, sp, #640
+ ldr r4, [sp, #668]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #696]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #692]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #688]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #684]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #680]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #676]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #672]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r7, {r5, r6, r7}
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r1, [sp, #116] @ 4-byte Reload
+ ldr r2, [r0, #44]
+ add r0, sp, #576
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #592
+ adds r0, r0, r8
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r2, r0, r9
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #616
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r7
+ add r7, sp, #576
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #40] @ 4-byte Spill
+ ldm r7, {r4, r6, r7}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r5, [sp, #588]
+ adds r1, r2, r4
+ mul r2, r1, r0
+ ldr r0, [sp, #636]
+ str r1, [sp, #108] @ 4-byte Spill
+ str r2, [sp, #24] @ 4-byte Spill
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #632]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r4, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #104] @ 4-byte Reload
+ adcs r6, r11, r6
+ str r6, [sp, #60] @ 4-byte Spill
+ ldr r6, [sp, #100] @ 4-byte Reload
+ adcs r6, r6, r7
+ str r6, [sp, #56] @ 4-byte Spill
+ ldr r6, [sp, #96] @ 4-byte Reload
+ adcs r5, r6, r5
+ str r5, [sp, #52] @ 4-byte Spill
+ ldr r5, [sp, #92] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ add r0, sp, #512
+ bl .LmulPv480x32(PLT)
+ ldr r1, [sp, #572]
+ add r11, sp, #520
+ ldr r8, [sp, #540]
+ ldr r9, [sp, #536]
+ ldr r10, [sp, #532]
+ ldr r6, [sp, #512]
+ ldr r7, [sp, #516]
+ add r0, sp, #448
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #568]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #564]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #560]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #556]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #552]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #548]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #544]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r5, r11}
+ ldr r1, [sp, #120] @ 4-byte Reload
+ ldr r2, [r1, #48]
+ ldr r1, [sp, #116] @ 4-byte Reload
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #452
+ adds r0, r0, r6
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #476
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #508]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #504]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #500]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #496]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r6, [sp, #448]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #108] @ 4-byte Reload
+ ldr r7, [sp, #60] @ 4-byte Reload
+ adds r6, r11, r6
+ adcs r0, r7, r0
+ str r6, [sp, #24] @ 4-byte Spill
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #384
+ bl .LmulPv480x32(PLT)
+ ldr r1, [sp, #444]
+ add r9, sp, #396
+ ldr r11, [sp, #412]
+ ldr r8, [sp, #408]
+ ldr r5, [sp, #384]
+ ldr r4, [sp, #388]
+ ldr r10, [sp, #392]
+ add r0, sp, #320
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #440]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #436]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #432]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #428]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #424]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #420]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #416]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldm r9, {r6, r7, r9}
+ ldr r1, [sp, #120] @ 4-byte Reload
+ ldr r2, [r1, #52]
+ ldr r1, [sp, #116] @ 4-byte Reload
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #324
+ adds r0, r0, r5
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #348
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #380]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #376]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #372]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #368]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r6, [sp, #320]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #64] @ 4-byte Reload
+ ldr r7, [sp, #60] @ 4-byte Reload
+ adds r6, r11, r6
+ adcs r0, r7, r0
+ str r6, [sp, #24] @ 4-byte Spill
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #256
+ bl .LmulPv480x32(PLT)
+ ldr r1, [sp, #316]
+ add r11, sp, #260
+ ldr r8, [sp, #284]
+ ldr r9, [sp, #280]
+ ldr r10, [sp, #276]
+ ldr r7, [sp, #256]
+ add r0, sp, #192
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #312]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #308]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #304]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #300]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #296]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #292]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #288]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r5, r6, r11}
+ ldr r1, [sp, #120] @ 4-byte Reload
+ ldr r2, [r1, #56]
+ ldr r1, [sp, #116] @ 4-byte Reload
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldr r1, [sp, #60] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ add lr, sp, #208
+ adds r0, r0, r7
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r4
+ adcs r1, r1, r5
+ str r1, [sp, #120] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, r6
+ str r1, [sp, #116] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r11
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r1, r10
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #108] @ 4-byte Reload
+ adcs r1, r1, r9
+ add r9, sp, #192
+ str r1, [sp, #108] @ 4-byte Spill
+ ldr r1, [sp, #104] @ 4-byte Reload
+ adcs r1, r1, r8
+ str r1, [sp, #104] @ 4-byte Spill
+ ldr r1, [sp, #100] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [sp, #96] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #96] @ 4-byte Spill
+ ldr r1, [sp, #92] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #92] @ 4-byte Spill
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adc r1, r1, r2
+ str r1, [sp, #68] @ 4-byte Spill
+ ldm r9, {r4, r8, r9}
+ ldr r7, [sp, #204]
+ ldr r10, [sp, #236]
+ adds r5, r0, r4
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r4, [sp, #232]
+ mul r1, r5, r0
+ ldr r0, [sp, #252]
+ str r1, [sp, #56] @ 4-byte Spill
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #248]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #244]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #240]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #120] @ 4-byte Reload
+ ldr r6, [sp, #116] @ 4-byte Reload
+ adcs r8, r11, r8
+ adcs r9, r6, r9
+ ldr r6, [sp, #64] @ 4-byte Reload
+ adcs r7, r6, r7
+ ldr r6, [sp, #60] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r6, r0, r10
+ ldr r0, [sp, #80] @ 4-byte Reload
+ ldr r10, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r10
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ add r0, sp, #128
+ bl .LmulPv480x32(PLT)
+ add r3, sp, #128
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r5, r0
+ adcs r11, r8, r1
+ ldr r0, [sp, #144]
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs lr, r9, r2
+ str r11, [sp, #40] @ 4-byte Spill
+ adcs r8, r7, r3
+ str lr, [sp, #48] @ 4-byte Spill
+ str r8, [sp, #56] @ 4-byte Spill
+ adcs r4, r1, r0
+ ldr r0, [sp, #148]
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r4, [sp, #64] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #152]
+ adcs r0, r1, r0
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #156]
+ adcs r0, r1, r0
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #160]
+ adcs r0, r1, r0
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #164]
+ adcs r0, r1, r0
+ ldr r1, [sp, #104] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #168]
+ adcs r0, r1, r0
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #172]
+ adcs r0, r6, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #176]
+ adcs r0, r1, r0
+ ldr r1, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #180]
+ adcs r0, r1, r0
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #184]
+ adcs r0, r1, r0
+ ldr r1, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #188]
+ adc r0, r1, r0
+ mov r1, r10
+ add r10, r1, #20
+ str r0, [sp, #120] @ 4-byte Spill
+ ldmib r1, {r0, r6}
+ ldr r2, [r1, #12]
+ ldr r12, [r1, #16]
+ ldm r10, {r5, r9, r10}
+ ldr r7, [r1]
+ subs r7, r11, r7
+ ldr r11, [r1, #36]
+ sbcs r3, lr, r0
+ ldr r0, [r1, #32]
+ sbcs lr, r8, r6
+ ldr r8, [r1, #40]
+ sbcs r4, r4, r2
+ ldr r2, [r1, #44]
+ str r2, [sp, #68] @ 4-byte Spill
+ ldr r2, [r1, #48]
+ str r2, [sp, #72] @ 4-byte Spill
+ ldr r2, [r1, #52]
+ ldr r1, [r1, #56]
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r2, [sp, #52] @ 4-byte Spill
+ sbcs r2, r1, r12
+ ldr r1, [sp, #84] @ 4-byte Reload
+ sbcs r12, r1, r5
+ ldr r1, [sp, #88] @ 4-byte Reload
+ sbcs r5, r1, r9
+ ldr r1, [sp, #92] @ 4-byte Reload
+ sbcs r6, r1, r10
+ ldr r1, [sp, #96] @ 4-byte Reload
+ sbcs r9, r1, r0
+ ldr r0, [sp, #100] @ 4-byte Reload
+ ldr r1, [sp, #68] @ 4-byte Reload
+ sbcs r10, r0, r11
+ ldr r0, [sp, #104] @ 4-byte Reload
+ sbcs r11, r0, r8
+ ldr r0, [sp, #108] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #72] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ sbc r8, r0, r1
+ ldr r0, [sp, #40] @ 4-byte Reload
+ asr r1, r8, #31
+ cmp r1, #0
+ movlt r7, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ str r7, [r0]
+ ldr r7, [sp, #48] @ 4-byte Reload
+ movlt r3, r7
+ str r3, [r0, #4]
+ ldr r3, [sp, #56] @ 4-byte Reload
+ movlt lr, r3
+ ldr r3, [sp, #64] @ 4-byte Reload
+ cmp r1, #0
+ str lr, [r0, #8]
+ movlt r4, r3
+ ldr r3, [sp, #80] @ 4-byte Reload
+ str r4, [r0, #12]
+ movlt r2, r3
+ ldr r3, [sp, #68] @ 4-byte Reload
+ str r2, [r0, #16]
+ ldr r2, [sp, #84] @ 4-byte Reload
+ movlt r12, r2
+ ldr r2, [sp, #88] @ 4-byte Reload
+ cmp r1, #0
+ str r12, [r0, #20]
+ movlt r5, r2
+ ldr r2, [sp, #92] @ 4-byte Reload
+ str r5, [r0, #24]
+ movlt r6, r2
+ ldr r2, [sp, #96] @ 4-byte Reload
+ str r6, [r0, #28]
+ movlt r9, r2
+ ldr r2, [sp, #100] @ 4-byte Reload
+ cmp r1, #0
+ str r9, [r0, #32]
+ movlt r10, r2
+ ldr r2, [sp, #104] @ 4-byte Reload
+ str r10, [r0, #36]
+ movlt r11, r2
+ ldr r2, [sp, #108] @ 4-byte Reload
+ str r11, [r0, #40]
+ movlt r3, r2
+ cmp r1, #0
+ ldr r1, [sp, #112] @ 4-byte Reload
+ ldr r2, [sp, #72] @ 4-byte Reload
+ str r3, [r0, #44]
+ movlt r2, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r2, [r0, #48]
+ ldr r2, [sp, #124] @ 4-byte Reload
+ movlt r2, r1
+ ldr r1, [sp, #120] @ 4-byte Reload
+ str r2, [r0, #52]
+ movlt r8, r1
+ str r8, [r0, #56]
+ add sp, sp, #4
+ add sp, sp, #2048
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end227:
+ .size mcl_fp_montNF15L, .Lfunc_end227-mcl_fp_montNF15L
+ .cantunwind
+ .fnend
+
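+@ Montgomery reduction over 15 x 32-bit (480-bit) limbs. From the loads
+@ below, the arguments appear to be: r0 = result, r1 = double-width
+@ input, r2 = modulus (with the word at [modulus, #-4] holding the
+@ Montgomery constant p'). Each round computes q = limb * p' mod 2^32,
+@ multiplies the modulus by q via .LmulPv480x32, and folds the partial
+@ product back into the accumulator with adcs/adc carry chains.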
+ .globl mcl_fp_montRed15L
+ .align 2
+ .type mcl_fp_montRed15L,%function
+mcl_fp_montRed15L: @ @mcl_fp_montRed15L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #148
+ sub sp, sp, #148
+ .pad #1024
+ sub sp, sp, #1024
+ mov r3, r2
+ str r0, [sp, #192] @ 4-byte Spill
+ ldr r2, [r1, #4]
+ ldr r7, [r1]
+ ldr r0, [r3]
+ str r3, [sp, #200] @ 4-byte Spill
+ str r2, [sp, #108] @ 4-byte Spill
+ ldr r2, [r1, #8]
+ str r0, [sp, #188] @ 4-byte Spill
+ ldr r0, [r3, #4]
+ str r2, [sp, #104] @ 4-byte Spill
+ ldr r2, [r1, #12]
+ str r0, [sp, #184] @ 4-byte Spill
+ ldr r0, [r3, #8]
+ str r2, [sp, #100] @ 4-byte Spill
+ str r0, [sp, #180] @ 4-byte Spill
+ ldr r0, [r3, #12]
+ str r0, [sp, #164] @ 4-byte Spill
+ ldr r0, [r3, #16]
+ str r0, [sp, #168] @ 4-byte Spill
+ ldr r0, [r3, #20]
+ str r0, [sp, #172] @ 4-byte Spill
+ ldr r0, [r3, #24]
+ str r0, [sp, #176] @ 4-byte Spill
+ ldr r0, [r3, #-4]
+ str r0, [sp, #204] @ 4-byte Spill
+ mul r2, r7, r0
+ ldr r0, [r3, #28]
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [r3, #32]
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [r3, #36]
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [r3, #40]
+ str r0, [sp, #144] @ 4-byte Spill
+ ldr r0, [r3, #44]
+ str r0, [sp, #148] @ 4-byte Spill
+ ldr r0, [r3, #48]
+ str r0, [sp, #152] @ 4-byte Spill
+ ldr r0, [r3, #52]
+ str r0, [sp, #156] @ 4-byte Spill
+ ldr r0, [r3, #56]
+ str r0, [sp, #160] @ 4-byte Spill
+ ldr r0, [r1, #96]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [r1, #100]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [r1, #104]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [r1, #108]
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [r1, #112]
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [r1, #116]
+ str r0, [sp, #196] @ 4-byte Spill
+ ldr r0, [r1, #64]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r1, #68]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r1, #72]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [r1, #76]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [r1, #80]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [r1, #88]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [r1, #92]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [r1, #84]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [r1, #32]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [r1, #36]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r1, #40]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r1, #44]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r1, #48]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r1, #52]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r1, #56]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r1, #60]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [r1, #28]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [r1, #24]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [r1, #20]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [r1, #16]
+ mov r1, r3
+ str r0, [sp, #16] @ 4-byte Spill
+ add r0, sp, #1104
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1164]
+ ldr r9, [sp, #1104]
+ ldr r1, [sp, #1112]
+ ldr r2, [sp, #1116]
+ ldr r3, [sp, #1120]
+ ldr r12, [sp, #1124]
+ ldr lr, [sp, #1128]
+ ldr r4, [sp, #1132]
+ ldr r5, [sp, #1136]
+ ldr r6, [sp, #1140]
+ ldr r8, [sp, #1144]
+ ldr r10, [sp, #1148]
+ ldr r11, [sp, #1152]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1160]
+ adds r7, r7, r9
+ ldr r7, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1156]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #1108]
+ adcs r9, r7, r0
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #200] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #196] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #196] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #204] @ 4-byte Reload
+ mul r2, r9, r0
+ add r0, sp, #1040
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1100]
+ ldr r4, [sp, #1040]
+ ldr r1, [sp, #1048]
+ ldr r2, [sp, #1052]
+ ldr r8, [sp, #1056]
+ ldr r3, [sp, #1060]
+ ldr r10, [sp, #1064]
+ ldr r11, [sp, #1068]
+ ldr r12, [sp, #1072]
+ ldr r7, [sp, #1076]
+ ldr r6, [sp, #1080]
+ ldr lr, [sp, #1084]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1096]
+ adds r4, r9, r4
+ ldr r4, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1092]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #1088]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #1044]
+ adcs r9, r4, r0
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r4, [sp, #204] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #4] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r9, r4
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r11
+ mov r11, r9
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #196] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #196] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #976
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1036]
+ add lr, sp, #1000
+ add r10, sp, #976
+ ldr r5, [sp, #1020]
+ ldr r6, [sp, #1016]
+ ldr r7, [sp, #1012]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1032]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1028]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1024]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm lr, {r3, r12, lr}
+ ldr r9, [sp, #996]
+ ldr r2, [sp, #992]
+ ldm r10, {r0, r1, r8, r10}
+ adds r0, r11, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r1, r0, r1
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r8
+ mov r8, r1
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r1, r4
+ ldr r1, [sp, #200] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #196] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #196] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ add r0, sp, #912
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #972]
+ ldr r4, [sp, #912]
+ add lr, sp, #916
+ ldr r11, [sp, #960]
+ ldr r5, [sp, #956]
+ ldr r6, [sp, #952]
+ ldr r7, [sp, #948]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #968]
+ adds r4, r8, r4
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #964]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r9, r10, r12, lr}
+ ldr r4, [sp, #108] @ 4-byte Reload
+ adcs r4, r4, r0
+ ldr r0, [sp, #104] @ 4-byte Reload
+ str r4, [sp, #12] @ 4-byte Spill
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #200] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #204] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ mul r2, r4, r5
+ adcs r0, r0, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #196] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #196] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ add r0, sp, #848
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #908]
+ add r10, sp, #872
+ add lr, sp, #848
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #904]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #900]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #896]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #892]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r10, {r4, r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #12] @ 4-byte Reload
+ adds r0, r11, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r1, r0, r1
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mov r11, r1
+ adcs r0, r0, r2
+ ldr r2, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r1, r5
+ mov r1, r6
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #196] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #196] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #784
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #844]
+ ldr r4, [sp, #784]
+ add r10, sp, #788
+ ldr lr, [sp, #832]
+ ldr r5, [sp, #828]
+ ldr r6, [sp, #824]
+ ldr r7, [sp, #820]
+ ldr r12, [sp, #816]
+ ldr r3, [sp, #812]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #840]
+ adds r4, r11, r4
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #836]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r10, {r0, r1, r2, r8, r9, r10}
+ ldr r4, [sp, #108] @ 4-byte Reload
+ adcs r11, r4, r0
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r4, [sp, #204] @ 4-byte Reload
+ str r11, [sp, #20] @ 4-byte Spill
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #200] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #196] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #196] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, sp, #720
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #780]
+ add r10, sp, #744
+ add lr, sp, #720
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #776]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #772]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #768]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #764]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r6, r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #20] @ 4-byte Reload
+ adds r0, r11, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r1, r0, r1
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mov r11, r1
+ adcs r0, r0, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r1, r4
+ mov r1, r5
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #196] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #196] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #656
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #716]
+ ldr r4, [sp, #656]
+ add r10, sp, #660
+ ldr lr, [sp, #704]
+ ldr r5, [sp, #700]
+ ldr r6, [sp, #696]
+ ldr r7, [sp, #692]
+ ldr r12, [sp, #688]
+ ldr r3, [sp, #684]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #712]
+ adds r4, r11, r4
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #708]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r0, r1, r2, r8, r9, r10}
+ ldr r4, [sp, #108] @ 4-byte Reload
+ adcs r11, r4, r0
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r4, [sp, #200] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r4
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #196] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #196] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #204] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #592
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #652]
+ add r10, sp, #616
+ add lr, sp, #592
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #648]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #644]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #640]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r5, r6, r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r11, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r11, r0, r1
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #204] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ mul r2, r11, r5
+ adcs r0, r0, r6
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r4
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #196] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #196] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ add r0, sp, #528
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #588]
+ ldr r4, [sp, #528]
+ add r10, sp, #532
+ ldr lr, [sp, #572]
+ ldr r6, [sp, #568]
+ ldr r7, [sp, #564]
+ ldr r12, [sp, #560]
+ ldr r3, [sp, #556]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #584]
+ adds r4, r11, r4
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #580]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #576]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r0, r1, r2, r8, r9, r10}
+ ldr r4, [sp, #108] @ 4-byte Reload
+ adcs r11, r4, r0
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r5
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #200] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r9
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #196] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #196] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ add r0, sp, #464
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #524]
+ add r10, sp, #488
+ add lr, sp, #464
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #520]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #516]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #512]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldm r10, {r4, r5, r6, r7, r8, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r11, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r11, r0, r1
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #204] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r11, r5
+ adcs r0, r0, r6
+ mov r6, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r6
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #196] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #196] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ add r0, sp, #400
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #460]
+ ldr r4, [sp, #400]
+ add r10, sp, #404
+ ldr lr, [sp, #440]
+ ldr r7, [sp, #436]
+ ldr r12, [sp, #432]
+ ldr r3, [sp, #428]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #456]
+ adds r4, r11, r4
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #452]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #448]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #444]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldm r10, {r0, r1, r2, r8, r9, r10}
+ ldr r4, [sp, #108] @ 4-byte Reload
+ adcs r11, r4, r0
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r5
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r8
+ mov r8, r6
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #196] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ add r0, sp, #336
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #396]
+ add r10, sp, #360
+ add lr, sp, #336
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #392]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #388]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #384]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r4, r5, r6, r7, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r11, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r11, r0, r1
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #196] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #204] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ mul r2, r11, r6
+ adcs r0, r0, r7
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r8
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ add r0, sp, #272
+ bl .LmulPv480x32(PLT)
+ add r5, sp, #272
+ add lr, sp, #288
+ ldm r5, {r0, r1, r3, r5}
+ ldr r9, [sp, #332]
+ ldr r8, [sp, #328]
+ ldr r7, [sp, #312]
+ adds r0, r11, r0
+ ldr r11, [sp, #324]
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r10, r0, r1
+ mul r0, r10, r6
+ ldr r6, [sp, #316]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #204] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #320]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r4, [sp, #196] @ 4-byte Reload
+ adcs r0, r4, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #200] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r4, r0, r2
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r2, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #196] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r8, r0, r9
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ add r0, sp, #208
+ bl .LmulPv480x32(PLT)
+ add r3, sp, #208
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r10, r0
+ ldr r0, [sp, #204] @ 4-byte Reload
+ adcs lr, r0, r1
+ ldr r0, [sp, #76] @ 4-byte Reload
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str lr, [sp, #80] @ 4-byte Spill
+ adcs r2, r0, r2
+ ldr r0, [sp, #48] @ 4-byte Reload
+ str r2, [sp, #84] @ 4-byte Spill
+ adcs r3, r0, r3
+ ldr r0, [sp, #224]
+ str r3, [sp, #88] @ 4-byte Spill
+ adcs r7, r1, r0
+ ldr r0, [sp, #228]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r7, [sp, #92] @ 4-byte Spill
+ adcs r4, r4, r0
+ ldr r0, [sp, #232]
+ str r4, [sp, #96] @ 4-byte Spill
+ adcs r5, r1, r0
+ ldr r0, [sp, #236]
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r5, [sp, #100] @ 4-byte Spill
+ adcs r6, r1, r0
+ ldr r0, [sp, #240]
+ ldr r1, [sp, #64] @ 4-byte Reload
+ str r6, [sp, #104] @ 4-byte Spill
+ adcs r11, r1, r0
+ ldr r0, [sp, #244]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r11, [sp, #108] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #196] @ 4-byte Reload
+ str r0, [sp, #200] @ 4-byte Spill
+ ldr r0, [sp, #248]
+ adcs r0, r1, r0
+ ldr r1, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #196] @ 4-byte Spill
+ ldr r0, [sp, #252]
+ adcs r0, r1, r0
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #204] @ 4-byte Spill
+ ldr r0, [sp, #256]
+ adcs r10, r1, r0
+ ldr r0, [sp, #260]
+ ldr r1, [sp, #128] @ 4-byte Reload
+ str r10, [sp, #124] @ 4-byte Spill
+ adcs r9, r1, r0
+ ldr r0, [sp, #264]
+ ldr r1, [sp, #120] @ 4-byte Reload
+ str r9, [sp, #128] @ 4-byte Spill
+ adcs r8, r8, r0
+ ldr r0, [sp, #268]
+ adcs r12, r1, r0
+ ldr r0, [sp, #116] @ 4-byte Reload
+ ldr r1, [sp, #184] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #188] @ 4-byte Reload
+ subs r0, lr, r0
+ sbcs r1, r2, r1
+ ldr r2, [sp, #180] @ 4-byte Reload
+ sbcs r2, r3, r2
+ ldr r3, [sp, #164] @ 4-byte Reload
+ sbcs r3, r7, r3
+ ldr r7, [sp, #168] @ 4-byte Reload
+ sbcs lr, r4, r7
+ ldr r4, [sp, #172] @ 4-byte Reload
+ ldr r7, [sp, #136] @ 4-byte Reload
+ sbcs r4, r5, r4
+ ldr r5, [sp, #176] @ 4-byte Reload
+ sbcs r5, r6, r5
+ ldr r6, [sp, #132] @ 4-byte Reload
+ sbcs r6, r11, r6
+ ldr r11, [sp, #200] @ 4-byte Reload
+ str r6, [sp, #172] @ 4-byte Spill
+ sbcs r6, r11, r7
+ ldr r7, [sp, #140] @ 4-byte Reload
+ ldr r11, [sp, #204] @ 4-byte Reload
+ str r6, [sp, #176] @ 4-byte Spill
+ ldr r6, [sp, #196] @ 4-byte Reload
+ sbcs r6, r6, r7
+ ldr r7, [sp, #144] @ 4-byte Reload
+ str r6, [sp, #180] @ 4-byte Spill
+ sbcs r6, r11, r7
+ ldr r7, [sp, #148] @ 4-byte Reload
+ str r6, [sp, #184] @ 4-byte Spill
+ sbcs r6, r10, r7
+ ldr r7, [sp, #152] @ 4-byte Reload
+ mov r10, r8
+ str r6, [sp, #188] @ 4-byte Spill
+ sbcs r6, r9, r7
+ ldr r7, [sp, #156] @ 4-byte Reload
+ sbcs r11, r8, r7
+ ldr r7, [sp, #160] @ 4-byte Reload
+ mov r8, r12
+ sbcs r9, r12, r7
+ ldr r7, [sp, #120] @ 4-byte Reload
+ sbc r7, r7, #0
+ ands r12, r7, #1
+ ldr r7, [sp, #80] @ 4-byte Reload
+ movne r0, r7
+ ldr r7, [sp, #192] @ 4-byte Reload
+ str r0, [r7]
+ ldr r0, [sp, #84] @ 4-byte Reload
+ movne r1, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ str r1, [r7, #4]
+ ldr r1, [sp, #108] @ 4-byte Reload
+ movne r2, r0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ cmp r12, #0
+ str r2, [r7, #8]
+ movne r3, r0
+ ldr r0, [sp, #96] @ 4-byte Reload
+ str r3, [r7, #12]
+ movne lr, r0
+ ldr r0, [sp, #100] @ 4-byte Reload
+ str lr, [r7, #16]
+ movne r4, r0
+ ldr r0, [sp, #104] @ 4-byte Reload
+ cmp r12, #0
+ str r4, [r7, #20]
+ movne r5, r0
+ ldr r0, [sp, #172] @ 4-byte Reload
+ movne r0, r1
+ str r5, [r7, #24]
+ ldr r1, [sp, #176] @ 4-byte Reload
+ str r0, [r7, #28]
+ ldr r0, [sp, #200] @ 4-byte Reload
+ movne r1, r0
+ ldr r0, [sp, #196] @ 4-byte Reload
+ cmp r12, #0
+ str r1, [r7, #32]
+ ldr r1, [sp, #180] @ 4-byte Reload
+ movne r1, r0
+ ldr r0, [sp, #204] @ 4-byte Reload
+ str r1, [r7, #36]
+ ldr r1, [sp, #184] @ 4-byte Reload
+ movne r1, r0
+ ldr r0, [sp, #188] @ 4-byte Reload
+ str r1, [r7, #40]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ movne r0, r1
+ cmp r12, #0
+ str r0, [r7, #44]
+ ldr r0, [sp, #128] @ 4-byte Reload
+ movne r11, r10
+ movne r9, r8
+ movne r6, r0
+ str r6, [r7, #48]
+ str r11, [r7, #52]
+ str r9, [r7, #56]
+ add sp, sp, #148
+ add sp, sp, #1024
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end228:
+ .size mcl_fp_montRed15L, .Lfunc_end228-mcl_fp_montRed15L
+ .cantunwind
+ .fnend
+
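+@ mcl_fp_addPre15L: 15-limb (480-bit) addition without reduction; adds the value at r1
+@ to the value at r2, stores 15 limbs at r0, and returns the final carry in r0.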
+ .globl mcl_fp_addPre15L
+ .align 2
+ .type mcl_fp_addPre15L,%function
+mcl_fp_addPre15L: @ @mcl_fp_addPre15L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #52
+ sub sp, sp, #52
+ ldm r1, {r3, r7, r11}
+ ldr r10, [r2]
+ ldr r5, [r2, #16]
+ ldr r6, [r2, #4]
+ ldr r4, [r2, #8]
+ ldr r12, [r2, #12]
+ ldr r8, [r1, #12]
+ ldr r9, [r1, #56]
+ adds lr, r10, r3
+ ldr r3, [r2, #32]
+ str r5, [sp, #8] @ 4-byte Spill
+ ldr r5, [r2, #20]
+ ldr r10, [r1, #44]
+ adcs r6, r6, r7
+ adcs r4, r4, r11
+ ldr r11, [r1, #40]
+ adcs r7, r12, r8
+ add r12, r1, #16
+ ldr r8, [r1, #52]
+ str r3, [sp, #20] @ 4-byte Spill
+ ldr r3, [r2, #36]
+ str r5, [sp, #12] @ 4-byte Spill
+ ldr r5, [r2, #24]
+ str r3, [sp, #28] @ 4-byte Spill
+ ldr r3, [r2, #40]
+ str r5, [sp, #16] @ 4-byte Spill
+ ldr r5, [r2, #28]
+ str r3, [sp, #32] @ 4-byte Spill
+ ldr r3, [r2, #44]
+ str r5, [sp, #24] @ 4-byte Spill
+ ldr r5, [r1, #32]
+ str r3, [sp, #36] @ 4-byte Spill
+ ldr r3, [r2, #48]
+ str r3, [sp, #40] @ 4-byte Spill
+ ldr r3, [r2, #52]
+ ldr r2, [r2, #56]
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r2, [r1, #36]
+ str r3, [sp, #44] @ 4-byte Spill
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #48]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldm r12, {r1, r2, r3, r12}
+ str lr, [r0]
+ str r6, [r0, #4]
+ ldr r6, [sp, #8] @ 4-byte Reload
+ str r4, [r0, #8]
+ str r7, [r0, #12]
+ ldr r7, [sp, #12] @ 4-byte Reload
+ ldr r4, [sp, #48] @ 4-byte Reload
+ adcs r1, r6, r1
+ ldr r6, [sp, #40] @ 4-byte Reload
+ adcs r2, r7, r2
+ str r1, [r0, #16]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r7, [sp, #36] @ 4-byte Reload
+ str r2, [r0, #20]
+ ldr r2, [sp, #24] @ 4-byte Reload
+ adcs r1, r1, r3
+ ldr r3, [sp] @ 4-byte Reload
+ adcs r2, r2, r12
+ str r1, [r0, #24]
+ ldr r1, [sp, #20] @ 4-byte Reload
+ add r12, r0, #32
+ str r2, [r0, #28]
+ ldr r2, [sp, #28] @ 4-byte Reload
+ adcs r1, r1, r5
+ ldr r5, [sp, #4] @ 4-byte Reload
+ adcs r2, r2, r3
+ ldr r3, [sp, #32] @ 4-byte Reload
+ adcs r3, r3, r11
+ adcs r7, r7, r10
+ adcs r6, r6, r5
+ ldr r5, [sp, #44] @ 4-byte Reload
+ stm r12, {r1, r2, r3, r7}
+ str r6, [r0, #48]
+ adcs r5, r5, r8
+ adcs r4, r4, r9
+ str r5, [r0, #52]
+ str r4, [r0, #56]
+ mov r0, #0
+ adc r0, r0, #0
+ add sp, sp, #52
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end229:
+ .size mcl_fp_addPre15L, .Lfunc_end229-mcl_fp_addPre15L
+ .cantunwind
+ .fnend
+
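+@ mcl_fp_subPre15L: 15-limb subtraction without reduction; computes [r1] - [r2] into [r0]
+@ and returns the borrow (0 or 1) in r0.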
+ .globl mcl_fp_subPre15L
+ .align 2
+ .type mcl_fp_subPre15L,%function
+mcl_fp_subPre15L: @ @mcl_fp_subPre15L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #56
+ sub sp, sp, #56
+ ldm r2, {r3, r11}
+ ldr r7, [r1]
+ ldr r5, [r2, #8]
+ ldr r6, [r2, #12]
+ ldmib r1, {r4, r12, lr}
+ ldr r8, [r1, #32]
+ ldr r10, [r1, #52]
+ subs r3, r7, r3
+ ldr r7, [r2, #24]
+ str r3, [sp, #24] @ 4-byte Spill
+ ldr r3, [r2, #32]
+ sbcs r4, r4, r11
+ sbcs r5, r12, r5
+ add r12, r1, #16
+ sbcs r11, lr, r6
+ ldr r6, [r2, #20]
+ ldr lr, [r2, #16]
+ str r3, [sp, #28] @ 4-byte Spill
+ ldr r3, [r2, #36]
+ str r3, [sp, #32] @ 4-byte Spill
+ ldr r3, [r2, #40]
+ str r3, [sp, #36] @ 4-byte Spill
+ ldr r3, [r2, #44]
+ str r3, [sp, #40] @ 4-byte Spill
+ ldr r3, [r2, #48]
+ str r3, [sp, #44] @ 4-byte Spill
+ ldr r3, [r2, #52]
+ str r3, [sp, #48] @ 4-byte Spill
+ ldr r3, [r2, #56]
+ str r3, [sp, #52] @ 4-byte Spill
+ ldr r3, [r2, #28]
+ ldr r2, [r1, #36]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #40]
+ str r3, [sp, #20] @ 4-byte Spill
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r1, #44]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #48]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [r1, #56]
+ str r2, [sp, #16] @ 4-byte Spill
+ ldm r12, {r1, r2, r3, r12}
+ ldr r9, [sp, #24] @ 4-byte Reload
+ sbcs r1, r1, lr
+ str r9, [r0]
+ stmib r0, {r4, r5}
+ str r11, [r0, #12]
+ sbcs r2, r2, r6
+ str r1, [r0, #16]
+ ldr r6, [sp, #44] @ 4-byte Reload
+ ldr r5, [sp, #48] @ 4-byte Reload
+ ldr r4, [sp, #52] @ 4-byte Reload
+ sbcs r1, r3, r7
+ str r2, [r0, #20]
+ ldr r2, [sp, #20] @ 4-byte Reload
+ ldr r3, [sp, #36] @ 4-byte Reload
+ ldr r7, [sp, #40] @ 4-byte Reload
+ str r1, [r0, #24]
+ ldr r1, [sp, #28] @ 4-byte Reload
+ sbcs r2, r12, r2
+ sbcs r12, r8, r1
+ str r2, [r0, #28]
+ ldr r2, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp] @ 4-byte Reload
+ str r12, [r0, #32]
+ sbcs r2, r1, r2
+ ldr r1, [sp, #4] @ 4-byte Reload
+ sbcs r3, r1, r3
+ ldr r1, [sp, #8] @ 4-byte Reload
+ sbcs r7, r1, r7
+ ldr r1, [sp, #12] @ 4-byte Reload
+ sbcs r6, r1, r6
+ ldr r1, [sp, #16] @ 4-byte Reload
+ sbcs r5, r10, r5
+ sbcs r4, r1, r4
+ add r1, r0, #36
+ stm r1, {r2, r3, r7}
+ str r6, [r0, #48]
+ str r5, [r0, #52]
+ str r4, [r0, #56]
+ mov r0, #0
+ sbc r0, r0, #0
+ and r0, r0, #1
+ add sp, sp, #56
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end230:
+ .size mcl_fp_subPre15L, .Lfunc_end230-mcl_fp_subPre15L
+ .cantunwind
+ .fnend
+
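+@ mcl_fp_shr1_15L: logical right shift by one bit of the 15-limb value at r1, stored at r0.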
+ .globl mcl_fp_shr1_15L
+ .align 2
+ .type mcl_fp_shr1_15L,%function
+mcl_fp_shr1_15L: @ @mcl_fp_shr1_15L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #24
+ sub sp, sp, #24
+ ldmib r1, {r2, r3, r4, r5, r6, r10}
+ ldr r7, [r1]
+ ldr r11, [r1, #52]
+ ldr r8, [r1, #28]
+ ldr lr, [r1, #32]
+ ldr r12, [r1, #36]
+ ldr r9, [r1, #44]
+ str r7, [sp, #4] @ 4-byte Spill
+ lsr r7, r2, #1
+ str r11, [sp, #16] @ 4-byte Spill
+ orr r7, r7, r3, lsl #31
+ str r7, [sp] @ 4-byte Spill
+ ldr r7, [r1, #40]
+ str r7, [sp, #8] @ 4-byte Spill
+ ldr r7, [r1, #48]
+ ldr r1, [r1, #56]
+ str r1, [sp, #20] @ 4-byte Spill
+ lsr r1, r4, #1
+ lsrs r4, r4, #1
+ str r7, [sp, #12] @ 4-byte Spill
+ rrx r3, r3
+ lsrs r2, r2, #1
+ orr r1, r1, r5, lsl #31
+ ldr r2, [sp, #4] @ 4-byte Reload
+ rrx r2, r2
+ str r2, [r0]
+ ldr r2, [sp] @ 4-byte Reload
+ stmib r0, {r2, r3}
+ str r1, [r0, #12]
+ lsrs r1, r6, #1
+ lsr r2, r12, #1
+ rrx r1, r5
+ ldr r7, [sp, #8] @ 4-byte Reload
+ ldr r5, [sp, #16] @ 4-byte Reload
+ ldr r4, [sp, #12] @ 4-byte Reload
+ str r1, [r0, #16]
+ lsr r1, r6, #1
+ orr r1, r1, r10, lsl #31
+ str r1, [r0, #20]
+ lsrs r1, r8, #1
+ rrx r1, r10
+ orr r2, r2, r7, lsl #31
+ str r1, [r0, #24]
+ lsr r1, r8, #1
+ orr r1, r1, lr, lsl #31
+ str r1, [r0, #28]
+ lsrs r1, r12, #1
+ add r12, r0, #32
+ rrx r1, lr
+ lsrs r3, r9, #1
+ rrx r3, r7
+ lsrs r6, r5, #1
+ lsr r7, r9, #1
+ lsr r5, r5, #1
+ orr r7, r7, r4, lsl #31
+ rrx r6, r4
+ ldr r4, [sp, #20] @ 4-byte Reload
+ stm r12, {r1, r2, r3, r7}
+ str r6, [r0, #48]
+ orr r5, r5, r4, lsl #31
+ lsr r4, r4, #1
+ str r5, [r0, #52]
+ str r4, [r0, #56]
+ add sp, sp, #24
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end231:
+ .size mcl_fp_shr1_15L, .Lfunc_end231-mcl_fp_shr1_15L
+ .cantunwind
+ .fnend
+
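+@ mcl_fp_add15L: modular addition; adds [r1] and [r2] into [r0], then subtracts the
+@ modulus at r3 and keeps the reduced value when no borrow remains.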
+ .globl mcl_fp_add15L
+ .align 2
+ .type mcl_fp_add15L,%function
+mcl_fp_add15L: @ @mcl_fp_add15L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #60
+ sub sp, sp, #60
+ ldr r9, [r1]
+ ldmib r1, {r8, lr}
+ ldr r12, [r1, #12]
+ ldm r2, {r4, r5, r6, r7}
+ adds r10, r4, r9
+ ldr r4, [r1, #24]
+ adcs r11, r5, r8
+ ldr r5, [r1, #20]
+ mov r8, r10
+ adcs r6, r6, lr
+ mov lr, r11
+ str r8, [r0]
+ adcs r9, r7, r12
+ str r6, [sp, #40] @ 4-byte Spill
+ ldr r6, [r1, #16]
+ ldr r7, [r2, #16]
+ str lr, [r0, #4]
+ str r9, [sp, #8] @ 4-byte Spill
+ adcs r7, r7, r6
+ ldr r6, [r2, #48]
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ adcs r7, r7, r5
+ ldr r5, [r2, #28]
+ str r7, [sp, #32] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ ldr r10, [sp, #32] @ 4-byte Reload
+ adcs r7, r7, r4
+ ldr r4, [r2, #32]
+ str r7, [sp, #56] @ 4-byte Spill
+ ldr r7, [r1, #28]
+ adcs r7, r5, r7
+ str r7, [sp, #12] @ 4-byte Spill
+ ldr r7, [r1, #32]
+ ldr r11, [sp, #12] @ 4-byte Reload
+ adcs r7, r4, r7
+ ldr r4, [r2, #36]
+ str r7, [sp, #52] @ 4-byte Spill
+ ldr r7, [r1, #36]
+ adcs r7, r4, r7
+ ldr r4, [r2, #40]
+ str r7, [sp, #44] @ 4-byte Spill
+ ldr r7, [r1, #40]
+ adcs r7, r4, r7
+ ldr r4, [r2, #44]
+ str r7, [sp, #48] @ 4-byte Spill
+ ldr r7, [r1, #44]
+ adcs r5, r4, r7
+ ldr r7, [r1, #48]
+ ldr r4, [sp, #40] @ 4-byte Reload
+ str r5, [sp, #28] @ 4-byte Spill
+ adcs r12, r6, r7
+ ldr r7, [r1, #52]
+ ldr r6, [r2, #52]
+ ldr r1, [r1, #56]
+ ldr r2, [r2, #56]
+ str r4, [r0, #8]
+ str r9, [r0, #12]
+ ldr r9, [sp, #36] @ 4-byte Reload
+ adcs r6, r6, r7
+ str r9, [r0, #16]
+ str r10, [r0, #20]
+ add r7, r0, #40
+ adcs r2, r2, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r6, [sp, #24] @ 4-byte Spill
+ str r2, [sp, #20] @ 4-byte Spill
+ str r1, [r0, #24]
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r11, [r0, #28]
+ str r1, [r0, #32]
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r1, [r0, #36]
+ ldr r1, [sp, #48] @ 4-byte Reload
+ stm r7, {r1, r5, r12}
+ str r6, [r0, #52]
+ str r2, [r0, #56]
+ mov r2, #0
+ adc r1, r2, #0
+ str r1, [sp, #16] @ 4-byte Spill
+ ldm r3, {r6, r7}
+ ldr r1, [r3, #8]
+ ldr r2, [r3, #12]
+ subs r5, r8, r6
+ sbcs r7, lr, r7
+ str r5, [sp, #4] @ 4-byte Spill
+ sbcs r1, r4, r1
+ str r7, [sp] @ 4-byte Spill
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #8] @ 4-byte Reload
+ sbcs r1, r1, r2
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r1, [sp, #8] @ 4-byte Spill
+ ldr r1, [r3, #16]
+ sbcs r9, r9, r1
+ ldr r1, [r3, #20]
+ sbcs r1, r10, r1
+ add r10, r3, #32
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [r3, #24]
+ sbcs r1, r2, r1
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [r3, #28]
+ sbcs r11, r11, r1
+ ldm r10, {r1, r2, r6, r10}
+ ldr r5, [sp, #52] @ 4-byte Reload
+ ldr r8, [r3, #48]
+ ldr r7, [r3, #52]
+ ldr r3, [r3, #56]
+ sbcs r1, r5, r1
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ sbcs r4, r1, r2
+ ldr r1, [sp, #48] @ 4-byte Reload
+ sbcs r2, r1, r6
+ ldr r1, [sp, #28] @ 4-byte Reload
+ sbcs lr, r1, r10
+ ldr r1, [sp, #24] @ 4-byte Reload
+ sbcs r6, r12, r8
+ sbcs r5, r1, r7
+ ldr r1, [sp, #20] @ 4-byte Reload
+ sbcs r1, r1, r3
+ ldr r3, [sp, #16] @ 4-byte Reload
+ sbc r3, r3, #0
+ tst r3, #1
+ bne .LBB232_2
+@ BB#1: @ %nocarry
+ ldr r3, [sp, #4] @ 4-byte Reload
+ str r3, [r0]
+ ldr r3, [sp] @ 4-byte Reload
+ str r3, [r0, #4]
+ ldr r3, [sp, #40] @ 4-byte Reload
+ str r3, [r0, #8]
+ ldr r3, [sp, #8] @ 4-byte Reload
+ str r3, [r0, #12]
+ ldr r3, [sp, #36] @ 4-byte Reload
+ str r9, [r0, #16]
+ str r3, [r0, #20]
+ ldr r3, [sp, #56] @ 4-byte Reload
+ str r3, [r0, #24]
+ ldr r3, [sp, #52] @ 4-byte Reload
+ str r11, [r0, #28]
+ str r3, [r0, #32]
+ str r4, [r0, #36]
+ str r2, [r0, #40]
+ str lr, [r0, #44]
+ str r6, [r0, #48]
+ str r5, [r0, #52]
+ str r1, [r0, #56]
+.LBB232_2: @ %carry
+ add sp, sp, #60
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end232:
+ .size mcl_fp_add15L, .Lfunc_end232-mcl_fp_add15L
+ .cantunwind
+ .fnend
+
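+@ mcl_fp_addNF15L: modular addition variant; adds [r1] and [r2], subtracts the modulus
+@ at r3, and selects between the raw sum and the reduced value by the sign of the difference.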
+ .globl mcl_fp_addNF15L
+ .align 2
+ .type mcl_fp_addNF15L,%function
+mcl_fp_addNF15L: @ @mcl_fp_addNF15L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #92
+ sub sp, sp, #92
+ ldr r9, [r1]
+ ldmib r1, {r8, lr}
+ ldr r12, [r1, #12]
+ ldm r2, {r4, r5, r6, r7}
+ add r11, r3, #32
+ adds r10, r4, r9
+ ldr r4, [r1, #24]
+ adcs r9, r5, r8
+ ldr r5, [r1, #20]
+ str r10, [sp, #20] @ 4-byte Spill
+ adcs lr, r6, lr
+ ldr r6, [r1, #16]
+ str r9, [sp, #24] @ 4-byte Spill
+ adcs r8, r7, r12
+ ldr r7, [r2, #16]
+ str lr, [sp, #28] @ 4-byte Spill
+ str r8, [sp, #32] @ 4-byte Spill
+ adcs r7, r7, r6
+ ldr r6, [r2, #28]
+ str r7, [sp, #48] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ adcs r7, r7, r5
+ str r7, [sp, #52] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ adcs r7, r7, r4
+ str r7, [sp, #64] @ 4-byte Spill
+ ldr r7, [r1, #28]
+ adcs r7, r6, r7
+ ldr r6, [r2, #32]
+ str r7, [sp, #60] @ 4-byte Spill
+ ldr r7, [r1, #32]
+ adcs r7, r6, r7
+ ldr r6, [r2, #36]
+ str r7, [sp, #56] @ 4-byte Spill
+ ldr r7, [r1, #36]
+ adcs r7, r6, r7
+ ldr r6, [r2, #40]
+ str r7, [sp, #76] @ 4-byte Spill
+ ldr r7, [r1, #40]
+ adcs r7, r6, r7
+ ldr r6, [r2, #44]
+ str r7, [sp, #72] @ 4-byte Spill
+ ldr r7, [r1, #44]
+ adcs r7, r6, r7
+ ldr r6, [r2, #48]
+ str r7, [sp, #68] @ 4-byte Spill
+ ldr r7, [r1, #48]
+ adcs r7, r6, r7
+ ldr r6, [r2, #52]
+ ldr r2, [r2, #56]
+ str r7, [sp, #88] @ 4-byte Spill
+ ldr r7, [r1, #52]
+ ldr r1, [r1, #56]
+ adcs r7, r6, r7
+ adc r1, r2, r1
+ str r7, [sp, #84] @ 4-byte Spill
+ str r1, [sp, #80] @ 4-byte Spill
+ ldmib r3, {r1, r5, r7}
+ ldr r2, [r3, #16]
+ ldr r4, [r3]
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [r3, #20]
+ str r2, [sp, #36] @ 4-byte Spill
+ ldr r2, [r3, #24]
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [r3, #28]
+ str r2, [sp, #44] @ 4-byte Spill
+ subs r2, r10, r4
+ sbcs r12, r9, r1
+ ldm r11, {r9, r10, r11}
+ ldr r1, [r3, #44]
+ ldr r4, [sp, #36] @ 4-byte Reload
+ sbcs lr, lr, r5
+ ldr r5, [sp, #64] @ 4-byte Reload
+ sbcs r6, r8, r7
+ ldr r7, [sp, #60] @ 4-byte Reload
+ str r1, [sp] @ 4-byte Spill
+ ldr r1, [r3, #48]
+ str r1, [sp, #4] @ 4-byte Spill
+ ldr r1, [r3, #52]
+ str r1, [sp, #8] @ 4-byte Spill
+ ldr r1, [r3, #56]
+ ldr r3, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ sbcs r3, r1, r3
+ ldr r1, [sp, #52] @ 4-byte Reload
+ sbcs r4, r1, r4
+ ldr r1, [sp, #40] @ 4-byte Reload
+ sbcs r5, r5, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ sbcs r8, r7, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ ldr r7, [sp] @ 4-byte Reload
+ sbcs r9, r1, r9
+ ldr r1, [sp, #76] @ 4-byte Reload
+ sbcs r10, r1, r10
+ ldr r1, [sp, #72] @ 4-byte Reload
+ sbcs r1, r1, r11
+ ldr r11, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ sbcs r1, r1, r7
+ ldr r7, [sp, #4] @ 4-byte Reload
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #88] @ 4-byte Reload
+ sbcs r1, r1, r7
+ ldr r7, [sp, #8] @ 4-byte Reload
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ sbcs r1, r1, r7
+ ldr r7, [sp, #12] @ 4-byte Reload
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ sbc r7, r1, r7
+ asr r1, r7, #31
+ cmp r1, #0
+ movlt r2, r11
+ str r2, [r0]
+ ldr r2, [sp, #24] @ 4-byte Reload
+ movlt r12, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r12, [r0, #4]
+ movlt lr, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ cmp r1, #0
+ str lr, [r0, #8]
+ movlt r6, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r6, [r0, #12]
+ movlt r3, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r3, [r0, #16]
+ ldr r3, [sp, #16] @ 4-byte Reload
+ movlt r4, r2
+ ldr r2, [sp, #64] @ 4-byte Reload
+ cmp r1, #0
+ str r4, [r0, #20]
+ movlt r5, r2
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r5, [r0, #24]
+ movlt r8, r2
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r8, [r0, #28]
+ movlt r9, r2
+ ldr r2, [sp, #76] @ 4-byte Reload
+ cmp r1, #0
+ str r9, [r0, #32]
+ movlt r10, r2
+ ldr r2, [sp, #72] @ 4-byte Reload
+ str r10, [r0, #36]
+ movlt r3, r2
+ ldr r2, [sp, #68] @ 4-byte Reload
+ str r3, [r0, #40]
+ ldr r3, [sp, #36] @ 4-byte Reload
+ movlt r3, r2
+ cmp r1, #0
+ ldr r1, [sp, #88] @ 4-byte Reload
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r3, [r0, #44]
+ movlt r2, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r2, [r0, #48]
+ ldr r2, [sp, #44] @ 4-byte Reload
+ movlt r2, r1
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r2, [r0, #52]
+ movlt r7, r1
+ str r7, [r0, #56]
+ add sp, sp, #92
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end233:
+ .size mcl_fp_addNF15L, .Lfunc_end233-mcl_fp_addNF15L
+ .cantunwind
+ .fnend
+
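+@ mcl_fp_sub15L: modular subtraction; computes [r1] - [r2] into [r0] and, when the
+@ subtraction borrows, adds the modulus at r3 back in.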
+ .globl mcl_fp_sub15L
+ .align 2
+ .type mcl_fp_sub15L,%function
+mcl_fp_sub15L: @ @mcl_fp_sub15L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #64
+ sub sp, sp, #64
+ ldr r9, [r2]
+ ldmib r2, {r8, lr}
+ ldr r5, [r1]
+ ldr r12, [r2, #12]
+ ldmib r1, {r4, r6, r7}
+ subs r5, r5, r9
+ sbcs r4, r4, r8
+ str r5, [sp, #48] @ 4-byte Spill
+ ldr r5, [r2, #24]
+ sbcs r6, r6, lr
+ str r4, [sp, #60] @ 4-byte Spill
+ ldr r4, [r2, #20]
+ sbcs r7, r7, r12
+ str r6, [sp, #56] @ 4-byte Spill
+ ldr r6, [r2, #16]
+ str r7, [sp, #52] @ 4-byte Spill
+ ldr r7, [r1, #16]
+ sbcs r9, r7, r6
+ ldr r7, [r1, #20]
+ ldr r6, [r1, #28]
+ str r9, [sp, #40] @ 4-byte Spill
+ sbcs r7, r7, r4
+ str r7, [sp, #44] @ 4-byte Spill
+ ldr r7, [r1, #24]
+ sbcs r5, r7, r5
+ ldr r7, [r2, #28]
+ sbcs r10, r6, r7
+ ldr r7, [r2, #32]
+ ldr r6, [r1, #32]
+ str r10, [sp, #36] @ 4-byte Spill
+ sbcs r11, r6, r7
+ ldr r7, [r2, #36]
+ ldr r6, [r1, #36]
+ str r11, [sp, #32] @ 4-byte Spill
+ sbcs lr, r6, r7
+ ldr r7, [r2, #40]
+ ldr r6, [r1, #40]
+ str lr, [sp, #28] @ 4-byte Spill
+ sbcs r12, r6, r7
+ ldr r7, [r2, #44]
+ ldr r6, [r1, #44]
+ str r12, [sp, #24] @ 4-byte Spill
+ sbcs r4, r6, r7
+ ldr r6, [r2, #48]
+ ldr r7, [r1, #48]
+ sbcs r8, r7, r6
+ ldr r6, [r2, #52]
+ ldr r7, [r1, #52]
+ ldr r2, [r2, #56]
+ ldr r1, [r1, #56]
+ sbcs r6, r7, r6
+ ldr r7, [sp, #48] @ 4-byte Reload
+ sbcs r2, r1, r2
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r2, [sp, #20] @ 4-byte Spill
+ str r7, [r0]
+ str r1, [r0, #4]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r1, [r0, #8]
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r1, [r0, #12]
+ str r9, [r0, #16]
+ mov r9, r6
+ mov r6, r5
+ ldr r5, [sp, #44] @ 4-byte Reload
+ mov r1, r4
+ str r5, [r0, #20]
+ str r6, [r0, #24]
+ str r10, [r0, #28]
+ str r11, [r0, #32]
+ str lr, [r0, #36]
+ str r12, [r0, #40]
+ add r12, r0, #44
+ stm r12, {r1, r8, r9}
+ str r2, [r0, #56]
+ mov r2, #0
+ sbc r2, r2, #0
+ tst r2, #1
+ beq .LBB234_2
+@ BB#1: @ %carry
+ ldr r2, [r3, #56]
+ str r2, [sp, #16] @ 4-byte Spill
+ ldmib r3, {r2, lr}
+ ldr r4, [r3, #16]
+ ldr r12, [r3, #12]
+ str r4, [sp] @ 4-byte Spill
+ ldr r4, [r3, #20]
+ str r4, [sp, #4] @ 4-byte Spill
+ ldr r4, [r3, #24]
+ str r4, [sp, #8] @ 4-byte Spill
+ ldr r4, [r3, #28]
+ str r4, [sp, #12] @ 4-byte Spill
+ ldr r4, [r3]
+ adds r4, r4, r7
+ ldr r7, [r3, #52]
+ str r7, [sp, #48] @ 4-byte Spill
+ ldr r7, [sp, #60] @ 4-byte Reload
+ adcs r11, r2, r7
+ ldr r2, [r3, #48]
+ str r2, [sp, #60] @ 4-byte Spill
+ ldr r2, [sp, #56] @ 4-byte Reload
+ adcs r7, lr, r2
+ ldr r2, [r3, #44]
+ str r2, [sp, #56] @ 4-byte Spill
+ ldr r2, [sp, #52] @ 4-byte Reload
+ adcs r2, r12, r2
+ add r12, r3, #32
+ ldm r12, {r3, r10, r12}
+ stm r0, {r4, r11}
+ str r7, [r0, #8]
+ str r2, [r0, #12]
+ ldr r7, [sp, #40] @ 4-byte Reload
+ ldr r4, [sp] @ 4-byte Reload
+ ldr r2, [sp, #4] @ 4-byte Reload
+ adcs r4, r4, r7
+ ldr r7, [sp, #8] @ 4-byte Reload
+ adcs r2, r2, r5
+ str r4, [r0, #16]
+ str r2, [r0, #20]
+ ldr r2, [sp, #36] @ 4-byte Reload
+ adcs r4, r7, r6
+ ldr r7, [sp, #12] @ 4-byte Reload
+ str r4, [r0, #24]
+ adcs r2, r7, r2
+ ldr r7, [sp, #24] @ 4-byte Reload
+ str r2, [r0, #28]
+ ldr r2, [sp, #32] @ 4-byte Reload
+ adcs lr, r3, r2
+ ldr r3, [sp, #28] @ 4-byte Reload
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str lr, [r0, #32]
+ adcs r3, r10, r3
+ adcs r7, r12, r7
+ str r3, [r0, #36]
+ adcs r6, r2, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r7, [r0, #40]
+ str r6, [r0, #44]
+ adcs r5, r1, r8
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r5, [r0, #48]
+ adcs r4, r1, r9
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r4, [r0, #52]
+ adc r1, r2, r1
+ str r1, [r0, #56]
+.LBB234_2: @ %nocarry
+ add sp, sp, #64
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end234:
+ .size mcl_fp_sub15L, .Lfunc_end234-mcl_fp_sub15L
+ .cantunwind
+ .fnend
+
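+@ mcl_fp_subNF15L: modular subtraction variant; computes [r1] - [r2] and conditionally
+@ adds the modulus at r3, selecting by the sign of the raw difference.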
+ .globl mcl_fp_subNF15L
+ .align 2
+ .type mcl_fp_subNF15L,%function
+mcl_fp_subNF15L: @ @mcl_fp_subNF15L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #84
+ sub sp, sp, #84
+ mov r12, r0
+ ldr r0, [r2, #32]
+ add r9, r2, #8
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [r2, #36]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r2, #40]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r2, #44]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r2, #48]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [r2, #52]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r2, #56]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r1, #56]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r1, #52]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r1, #48]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [r1, #44]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [r1, #40]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r2, {r10, r11}
+ ldm r9, {r5, r6, r7, r9}
+ ldr r0, [r2, #28]
+ ldr r8, [r2, #24]
+ ldr r2, [r1]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldmib r1, {r0, lr}
+ ldr r4, [r1, #12]
+ subs r2, r2, r10
+ add r10, r3, #12
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #36]
+ sbcs r11, r0, r11
+ ldr r0, [r1, #32]
+ sbcs lr, lr, r5
+ ldr r5, [r1, #28]
+ str r11, [sp] @ 4-byte Spill
+ sbcs r6, r4, r6
+ str r6, [sp, #48] @ 4-byte Spill
+ ldr r6, [r1, #16]
+ sbcs r7, r6, r7
+ str r7, [sp, #56] @ 4-byte Spill
+ ldr r7, [r1, #24]
+ ldr r1, [r1, #20]
+ sbcs r1, r1, r9
+ str r1, [sp, #52] @ 4-byte Spill
+ sbcs r1, r7, r8
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ sbcs r1, r5, r1
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ sbcs r0, r2, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ sbcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ sbcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ sbcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ sbcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ sbc r0, r1, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r3, #32]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [r3, #36]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [r3, #40]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [r3, #44]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [r3, #48]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [r3, #52]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r3, #56]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r3, {r2, r5, r7}
+ ldm r10, {r6, r9, r10}
+ ldr r8, [sp, #8] @ 4-byte Reload
+ ldr r4, [sp, #48] @ 4-byte Reload
+ ldr r0, [r3, #28]
+ ldr r1, [r3, #24]
+ adds r2, r8, r2
+ adcs r3, r11, r5
+ mov r11, lr
+ ldr r5, [sp, #56] @ 4-byte Reload
+ adcs lr, r11, r7
+ ldr r7, [sp, #60] @ 4-byte Reload
+ adcs r4, r4, r6
+ ldr r6, [sp, #52] @ 4-byte Reload
+ adcs r5, r5, r9
+ adcs r6, r6, r10
+ adcs r7, r7, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r9, r1, r0
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r0, [sp, #4] @ 4-byte Reload
+ adcs r10, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #72] @ 4-byte Reload
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r1, r0, r1
+ str r1, [sp, #32] @ 4-byte Spill
+ asr r1, r0, #31
+ ldr r0, [sp] @ 4-byte Reload
+ cmp r1, #0
+ movge r2, r8
+ movge lr, r11
+ str r2, [r12]
+ ldr r2, [sp, #12] @ 4-byte Reload
+ movge r3, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ cmp r1, #0
+ str r3, [r12, #4]
+ str lr, [r12, #8]
+ movge r4, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ str r4, [r12, #12]
+ movge r5, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ str r5, [r12, #16]
+ movge r6, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ cmp r1, #0
+ str r6, [r12, #20]
+ movge r7, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ str r7, [r12, #24]
+ movge r9, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ str r9, [r12, #28]
+ movge r10, r0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ cmp r1, #0
+ str r10, [r12, #32]
+ movge r2, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ str r2, [r12, #36]
+ ldr r2, [sp, #16] @ 4-byte Reload
+ movge r2, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ str r2, [r12, #40]
+ ldr r2, [sp, #20] @ 4-byte Reload
+ movge r2, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ cmp r1, #0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r2, [r12, #44]
+ movge r1, r0
+ ldr r0, [sp, #40] @ 4-byte Reload
+ str r1, [r12, #48]
+ ldr r1, [sp, #28] @ 4-byte Reload
+ movge r1, r0
+ ldr r0, [sp, #32] @ 4-byte Reload
+ str r1, [r12, #52]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ movge r0, r1
+ str r0, [r12, #56]
+ add sp, sp, #84
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end235:
+ .size mcl_fp_subNF15L, .Lfunc_end235-mcl_fp_subNF15L
+ .cantunwind
+ .fnend
+
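+@ mcl_fpDbl_add15L: 30-limb (double-width) addition; the low 15 limbs are stored directly
+@ and the high 15 limbs are conditionally reduced by the modulus at r3.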
+ .globl mcl_fpDbl_add15L
+ .align 2
+ .type mcl_fpDbl_add15L,%function
+mcl_fpDbl_add15L: @ @mcl_fpDbl_add15L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #184
+ sub sp, sp, #184
+ ldm r1, {r7, r8, lr}
+ ldr r12, [r1, #12]
+ ldm r2, {r4, r5, r6, r10}
+ adds r4, r4, r7
+ str r4, [sp, #100] @ 4-byte Spill
+ ldr r4, [r2, #96]
+ str r4, [sp, #148] @ 4-byte Spill
+ ldr r4, [r2, #100]
+ str r4, [sp, #164] @ 4-byte Spill
+ ldr r4, [r2, #104]
+ str r4, [sp, #168] @ 4-byte Spill
+ ldr r4, [r2, #108]
+ str r4, [sp, #172] @ 4-byte Spill
+ ldr r4, [r2, #112]
+ str r4, [sp, #176] @ 4-byte Spill
+ ldr r4, [r2, #116]
+ str r4, [sp, #180] @ 4-byte Spill
+ adcs r4, r5, r8
+ adcs r7, r6, lr
+ str r4, [sp, #68] @ 4-byte Spill
+ add lr, r1, #16
+ str r7, [sp, #64] @ 4-byte Spill
+ adcs r7, r10, r12
+ add r10, r1, #32
+ str r7, [sp, #76] @ 4-byte Spill
+ ldr r7, [r2, #64]
+ str r7, [sp, #132] @ 4-byte Spill
+ ldr r7, [r2, #68]
+ str r7, [sp, #136] @ 4-byte Spill
+ ldr r7, [r2, #72]
+ str r7, [sp, #140] @ 4-byte Spill
+ ldr r7, [r2, #76]
+ str r7, [sp, #144] @ 4-byte Spill
+ ldr r7, [r2, #80]
+ str r7, [sp, #152] @ 4-byte Spill
+ ldr r7, [r2, #88]
+ str r7, [sp, #156] @ 4-byte Spill
+ ldr r7, [r2, #92]
+ str r7, [sp, #160] @ 4-byte Spill
+ ldr r7, [r2, #84]
+ str r7, [sp, #128] @ 4-byte Spill
+ ldr r7, [r2, #32]
+ str r7, [sp, #56] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r7, [sp, #60] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ str r7, [sp, #72] @ 4-byte Spill
+ ldr r7, [r2, #44]
+ str r7, [sp, #80] @ 4-byte Spill
+ ldr r7, [r2, #48]
+ str r7, [sp, #84] @ 4-byte Spill
+ ldr r7, [r2, #52]
+ str r7, [sp, #88] @ 4-byte Spill
+ ldr r7, [r2, #56]
+ str r7, [sp, #92] @ 4-byte Spill
+ ldr r7, [r2, #60]
+ str r7, [sp, #96] @ 4-byte Spill
+ ldr r7, [r2, #28]
+ str r7, [sp, #20] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ ldr r2, [r2, #16]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #96]
+ str r7, [sp, #12] @ 4-byte Spill
+ str r2, [sp, #104] @ 4-byte Spill
+ ldr r2, [r1, #100]
+ str r2, [sp, #108] @ 4-byte Spill
+ ldr r2, [r1, #104]
+ str r2, [sp, #112] @ 4-byte Spill
+ ldr r2, [r1, #108]
+ str r2, [sp, #116] @ 4-byte Spill
+ ldr r2, [r1, #112]
+ str r2, [sp, #120] @ 4-byte Spill
+ ldr r2, [r1, #116]
+ str r2, [sp, #124] @ 4-byte Spill
+ ldr r2, [r1, #64]
+ str r2, [sp, #28] @ 4-byte Spill
+ ldr r2, [r1, #68]
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r2, [r1, #72]
+ str r2, [sp, #36] @ 4-byte Spill
+ ldr r2, [r1, #76]
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [r1, #80]
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [r1, #88]
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r2, [r1, #92]
+ str r2, [sp, #52] @ 4-byte Spill
+ ldr r2, [r1, #84]
+ str r2, [sp, #24] @ 4-byte Spill
+ ldm r10, {r4, r5, r6, r8, r9, r10}
+ ldr r2, [r1, #56]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldm lr, {r1, r2, r12, lr}
+ ldr r11, [sp, #100] @ 4-byte Reload
+ ldr r7, [sp, #68] @ 4-byte Reload
+ str r11, [r0]
+ str r7, [r0, #4]
+ ldr r7, [sp, #64] @ 4-byte Reload
+ add r11, r3, #32
+ str r7, [r0, #8]
+ ldr r7, [sp, #8] @ 4-byte Reload
+ adcs r1, r7, r1
+ ldr r7, [sp, #76] @ 4-byte Reload
+ str r7, [r0, #12]
+ ldr r7, [sp, #12] @ 4-byte Reload
+ str r1, [r0, #16]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r2, r7, r2
+ ldr r7, [sp] @ 4-byte Reload
+ str r2, [r0, #20]
+ adcs r1, r1, r12
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [r0, #24]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r2, r2, lr
+ str r2, [r0, #28]
+ adcs r1, r1, r4
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r1, [r0, #32]
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r2, r2, r5
+ str r2, [r0, #36]
+ adcs r1, r1, r6
+ ldr r2, [sp, #80] @ 4-byte Reload
+ str r1, [r0, #40]
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r2, r2, r8
+ str r2, [r0, #44]
+ adcs r1, r1, r9
+ ldr r2, [sp, #88] @ 4-byte Reload
+ str r1, [r0, #48]
+ ldr r1, [sp, #92] @ 4-byte Reload
+ adcs r2, r2, r10
+ adcs r1, r1, r7
+ str r2, [r0, #52]
+ ldr r2, [sp, #96] @ 4-byte Reload
+ ldr r7, [sp, #4] @ 4-byte Reload
+ str r1, [r0, #56]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ adcs r12, r2, r7
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r12, [sp, #84] @ 4-byte Spill
+ adcs r9, r1, r2
+ ldr r1, [sp, #136] @ 4-byte Reload
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r9, [sp, #88] @ 4-byte Spill
+ adcs r6, r1, r2
+ ldr r1, [sp, #140] @ 4-byte Reload
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r6, [sp, #96] @ 4-byte Spill
+ adcs r7, r1, r2
+ ldr r1, [sp, #144] @ 4-byte Reload
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r7, [sp, #132] @ 4-byte Spill
+ adcs r4, r1, r2
+ ldr r1, [sp, #152] @ 4-byte Reload
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r4, [sp, #92] @ 4-byte Spill
+ adcs r5, r1, r2
+ ldr r1, [sp, #128] @ 4-byte Reload
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r5, [sp, #100] @ 4-byte Spill
+ adcs r1, r1, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r1, [sp, #152] @ 4-byte Spill
+ ldr r1, [sp, #156] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r1, [sp, #156] @ 4-byte Spill
+ ldr r1, [sp, #160] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #104] @ 4-byte Reload
+ str r1, [sp, #160] @ 4-byte Spill
+ ldr r1, [sp, #148] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #108] @ 4-byte Reload
+ str r1, [sp, #148] @ 4-byte Spill
+ ldr r1, [sp, #164] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #112] @ 4-byte Reload
+ str r1, [sp, #164] @ 4-byte Spill
+ ldr r1, [sp, #168] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #116] @ 4-byte Reload
+ str r1, [sp, #168] @ 4-byte Spill
+ ldr r1, [sp, #172] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #120] @ 4-byte Reload
+ str r1, [sp, #172] @ 4-byte Spill
+ ldr r1, [sp, #176] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #124] @ 4-byte Reload
+ str r1, [sp, #176] @ 4-byte Spill
+ ldr r1, [sp, #180] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #180] @ 4-byte Spill
+ mov r1, #0
+ adc r1, r1, #0
+ str r1, [sp, #128] @ 4-byte Spill
+ ldmib r3, {r2, lr}
+ ldr r1, [r3, #16]
+ ldr r8, [r3, #12]
+ str r1, [sp, #124] @ 4-byte Spill
+ ldr r1, [r3, #20]
+ str r1, [sp, #136] @ 4-byte Spill
+ ldr r1, [r3, #24]
+ str r1, [sp, #140] @ 4-byte Spill
+ ldr r1, [r3, #28]
+ str r1, [sp, #144] @ 4-byte Spill
+ ldr r1, [r3]
+ subs r1, r12, r1
+ sbcs r12, r9, r2
+ ldm r11, {r9, r10, r11}
+ ldr r2, [r3, #44]
+ sbcs lr, r6, lr
+ sbcs r6, r7, r8
+ ldr r7, [sp, #144] @ 4-byte Reload
+ str r2, [sp, #104] @ 4-byte Spill
+ ldr r2, [r3, #48]
+ str r2, [sp, #108] @ 4-byte Spill
+ ldr r2, [r3, #52]
+ str r2, [sp, #112] @ 4-byte Spill
+ ldr r2, [r3, #56]
+ str r2, [sp, #120] @ 4-byte Spill
+ ldr r2, [sp, #124] @ 4-byte Reload
+ sbcs r3, r4, r2
+ ldr r2, [sp, #136] @ 4-byte Reload
+ sbcs r4, r5, r2
+ ldr r2, [sp, #152] @ 4-byte Reload
+ ldr r5, [sp, #140] @ 4-byte Reload
+ sbcs r5, r2, r5
+ ldr r2, [sp, #156] @ 4-byte Reload
+ sbcs r8, r2, r7
+ ldr r2, [sp, #160] @ 4-byte Reload
+ ldr r7, [sp, #104] @ 4-byte Reload
+ sbcs r9, r2, r9
+ ldr r2, [sp, #148] @ 4-byte Reload
+ sbcs r10, r2, r10
+ ldr r2, [sp, #164] @ 4-byte Reload
+ sbcs r2, r2, r11
+ ldr r11, [sp, #84] @ 4-byte Reload
+ str r2, [sp, #116] @ 4-byte Spill
+ ldr r2, [sp, #168] @ 4-byte Reload
+ sbcs r2, r2, r7
+ ldr r7, [sp, #108] @ 4-byte Reload
+ str r2, [sp, #124] @ 4-byte Spill
+ ldr r2, [sp, #172] @ 4-byte Reload
+ sbcs r2, r2, r7
+ ldr r7, [sp, #112] @ 4-byte Reload
+ str r2, [sp, #136] @ 4-byte Spill
+ ldr r2, [sp, #176] @ 4-byte Reload
+ sbcs r2, r2, r7
+ ldr r7, [sp, #120] @ 4-byte Reload
+ str r2, [sp, #140] @ 4-byte Spill
+ ldr r2, [sp, #180] @ 4-byte Reload
+ sbcs r2, r2, r7
+ str r2, [sp, #144] @ 4-byte Spill
+ ldr r2, [sp, #128] @ 4-byte Reload
+ sbc r2, r2, #0
+ ands r2, r2, #1
+ movne r1, r11
+ str r1, [r0, #60]
+ ldr r1, [sp, #88] @ 4-byte Reload
+ movne r12, r1
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r12, [r0, #64]
+ movne lr, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ cmp r2, #0
+ str lr, [r0, #68]
+ movne r6, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r6, [r0, #72]
+ movne r3, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r3, [r0, #76]
+ ldr r3, [sp, #116] @ 4-byte Reload
+ movne r4, r1
+ ldr r1, [sp, #152] @ 4-byte Reload
+ cmp r2, #0
+ str r4, [r0, #80]
+ movne r5, r1
+ ldr r1, [sp, #156] @ 4-byte Reload
+ str r5, [r0, #84]
+ movne r8, r1
+ ldr r1, [sp, #160] @ 4-byte Reload
+ str r8, [r0, #88]
+ movne r9, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ cmp r2, #0
+ str r9, [r0, #92]
+ movne r10, r1
+ ldr r1, [sp, #164] @ 4-byte Reload
+ str r10, [r0, #96]
+ movne r3, r1
+ ldr r1, [sp, #168] @ 4-byte Reload
+ str r3, [r0, #100]
+ ldr r3, [sp, #124] @ 4-byte Reload
+ movne r3, r1
+ ldr r1, [sp, #172] @ 4-byte Reload
+ cmp r2, #0
+ ldr r2, [sp, #136] @ 4-byte Reload
+ str r3, [r0, #104]
+ movne r2, r1
+ ldr r1, [sp, #176] @ 4-byte Reload
+ str r2, [r0, #108]
+ ldr r2, [sp, #140] @ 4-byte Reload
+ movne r2, r1
+ ldr r1, [sp, #180] @ 4-byte Reload
+ str r2, [r0, #112]
+ ldr r2, [sp, #144] @ 4-byte Reload
+ movne r2, r1
+ str r2, [r0, #116]
+ add sp, sp, #184
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end236:
+ .size mcl_fpDbl_add15L, .Lfunc_end236-mcl_fpDbl_add15L
+ .cantunwind
+ .fnend
+
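+@ mcl_fpDbl_sub15L: 30-limb (double-width) subtraction; the modulus at r3 is added back
+@ to the high 15 limbs when the subtraction borrows.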
+ .globl mcl_fpDbl_sub15L
+ .align 2
+ .type mcl_fpDbl_sub15L,%function
+mcl_fpDbl_sub15L: @ @mcl_fpDbl_sub15L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #184
+ sub sp, sp, #184
+ ldr r7, [r2, #96]
+ ldr r9, [r2]
+ add r10, r1, #32
+ str r7, [sp, #136] @ 4-byte Spill
+ ldr r7, [r2, #100]
+ str r7, [sp, #144] @ 4-byte Spill
+ ldr r7, [r2, #104]
+ str r7, [sp, #168] @ 4-byte Spill
+ ldr r7, [r2, #108]
+ str r7, [sp, #172] @ 4-byte Spill
+ ldr r7, [r2, #112]
+ str r7, [sp, #176] @ 4-byte Spill
+ ldr r7, [r2, #116]
+ str r7, [sp, #180] @ 4-byte Spill
+ ldr r7, [r2, #64]
+ str r7, [sp, #132] @ 4-byte Spill
+ ldr r7, [r2, #68]
+ str r7, [sp, #148] @ 4-byte Spill
+ ldr r7, [r2, #72]
+ str r7, [sp, #152] @ 4-byte Spill
+ ldr r7, [r2, #76]
+ str r7, [sp, #156] @ 4-byte Spill
+ ldr r7, [r2, #80]
+ str r7, [sp, #160] @ 4-byte Spill
+ ldr r7, [r2, #88]
+ str r7, [sp, #164] @ 4-byte Spill
+ ldr r7, [r2, #92]
+ str r7, [sp, #140] @ 4-byte Spill
+ ldr r7, [r2, #84]
+ str r7, [sp, #128] @ 4-byte Spill
+ ldr r7, [r2, #60]
+ str r7, [sp, #124] @ 4-byte Spill
+ ldr r7, [r2, #56]
+ str r7, [sp, #120] @ 4-byte Spill
+ ldr r7, [r2, #52]
+ str r7, [sp, #116] @ 4-byte Spill
+ ldmib r2, {r8, lr}
+ ldr r5, [r1]
+ ldr r12, [r2, #12]
+ ldmib r1, {r4, r6, r7}
+ subs r5, r5, r9
+ sbcs r4, r4, r8
+ str r5, [sp, #36] @ 4-byte Spill
+ ldr r5, [r2, #48]
+ sbcs r6, r6, lr
+ str r4, [sp, #28] @ 4-byte Spill
+ ldr r4, [r2, #44]
+ add lr, r1, #16
+ sbcs r7, r7, r12
+ str r6, [sp, #24] @ 4-byte Spill
+ ldr r6, [r2, #40]
+ str r7, [sp, #20] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r5, [sp, #88] @ 4-byte Spill
+ str r4, [sp, #84] @ 4-byte Spill
+ str r6, [sp, #80] @ 4-byte Spill
+ str r7, [sp, #44] @ 4-byte Spill
+ ldr r7, [r2, #32]
+ str r7, [sp, #40] @ 4-byte Spill
+ ldr r7, [r2, #28]
+ str r7, [sp, #32] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ ldr r2, [r2, #16]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #96]
+ str r7, [sp, #12] @ 4-byte Spill
+ str r2, [sp, #92] @ 4-byte Spill
+ ldr r2, [r1, #100]
+ str r2, [sp, #96] @ 4-byte Spill
+ ldr r2, [r1, #104]
+ str r2, [sp, #100] @ 4-byte Spill
+ ldr r2, [r1, #108]
+ str r2, [sp, #104] @ 4-byte Spill
+ ldr r2, [r1, #112]
+ str r2, [sp, #108] @ 4-byte Spill
+ ldr r2, [r1, #116]
+ str r2, [sp, #112] @ 4-byte Spill
+ ldr r2, [r1, #64]
+ str r2, [sp, #52] @ 4-byte Spill
+ ldr r2, [r1, #68]
+ str r2, [sp, #56] @ 4-byte Spill
+ ldr r2, [r1, #72]
+ str r2, [sp, #60] @ 4-byte Spill
+ ldr r2, [r1, #76]
+ str r2, [sp, #64] @ 4-byte Spill
+ ldr r2, [r1, #80]
+ str r2, [sp, #68] @ 4-byte Spill
+ ldr r2, [r1, #88]
+ str r2, [sp, #72] @ 4-byte Spill
+ ldr r2, [r1, #92]
+ str r2, [sp, #76] @ 4-byte Spill
+ ldr r2, [r1, #84]
+ str r2, [sp, #48] @ 4-byte Spill
+ ldm r10, {r4, r5, r6, r8, r9, r10}
+ ldr r2, [r1, #56]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldm lr, {r1, r2, r12, lr}
+ ldr r11, [sp, #36] @ 4-byte Reload
+ ldr r7, [sp, #28] @ 4-byte Reload
+ str r11, [r0]
+ str r7, [r0, #4]
+ ldr r7, [sp, #24] @ 4-byte Reload
+ ldr r11, [r3, #32]
+ str r7, [r0, #8]
+ ldr r7, [sp, #8] @ 4-byte Reload
+ sbcs r1, r1, r7
+ ldr r7, [sp, #20] @ 4-byte Reload
+ str r7, [r0, #12]
+ ldr r7, [sp, #12] @ 4-byte Reload
+ str r1, [r0, #16]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ sbcs r2, r2, r7
+ ldr r7, [sp] @ 4-byte Reload
+ str r2, [r0, #20]
+ sbcs r1, r12, r1
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [r0, #24]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ sbcs r2, lr, r2
+ str r2, [r0, #28]
+ sbcs r1, r4, r1
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r1, [r0, #32]
+ ldr r1, [sp, #80] @ 4-byte Reload
+ sbcs r2, r5, r2
+ str r2, [r0, #36]
+ sbcs r1, r6, r1
+ ldr r2, [sp, #84] @ 4-byte Reload
+ str r1, [r0, #40]
+ ldr r1, [sp, #88] @ 4-byte Reload
+ sbcs r2, r8, r2
+ str r2, [r0, #44]
+ sbcs r1, r9, r1
+ ldr r2, [sp, #116] @ 4-byte Reload
+ str r1, [r0, #48]
+ ldr r1, [sp, #120] @ 4-byte Reload
+ sbcs r2, r10, r2
+ sbcs r1, r7, r1
+ str r2, [r0, #52]
+ ldr r2, [sp, #124] @ 4-byte Reload
+ ldr r7, [sp, #4] @ 4-byte Reload
+ str r1, [r0, #56]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ sbcs lr, r7, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ ldr r7, [sp, #68] @ 4-byte Reload
+ sbcs r9, r2, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r9, [sp, #88] @ 4-byte Spill
+ sbcs r1, r2, r1
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r1, [sp, #132] @ 4-byte Spill
+ ldr r1, [sp, #152] @ 4-byte Reload
+ sbcs r1, r2, r1
+ ldr r2, [sp, #64] @ 4-byte Reload
+ str r1, [sp, #148] @ 4-byte Spill
+ ldr r1, [sp, #156] @ 4-byte Reload
+ sbcs r1, r2, r1
+ ldr r2, [sp, #160] @ 4-byte Reload
+ str r1, [sp, #152] @ 4-byte Spill
+ mov r1, #0
+ sbcs r2, r7, r2
+ ldr r7, [sp, #48] @ 4-byte Reload
+ str r2, [sp, #156] @ 4-byte Spill
+ ldr r2, [sp, #128] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #72] @ 4-byte Reload
+ str r2, [sp, #160] @ 4-byte Spill
+ ldr r2, [sp, #164] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #76] @ 4-byte Reload
+ str r2, [sp, #164] @ 4-byte Spill
+ ldr r2, [sp, #140] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #92] @ 4-byte Reload
+ str r2, [sp, #140] @ 4-byte Spill
+ ldr r2, [sp, #136] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #96] @ 4-byte Reload
+ str r2, [sp, #136] @ 4-byte Spill
+ ldr r2, [sp, #144] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #100] @ 4-byte Reload
+ str r2, [sp, #144] @ 4-byte Spill
+ ldr r2, [sp, #168] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #104] @ 4-byte Reload
+ str r2, [sp, #168] @ 4-byte Spill
+ ldr r2, [sp, #172] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #108] @ 4-byte Reload
+ str r2, [sp, #172] @ 4-byte Spill
+ ldr r2, [sp, #176] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #112] @ 4-byte Reload
+ str r2, [sp, #176] @ 4-byte Spill
+ ldr r2, [sp, #180] @ 4-byte Reload
+ sbcs r2, r7, r2
+ sbc r1, r1, #0
+ str r2, [sp, #180] @ 4-byte Spill
+ str r1, [sp, #108] @ 4-byte Spill
+ ldr r1, [r3, #36]
+ str r1, [sp, #104] @ 4-byte Spill
+ ldr r1, [r3, #40]
+ str r1, [sp, #112] @ 4-byte Spill
+ ldr r1, [r3, #44]
+ str r1, [sp, #116] @ 4-byte Spill
+ ldr r1, [r3, #48]
+ str r1, [sp, #120] @ 4-byte Spill
+ ldr r1, [r3, #52]
+ str r1, [sp, #124] @ 4-byte Spill
+ ldr r1, [r3, #56]
+ str r1, [sp, #128] @ 4-byte Spill
+ ldm r3, {r2, r5, r6}
+ ldr r4, [r3, #12]
+ ldr r12, [r3, #16]
+ ldr r8, [r3, #20]
+ ldr r10, [r3, #28]
+ ldr r7, [r3, #24]
+ ldr r3, [sp, #152] @ 4-byte Reload
+ adds r1, lr, r2
+ ldr r2, [sp, #132] @ 4-byte Reload
+ adcs r5, r9, r5
+ adcs r6, r2, r6
+ ldr r2, [sp, #148] @ 4-byte Reload
+ adcs r2, r2, r4
+ ldr r4, [sp, #156] @ 4-byte Reload
+ adcs r3, r3, r12
+ adcs r12, r4, r8
+ ldr r4, [sp, #160] @ 4-byte Reload
+ adcs r8, r4, r7
+ ldr r4, [sp, #164] @ 4-byte Reload
+ ldr r7, [sp, #140] @ 4-byte Reload
+ adcs r9, r4, r10
+ ldr r4, [sp, #104] @ 4-byte Reload
+ ldr r10, [sp, #128] @ 4-byte Reload
+ adcs r11, r7, r11
+ ldr r7, [sp, #136] @ 4-byte Reload
+ adcs r7, r7, r4
+ ldr r4, [sp, #112] @ 4-byte Reload
+ str r7, [sp, #104] @ 4-byte Spill
+ ldr r7, [sp, #144] @ 4-byte Reload
+ adcs r7, r7, r4
+ ldr r4, [sp, #116] @ 4-byte Reload
+ str r7, [sp, #112] @ 4-byte Spill
+ ldr r7, [sp, #168] @ 4-byte Reload
+ adcs r7, r7, r4
+ ldr r4, [sp, #120] @ 4-byte Reload
+ str r7, [sp, #116] @ 4-byte Spill
+ ldr r7, [sp, #172] @ 4-byte Reload
+ adcs r7, r7, r4
+ ldr r4, [sp, #124] @ 4-byte Reload
+ str r7, [sp, #120] @ 4-byte Spill
+ ldr r7, [sp, #176] @ 4-byte Reload
+ adcs r7, r7, r4
+ str r7, [sp, #124] @ 4-byte Spill
+ ldr r7, [sp, #180] @ 4-byte Reload
+ adc r7, r7, r10
+ str r7, [sp, #128] @ 4-byte Spill
+ ldr r7, [sp, #108] @ 4-byte Reload
+ ands r7, r7, #1
+ moveq r1, lr
+ str r1, [r0, #60]
+ ldr r1, [sp, #88] @ 4-byte Reload
+ moveq r5, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r5, [r0, #64]
+ moveq r6, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ cmp r7, #0
+ str r6, [r0, #68]
+ moveq r2, r1
+ ldr r1, [sp, #152] @ 4-byte Reload
+ str r2, [r0, #72]
+ ldr r2, [sp, #104] @ 4-byte Reload
+ moveq r3, r1
+ ldr r1, [sp, #156] @ 4-byte Reload
+ str r3, [r0, #76]
+ moveq r12, r1
+ ldr r1, [sp, #160] @ 4-byte Reload
+ cmp r7, #0
+ str r12, [r0, #80]
+ moveq r8, r1
+ ldr r1, [sp, #164] @ 4-byte Reload
+ str r8, [r0, #84]
+ moveq r9, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r9, [r0, #88]
+ moveq r11, r1
+ ldr r1, [sp, #136] @ 4-byte Reload
+ cmp r7, #0
+ str r11, [r0, #92]
+ moveq r2, r1
+ ldr r1, [sp, #144] @ 4-byte Reload
+ str r2, [r0, #96]
+ ldr r2, [sp, #112] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #168] @ 4-byte Reload
+ str r2, [r0, #100]
+ ldr r2, [sp, #116] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #172] @ 4-byte Reload
+ cmp r7, #0
+ str r2, [r0, #104]
+ ldr r2, [sp, #120] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #176] @ 4-byte Reload
+ str r2, [r0, #108]
+ ldr r2, [sp, #124] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #180] @ 4-byte Reload
+ str r2, [r0, #112]
+ ldr r2, [sp, #128] @ 4-byte Reload
+ moveq r2, r1
+ str r2, [r0, #116]
+ add sp, sp, #184
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end237:
+ .size mcl_fpDbl_sub15L, .Lfunc_end237-mcl_fpDbl_sub15L
+ .cantunwind
+ .fnend
+
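+@ .LmulPv512x32: multiplies the 16-limb (512-bit) value at r1 by the 32-bit word in r2
+@ and stores the 17-limb product at r0.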
+ .align 2
+ .type .LmulPv512x32,%function
+.LmulPv512x32: @ @mulPv512x32
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r11, lr}
+ ldr r12, [r1]
+ ldmib r1, {r3, lr}
+ ldr r9, [r1, #12]
+ umull r4, r8, lr, r2
+ umull lr, r6, r12, r2
+ mov r5, r4
+ mov r7, r6
+ str lr, [r0]
+ umull lr, r12, r9, r2
+ umlal r7, r5, r3, r2
+ str r5, [r0, #8]
+ str r7, [r0, #4]
+ umull r5, r7, r3, r2
+ adds r3, r6, r5
+ adcs r3, r7, r4
+ adcs r3, r8, lr
+ str r3, [r0, #12]
+ ldr r3, [r1, #16]
+ umull r7, r6, r3, r2
+ adcs r3, r12, r7
+ str r3, [r0, #16]
+ ldr r3, [r1, #20]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #20]
+ ldr r3, [r1, #24]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #24]
+ ldr r3, [r1, #28]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #28]
+ ldr r3, [r1, #32]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #32]
+ ldr r3, [r1, #36]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #36]
+ ldr r3, [r1, #40]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #40]
+ ldr r3, [r1, #44]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #44]
+ ldr r3, [r1, #48]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #48]
+ ldr r3, [r1, #52]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #52]
+ ldr r3, [r1, #56]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #56]
+ ldr r1, [r1, #60]
+ umull r3, r7, r1, r2
+ adcs r1, r6, r3
+ str r1, [r0, #60]
+ adc r1, r7, #0
+ str r1, [r0, #64]
+ pop {r4, r5, r6, r7, r8, r9, r11, lr}
+ mov pc, lr
+.Lfunc_end238:
+ .size .LmulPv512x32, .Lfunc_end238-.LmulPv512x32
+ .cantunwind
+ .fnend
+
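+@ mcl_fp_mulUnitPre16L: 16-limb by 32-bit word multiplication; calls .LmulPv512x32 and
+@ copies the 17-limb result to the destination at r0.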
+ .globl mcl_fp_mulUnitPre16L
+ .align 2
+ .type mcl_fp_mulUnitPre16L,%function
+mcl_fp_mulUnitPre16L: @ @mcl_fp_mulUnitPre16L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #92
+ sub sp, sp, #92
+ mov r4, r0
+ add r0, sp, #16
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #76]
+ add r11, sp, #40
+ add lr, sp, #16
+ ldr r10, [sp, #80]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #72]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #68]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #64]
+ str r0, [sp] @ 4-byte Spill
+ ldm r11, {r5, r6, r7, r8, r9, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ stm r4, {r0, r1, r2, r3, r12, lr}
+ add r0, r4, #24
+ str r10, [r4, #64]
+ stm r0, {r5, r6, r7, r8, r9, r11}
+ ldr r0, [sp] @ 4-byte Reload
+ str r0, [r4, #48]
+ ldr r0, [sp, #4] @ 4-byte Reload
+ str r0, [r4, #52]
+ ldr r0, [sp, #8] @ 4-byte Reload
+ str r0, [r4, #56]
+ ldr r0, [sp, #12] @ 4-byte Reload
+ str r0, [r4, #60]
+ add sp, sp, #92
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end239:
+ .size mcl_fp_mulUnitPre16L, .Lfunc_end239-mcl_fp_mulUnitPre16L
+ .cantunwind
+ .fnend
+
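+@ mcl_fpDbl_mulPre16L: full 16-limb by 16-limb product; built from three 8-limb
+@ multiplications (mcl_fpDbl_mulPre8L) combined Karatsuba-style.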
+ .globl mcl_fpDbl_mulPre16L
+ .align 2
+ .type mcl_fpDbl_mulPre16L,%function
+mcl_fpDbl_mulPre16L: @ @mcl_fpDbl_mulPre16L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #276
+ sub sp, sp, #276
+ mov r6, r2
+ mov r5, r1
+ mov r4, r0
+ bl mcl_fpDbl_mulPre8L(PLT)
+ add r0, r4, #64
+ add r1, r5, #32
+ add r2, r6, #32
+ bl mcl_fpDbl_mulPre8L(PLT)
+ add r11, r6, #32
+ ldm r11, {r9, r10, r11}
+ ldr r0, [r6, #44]
+ ldr r8, [r6, #60]
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [r6, #48]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [r6, #52]
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [r6, #56]
+ str r0, [sp, #144] @ 4-byte Spill
+ ldm r6, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [r6, #24]
+ ldr r6, [r6, #28]
+ adds r0, r0, r9
+ str r0, [sp, #136] @ 4-byte Spill
+ adcs r0, r1, r10
+ str r0, [sp, #132] @ 4-byte Spill
+ adcs r0, r2, r11
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #140] @ 4-byte Reload
+ adcs r0, lr, r0
+ add lr, r5, #44
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #144] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ adcs r0, r6, r8
+ str r0, [sp, #108] @ 4-byte Spill
+ mov r0, #0
+ ldm r5, {r8, r10, r11}
+ ldr r7, [r5, #32]
+ ldr r3, [r5, #36]
+ ldr r2, [r5, #40]
+ adc r6, r0, #0
+ ldr r0, [r5, #12]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [r5, #16]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [r5, #20]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [r5, #24]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [r5, #28]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldm lr, {r0, r1, r12, lr}
+ ldr r9, [r5, #60]
+ adds r5, r8, r7
+ adcs r3, r10, r3
+ str r5, [sp, #180]
+ str r5, [sp, #144] @ 4-byte Spill
+ adcs r8, r11, r2
+ ldr r2, [sp, #88] @ 4-byte Reload
+ str r3, [sp, #184]
+ str r3, [sp, #140] @ 4-byte Spill
+ str r8, [sp, #188]
+ adcs r11, r2, r0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ add r2, sp, #148
+ str r11, [sp, #192]
+ adcs r5, r0, r1
+ ldr r0, [sp, #96] @ 4-byte Reload
+ add r1, sp, #180
+ str r5, [sp, #196]
+ adcs r7, r0, r12
+ ldr r0, [sp, #100] @ 4-byte Reload
+ str r7, [sp, #200]
+ adcs r10, r0, lr
+ ldr r0, [sp, #104] @ 4-byte Reload
+ str r10, [sp, #204]
+ adcs r0, r0, r9
+ str r0, [sp, #208]
+ mov r9, r0
+ ldr r0, [sp, #136] @ 4-byte Reload
+ str r0, [sp, #148]
+ ldr r0, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #152]
+ ldr r0, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #156]
+ ldr r0, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #160]
+ ldr r0, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #164]
+ ldr r0, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #168]
+ ldr r0, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #172]
+ ldr r0, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #176]
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ add r0, sp, #212
+ bl mcl_fpDbl_mulPre8L(PLT)
+ ldr r0, [sp, #136] @ 4-byte Reload
+ cmp r6, #0
+ ldr r1, [sp, #144] @ 4-byte Reload
+ ldr r2, [sp, #140] @ 4-byte Reload
+ ldr r3, [sp, #124] @ 4-byte Reload
+ moveq r9, r6
+ moveq r10, r6
+ moveq r7, r6
+ moveq r5, r6
+ moveq r11, r6
+ cmp r6, #0
+ moveq r1, r6
+ moveq r8, r6
+ moveq r2, r6
+ str r9, [sp, #104] @ 4-byte Spill
+ str r1, [sp, #144] @ 4-byte Spill
+ str r2, [sp, #140] @ 4-byte Spill
+ str r8, [sp, #96] @ 4-byte Spill
+ adds r12, r1, r0
+ ldr r1, [sp, #132] @ 4-byte Reload
+ adcs lr, r2, r1
+ ldr r2, [sp, #128] @ 4-byte Reload
+ adcs r2, r8, r2
+ ldr r8, [sp, #104] @ 4-byte Reload
+ adcs r9, r11, r3
+ ldr r3, [sp, #120] @ 4-byte Reload
+ adcs r1, r5, r3
+ ldr r3, [sp, #116] @ 4-byte Reload
+ adcs r0, r7, r3
+ ldr r3, [sp, #112] @ 4-byte Reload
+ adcs r3, r10, r3
+ str r3, [sp, #124] @ 4-byte Spill
+ ldr r3, [sp, #108] @ 4-byte Reload
+ adcs r3, r8, r3
+ ldr r8, [sp, #124] @ 4-byte Reload
+ str r3, [sp, #128] @ 4-byte Spill
+ mov r3, #0
+ adc r3, r3, #0
+ str r3, [sp, #136] @ 4-byte Spill
+ ldr r3, [sp, #100] @ 4-byte Reload
+ cmp r3, #0
+ moveq r0, r7
+ moveq r1, r5
+ moveq r9, r11
+ ldr r5, [sp, #136] @ 4-byte Reload
+ ldr r7, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ moveq r2, r0
+ ldr r0, [sp, #140] @ 4-byte Reload
+ moveq lr, r0
+ ldr r0, [sp, #144] @ 4-byte Reload
+ cmp r3, #0
+ moveq r5, r3
+ and r3, r6, r3
+ ldr r6, [sp, #244]
+ moveq r8, r10
+ moveq r12, r0
+ ldr r0, [sp, #104] @ 4-byte Reload
+ moveq r7, r0
+ adds r0, r12, r6
+ add r6, sp, #216
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #248]
+ adcs r0, lr, r0
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #252]
+ adcs r10, r2, r0
+ ldr r0, [sp, #256]
+ adcs r0, r9, r0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #260]
+ adcs r0, r1, r0
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #264]
+ adcs r0, r1, r0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #268]
+ adcs r0, r8, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #272]
+ adcs r0, r7, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ adc r0, r5, r3
+ str r0, [sp, #108] @ 4-byte Spill
+ ldm r4, {r1, r12, lr}
+ ldr r5, [sp, #212]
+ ldr r8, [r4, #12]
+ ldm r6, {r2, r3, r6}
+ ldr r0, [sp, #236]
+ ldr r7, [sp, #240]
+ ldr r9, [r4, #72]
+ subs r1, r5, r1
+ ldr r5, [sp, #228]
+ sbcs r2, r2, r12
+ sbcs r12, r3, lr
+ ldr r3, [sp, #140] @ 4-byte Reload
+ sbcs r11, r6, r8
+ ldr r6, [r4, #16]
+ ldr r8, [r4, #68]
+ sbcs lr, r5, r6
+ ldr r5, [r4, #20]
+ ldr r6, [sp, #232]
+ sbcs r5, r6, r5
+ ldr r6, [r4, #24]
+ sbcs r6, r0, r6
+ ldr r0, [r4, #28]
+ sbcs r0, r7, r0
+ ldr r7, [r4, #32]
+ sbcs r3, r3, r7
+ str r7, [sp, #144] @ 4-byte Spill
+ ldr r7, [r4, #36]
+ str r3, [sp, #84] @ 4-byte Spill
+ ldr r3, [sp, #136] @ 4-byte Reload
+ str r7, [sp, #140] @ 4-byte Spill
+ sbcs r3, r3, r7
+ ldr r7, [r4, #40]
+ str r3, [sp, #76] @ 4-byte Spill
+ sbcs r3, r10, r7
+ str r7, [sp, #136] @ 4-byte Spill
+ ldr r7, [r4, #44]
+ ldr r10, [r4, #76]
+ str r3, [sp, #72] @ 4-byte Spill
+ ldr r3, [sp, #128] @ 4-byte Reload
+ str r7, [sp, #132] @ 4-byte Spill
+ sbcs r3, r3, r7
+ ldr r7, [r4, #48]
+ str r3, [sp, #68] @ 4-byte Spill
+ ldr r3, [sp, #124] @ 4-byte Reload
+ str r7, [sp, #128] @ 4-byte Spill
+ sbcs r3, r3, r7
+ ldr r7, [r4, #52]
+ str r3, [sp, #64] @ 4-byte Spill
+ ldr r3, [sp, #120] @ 4-byte Reload
+ str r7, [sp, #124] @ 4-byte Spill
+ sbcs r3, r3, r7
+ ldr r7, [r4, #56]
+ str r3, [sp, #60] @ 4-byte Spill
+ ldr r3, [sp, #116] @ 4-byte Reload
+ str r7, [sp, #120] @ 4-byte Spill
+ sbcs r3, r3, r7
+ ldr r7, [r4, #60]
+ str r3, [sp, #56] @ 4-byte Spill
+ ldr r3, [sp, #112] @ 4-byte Reload
+ str r7, [sp, #116] @ 4-byte Spill
+ sbcs r3, r3, r7
+ str r3, [sp, #52] @ 4-byte Spill
+ ldr r3, [sp, #108] @ 4-byte Reload
+ sbc r3, r3, #0
+ str r3, [sp, #48] @ 4-byte Spill
+ ldr r3, [r4, #64]
+ subs r1, r1, r3
+ str r3, [sp, #80] @ 4-byte Spill
+ str r1, [sp, #44] @ 4-byte Spill
+ sbcs r1, r2, r8
+ str r1, [sp, #40] @ 4-byte Spill
+ sbcs r1, r12, r9
+ add r12, r4, #104
+ str r1, [sp, #36] @ 4-byte Spill
+ sbcs r1, r11, r10
+ ldr r11, [r4, #80]
+ str r1, [sp, #32] @ 4-byte Spill
+ sbcs r1, lr, r11
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [r4, #84]
+ str r1, [sp, #112] @ 4-byte Spill
+ sbcs r1, r5, r1
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [r4, #88]
+ str r1, [sp, #108] @ 4-byte Spill
+ sbcs r1, r6, r1
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [r4, #92]
+ sbcs r0, r0, r1
+ str r1, [sp, #104] @ 4-byte Spill
+ ldr r1, [r4, #100]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [r4, #96]
+ str r1, [sp, #96] @ 4-byte Spill
+ str r0, [sp, #100] @ 4-byte Spill
+ ldm r12, {r2, r3, r12}
+ ldr r7, [sp, #84] @ 4-byte Reload
+ ldr lr, [r4, #116]
+ ldr r5, [r4, #120]
+ ldr r6, [r4, #124]
+ sbcs r0, r7, r0
+ str r12, [sp, #92] @ 4-byte Spill
+ str r6, [sp, #88] @ 4-byte Spill
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ sbcs r0, r0, r2
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ sbcs r0, r0, r3
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ sbcs r0, r0, r12
+ mov r12, lr
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ sbcs r0, r0, lr
+ mov lr, r5
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ sbcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ sbcs r7, r0, r6
+ ldr r0, [sp, #48] @ 4-byte Reload
+ ldr r6, [sp, #40] @ 4-byte Reload
+ sbc r5, r0, #0
+ ldr r0, [sp, #144] @ 4-byte Reload
+ adds r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [r4, #32]
+ ldr r0, [sp, #136] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #36] @ 4-byte Reload
+ str r1, [r4, #36]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #32] @ 4-byte Reload
+ str r0, [r4, #40]
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #28] @ 4-byte Reload
+ str r1, [r4, #44]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #24] @ 4-byte Reload
+ str r0, [r4, #48]
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #20] @ 4-byte Reload
+ str r1, [r4, #52]
+ ldr r1, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #16] @ 4-byte Reload
+ str r0, [r4, #56]
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #4] @ 4-byte Reload
+ str r1, [r4, #60]
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #76] @ 4-byte Reload
+ str r0, [r4, #64]
+ adcs r1, r8, r1
+ ldr r0, [sp, #12] @ 4-byte Reload
+ str r1, [r4, #68]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [r4, #72]
+ adcs r1, r10, r1
+ ldr r0, [sp, #72] @ 4-byte Reload
+ str r1, [r4, #76]
+ ldr r1, [sp, #112] @ 4-byte Reload
+ adcs r0, r11, r0
+ adcs r1, r1, r6
+ str r0, [r4, #80]
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r6, [sp, #84] @ 4-byte Reload
+ str r1, [r4, #84]
+ ldr r1, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [r4, #88]
+ adcs r1, r1, r7
+ ldr r0, [sp, #100] @ 4-byte Reload
+ str r1, [r4, #92]
+ ldr r1, [sp, #96] @ 4-byte Reload
+ adcs r8, r0, r5
+ ldr r5, [sp, #92] @ 4-byte Reload
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r1, r1, #0
+ str r8, [r4, #96]
+ adcs r2, r2, #0
+ adcs r3, r3, #0
+ adcs r7, r5, #0
+ adcs r6, r12, #0
+ adcs r5, lr, #0
+ adc r12, r0, #0
+ add r0, r4, #100
+ stm r0, {r1, r2, r3, r7}
+ str r6, [r4, #116]
+ str r5, [r4, #120]
+ str r12, [r4, #124]
+ add sp, sp, #276
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end240:
+ .size mcl_fpDbl_mulPre16L, .Lfunc_end240-mcl_fpDbl_mulPre16L
+ .cantunwind
+ .fnend
+
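Note: the mcl_fpDbl_mulPre16L body above assembles the 1024-bit product from calls to mcl_fpDbl_mulPre8L on the operand halves and on their half sums, with the moveq/adds sequences patching in the carry bits of those half sums. Below is a minimal C sketch of that one-level Karatsuba split, assuming 32-bit limbs; mul_n and mulPre16 are illustrative names (not the library's exported API), and mul_n stands in for the generated 8-limb routine.

#include <stdint.h>
#include <string.h>

/* Schoolbook n x n -> 2n limb multiply; a stand-in for the generated
   mcl_fpDbl_mulPre8L when n == 8 (illustrative only, not the library API). */
static void mul_n(uint32_t *z, const uint32_t *x, const uint32_t *y, unsigned n)
{
    memset(z, 0, 2u * n * sizeof(uint32_t));
    for (unsigned i = 0; i < n; i++) {
        uint64_t c = 0;
        for (unsigned j = 0; j < n; j++) {
            c += (uint64_t)x[i] * y[j] + z[i + j];
            z[i + j] = (uint32_t)c;
            c >>= 32;
        }
        z[i + n] = (uint32_t)c;
    }
}

/* One-level Karatsuba: a 16-limb multiply from two 8-limb products plus one
   9-limb product of the half sums (the 9th limb holds the half-sum carry). */
static void mulPre16(uint32_t z[32], const uint32_t x[16], const uint32_t y[16])
{
    uint32_t xs[9], ys[9], mid[18];
    uint64_t c;
    unsigned i;

    mul_n(z, x, y, 8);              /* z[0..15]  = xL*yL */
    mul_n(z + 16, x + 8, y + 8, 8); /* z[16..31] = xH*yH */

    c = 0;
    for (i = 0; i < 8; i++) { c += (uint64_t)x[i] + x[i + 8]; xs[i] = (uint32_t)c; c >>= 32; }
    xs[8] = (uint32_t)c;
    c = 0;
    for (i = 0; i < 8; i++) { c += (uint64_t)y[i] + y[i + 8]; ys[i] = (uint32_t)c; c >>= 32; }
    ys[8] = (uint32_t)c;

    mul_n(mid, xs, ys, 9);          /* (xL+xH)*(yL+yH), at most 514 bits */

    /* mid -= xL*yL and mid -= xH*yH, leaving the cross term xL*yH + xH*yL
       (the assembly folds these subtractions into its sbcs chains). */
    for (int half = 0; half < 2; half++) {
        const uint32_t *sub = z + 16 * half;
        uint64_t b = 0;
        for (i = 0; i < 18; i++) {
            uint64_t t = (uint64_t)mid[i] - (i < 16 ? sub[i] : 0) - b;
            mid[i] = (uint32_t)t;
            b = (t >> 32) & 1;
        }
    }

    /* z += cross term << 256, i.e. add mid starting at limb offset 8. */
    c = 0;
    for (i = 0; i < 18; i++) { c += (uint64_t)z[8 + i] + mid[i]; z[8 + i] = (uint32_t)c; c >>= 32; }
    for (i = 26; i < 32; i++) { c += z[i]; z[i] = (uint32_t)c; c >>= 32; }
}

mcl_fpDbl_sqrPre16L, which follows, reuses the same split with both operands equal, which is why it issues the same three mcl_fpDbl_mulPre8L calls before combining the halves.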
+ .globl mcl_fpDbl_sqrPre16L
+ .align 2
+ .type mcl_fpDbl_sqrPre16L,%function
+mcl_fpDbl_sqrPre16L: @ @mcl_fpDbl_sqrPre16L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #276
+ sub sp, sp, #276
+ mov r5, r1
+ mov r4, r0
+ mov r2, r5
+ bl mcl_fpDbl_mulPre8L(PLT)
+ add r1, r5, #32
+ add r0, r4, #64
+ mov r2, r1
+ bl mcl_fpDbl_mulPre8L(PLT)
+ ldm r5, {r8, r9, r10}
+ ldr r0, [r5, #12]
+ ldr r6, [r5, #32]
+ ldr r7, [r5, #36]
+ ldr r3, [r5, #40]
+ add lr, r5, #44
+ ldr r11, [r5, #16]
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [r5, #20]
+ adds r6, r8, r6
+ adcs r7, r9, r7
+ adcs r3, r10, r3
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [r5, #24]
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [r5, #28]
+ str r0, [sp, #144] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r12, lr}
+ ldr r5, [sp, #136] @ 4-byte Reload
+ str r6, [sp, #180]
+ str r7, [sp, #184]
+ str r6, [sp, #148]
+ str r3, [sp, #128] @ 4-byte Spill
+ str r3, [sp, #188]
+ str r7, [sp, #152]
+ adcs r10, r5, r0
+ ldr r0, [sp, #140] @ 4-byte Reload
+ adcs r11, r11, r1
+ str r10, [sp, #192]
+ add r1, sp, #180
+ str r11, [sp, #196]
+ adcs r8, r0, r2
+ ldr r0, [sp, #132] @ 4-byte Reload
+ add r2, sp, #148
+ str r8, [sp, #200]
+ adcs r9, r0, r12
+ ldr r0, [sp, #144] @ 4-byte Reload
+ str r9, [sp, #204]
+ adcs r5, r0, lr
+ add r0, sp, #156
+ str r5, [sp, #208]
+ stm r0, {r3, r10, r11}
+ mov r0, #0
+ str r8, [sp, #168]
+ str r9, [sp, #172]
+ str r5, [sp, #176]
+ adc r0, r0, #0
+ str r0, [sp, #136] @ 4-byte Spill
+ add r0, sp, #212
+ bl mcl_fpDbl_mulPre8L(PLT)
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adds r2, r6, r6
+ ldr r1, [sp, #244]
+ ldr r6, [sp, #248]
+ ldr lr, [sp, #264]
+ ldr r12, [sp, #268]
+ adcs r3, r7, r7
+ adcs r7, r0, r0
+ str r1, [sp, #128] @ 4-byte Spill
+ str r6, [sp, #116] @ 4-byte Spill
+ str r12, [sp, #108] @ 4-byte Spill
+ adcs r10, r10, r10
+ adcs r0, r11, r11
+ ldr r11, [sp, #252]
+ str r0, [sp, #144] @ 4-byte Spill
+ adcs r0, r8, r8
+ ldr r8, [sp, #260]
+ str r0, [sp, #140] @ 4-byte Spill
+ adcs r0, r9, r9
+ ldr r9, [sp, #256]
+ str r0, [sp, #120] @ 4-byte Spill
+ adc r0, r5, r5
+ adds r2, r1, r2
+ adcs r1, r6, r3
+ str r2, [sp, #132] @ 4-byte Spill
+ ldr r6, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #272]
+ str r1, [sp, #124] @ 4-byte Spill
+ ldr r1, [sp, #144] @ 4-byte Reload
+ adcs r7, r11, r7
+ adcs r3, r9, r10
+ adcs r2, r8, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ adcs r1, lr, r1
+ adcs r10, r12, r6
+ ldr r6, [sp, #112] @ 4-byte Reload
+ adcs r12, r0, r6
+ mov r6, r0
+ ldr r0, [sp, #136] @ 4-byte Reload
+ adc r5, r0, r5, lsr #31
+ cmp r0, #0
+ moveq r1, lr
+ moveq r2, r8
+ moveq r3, r9
+ moveq r7, r11
+ str r1, [sp, #144] @ 4-byte Spill
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r2, [sp, #140] @ 4-byte Spill
+ ldr r2, [sp, #128] @ 4-byte Reload
+ str r3, [sp, #120] @ 4-byte Spill
+ add r3, sp, #216
+ moveq r10, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ cmp r0, #0
+ moveq r12, r6
+ ldr r6, [sp, #124] @ 4-byte Reload
+ moveq r5, r0
+ str r12, [sp, #112] @ 4-byte Spill
+ moveq r6, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldm r4, {r12, lr}
+ ldr r9, [sp, #212]
+ ldr r11, [r4, #8]
+ ldr r8, [r4, #12]
+ moveq r1, r2
+ ldm r3, {r0, r2, r3}
+ subs r12, r9, r12
+ sbcs r9, r0, lr
+ ldr r0, [r4, #16]
+ sbcs r11, r2, r11
+ ldr r2, [sp, #228]
+ sbcs lr, r3, r8
+ ldr r8, [r4, #68]
+ sbcs r0, r2, r0
+ ldr r2, [sp, #232]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [r4, #20]
+ sbcs r0, r2, r0
+ ldr r2, [sp, #236]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [r4, #24]
+ sbcs r0, r2, r0
+ ldr r2, [sp, #240]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [r4, #28]
+ sbcs r3, r2, r0
+ ldr r0, [r4, #32]
+ str r0, [sp, #136] @ 4-byte Spill
+ sbcs r0, r1, r0
+ ldr r1, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [r4, #36]
+ str r0, [sp, #132] @ 4-byte Spill
+ sbcs r0, r6, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r4, #40]
+ str r0, [sp, #128] @ 4-byte Spill
+ sbcs r0, r7, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r4, #44]
+ str r0, [sp, #124] @ 4-byte Spill
+ sbcs r0, r1, r0
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [r4, #48]
+ str r0, [sp, #120] @ 4-byte Spill
+ sbcs r0, r1, r0
+ ldr r1, [r4, #52]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #144] @ 4-byte Reload
+ str r1, [sp, #140] @ 4-byte Spill
+ sbcs r0, r0, r1
+ ldr r1, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [r4, #56]
+ str r0, [sp, #144] @ 4-byte Spill
+ sbcs r0, r10, r0
+ ldr r10, [r4, #76]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [r4, #60]
+ str r0, [sp, #116] @ 4-byte Spill
+ sbcs r0, r1, r0
+ ldr r1, [sp, #104] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ sbc r0, r5, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r4, #64]
+ str r0, [sp, #80] @ 4-byte Spill
+ subs r0, r12, r0
+ add r12, r4, #104
+ str r0, [sp, #44] @ 4-byte Spill
+ sbcs r0, r9, r8
+ ldr r9, [r4, #72]
+ str r0, [sp, #40] @ 4-byte Spill
+ sbcs r0, r11, r9
+ ldr r11, [r4, #80]
+ str r0, [sp, #36] @ 4-byte Spill
+ sbcs r0, lr, r10
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ sbcs r0, r0, r11
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r4, #84]
+ str r0, [sp, #112] @ 4-byte Spill
+ sbcs r0, r1, r0
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [r4, #88]
+ str r0, [sp, #108] @ 4-byte Spill
+ sbcs r0, r1, r0
+ ldr r1, [r4, #100]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [r4, #92]
+ str r1, [sp, #96] @ 4-byte Spill
+ str r0, [sp, #104] @ 4-byte Spill
+ sbcs r0, r3, r0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [r4, #96]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldm r12, {r2, r3, r12}
+ ldr r7, [sp, #84] @ 4-byte Reload
+ ldr lr, [r4, #116]
+ ldr r5, [r4, #120]
+ ldr r6, [r4, #124]
+ sbcs r0, r7, r0
+ str r12, [sp, #92] @ 4-byte Spill
+ str r6, [sp, #88] @ 4-byte Spill
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ sbcs r0, r0, r2
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ sbcs r0, r0, r3
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ sbcs r0, r0, r12
+ mov r12, lr
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ sbcs r0, r0, lr
+ mov lr, r5
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ sbcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ sbcs r7, r0, r6
+ ldr r0, [sp, #48] @ 4-byte Reload
+ ldr r6, [sp, #40] @ 4-byte Reload
+ sbc r5, r0, #0
+ ldr r0, [sp, #136] @ 4-byte Reload
+ adds r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [r4, #32]
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #36] @ 4-byte Reload
+ str r1, [r4, #36]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #32] @ 4-byte Reload
+ str r0, [r4, #40]
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #28] @ 4-byte Reload
+ str r1, [r4, #44]
+ ldr r1, [sp, #140] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #24] @ 4-byte Reload
+ str r0, [r4, #48]
+ ldr r0, [sp, #144] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #20] @ 4-byte Reload
+ str r1, [r4, #52]
+ ldr r1, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #16] @ 4-byte Reload
+ str r0, [r4, #56]
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #4] @ 4-byte Reload
+ str r1, [r4, #60]
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #76] @ 4-byte Reload
+ str r0, [r4, #64]
+ adcs r1, r8, r1
+ ldr r0, [sp, #12] @ 4-byte Reload
+ str r1, [r4, #68]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [r4, #72]
+ adcs r1, r10, r1
+ ldr r0, [sp, #72] @ 4-byte Reload
+ str r1, [r4, #76]
+ ldr r1, [sp, #112] @ 4-byte Reload
+ adcs r0, r11, r0
+ adcs r1, r1, r6
+ str r0, [r4, #80]
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r6, [sp, #84] @ 4-byte Reload
+ str r1, [r4, #84]
+ ldr r1, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [r4, #88]
+ adcs r1, r1, r7
+ ldr r0, [sp, #100] @ 4-byte Reload
+ str r1, [r4, #92]
+ ldr r1, [sp, #96] @ 4-byte Reload
+ adcs r8, r0, r5
+ ldr r5, [sp, #92] @ 4-byte Reload
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r1, r1, #0
+ str r8, [r4, #96]
+ adcs r2, r2, #0
+ adcs r3, r3, #0
+ adcs r7, r5, #0
+ adcs r6, r12, #0
+ adcs r5, lr, #0
+ adc r12, r0, #0
+ add r0, r4, #100
+ stm r0, {r1, r2, r3, r7}
+ str r6, [r4, #116]
+ str r5, [r4, #120]
+ str r12, [r4, #124]
+ add sp, sp, #276
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end241:
+ .size mcl_fpDbl_sqrPre16L, .Lfunc_end241-mcl_fpDbl_sqrPre16L
+ .cantunwind
+ .fnend
+
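Note: mcl_fp_mont16L below interleaves the 512-bit multiply with Montgomery reduction one 32-bit word of the second operand at a time: each round calls .LmulPv512x32 to accumulate x*y[i], then derives q from the low limb and the constant loaded from just before the modulus pointer (ldr r5, [r3, #-4], the usual -p^{-1} mod 2^32 value) and adds q*p so the low limb can be shifted out. A minimal C sketch of that word-serial (CIOS-style) loop, assuming 32-bit limbs; mont16 and its parameter names are illustrative, not the library's exported interface, and the final conditional subtraction stands in for the compare/subtract tail of the generated function.

#include <stdint.h>
#include <string.h>

#define MONT_N 16  /* 16 x 32-bit limbs = 512 bits */

/* Word-serial Montgomery multiplication: z = x*y*R^-1 mod p, R = 2^(32*MONT_N).
   inv is the -p^{-1} mod 2^32 constant (the word the prologue loads from
   [r3, #-4]). Illustrative sketch only. */
static void mont16(uint32_t z[MONT_N], const uint32_t x[MONT_N],
                   const uint32_t y[MONT_N], const uint32_t p[MONT_N], uint32_t inv)
{
    uint32_t t[MONT_N + 2] = {0};
    for (int i = 0; i < MONT_N; i++) {
        /* t += x * y[i] (the role of one .LmulPv512x32 call) */
        uint64_t c = 0;
        for (int j = 0; j < MONT_N; j++) {
            c += (uint64_t)t[j] + (uint64_t)x[j] * y[i];
            t[j] = (uint32_t)c;
            c >>= 32;
        }
        c += t[MONT_N];
        t[MONT_N] = (uint32_t)c;
        t[MONT_N + 1] = (uint32_t)(c >> 32);

        /* q chosen so the low limb of t + q*p becomes zero */
        uint32_t q = t[0] * inv;
        c = ((uint64_t)t[0] + (uint64_t)q * p[0]) >> 32;
        for (int j = 1; j < MONT_N; j++) {
            c += (uint64_t)t[j] + (uint64_t)q * p[j];
            t[j - 1] = (uint32_t)c;   /* shift down by one limb as we go */
            c >>= 32;
        }
        c += t[MONT_N];
        t[MONT_N - 1] = (uint32_t)c;
        t[MONT_N] = t[MONT_N + 1] + (uint32_t)(c >> 32);
        t[MONT_N + 1] = 0;
    }
    /* Final conditional subtraction so the result lies in [0, p). */
    uint32_t s[MONT_N];
    uint64_t b = 0;
    for (int j = 0; j < MONT_N; j++) {
        uint64_t d = (uint64_t)t[j] - p[j] - b;
        s[j] = (uint32_t)d;
        b = (d >> 32) & 1;
    }
    memcpy(z, (t[MONT_N] >= b) ? s : t, sizeof(s));
}

The repeated spill/reload blocks in the generated code correspond to carrying this (MONT_N + 2)-limb accumulator across the sixteen .LmulPv512x32 calls, one per word of the multiplier.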
+ .globl mcl_fp_mont16L
+ .align 2
+ .type mcl_fp_mont16L,%function
+mcl_fp_mont16L: @ @mcl_fp_mont16L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #404
+ sub sp, sp, #404
+ .pad #2048
+ sub sp, sp, #2048
+ add r12, sp, #132
+ add r6, sp, #2048
+ mov r4, r3
+ stm r12, {r1, r2, r3}
+ str r0, [sp, #92] @ 4-byte Spill
+ add r0, r6, #328
+ ldr r5, [r3, #-4]
+ ldr r2, [r2]
+ str r5, [sp, #128] @ 4-byte Spill
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #2376]
+ ldr r1, [sp, #2380]
+ str r0, [sp, #72] @ 4-byte Spill
+ mul r2, r0, r5
+ ldr r0, [sp, #2440]
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [sp, #2384]
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #2436]
+ str r1, [sp, #96] @ 4-byte Spill
+ ldr r1, [sp, #2388]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #2432]
+ str r1, [sp, #88] @ 4-byte Spill
+ mov r1, r4
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #2428]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #2424]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #2420]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #2416]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #2412]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #2408]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #2404]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #2400]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #2396]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2392]
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #2304
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #2368]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r7, [sp, #2332]
+ ldr r4, [sp, #2328]
+ ldr r8, [sp, #2324]
+ ldr r11, [sp, #2320]
+ ldr r9, [sp, #2304]
+ ldr r10, [sp, #2308]
+ ldr r6, [sp, #2312]
+ ldr r5, [sp, #2316]
+ add lr, sp, #2048
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #2364]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #2360]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2356]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #2352]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #2348]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #2344]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #2340]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #2336]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r2, [r0, #4]
+ add r0, lr, #184
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r2, [sp, #20] @ 4-byte Reload
+ ldr r3, [sp, #2248]
+ ldr r12, [sp, #2252]
+ ldr lr, [sp, #2256]
+ adds r0, r9, r0
+ ldr r9, [sp, #2272]
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r10, r0
+ ldr r10, [sp, #2276]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ ldr r6, [sp, #96] @ 4-byte Reload
+ adcs r0, r5, r0
+ ldr r5, [sp, #2264]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r11, r0
+ ldr r11, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r8, r0
+ ldr r8, [sp, #2268]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #2260]
+ adcs r1, r7, r1
+ str r0, [sp, #60] @ 4-byte Spill
+ mov r0, #0
+ ldr r7, [sp, #2232]
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #104] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #108] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #112] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #116] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #120] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #64] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #124] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #2244]
+ adc r0, r0, #0
+ adds r7, r11, r7
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #2240]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #2296]
+ str r7, [sp, #24] @ 4-byte Spill
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #2292]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #2288]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #2284]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #2280]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #2236]
+ adcs r0, r6, r0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #2160
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #2224]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r5, [sp, #2188]
+ ldr r6, [sp, #2184]
+ ldr r8, [sp, #2180]
+ ldr r9, [sp, #2176]
+ ldr r10, [sp, #2160]
+ ldr r11, [sp, #2164]
+ ldr r4, [sp, #2168]
+ ldr r7, [sp, #2172]
+ add lr, sp, #2048
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2220]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2216]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2212]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #2208]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #2204]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #2200]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #2196]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #2192]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r2, [r0, #8]
+ add r0, lr, #40
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #2100]
+ ldr r3, [sp, #2104]
+ ldr r12, [sp, #2108]
+ ldr lr, [sp, #2112]
+ adds r0, r0, r10
+ ldr r10, [sp, #2132]
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r11, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #2116]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #2088]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #2128]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #2124]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #2120]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #2096]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r7, r11, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #2152]
+ str r7, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2148]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2144]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2140]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #2136]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #2092]
+ adcs r0, r6, r0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #2016
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #2080]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r5, [sp, #2044]
+ ldr r6, [sp, #2040]
+ ldr r8, [sp, #2036]
+ ldr r9, [sp, #2032]
+ ldr r10, [sp, #2016]
+ ldr r11, [sp, #2020]
+ ldr r4, [sp, #2024]
+ ldr r7, [sp, #2028]
+ add lr, sp, #1024
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2076]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2072]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2068]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #2064]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #2060]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #2056]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #2052]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #2048]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r2, [r0, #12]
+ add r0, lr, #920
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #1956]
+ ldr r3, [sp, #1960]
+ ldr r12, [sp, #1964]
+ ldr lr, [sp, #1968]
+ adds r0, r0, r10
+ ldr r10, [sp, #1988]
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r11, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1972]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #1944]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1984]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1980]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1976]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1952]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r7, r11, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #2008]
+ str r7, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2004]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2000]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1996]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1992]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1948]
+ adcs r0, r6, r0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #1872
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #1936]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r5, [sp, #1900]
+ ldr r6, [sp, #1896]
+ ldr r8, [sp, #1892]
+ ldr r9, [sp, #1888]
+ ldr r10, [sp, #1872]
+ ldr r11, [sp, #1876]
+ ldr r4, [sp, #1880]
+ ldr r7, [sp, #1884]
+ add lr, sp, #1024
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1932]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1928]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1924]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1920]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1916]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1912]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1908]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1904]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r2, [r0, #16]
+ add r0, lr, #776
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #1812]
+ ldr r3, [sp, #1816]
+ ldr r12, [sp, #1820]
+ ldr lr, [sp, #1824]
+ adds r0, r0, r10
+ ldr r10, [sp, #1844]
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r11, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1828]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #1800]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1840]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1836]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1832]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1808]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r7, r11, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1864]
+ str r7, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1860]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1856]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1852]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1848]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1804]
+ adcs r0, r6, r0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #1728
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #1792]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r5, [sp, #1756]
+ ldr r6, [sp, #1752]
+ ldr r8, [sp, #1748]
+ ldr r9, [sp, #1744]
+ ldr r10, [sp, #1728]
+ ldr r11, [sp, #1732]
+ ldr r4, [sp, #1736]
+ ldr r7, [sp, #1740]
+ add lr, sp, #1024
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1788]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1784]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1780]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1776]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1772]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1768]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1764]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1760]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r2, [r0, #20]
+ add r0, lr, #632
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #1668]
+ ldr r3, [sp, #1672]
+ ldr r12, [sp, #1676]
+ ldr lr, [sp, #1680]
+ adds r0, r0, r10
+ ldr r10, [sp, #1700]
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r11, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1684]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #1656]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1696]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1692]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1688]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1664]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r7, r11, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1720]
+ str r7, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1716]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1712]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1708]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1704]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1660]
+ adcs r0, r6, r0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #1584
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #1648]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r5, [sp, #1612]
+ ldr r6, [sp, #1608]
+ ldr r8, [sp, #1604]
+ ldr r9, [sp, #1600]
+ ldr r10, [sp, #1584]
+ ldr r11, [sp, #1588]
+ ldr r4, [sp, #1592]
+ ldr r7, [sp, #1596]
+ add lr, sp, #1024
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1644]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1640]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1636]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1632]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1628]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1624]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1620]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1616]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r2, [r0, #24]
+ add r0, lr, #488
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #1524]
+ ldr r3, [sp, #1528]
+ ldr r12, [sp, #1532]
+ ldr lr, [sp, #1536]
+ adds r0, r0, r10
+ ldr r10, [sp, #1556]
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r11, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1540]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #1512]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1552]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1548]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1544]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1520]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r7, r11, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1576]
+ str r7, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1572]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1568]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1564]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1560]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1516]
+ adcs r0, r6, r0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #1440
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #1504]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r5, [sp, #1468]
+ ldr r6, [sp, #1464]
+ ldr r8, [sp, #1460]
+ ldr r9, [sp, #1456]
+ ldr r10, [sp, #1440]
+ ldr r11, [sp, #1444]
+ ldr r4, [sp, #1448]
+ ldr r7, [sp, #1452]
+ add lr, sp, #1024
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1500]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1496]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1492]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1488]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1484]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1480]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1476]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1472]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r2, [r0, #28]
+ add r0, lr, #344
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #1380]
+ ldr r3, [sp, #1384]
+ ldr r12, [sp, #1388]
+ ldr lr, [sp, #1392]
+ adds r0, r0, r10
+ ldr r10, [sp, #1412]
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r11, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1396]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #1368]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1408]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1404]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1400]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1376]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r7, r11, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1432]
+ str r7, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1428]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1424]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1420]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1416]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1372]
+ adcs r0, r6, r0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #1296
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #1360]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r5, [sp, #1324]
+ ldr r6, [sp, #1320]
+ ldr r8, [sp, #1316]
+ ldr r9, [sp, #1312]
+ ldr r10, [sp, #1296]
+ ldr r11, [sp, #1300]
+ ldr r4, [sp, #1304]
+ ldr r7, [sp, #1308]
+ add lr, sp, #1024
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1356]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1352]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1348]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1344]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1340]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1336]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1332]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1328]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r2, [r0, #32]
+ add r0, lr, #200
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #1236]
+ ldr r3, [sp, #1240]
+ ldr r12, [sp, #1244]
+ ldr lr, [sp, #1248]
+ adds r0, r0, r10
+ ldr r10, [sp, #1268]
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r11, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1252]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #1224]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1264]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1260]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1256]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1232]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r7, r11, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1288]
+ str r7, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1284]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1280]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1276]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1272]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1228]
+ adcs r0, r6, r0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #1152
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #1216]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r5, [sp, #1180]
+ ldr r6, [sp, #1176]
+ ldr r8, [sp, #1172]
+ ldr r9, [sp, #1168]
+ ldr r10, [sp, #1152]
+ ldr r11, [sp, #1156]
+ ldr r4, [sp, #1160]
+ ldr r7, [sp, #1164]
+ add lr, sp, #1024
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1212]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1208]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1204]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1200]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1196]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1192]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1188]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1184]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r2, [r0, #36]
+ add r0, lr, #56
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #1092]
+ ldr r3, [sp, #1096]
+ ldr r12, [sp, #1100]
+ ldr lr, [sp, #1104]
+ adds r0, r0, r10
+ ldr r10, [sp, #1124]
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r11, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1108]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #1080]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1120]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1116]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1112]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1088]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r7, r11, r7
+ ldr r11, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1144]
+ str r7, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1140]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1136]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1132]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1128]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1084]
+ adcs r0, r6, r0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r7, r11
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ add r0, sp, #1008
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #1072]
+ add r10, sp, #1008
+ ldr r4, [sp, #1032]
+ ldr r5, [sp, #1028]
+ ldr r6, [sp, #1024]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1068]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1064]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1060]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1056]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1052]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1048]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1044]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1040]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1036]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r7, r8, r9, r10}
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r2, [r0, #40]
+ add r0, sp, #936
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #952
+ adds r0, r0, r7
+ ldr r7, [sp, #948]
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r2, r0, r8
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #976
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #940]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #944]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #936]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ adds r0, r2, r4
+ mul r1, r0, r11
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #1000]
+ str r1, [sp, #48] @ 4-byte Spill
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #996]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #992]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldm r10, {r4, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #120] @ 4-byte Reload
+ adcs r6, r11, r6
+ str r6, [sp, #120] @ 4-byte Spill
+ ldr r6, [sp, #116] @ 4-byte Reload
+ adcs r5, r6, r5
+ str r5, [sp, #116] @ 4-byte Spill
+ ldr r5, [sp, #112] @ 4-byte Reload
+ adcs r5, r5, r7
+ str r5, [sp, #112] @ 4-byte Spill
+ ldr r5, [sp, #108] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ add r0, sp, #864
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #928]
+ add r10, sp, #864
+ ldr r11, [sp, #892]
+ ldr r4, [sp, #888]
+ ldr r5, [sp, #884]
+ ldr r6, [sp, #880]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #924]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #920]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #916]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #912]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #908]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #904]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #900]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #896]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r7, r8, r9, r10}
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r2, [r0, #44]
+ add r0, sp, #792
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #124] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ add lr, sp, #808
+ adds r0, r0, r7
+ ldr r7, [sp, #804]
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r2, r0, r8
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #832
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #796]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #800]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #792]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r1, r2, r4
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ str r1, [sp, #124] @ 4-byte Spill
+ mul r2, r1, r0
+ ldr r0, [sp, #856]
+ str r2, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #852]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #848]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r4, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #120] @ 4-byte Reload
+ adcs r6, r11, r6
+ str r6, [sp, #72] @ 4-byte Spill
+ ldr r6, [sp, #116] @ 4-byte Reload
+ adcs r5, r6, r5
+ str r5, [sp, #68] @ 4-byte Spill
+ ldr r5, [sp, #112] @ 4-byte Reload
+ adcs r5, r5, r7
+ str r5, [sp, #64] @ 4-byte Spill
+ ldr r5, [sp, #108] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ add r0, sp, #720
+ bl .LmulPv512x32(PLT)
+ ldr r1, [sp, #784]
+ add r10, sp, #720
+ ldr r5, [sp, #748]
+ ldr r6, [sp, #744]
+ ldr r7, [sp, #740]
+ ldr r11, [sp, #736]
+ add r0, sp, #648
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #780]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #776]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #772]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #768]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #764]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #760]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #756]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #752]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr r1, [sp, #136] @ 4-byte Reload
+ ldr r4, [sp, #732]
+ ldr r2, [r1, #48]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #124] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ add lr, sp, #652
+ adds r0, r0, r8
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #676
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #712]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #708]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #704]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #700]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #696]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r6, [sp, #648]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #124] @ 4-byte Reload
+ ldr r7, [sp, #72] @ 4-byte Reload
+ adds r6, r11, r6
+ adcs r0, r7, r0
+ str r6, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #576
+ bl .LmulPv512x32(PLT)
+ ldr r1, [sp, #640]
+ add r11, sp, #584
+ ldr r6, [sp, #604]
+ ldr r5, [sp, #600]
+ ldr r8, [sp, #596]
+ ldr r9, [sp, #576]
+ ldr r10, [sp, #580]
+ add r0, sp, #504
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #636]
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #632]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #628]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #624]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #620]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #616]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #612]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #608]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldm r11, {r4, r7, r11}
+ ldr r1, [sp, #136] @ 4-byte Reload
+ ldr r2, [r1, #52]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ add lr, sp, #508
+ adds r0, r0, r9
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #532
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #568]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #564]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #560]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #556]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #552]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r6, [sp, #504]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #76] @ 4-byte Reload
+ ldr r7, [sp, #72] @ 4-byte Reload
+ adds r6, r11, r6
+ adcs r0, r7, r0
+ str r6, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #432
+ bl .LmulPv512x32(PLT)
+ ldr r1, [sp, #496]
+ add r11, sp, #440
+ ldr r6, [sp, #460]
+ ldr r5, [sp, #456]
+ ldr r8, [sp, #452]
+ ldr r9, [sp, #432]
+ ldr r10, [sp, #436]
+ add r0, sp, #360
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #492]
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #488]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #484]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #480]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #476]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #472]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #468]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #464]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldm r11, {r4, r7, r11}
+ ldr r1, [sp, #136] @ 4-byte Reload
+ ldr r2, [r1, #56]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ add lr, sp, #364
+ adds r0, r0, r9
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #388
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #424]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #420]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #416]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #412]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #408]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r6, [sp, #360]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #76] @ 4-byte Reload
+ ldr r7, [sp, #72] @ 4-byte Reload
+ adds r6, r11, r6
+ adcs r0, r7, r0
+ str r6, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #288
+ bl .LmulPv512x32(PLT)
+ ldr r1, [sp, #352]
+ add r11, sp, #296
+ ldr r7, [sp, #316]
+ ldr r9, [sp, #288]
+ ldr r5, [sp, #292]
+ add r0, sp, #216
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #348]
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #344]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #340]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #336]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #332]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #328]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #324]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #320]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldm r11, {r4, r6, r8, r10, r11}
+ ldr r1, [sp, #136] @ 4-byte Reload
+ ldr r2, [r1, #60]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #72] @ 4-byte Reload
+ ldr r2, [sp, #16] @ 4-byte Reload
+ add lr, sp, #232
+ adds r0, r0, r9
+ add r9, sp, #216
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r5
+ adcs r1, r1, r4
+ str r1, [sp, #136] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r1, r1, r6
+ str r1, [sp, #132] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r8
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r10
+ add r10, sp, #256
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, r11
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #124] @ 4-byte Reload
+ adcs r1, r1, r7
+ str r1, [sp, #124] @ 4-byte Spill
+ ldr r1, [sp, #120] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #120] @ 4-byte Spill
+ ldr r1, [sp, #116] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #116] @ 4-byte Spill
+ ldr r1, [sp, #112] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #112] @ 4-byte Spill
+ ldr r1, [sp, #108] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #108] @ 4-byte Spill
+ ldr r1, [sp, #104] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #104] @ 4-byte Spill
+ ldr r1, [sp, #100] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [sp, #96] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r1, [sp, #96] @ 4-byte Spill
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adc r1, r1, #0
+ str r1, [sp, #80] @ 4-byte Spill
+ ldm r9, {r4, r7, r9}
+ ldr r5, [sp, #228]
+ adds r8, r0, r4
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r1, r8, r0
+ ldr r0, [sp, #280]
+ str r1, [sp, #64] @ 4-byte Spill
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #276]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #272]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #268]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r4, r6, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #136] @ 4-byte Reload
+ adcs r11, r11, r7
+ ldr r7, [sp, #132] @ 4-byte Reload
+ adcs r9, r7, r9
+ ldr r7, [sp, #76] @ 4-byte Reload
+ adcs r5, r7, r5
+ ldr r7, [sp, #72] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #64] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r10, r0, r1
+ ldr r0, [sp, #84] @ 4-byte Reload
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r6
+ str r0, [sp, #84] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ add r0, sp, #144
+ bl .LmulPv512x32(PLT)
+ add r3, sp, #144
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r8, r0
+ adcs r7, r11, r1
+ ldr r0, [sp, #160]
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r8, r9, r2
+ str r7, [sp, #56] @ 4-byte Spill
+ adcs r5, r5, r3
+ mov r3, r6
+ str r8, [sp, #64] @ 4-byte Spill
+ str r5, [sp, #72] @ 4-byte Spill
+ adcs r4, r1, r0
+ ldr r0, [sp, #164]
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r4, [sp, #76] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #168]
+ adcs lr, r1, r0
+ ldr r0, [sp, #172]
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str lr, [sp, #52] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #176]
+ adcs r0, r1, r0
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #180]
+ adcs r0, r1, r0
+ ldr r1, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #184]
+ adcs r0, r1, r0
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #188]
+ adcs r0, r1, r0
+ ldr r1, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #192]
+ adcs r0, r1, r0
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #196]
+ adcs r0, r1, r0
+ ldr r1, [sp, #136] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #200]
+ adcs r0, r10, r0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #204]
+ adcs r0, r1, r0
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #208]
+ adcs r0, r1, r0
+ ldr r1, [r3]
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adc r0, r0, #0
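+@ Final conditional reduction: the subs/sbcs chain below subtracts the modulus
+@ (addressed through r3) from the 16-word accumulator; the resulting borrow,
+@ reduced to a flag by the ands, drives the movne sequence that keeps either
+@ the subtracted value or the original limbs before they are stored through
+@ the result pointer reloaded from sp+92.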
+ subs r12, r7, r1
+ str r0, [sp, #68] @ 4-byte Spill
+ ldmib r3, {r0, r2, r6}
+ ldr r1, [r3, #32]
+ ldr r11, [r3, #40]
+ ldr r9, [r3, #28]
+ sbcs r7, r8, r0
+ ldr r0, [r3, #36]
+ sbcs r5, r5, r2
+ ldr r2, [sp, #96] @ 4-byte Reload
+ sbcs r10, r4, r6
+ ldr r6, [r3, #20]
+ ldr r4, [r3, #24]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r3, #44]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [r3, #48]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [r3, #52]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [r3, #56]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [r3, #60]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r3, #16]
+ sbcs r2, r2, r0
+ ldr r0, [sp, #100] @ 4-byte Reload
+ sbcs r3, lr, r6
+ ldr r6, [sp, #64] @ 4-byte Reload
+ sbcs lr, r0, r4
+ ldr r0, [sp, #104] @ 4-byte Reload
+ sbcs r4, r0, r9
+ ldr r0, [sp, #108] @ 4-byte Reload
+ sbcs r8, r0, r1
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #44] @ 4-byte Reload
+ sbcs r9, r0, r1
+ ldr r0, [sp, #116] @ 4-byte Reload
+ ldr r1, [sp, #60] @ 4-byte Reload
+ sbcs r11, r0, r11
+ ldr r0, [sp, #120] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ sbcs r0, r0, r1
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ sbc r0, r0, #0
+ ands r1, r0, #1
+ ldr r0, [sp, #56] @ 4-byte Reload
+ movne r7, r6
+ movne r12, r0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ str r12, [r0]
+ str r7, [r0, #4]
+ ldr r7, [sp, #72] @ 4-byte Reload
+ movne r5, r7
+ ldr r7, [sp, #76] @ 4-byte Reload
+ cmp r1, #0
+ str r5, [r0, #8]
+ movne r10, r7
+ ldr r7, [sp, #96] @ 4-byte Reload
+ str r10, [r0, #12]
+ movne r2, r7
+ str r2, [r0, #16]
+ ldr r2, [sp, #52] @ 4-byte Reload
+ movne r3, r2
+ ldr r2, [sp, #100] @ 4-byte Reload
+ cmp r1, #0
+ str r3, [r0, #20]
+ ldr r3, [sp, #60] @ 4-byte Reload
+ movne lr, r2
+ ldr r2, [sp, #104] @ 4-byte Reload
+ str lr, [r0, #24]
+ movne r4, r2
+ ldr r2, [sp, #108] @ 4-byte Reload
+ str r4, [r0, #28]
+ movne r8, r2
+ ldr r2, [sp, #112] @ 4-byte Reload
+ cmp r1, #0
+ str r8, [r0, #32]
+ movne r9, r2
+ ldr r2, [sp, #116] @ 4-byte Reload
+ str r9, [r0, #36]
+ movne r11, r2
+ ldr r2, [sp, #120] @ 4-byte Reload
+ str r11, [r0, #40]
+ movne r3, r2
+ ldr r2, [sp, #124] @ 4-byte Reload
+ cmp r1, #0
+ str r3, [r0, #44]
+ ldr r3, [sp, #80] @ 4-byte Reload
+ movne r3, r2
+ ldr r2, [sp, #128] @ 4-byte Reload
+ str r3, [r0, #48]
+ ldr r3, [sp, #84] @ 4-byte Reload
+ movne r3, r2
+ ldr r2, [sp, #132] @ 4-byte Reload
+ str r3, [r0, #52]
+ ldr r3, [sp, #88] @ 4-byte Reload
+ movne r3, r2
+ cmp r1, #0
+ ldr r1, [sp, #136] @ 4-byte Reload
+ ldr r2, [sp, #140] @ 4-byte Reload
+ str r3, [r0, #56]
+ movne r2, r1
+ str r2, [r0, #60]
+ add sp, sp, #404
+ add sp, sp, #2048
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end242:
+ .size mcl_fp_mont16L, .Lfunc_end242-mcl_fp_mont16L
+ .cantunwind
+ .fnend
+
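+@ mcl_fp_montNF16L: 16-limb (512-bit) Montgomery multiplication, the "NF"
+@ companion of mcl_fp_mont16L above (per mcl's naming this is likely the
+@ variant that avoids the plain routine's final conditional subtraction).
+@ The structure mirrors mcl_fp_mont16L: a pair of .LmulPv512x32 calls per
+@ multiplier limb, with the partial products folded into a stack-resident
+@ accumulator.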
+ .globl mcl_fp_montNF16L
+ .align 2
+ .type mcl_fp_montNF16L,%function
+mcl_fp_montNF16L: @ @mcl_fp_montNF16L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #396
+ sub sp, sp, #396
+ .pad #2048
+ sub sp, sp, #2048
+ add r12, sp, #124
+ mov r4, r3
+ stm r12, {r1, r2, r3}
+ str r0, [sp, #92] @ 4-byte Spill
+ add r0, sp, #2368
+ ldr r5, [r3, #-4]
+ ldr r2, [r2]
+ str r5, [sp, #120] @ 4-byte Spill
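+@ r5 holds the word loaded from [r3, #-4] above and is spilled at sp+120; it
+@ is the multiplier applied to the low result word (mul r2, r0, r5 below),
+@ which is consistent with it being the Montgomery constant -p^{-1} mod 2^32.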
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #2368]
+ ldr r1, [sp, #2372]
+ add r9, sp, #2048
+ str r0, [sp, #68] @ 4-byte Spill
+ mul r2, r0, r5
+ ldr r0, [sp, #2432]
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [sp, #2376]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #2428]
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #2380]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #2424]
+ str r1, [sp, #80] @ 4-byte Spill
+ mov r1, r4
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #2420]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #2416]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #2412]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #2408]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #2404]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #2400]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #2396]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #2392]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #2388]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2384]
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, r9, #248
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #2360]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r5, [sp, #2324]
+ ldr r6, [sp, #2320]
+ ldr r7, [sp, #2316]
+ ldr r8, [sp, #2312]
+ ldr r10, [sp, #2296]
+ ldr r11, [sp, #2300]
+ ldr r4, [sp, #2304]
+ ldr r9, [sp, #2308]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2356]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2352]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #2348]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #2344]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #2340]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #2336]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #2332]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #2328]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r2, [r0, #4]
+ add r0, sp, #2224
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #68] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #2236]
+ ldr r3, [sp, #2240]
+ ldr r12, [sp, #2244]
+ ldr lr, [sp, #2248]
+ adds r0, r10, r0
+ ldr r10, [sp, #2268]
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ ldr r11, [sp, #88] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #2252]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r9, r0
+ ldr r9, [sp, #2264]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r8, r0
+ ldr r8, [sp, #2260]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r7, r0
+ ldr r7, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r6, r0
+ ldr r6, [sp, #2224]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r5, r0
+ ldr r5, [sp, #2256]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adc r0, r1, r0
+ adds r6, r11, r6
+ ldr r1, [sp, #2232]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2288]
+ str r6, [sp, #20] @ 4-byte Spill
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #2284]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #2280]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #2276]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #2272]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #2228]
+ adcs r0, r7, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #4] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r4
+ add r4, sp, #2048
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, r4, #104
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #2216]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r6, [sp, #2180]
+ ldr r7, [sp, #2176]
+ ldr r5, [sp, #2172]
+ ldr r8, [sp, #2168]
+ ldr r9, [sp, #2152]
+ ldr r10, [sp, #2156]
+ ldr r11, [sp, #2160]
+ ldr r4, [sp, #2164]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2212]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2208]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #2204]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #2200]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #2196]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #2192]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #2188]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #2184]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r2, [r0, #8]
+ add r0, sp, #2080
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #20] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #2092]
+ ldr r3, [sp, #2096]
+ ldr r12, [sp, #2100]
+ ldr lr, [sp, #2104]
+ adds r0, r0, r9
+ ldr r9, [sp, #2120]
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #2124]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #2108]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #2116]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #2112]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #2080]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r6, r11, r6
+ ldr r1, [sp, #2088]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2144]
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2140]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2136]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #2132]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #2128]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #2084]
+ adcs r0, r7, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ add r4, sp, #1024
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, r4, #984
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #2072]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r6, [sp, #2036]
+ ldr r7, [sp, #2032]
+ ldr r5, [sp, #2028]
+ ldr r8, [sp, #2024]
+ ldr r9, [sp, #2008]
+ ldr r10, [sp, #2012]
+ ldr r11, [sp, #2016]
+ ldr r4, [sp, #2020]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2068]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2064]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #2060]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #2056]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #2052]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #2048]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #2044]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #2040]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r2, [r0, #12]
+ add r0, sp, #1936
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #1948]
+ ldr r3, [sp, #1952]
+ ldr r12, [sp, #1956]
+ ldr lr, [sp, #1960]
+ adds r0, r0, r9
+ ldr r9, [sp, #1976]
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1980]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1964]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1972]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1968]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1936]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r6, r11, r6
+ ldr r1, [sp, #1944]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2000]
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1996]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1992]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1988]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1984]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1940]
+ adcs r0, r7, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ add r4, sp, #1024
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, r4, #840
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #1928]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r6, [sp, #1892]
+ ldr r7, [sp, #1888]
+ ldr r5, [sp, #1884]
+ ldr r8, [sp, #1880]
+ ldr r9, [sp, #1864]
+ ldr r10, [sp, #1868]
+ ldr r11, [sp, #1872]
+ ldr r4, [sp, #1876]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1924]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1920]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1916]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1912]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1908]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1904]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1900]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1896]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r2, [r0, #16]
+ add r0, sp, #1792
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #1804]
+ ldr r3, [sp, #1808]
+ ldr r12, [sp, #1812]
+ ldr lr, [sp, #1816]
+ adds r0, r0, r9
+ ldr r9, [sp, #1832]
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1836]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1820]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1828]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1824]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1792]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r6, r11, r6
+ ldr r1, [sp, #1800]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1856]
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1852]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1848]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1844]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1840]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1796]
+ adcs r0, r7, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ add r4, sp, #1024
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, r4, #696
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #1784]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r6, [sp, #1748]
+ ldr r7, [sp, #1744]
+ ldr r5, [sp, #1740]
+ ldr r8, [sp, #1736]
+ ldr r9, [sp, #1720]
+ ldr r10, [sp, #1724]
+ ldr r11, [sp, #1728]
+ ldr r4, [sp, #1732]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1780]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1776]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1772]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1768]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1764]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1760]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1756]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1752]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r2, [r0, #20]
+ add r0, sp, #1648
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #1660]
+ ldr r3, [sp, #1664]
+ ldr r12, [sp, #1668]
+ ldr lr, [sp, #1672]
+ adds r0, r0, r9
+ ldr r9, [sp, #1688]
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1692]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1676]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1684]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1680]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1648]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r6, r11, r6
+ ldr r1, [sp, #1656]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1712]
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1708]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1704]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1700]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1696]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1652]
+ adcs r0, r7, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ add r4, sp, #1024
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, r4, #552
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #1640]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r6, [sp, #1604]
+ ldr r7, [sp, #1600]
+ ldr r5, [sp, #1596]
+ ldr r8, [sp, #1592]
+ ldr r9, [sp, #1576]
+ ldr r10, [sp, #1580]
+ ldr r11, [sp, #1584]
+ ldr r4, [sp, #1588]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1636]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1632]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1628]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1624]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1620]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1616]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1612]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1608]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r2, [r0, #24]
+ add r0, sp, #1504
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #1516]
+ ldr r3, [sp, #1520]
+ ldr r12, [sp, #1524]
+ ldr lr, [sp, #1528]
+ adds r0, r0, r9
+ ldr r9, [sp, #1544]
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1548]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1532]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1540]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1536]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1504]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r6, r11, r6
+ ldr r1, [sp, #1512]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1568]
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1564]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1560]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1556]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1552]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1508]
+ adcs r0, r7, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ add r4, sp, #1024
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, r4, #408
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #1496]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r6, [sp, #1460]
+ ldr r7, [sp, #1456]
+ ldr r5, [sp, #1452]
+ ldr r8, [sp, #1448]
+ ldr r9, [sp, #1432]
+ ldr r10, [sp, #1436]
+ ldr r11, [sp, #1440]
+ ldr r4, [sp, #1444]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1492]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1488]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1484]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1480]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1476]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1472]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1468]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1464]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r2, [r0, #28]
+ add r0, sp, #1360
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #1372]
+ ldr r3, [sp, #1376]
+ ldr r12, [sp, #1380]
+ ldr lr, [sp, #1384]
+ adds r0, r0, r9
+ ldr r9, [sp, #1400]
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1404]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1388]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1396]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1392]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1360]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r6, r11, r6
+ ldr r1, [sp, #1368]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1424]
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1420]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1416]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1412]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1408]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1364]
+ adcs r0, r7, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ add r4, sp, #1024
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, r4, #264
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #1352]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r6, [sp, #1316]
+ ldr r7, [sp, #1312]
+ ldr r5, [sp, #1308]
+ ldr r8, [sp, #1304]
+ ldr r9, [sp, #1288]
+ ldr r10, [sp, #1292]
+ ldr r11, [sp, #1296]
+ ldr r4, [sp, #1300]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1348]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1344]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1340]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1336]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1332]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1328]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1324]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1320]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r2, [r0, #32]
+ add r0, sp, #1216
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #1228]
+ ldr r3, [sp, #1232]
+ ldr r12, [sp, #1236]
+ ldr lr, [sp, #1240]
+ adds r0, r0, r9
+ ldr r9, [sp, #1256]
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1260]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1244]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1252]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1248]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1216]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r6, r11, r6
+ ldr r1, [sp, #1224]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1280]
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1276]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1272]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1268]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1264]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1220]
+ adcs r0, r7, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ add r4, sp, #1024
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, r4, #120
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #1208]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r6, [sp, #1172]
+ ldr r7, [sp, #1168]
+ ldr r5, [sp, #1164]
+ ldr r8, [sp, #1160]
+ ldr r9, [sp, #1144]
+ ldr r10, [sp, #1148]
+ ldr r11, [sp, #1152]
+ ldr r4, [sp, #1156]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1204]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1200]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1196]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1192]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1188]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1184]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1180]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1176]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r2, [r0, #36]
+ add r0, sp, #1072
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #1084]
+ ldr r3, [sp, #1088]
+ ldr r12, [sp, #1092]
+ ldr lr, [sp, #1096]
+ adds r0, r0, r9
+ ldr r9, [sp, #1112]
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1116]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1100]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1108]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1104]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1072]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r6, r11, r6
+ ldr r1, [sp, #1080]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1136]
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1132]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1128]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1124]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1120]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1076]
+ adcs r0, r7, r0
+ ldr r7, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r6, r7
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ add r0, sp, #1000
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #1064]
+ add r11, sp, #1000
+ ldr r6, [sp, #1024]
+ ldr r5, [sp, #1020]
+ ldr r8, [sp, #1016]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1060]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1056]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1052]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1048]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1044]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1040]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1036]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1032]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1028]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r9, r10, r11}
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r4, [sp, #1012]
+ ldr r2, [r0, #40]
+ add r0, sp, #928
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #944
+ adds r0, r0, r9
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r2, r0, r10
+ ldr r0, [sp, #112] @ 4-byte Reload
+ add r10, sp, #968
+ adcs r0, r0, r11
+ ldr r11, [sp, #932]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #940]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #928]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #936]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #52] @ 4-byte Spill
+ adds r0, r2, r5
+ mul r1, r0, r7
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #992]
+ str r1, [sp, #44] @ 4-byte Spill
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #988]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #984]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r4, r5, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #112] @ 4-byte Reload
+ adcs r7, r7, r11
+ str r7, [sp, #112] @ 4-byte Spill
+ ldr r7, [sp, #108] @ 4-byte Reload
+ adcs r6, r7, r6
+ str r6, [sp, #108] @ 4-byte Spill
+ ldr r6, [sp, #104] @ 4-byte Reload
+ adcs r6, r6, r8
+ str r6, [sp, #104] @ 4-byte Spill
+ ldr r6, [sp, #100] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ add r0, sp, #856
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #920]
+ add r11, sp, #856
+ ldr r4, [sp, #884]
+ ldr r7, [sp, #880]
+ ldr r5, [sp, #876]
+ ldr r6, [sp, #872]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #916]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #912]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #908]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #904]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #900]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #896]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #892]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #888]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r2, [r0, #44]
+ add r0, sp, #784
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #116] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #800
+ adds r0, r0, r8
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r2, r0, r9
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #824
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #788]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #792]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #796]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #784]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r1, r2, r4
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ str r1, [sp, #116] @ 4-byte Spill
+ mul r2, r1, r0
+ ldr r0, [sp, #848]
+ str r2, [sp, #24] @ 4-byte Spill
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #844]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #840]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r4, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #112] @ 4-byte Reload
+ adcs r6, r11, r6
+ str r6, [sp, #68] @ 4-byte Spill
+ ldr r6, [sp, #108] @ 4-byte Reload
+ adcs r5, r6, r5
+ str r5, [sp, #64] @ 4-byte Spill
+ ldr r5, [sp, #104] @ 4-byte Reload
+ adcs r5, r5, r7
+ str r5, [sp, #60] @ 4-byte Spill
+ ldr r5, [sp, #100] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ add r0, sp, #712
+ bl .LmulPv512x32(PLT)
+ ldr r1, [sp, #776]
+ ldr r11, [sp, #740]
+ ldr r8, [sp, #736]
+ ldr r9, [sp, #732]
+ ldr r10, [sp, #728]
+ ldr r6, [sp, #712]
+ ldr r7, [sp, #716]
+ ldr r5, [sp, #720]
+ ldr r4, [sp, #724]
+ add r0, sp, #640
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #772]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #768]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #764]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #760]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #756]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #752]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #748]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #744]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [sp, #128] @ 4-byte Reload
+ ldr r2, [r1, #48]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #116] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #644
+ adds r0, r0, r6
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #668
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #704]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #700]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #696]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #692]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #688]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r6, [sp, #640]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #116] @ 4-byte Reload
+ ldr r7, [sp, #68] @ 4-byte Reload
+ adds r6, r11, r6
+ adcs r0, r7, r0
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #568
+ bl .LmulPv512x32(PLT)
+ ldr r1, [sp, #632]
+ ldr r6, [sp, #596]
+ ldr r7, [sp, #592]
+ ldr r8, [sp, #588]
+ ldr r5, [sp, #584]
+ ldr r9, [sp, #568]
+ ldr r10, [sp, #572]
+ ldr r4, [sp, #576]
+ ldr r11, [sp, #580]
+ add r0, sp, #496
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #628]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #624]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #620]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #616]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #612]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #608]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #604]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #600]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [sp, #128] @ 4-byte Reload
+ ldr r2, [r1, #52]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #500
+ adds r0, r0, r9
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #524
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #560]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #556]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #552]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #548]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #544]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r6, [sp, #496]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #72] @ 4-byte Reload
+ ldr r7, [sp, #68] @ 4-byte Reload
+ adds r6, r11, r6
+ adcs r0, r7, r0
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #424
+ bl .LmulPv512x32(PLT)
+ ldr r1, [sp, #488]
+ ldr r6, [sp, #452]
+ ldr r7, [sp, #448]
+ ldr r8, [sp, #444]
+ ldr r5, [sp, #440]
+ ldr r9, [sp, #424]
+ ldr r10, [sp, #428]
+ ldr r4, [sp, #432]
+ ldr r11, [sp, #436]
+ add r0, sp, #352
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #484]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #480]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #476]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #472]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #468]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #464]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #460]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #456]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [sp, #128] @ 4-byte Reload
+ ldr r2, [r1, #56]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #356
+ adds r0, r0, r9
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #380
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #416]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #412]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #408]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #404]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #400]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r6, [sp, #352]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #72] @ 4-byte Reload
+ ldr r7, [sp, #68] @ 4-byte Reload
+ adds r6, r11, r6
+ adcs r0, r7, r0
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #280
+ bl .LmulPv512x32(PLT)
+ ldr r1, [sp, #344]
+ add r11, sp, #284
+ ldr r8, [sp, #308]
+ ldr r9, [sp, #304]
+ ldr r10, [sp, #300]
+ ldr r7, [sp, #280]
+ add r0, sp, #208
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #340]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #336]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #332]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #328]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #324]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #320]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #316]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #312]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r5, r6, r11}
+ ldr r1, [sp, #128] @ 4-byte Reload
+ ldr r2, [r1, #60]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ add lr, sp, #224
+ adds r0, r0, r7
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r4
+ adcs r1, r1, r5
+ str r1, [sp, #128] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r6
+ str r1, [sp, #124] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r11
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, r10
+ add r10, sp, #248
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r9
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #116] @ 4-byte Reload
+ adcs r1, r1, r8
+ add r8, sp, #208
+ str r1, [sp, #116] @ 4-byte Spill
+ ldr r1, [sp, #112] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #112] @ 4-byte Spill
+ ldr r1, [sp, #108] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #108] @ 4-byte Spill
+ ldr r1, [sp, #104] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #104] @ 4-byte Spill
+ ldr r1, [sp, #100] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [sp, #96] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #96] @ 4-byte Spill
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adc r1, r1, r2
+ str r1, [sp, #76] @ 4-byte Spill
+ ldm r8, {r4, r5, r6, r8}
+ adds r9, r0, r4
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r1, r9, r0
+ ldr r0, [sp, #272]
+ str r1, [sp, #60] @ 4-byte Spill
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #268]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #264]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #260]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldm r10, {r4, r7, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #128] @ 4-byte Reload
+ adcs r11, r11, r5
+ ldr r5, [sp, #124] @ 4-byte Reload
+ adcs r6, r5, r6
+ ldr r5, [sp, #72] @ 4-byte Reload
+ adcs r8, r5, r8
+ ldr r5, [sp, #68] @ 4-byte Reload
+ adcs r5, r5, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r7
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r10, r0, #0
+ add r0, sp, #136
+ bl .LmulPv512x32(PLT)
+ add r3, sp, #136
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r9, r0
+ ldr r0, [sp, #152]
+ adcs r4, r11, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r6, r6, r2
+ str r4, [sp, #48] @ 4-byte Spill
+ adcs lr, r8, r3
+ mov r3, r7
+ str r6, [sp, #56] @ 4-byte Spill
+ str lr, [sp, #60] @ 4-byte Spill
+ adcs r5, r5, r0
+ ldr r0, [sp, #156]
+ str r5, [sp, #68] @ 4-byte Spill
+ adcs r9, r1, r0
+ ldr r0, [sp, #160]
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #72] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #164]
+ adcs r0, r1, r0
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #168]
+ adcs r0, r1, r0
+ ldr r1, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #172]
+ adcs r0, r1, r0
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #176]
+ adcs r0, r1, r0
+ ldr r1, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #180]
+ adcs r0, r1, r0
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #184]
+ adcs r0, r1, r0
+ ldr r1, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #188]
+ adcs r0, r1, r0
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #192]
+ adcs r0, r1, r0
+ ldr r1, [sp, #104] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #196]
+ adcs r0, r1, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #200]
+ adc r10, r10, r0
+ ldm r3, {r0, r7}
+ ldr r1, [r3, #8]
+ ldr r2, [r3, #12]
+ subs r12, r4, r0
+ ldr r0, [r3, #32]
+ sbcs r4, r6, r7
+ ldr r7, [r3, #60]
+ sbcs r6, lr, r1
+ add lr, r3, #16
+ ldr r1, [r3, #28]
+ sbcs r8, r5, r2
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [r3, #36]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r3, #40]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r3, #44]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [r3, #48]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [r3, #52]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r3, #56]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldm lr, {r0, r5, lr}
+ ldr r11, [sp, #84] @ 4-byte Reload
+ sbcs r2, r9, r0
+ ldr r0, [sp, #96] @ 4-byte Reload
+ sbcs r3, r0, r5
+ ldr r0, [sp, #108] @ 4-byte Reload
+ sbcs lr, r11, lr
+ sbcs r5, r0, r1
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #64] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #72] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ sbcs r0, r0, r1
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ sbc r1, r10, r7
+ ldr r7, [sp, #56] @ 4-byte Reload
+ cmp r1, #0
+ movlt r12, r0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ movlt r4, r7
+ ldr r7, [sp, #52] @ 4-byte Reload
+ str r12, [r0]
+ str r4, [r0, #4]
+ ldr r4, [sp, #60] @ 4-byte Reload
+ ldr r12, [sp, #64] @ 4-byte Reload
+ movlt r6, r4
+ cmp r1, #0
+ ldr r4, [sp, #88] @ 4-byte Reload
+ str r6, [r0, #8]
+ ldr r6, [sp, #68] @ 4-byte Reload
+ movlt r2, r9
+ movlt r8, r6
+ ldr r6, [sp, #76] @ 4-byte Reload
+ str r8, [r0, #12]
+ str r2, [r0, #16]
+ ldr r2, [sp, #96] @ 4-byte Reload
+ movlt r3, r2
+ ldr r2, [sp, #108] @ 4-byte Reload
+ cmp r1, #0
+ movlt lr, r11
+ str r3, [r0, #20]
+ ldr r3, [sp, #132] @ 4-byte Reload
+ str lr, [r0, #24]
+ ldr lr, [sp, #72] @ 4-byte Reload
+ movlt r5, r2
+ ldr r2, [sp, #112] @ 4-byte Reload
+ str r5, [r0, #28]
+ ldr r5, [sp, #80] @ 4-byte Reload
+ movlt r12, r2
+ ldr r2, [sp, #124] @ 4-byte Reload
+ cmp r1, #0
+ str r12, [r0, #32]
+ movlt lr, r2
+ ldr r2, [sp, #120] @ 4-byte Reload
+ str lr, [r0, #36]
+ movlt r6, r2
+ ldr r2, [sp, #116] @ 4-byte Reload
+ str r6, [r0, #40]
+ movlt r5, r2
+ ldr r2, [sp, #128] @ 4-byte Reload
+ cmp r1, #0
+ str r5, [r0, #44]
+ movlt r4, r2
+ ldr r2, [sp, #100] @ 4-byte Reload
+ str r4, [r0, #48]
+ movlt r3, r2
+ ldr r2, [sp, #104] @ 4-byte Reload
+ str r3, [r0, #52]
+ movlt r7, r2
+ cmp r1, #0
+ movlt r1, r10
+ str r7, [r0, #56]
+ str r1, [r0, #60]
+ add sp, sp, #396
+ add sp, sp, #2048
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end243:
+ .size mcl_fp_montNF16L, .Lfunc_end243-mcl_fp_montNF16L
+ .cantunwind
+ .fnend
+
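+@ Note (inferred from the code below, not from the generator): mcl_fp_montRed16L
+@ appears to be the Montgomery reduction routine for a 16 x 32-bit (512-bit) modulus.
+@ It reduces a 32-limb value modulo p: each of the 16 rounds multiplies the current
+@ low limb by rp = -p^{-1} mod 2^32 (loaded from the word just below the modulus,
+@ "ldr r0, [r3, #-4]"), adds q*p via .LmulPv512x32 so that low limb cancels, then
+@ drops it from the running sum; the trailing subs/sbcs + movne sequence is the
+@ final conditional subtraction of p. Register roles (r0 = result, r1 = input,
+@ r2 = modulus) are an assumption based on how the prologue uses them.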
+ .globl mcl_fp_montRed16L
+ .align 2
+ .type mcl_fp_montRed16L,%function
+mcl_fp_montRed16L: @ @mcl_fp_montRed16L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #356
+ sub sp, sp, #356
+ .pad #1024
+ sub sp, sp, #1024
+ mov r3, r2
+ str r0, [sp, #200] @ 4-byte Spill
+ ldr r2, [r1, #4]
+ ldr r7, [r1]
+ add r10, sp, #1024
+ ldr r0, [r3]
+ str r3, [sp, #216] @ 4-byte Spill
+ str r2, [sp, #112] @ 4-byte Spill
+ ldr r2, [r1, #8]
+ str r0, [sp, #192] @ 4-byte Spill
+ ldr r0, [r3, #4]
+ str r2, [sp, #108] @ 4-byte Spill
+ ldr r2, [r1, #12]
+ str r0, [sp, #188] @ 4-byte Spill
+ ldr r0, [r3, #8]
+ str r2, [sp, #104] @ 4-byte Spill
+ str r0, [sp, #184] @ 4-byte Spill
+ ldr r0, [r3, #12]
+ str r0, [sp, #168] @ 4-byte Spill
+ ldr r0, [r3, #16]
+ str r0, [sp, #172] @ 4-byte Spill
+ ldr r0, [r3, #20]
+ str r0, [sp, #176] @ 4-byte Spill
+ ldr r0, [r3, #24]
+ str r0, [sp, #180] @ 4-byte Spill
+ ldr r0, [r3, #-4]
+ str r0, [sp, #220] @ 4-byte Spill
+ mul r2, r7, r0
+ ldr r0, [r3, #60]
+ str r0, [sp, #196] @ 4-byte Spill
+ ldr r0, [r3, #32]
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [r3, #36]
+ str r0, [sp, #144] @ 4-byte Spill
+ ldr r0, [r3, #40]
+ str r0, [sp, #148] @ 4-byte Spill
+ ldr r0, [r3, #44]
+ str r0, [sp, #152] @ 4-byte Spill
+ ldr r0, [r3, #48]
+ str r0, [sp, #156] @ 4-byte Spill
+ ldr r0, [r3, #52]
+ str r0, [sp, #160] @ 4-byte Spill
+ ldr r0, [r3, #56]
+ str r0, [sp, #164] @ 4-byte Spill
+ ldr r0, [r3, #28]
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [r1, #96]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [r1, #100]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [r1, #104]
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [r1, #108]
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [r1, #112]
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [r1, #116]
+ str r0, [sp, #204] @ 4-byte Spill
+ ldr r0, [r1, #120]
+ str r0, [sp, #208] @ 4-byte Spill
+ ldr r0, [r1, #124]
+ str r0, [sp, #212] @ 4-byte Spill
+ ldr r0, [r1, #64]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r1, #68]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r1, #72]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [r1, #76]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [r1, #80]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [r1, #84]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [r1, #88]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [r1, #92]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [r1, #32]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [r1, #36]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [r1, #40]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r1, #44]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r1, #48]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r1, #52]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r1, #56]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r1, #60]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r1, #28]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [r1, #24]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [r1, #20]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [r1, #16]
+ mov r1, r3
+ str r0, [sp, #16] @ 4-byte Spill
+ add r0, r10, #280
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #1368]
+ ldr r10, [sp, #1304]
+ ldr r1, [sp, #1312]
+ ldr r2, [sp, #1316]
+ ldr r3, [sp, #1320]
+ ldr r12, [sp, #1324]
+ ldr lr, [sp, #1328]
+ ldr r4, [sp, #1332]
+ ldr r5, [sp, #1336]
+ ldr r6, [sp, #1340]
+ ldr r8, [sp, #1344]
+ ldr r9, [sp, #1348]
+ ldr r11, [sp, #1352]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1364]
+ adds r7, r7, r10
+ ldr r7, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1360]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1356]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #1308]
+ adcs r10, r7, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #216] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #204] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #204] @ 4-byte Spill
+ ldr r0, [sp, #208] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #208] @ 4-byte Spill
+ ldr r0, [sp, #212] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #212] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #220] @ 4-byte Reload
+ mul r2, r10, r0
+ add r0, sp, #1232
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #1296]
+ ldr r4, [sp, #1232]
+ ldr r1, [sp, #1240]
+ ldr r2, [sp, #1244]
+ ldr r3, [sp, #1248]
+ ldr r9, [sp, #1252]
+ ldr r12, [sp, #1256]
+ ldr r11, [sp, #1260]
+ ldr lr, [sp, #1264]
+ ldr r6, [sp, #1268]
+ ldr r7, [sp, #1272]
+ ldr r8, [sp, #1276]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1292]
+ adds r4, r10, r4
+ ldr r4, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1288]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1284]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #1280]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #1236]
+ adcs r10, r4, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r4, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #4] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r10, r4
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r11
+ mov r11, r10
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r8
+ add r8, sp, #1024
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #204] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #204] @ 4-byte Spill
+ ldr r0, [sp, #208] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #208] @ 4-byte Spill
+ ldr r0, [sp, #212] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #212] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ add r0, r8, #136
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #1224]
+ add r12, sp, #1168
+ ldr r9, [sp, #1204]
+ ldr r7, [sp, #1200]
+ ldr r6, [sp, #1196]
+ ldr r5, [sp, #1192]
+ ldr lr, [sp, #1188]
+ ldr r10, [sp, #1184]
+ ldr r8, [sp, #1164]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1220]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1216]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1212]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1208]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #1160]
+ ldm r12, {r1, r2, r3, r12}
+ adds r0, r11, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r8, r0, r8
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r8, r4
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ mov r10, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #216] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #204] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #204] @ 4-byte Spill
+ ldr r0, [sp, #208] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #208] @ 4-byte Spill
+ ldr r0, [sp, #212] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #212] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ add r0, sp, #1088
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #1152]
+ add r9, sp, #1120
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1148]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1144]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1140]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1136]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r9, {r6, r7, r8, r9}
+ ldr r4, [sp, #1088]
+ ldr r0, [sp, #1092]
+ ldr r1, [sp, #1096]
+ ldr r2, [sp, #1100]
+ ldr r3, [sp, #1104]
+ ldr r12, [sp, #1108]
+ ldr lr, [sp, #1112]
+ ldr r11, [sp, #1116]
+ adds r4, r10, r4
+ ldr r4, [sp, #112] @ 4-byte Reload
+ adcs r10, r4, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r4, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r10, r4
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r8
+ mov r8, r10
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #204] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #204] @ 4-byte Spill
+ ldr r0, [sp, #208] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #208] @ 4-byte Spill
+ ldr r0, [sp, #212] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #212] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #1016
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #1080]
+ add r11, sp, #1016
+ ldr r6, [sp, #1060]
+ ldr r7, [sp, #1056]
+ ldr r5, [sp, #1052]
+ ldr lr, [sp, #1048]
+ ldr r12, [sp, #1044]
+ ldr r10, [sp, #1040]
+ ldr r9, [sp, #1036]
+ ldr r3, [sp, #1032]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1076]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1072]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1068]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1064]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r11, {r0, r1, r2, r11}
+ adds r0, r8, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r1, r0, r1
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ mov r10, r1
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #216] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r1, r4
+ mov r1, r5
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #204] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #204] @ 4-byte Spill
+ ldr r0, [sp, #208] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #208] @ 4-byte Spill
+ ldr r0, [sp, #212] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #212] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, sp, #944
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #1008]
+ add r9, sp, #976
+ add lr, sp, #948
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1004]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1000]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #996]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #992]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm r9, {r6, r7, r8, r9}
+ ldr r4, [sp, #944]
+ ldr r11, [sp, #972]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r4, r10, r4
+ ldr r4, [sp, #112] @ 4-byte Reload
+ adcs r10, r4, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r4, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r10, r4
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r11
+ mov r11, r10
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #204] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #204] @ 4-byte Spill
+ ldr r0, [sp, #208] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #208] @ 4-byte Spill
+ ldr r0, [sp, #212] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #212] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #872
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #936]
+ add lr, sp, #888
+ add r8, sp, #872
+ ldr r6, [sp, #916]
+ ldr r7, [sp, #912]
+ ldr r5, [sp, #908]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #932]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #928]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #924]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #920]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm lr, {r3, r9, r10, r12, lr}
+ ldm r8, {r0, r1, r2, r8}
+ adds r0, r11, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r11, r0, r1
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r1, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #216] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #204] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #204] @ 4-byte Spill
+ ldr r0, [sp, #208] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #208] @ 4-byte Spill
+ ldr r0, [sp, #212] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #212] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ add r0, sp, #800
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #864]
+ add r10, sp, #828
+ add lr, sp, #804
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #860]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #856]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #852]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #848]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r10, {r6, r7, r8, r9, r10}
+ ldr r4, [sp, #800]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r4, r11, r4
+ ldr r4, [sp, #112] @ 4-byte Reload
+ adcs r11, r4, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r4, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #204] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #204] @ 4-byte Spill
+ ldr r0, [sp, #208] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #208] @ 4-byte Spill
+ ldr r0, [sp, #212] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #212] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ add r0, sp, #728
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #792]
+ add r8, sp, #760
+ add lr, sp, #748
+ add r12, sp, #728
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #788]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #784]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #780]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #776]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r8, {r5, r6, r7, r8}
+ ldm lr, {r9, r10, lr}
+ ldm r12, {r0, r1, r2, r3, r12}
+ adds r0, r11, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r11, r0, r1
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #216] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #204] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #204] @ 4-byte Spill
+ ldr r0, [sp, #208] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #208] @ 4-byte Spill
+ ldr r0, [sp, #212] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #212] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ add r0, sp, #656
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #720]
+ add r10, sp, #684
+ add lr, sp, #660
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #716]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #712]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #708]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #704]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r6, r7, r8, r9, r10}
+ ldr r4, [sp, #656]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r4, r11, r4
+ ldr r4, [sp, #112] @ 4-byte Reload
+ adcs r11, r4, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r4, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #204] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #204] @ 4-byte Spill
+ ldr r0, [sp, #208] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #208] @ 4-byte Spill
+ ldr r0, [sp, #212] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #212] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ add r0, sp, #584
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #648]
+ add r8, sp, #616
+ add lr, sp, #604
+ add r12, sp, #584
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #644]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #640]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #636]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #632]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldm r8, {r5, r6, r7, r8}
+ ldm lr, {r9, r10, lr}
+ ldm r12, {r0, r1, r2, r3, r12}
+ adds r0, r11, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r11, r0, r1
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #216] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #204] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #204] @ 4-byte Spill
+ ldr r0, [sp, #208] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #208] @ 4-byte Spill
+ ldr r0, [sp, #212] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #212] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ add r0, sp, #512
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #576]
+ add r10, sp, #540
+ add lr, sp, #516
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #572]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #568]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #564]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #560]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldm r10, {r6, r7, r8, r9, r10}
+ ldr r4, [sp, #512]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r4, r11, r4
+ ldr r4, [sp, #112] @ 4-byte Reload
+ adcs r11, r4, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r4, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #204] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #204] @ 4-byte Spill
+ ldr r0, [sp, #208] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #208] @ 4-byte Spill
+ ldr r0, [sp, #212] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #212] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ add r0, sp, #440
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #504]
+ add r8, sp, #472
+ add lr, sp, #460
+ add r12, sp, #440
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #500]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #496]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #492]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #488]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldm r8, {r5, r6, r7, r8}
+ ldm lr, {r9, r10, lr}
+ ldm r12, {r0, r1, r2, r3, r12}
+ adds r0, r11, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r11, r0, r1
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #216] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #204] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #208] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #212] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ add r0, sp, #368
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #432]
+ add r10, sp, #396
+ add lr, sp, #372
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #428]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #424]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #420]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #416]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm r10, {r6, r7, r8, r9, r10}
+ ldr r4, [sp, #368]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r4, r11, r4
+ ldr r4, [sp, #112] @ 4-byte Reload
+ adcs r4, r4, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r11, r0, r1
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r1, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #212] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #208] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #220] @ 4-byte Reload
+ str r0, [sp, #204] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ mul r2, r4, r6
+ adcs r0, r0, r7
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ add r0, sp, #296
+ bl .LmulPv512x32(PLT)
+ add r5, sp, #296
+ add r7, sp, #336
+ add lr, sp, #312
+ ldm r5, {r0, r1, r3, r5}
+ ldr r9, [sp, #356]
+ adds r0, r4, r0
+ adcs r8, r11, r1
+ ldr r11, [sp, #352]
+ mul r0, r8, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #360]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #220] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #348]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldm r7, {r4, r6, r7}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r10, [sp, #212] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #208] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #204] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #208] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #204] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #212] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r10, r0, r11
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r6, r0, r1
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r1, [sp, #216] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ add r0, sp, #224
+ bl .LmulPv512x32(PLT)
+ add r3, sp, #224
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r8, r0
+ ldr r0, [sp, #220] @ 4-byte Reload
+ adcs r12, r0, r1
+ ldr r0, [sp, #84] @ 4-byte Reload
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r12, [sp, #92] @ 4-byte Spill
+ adcs r2, r0, r2
+ ldr r0, [sp, #52] @ 4-byte Reload
+ str r2, [sp, #96] @ 4-byte Spill
+ adcs lr, r0, r3
+ ldr r0, [sp, #240]
+ str lr, [sp, #100] @ 4-byte Spill
+ adcs r4, r1, r0
+ ldr r0, [sp, #244]
+ ldr r1, [sp, #64] @ 4-byte Reload
+ str r4, [sp, #104] @ 4-byte Spill
+ adcs r5, r1, r0
+ ldr r0, [sp, #248]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r5, [sp, #108] @ 4-byte Spill
+ adcs r7, r1, r0
+ ldr r0, [sp, #252]
+ ldr r1, [sp, #208] @ 4-byte Reload
+ str r7, [sp, #112] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #204] @ 4-byte Reload
+ str r0, [sp, #208] @ 4-byte Spill
+ ldr r0, [sp, #256]
+ adcs r0, r1, r0
+ ldr r1, [sp, #72] @ 4-byte Reload
+ str r0, [sp, #204] @ 4-byte Spill
+ ldr r0, [sp, #260]
+ adcs r11, r1, r0
+ ldr r0, [sp, #264]
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str r11, [sp, #116] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #212] @ 4-byte Reload
+ str r0, [sp, #216] @ 4-byte Spill
+ ldr r0, [sp, #268]
+ adcs r0, r1, r0
+ ldr r1, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #212] @ 4-byte Spill
+ ldr r0, [sp, #272]
+ adcs r0, r1, r0
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #220] @ 4-byte Spill
+ ldr r0, [sp, #276]
+ adcs r10, r10, r0
+ ldr r0, [sp, #280]
+ str r10, [sp, #128] @ 4-byte Spill
+ adcs r8, r1, r0
+ ldr r0, [sp, #284]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r8, [sp, #132] @ 4-byte Spill
+ adcs r6, r6, r0
+ ldr r0, [sp, #288]
+ adcs r3, r1, r0
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #192] @ 4-byte Reload
+ subs r1, r12, r0
+ ldr r0, [sp, #188] @ 4-byte Reload
+ sbcs r2, r2, r0
+ ldr r0, [sp, #184] @ 4-byte Reload
+ sbcs r12, lr, r0
+ ldr r0, [sp, #168] @ 4-byte Reload
+ sbcs lr, r4, r0
+ ldr r0, [sp, #172] @ 4-byte Reload
+ sbcs r4, r5, r0
+ ldr r0, [sp, #176] @ 4-byte Reload
+ sbcs r5, r7, r0
+ ldr r0, [sp, #180] @ 4-byte Reload
+ ldr r7, [sp, #208] @ 4-byte Reload
+ sbcs r9, r7, r0
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r7, [sp, #204] @ 4-byte Reload
+ sbcs r0, r7, r0
+ ldr r7, [sp, #212] @ 4-byte Reload
+ str r0, [sp, #172] @ 4-byte Spill
+ ldr r0, [sp, #140] @ 4-byte Reload
+ sbcs r0, r11, r0
+ ldr r11, [sp, #216] @ 4-byte Reload
+ str r0, [sp, #176] @ 4-byte Spill
+ ldr r0, [sp, #144] @ 4-byte Reload
+ sbcs r0, r11, r0
+ ldr r11, [sp, #220] @ 4-byte Reload
+ str r0, [sp, #180] @ 4-byte Spill
+ ldr r0, [sp, #148] @ 4-byte Reload
+ sbcs r0, r7, r0
+ str r0, [sp, #184] @ 4-byte Spill
+ ldr r0, [sp, #152] @ 4-byte Reload
+ sbcs r0, r11, r0
+ str r0, [sp, #188] @ 4-byte Spill
+ ldr r0, [sp, #156] @ 4-byte Reload
+ sbcs r0, r10, r0
+ mov r10, r6
+ str r0, [sp, #192] @ 4-byte Spill
+ ldr r0, [sp, #160] @ 4-byte Reload
+ sbcs r7, r8, r0
+ ldr r0, [sp, #164] @ 4-byte Reload
+ mov r8, r3
+ sbcs r11, r6, r0
+ ldr r0, [sp, #196] @ 4-byte Reload
+ sbcs r6, r3, r0
+ ldr r0, [sp, #124] @ 4-byte Reload
+ sbc r3, r0, #0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ands r3, r3, #1
+ movne r1, r0
+ ldr r0, [sp, #200] @ 4-byte Reload
+ str r1, [r0]
+ ldr r1, [sp, #96] @ 4-byte Reload
+ movne r2, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r2, [r0, #4]
+ ldr r2, [sp, #172] @ 4-byte Reload
+ movne r12, r1
+ ldr r1, [sp, #104] @ 4-byte Reload
+ cmp r3, #0
+ str r12, [r0, #8]
+ movne lr, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str lr, [r0, #12]
+ movne r4, r1
+ ldr r1, [sp, #112] @ 4-byte Reload
+ str r4, [r0, #16]
+ movne r5, r1
+ ldr r1, [sp, #208] @ 4-byte Reload
+ cmp r3, #0
+ str r5, [r0, #20]
+ movne r9, r1
+ ldr r1, [sp, #204] @ 4-byte Reload
+ str r9, [r0, #24]
+ movne r2, r1
+ ldr r1, [sp, #176] @ 4-byte Reload
+ str r2, [r0, #28]
+ ldr r2, [sp, #116] @ 4-byte Reload
+ movne r1, r2
+ cmp r3, #0
+ ldr r2, [sp, #180] @ 4-byte Reload
+ str r1, [r0, #32]
+ ldr r1, [sp, #216] @ 4-byte Reload
+ movne r2, r1
+ ldr r1, [sp, #212] @ 4-byte Reload
+ str r2, [r0, #36]
+ ldr r2, [sp, #184] @ 4-byte Reload
+ movne r2, r1
+ ldr r1, [sp, #220] @ 4-byte Reload
+ str r2, [r0, #40]
+ ldr r2, [sp, #188] @ 4-byte Reload
+ movne r2, r1
+ cmp r3, #0
+ ldr r1, [sp, #192] @ 4-byte Reload
+ str r2, [r0, #44]
+ ldr r2, [sp, #128] @ 4-byte Reload
+ movne r11, r10
+ movne r1, r2
+ str r1, [r0, #48]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ movne r7, r1
+ cmp r3, #0
+ movne r6, r8
+ str r7, [r0, #52]
+ str r11, [r0, #56]
+ str r6, [r0, #60]
+ add sp, sp, #356
+ add sp, sp, #1024
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end244:
+ .size mcl_fp_montRed16L, .Lfunc_end244-mcl_fp_montRed16L
+ .cantunwind
+ .fnend
+
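+@ z = x + y over 16 32-bit limbs (512 bits), no modular reduction.
+@ r0 = z, r1 = x, r2 = y; the carry out of the top limb is returned in r0.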
+ .globl mcl_fp_addPre16L
+ .align 2
+ .type mcl_fp_addPre16L,%function
+mcl_fp_addPre16L: @ @mcl_fp_addPre16L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #64
+ sub sp, sp, #64
+ ldm r1, {r3, r8}
+ ldr r5, [r1, #8]
+ ldr r6, [r1, #12]
+ ldm r2, {r7, r12, lr}
+ ldr r4, [r2, #12]
+ ldr r9, [r1, #32]
+ ldr r11, [r1, #52]
+ adds r3, r7, r3
+ str r3, [sp, #28] @ 4-byte Spill
+ ldr r3, [r2, #32]
+ adcs r7, r12, r8
+ ldr r8, [r2, #24]
+ add r12, r1, #16
+ adcs r5, lr, r5
+ ldr lr, [r2, #16]
+ adcs r6, r4, r6
+ ldr r4, [r2, #20]
+ str r3, [sp, #32] @ 4-byte Spill
+ ldr r3, [r2, #36]
+ str r3, [sp, #36] @ 4-byte Spill
+ ldr r3, [r2, #40]
+ str r3, [sp, #40] @ 4-byte Spill
+ ldr r3, [r2, #44]
+ str r3, [sp, #44] @ 4-byte Spill
+ ldr r3, [r2, #48]
+ str r3, [sp, #48] @ 4-byte Spill
+ ldr r3, [r2, #52]
+ str r3, [sp, #52] @ 4-byte Spill
+ ldr r3, [r2, #56]
+ str r3, [sp, #56] @ 4-byte Spill
+ ldr r3, [r2, #60]
+ str r3, [sp, #60] @ 4-byte Spill
+ ldr r3, [r2, #28]
+ ldr r2, [r1, #36]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #40]
+ str r3, [sp, #24] @ 4-byte Spill
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r1, #44]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #48]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [r1, #56]
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #20] @ 4-byte Spill
+ ldm r12, {r1, r2, r3, r12}
+ ldr r10, [sp, #28] @ 4-byte Reload
+ adcs r1, lr, r1
+ str r10, [r0]
+ str r7, [r0, #4]
+ str r5, [r0, #8]
+ str r6, [r0, #12]
+ adcs r2, r4, r2
+ str r1, [r0, #16]
+ str r2, [r0, #20]
+ adcs r1, r8, r3
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [r0, #24]
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r2, r2, r12
+ adcs r12, r1, r9
+ str r2, [r0, #28]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ ldr r2, [sp] @ 4-byte Reload
+ str r12, [r0, #32]
+ adcs lr, r1, r2
+ ldr r1, [sp, #40] @ 4-byte Reload
+ ldr r2, [sp, #4] @ 4-byte Reload
+ str lr, [r0, #36]
+ adcs r3, r1, r2
+ ldr r1, [sp, #44] @ 4-byte Reload
+ ldr r2, [sp, #8] @ 4-byte Reload
+ str r3, [r0, #40]
+ adcs r7, r1, r2
+ ldr r1, [sp, #48] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r7, [r0, #44]
+ adcs r6, r1, r2
+ ldr r1, [sp, #52] @ 4-byte Reload
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r6, [r0, #48]
+ adcs r5, r1, r11
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r5, [r0, #52]
+ adcs r4, r1, r2
+ ldr r1, [sp, #60] @ 4-byte Reload
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r4, [r0, #56]
+ adcs r1, r1, r2
+ str r1, [r0, #60]
+ mov r0, #0
+ adc r0, r0, #0
+ add sp, sp, #64
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end245:
+ .size mcl_fp_addPre16L, .Lfunc_end245-mcl_fp_addPre16L
+ .cantunwind
+ .fnend
+
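+@ z = x - y over 16 32-bit limbs, no modular reduction.
+@ r0 = z, r1 = x, r2 = y; the borrow (0 or 1) is returned in r0.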
+ .globl mcl_fp_subPre16L
+ .align 2
+ .type mcl_fp_subPre16L,%function
+mcl_fp_subPre16L: @ @mcl_fp_subPre16L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #64
+ sub sp, sp, #64
+ ldm r2, {r3, r8}
+ ldr r5, [r2, #8]
+ ldr r6, [r2, #12]
+ ldm r1, {r7, r12, lr}
+ ldr r4, [r1, #12]
+ ldr r9, [r1, #32]
+ ldr r11, [r1, #52]
+ subs r3, r7, r3
+ str r3, [sp, #28] @ 4-byte Spill
+ ldr r3, [r2, #32]
+ sbcs r7, r12, r8
+ ldr r8, [r2, #24]
+ add r12, r1, #16
+ sbcs r5, lr, r5
+ ldr lr, [r2, #16]
+ sbcs r6, r4, r6
+ ldr r4, [r2, #20]
+ str r3, [sp, #32] @ 4-byte Spill
+ ldr r3, [r2, #36]
+ str r3, [sp, #36] @ 4-byte Spill
+ ldr r3, [r2, #40]
+ str r3, [sp, #40] @ 4-byte Spill
+ ldr r3, [r2, #44]
+ str r3, [sp, #44] @ 4-byte Spill
+ ldr r3, [r2, #48]
+ str r3, [sp, #48] @ 4-byte Spill
+ ldr r3, [r2, #52]
+ str r3, [sp, #52] @ 4-byte Spill
+ ldr r3, [r2, #56]
+ str r3, [sp, #56] @ 4-byte Spill
+ ldr r3, [r2, #60]
+ str r3, [sp, #60] @ 4-byte Spill
+ ldr r3, [r2, #28]
+ ldr r2, [r1, #36]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #40]
+ str r3, [sp, #24] @ 4-byte Spill
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r1, #44]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #48]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [r1, #56]
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #20] @ 4-byte Spill
+ ldm r12, {r1, r2, r3, r12}
+ ldr r10, [sp, #28] @ 4-byte Reload
+ sbcs r1, r1, lr
+ str r10, [r0]
+ str r7, [r0, #4]
+ str r5, [r0, #8]
+ str r6, [r0, #12]
+ sbcs r2, r2, r4
+ str r1, [r0, #16]
+ str r2, [r0, #20]
+ sbcs r1, r3, r8
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [r0, #24]
+ ldr r1, [sp, #32] @ 4-byte Reload
+ sbcs r2, r12, r2
+ sbcs r12, r9, r1
+ str r2, [r0, #28]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ ldr r2, [sp] @ 4-byte Reload
+ str r12, [r0, #32]
+ sbcs lr, r2, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ ldr r2, [sp, #4] @ 4-byte Reload
+ str lr, [r0, #36]
+ sbcs r3, r2, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ ldr r2, [sp, #8] @ 4-byte Reload
+ str r3, [r0, #40]
+ sbcs r7, r2, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r7, [r0, #44]
+ sbcs r6, r2, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r6, [r0, #48]
+ sbcs r5, r11, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r5, [r0, #52]
+ sbcs r4, r2, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r4, [r0, #56]
+ sbcs r1, r2, r1
+ str r1, [r0, #60]
+ mov r0, #0
+ sbc r0, r0, #0
+ and r0, r0, #1
+ add sp, sp, #64
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end246:
+ .size mcl_fp_subPre16L, .Lfunc_end246-mcl_fp_subPre16L
+ .cantunwind
+ .fnend
+
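+@ z = x >> 1: one-bit right shift across 16 32-bit limbs (512 bits); r0 = z, r1 = x.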
+ .globl mcl_fp_shr1_16L
+ .align 2
+ .type mcl_fp_shr1_16L,%function
+mcl_fp_shr1_16L: @ @mcl_fp_shr1_16L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #24
+ sub sp, sp, #24
+ ldr r3, [r1, #16]
+ ldr r2, [r1, #20]
+ ldr r12, [r1, #24]
+ ldr r11, [r1, #28]
+ ldm r1, {r4, r5, r6, r7}
+ ldr r8, [r1, #56]
+ ldr lr, [r1, #32]
+ ldr r9, [r1, #36]
+ ldr r10, [r1, #40]
+ str r4, [sp, #4] @ 4-byte Spill
+ lsr r4, r5, #1
+ str r8, [sp, #16] @ 4-byte Spill
+ orr r4, r4, r6, lsl #31
+ str r4, [sp] @ 4-byte Spill
+ ldr r4, [r1, #44]
+ str r4, [sp, #8] @ 4-byte Spill
+ ldr r4, [r1, #48]
+ str r4, [sp, #12] @ 4-byte Spill
+ ldr r4, [r1, #52]
+ ldr r1, [r1, #60]
+ str r1, [sp, #20] @ 4-byte Spill
+ lsr r1, r7, #1
+ lsrs r7, r7, #1
+ rrx r6, r6
+ lsrs r5, r5, #1
+ orr r1, r1, r3, lsl #31
+ ldr r5, [sp, #4] @ 4-byte Reload
+ rrx r5, r5
+ str r5, [r0]
+ ldr r5, [sp] @ 4-byte Reload
+ stmib r0, {r5, r6}
+ str r1, [r0, #12]
+ lsrs r1, r2, #1
+ rrx r1, r3
+ str r1, [r0, #16]
+ lsr r1, r2, #1
+ lsr r2, r9, #1
+ orr r1, r1, r12, lsl #31
+ str r1, [r0, #20]
+ lsrs r1, r11, #1
+ rrx r1, r12
+ str r1, [r0, #24]
+ lsr r1, r11, #1
+ orr r1, r1, lr, lsl #31
+ str r1, [r0, #28]
+ lsrs r1, r9, #1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ rrx r12, lr
+ orr lr, r2, r10, lsl #31
+ mov r2, r4
+ lsr r5, r2, #1
+ str r12, [r0, #32]
+ str lr, [r0, #36]
+ lsrs r3, r1, #1
+ lsr r7, r1, #1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ rrx r3, r10
+ lsrs r6, r2, #1
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r3, [r0, #40]
+ orr r7, r7, r1, lsl #31
+ rrx r6, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ orr r5, r5, r2, lsl #31
+ str r7, [r0, #44]
+ str r6, [r0, #48]
+ str r5, [r0, #52]
+ lsrs r4, r1, #1
+ lsr r1, r1, #1
+ rrx r4, r2
+ str r4, [r0, #56]
+ str r1, [r0, #60]
+ add sp, sp, #24
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end247:
+ .size mcl_fp_shr1_16L, .Lfunc_end247-mcl_fp_shr1_16L
+ .cantunwind
+ .fnend
+
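+@ Modular addition: z = x + y, followed by one conditional subtraction of the
+@ 16-limb value at r3 (presumably the modulus p). r0 = z, r1 = x, r2 = y, r3 = p.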
+ .globl mcl_fp_add16L
+ .align 2
+ .type mcl_fp_add16L,%function
+mcl_fp_add16L: @ @mcl_fp_add16L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #60
+ sub sp, sp, #60
+ ldr r9, [r1]
+ ldmib r1, {r8, lr}
+ ldr r12, [r1, #12]
+ ldm r2, {r4, r5, r6, r7}
+ adds r9, r4, r9
+ ldr r4, [r1, #24]
+ adcs r5, r5, r8
+ mov r8, r9
+ adcs r6, r6, lr
+ str r5, [sp, #36] @ 4-byte Spill
+ ldr r5, [r1, #20]
+ str r8, [r0]
+ adcs r10, r7, r12
+ str r6, [sp, #32] @ 4-byte Spill
+ ldr r6, [r1, #16]
+ ldr r7, [r2, #16]
+ ldr lr, [sp, #36] @ 4-byte Reload
+ str r10, [sp] @ 4-byte Spill
+ adcs r7, r7, r6
+ ldr r6, [r1, #28]
+ str lr, [r0, #4]
+ str r7, [sp, #28] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ ldr r9, [sp, #28] @ 4-byte Reload
+ adcs r7, r7, r5
+ ldr r5, [r2, #28]
+ str r7, [sp, #24] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ adcs r7, r7, r4
+ ldr r4, [r2, #32]
+ str r7, [sp, #52] @ 4-byte Spill
+ adcs r7, r5, r6
+ ldr r5, [r1, #32]
+ str r7, [sp, #40] @ 4-byte Spill
+ adcs r7, r4, r5
+ ldr r5, [r1, #36]
+ ldr r4, [r2, #36]
+ str r7, [sp, #48] @ 4-byte Spill
+ adcs r7, r4, r5
+ ldr r5, [r1, #40]
+ ldr r4, [r2, #40]
+ str r7, [sp, #56] @ 4-byte Spill
+ adcs r7, r4, r5
+ ldr r5, [r1, #44]
+ ldr r4, [r2, #44]
+ str r7, [sp, #44] @ 4-byte Spill
+ ldr r7, [r2, #48]
+ adcs r11, r4, r5
+ ldr r4, [r1, #48]
+ ldr r5, [r2, #52]
+ str r11, [sp, #20] @ 4-byte Spill
+ adcs r12, r7, r4
+ ldr r7, [r1, #52]
+ ldr r4, [sp, #32] @ 4-byte Reload
+ str r12, [sp, #16] @ 4-byte Spill
+ adcs r6, r5, r7
+ ldr r7, [r1, #56]
+ ldr r5, [r2, #56]
+ ldr r1, [r1, #60]
+ ldr r2, [r2, #60]
+ str r4, [r0, #8]
+ str r10, [r0, #12]
+ ldr r10, [sp, #24] @ 4-byte Reload
+ str r9, [r0, #16]
+ str r6, [sp, #4] @ 4-byte Spill
+ adcs r5, r5, r7
+ str r10, [r0, #20]
+ add r7, r0, #40
+ adcs r2, r2, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r2, [sp, #8] @ 4-byte Spill
+ str r1, [r0, #24]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r1, [r0, #28]
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r1, [r0, #32]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r1, [r0, #36]
+ ldr r1, [sp, #44] @ 4-byte Reload
+ stm r7, {r1, r11, r12}
+ str r6, [r0, #52]
+ str r5, [r0, #56]
+ str r2, [r0, #60]
+ mov r2, #0
+ mov r12, r5
+ add r11, r3, #32
+ adc r1, r2, #0
+ str r1, [sp, #12] @ 4-byte Spill
+ ldm r3, {r5, r7}
+ ldr r1, [r3, #8]
+ ldr r2, [r3, #12]
+ subs r8, r8, r5
+ sbcs lr, lr, r7
+ sbcs r1, r4, r1
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp] @ 4-byte Reload
+ sbcs r1, r1, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [r3, #16]
+ sbcs r1, r9, r1
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [r3, #20]
+ sbcs r1, r10, r1
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [r3, #24]
+ sbcs r1, r2, r1
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [r3, #28]
+ sbcs r1, r2, r1
+ str r1, [sp, #40] @ 4-byte Spill
+ ldm r11, {r1, r2, r5, r7, r9, r10, r11}
+ ldr r6, [sp, #48] @ 4-byte Reload
+ ldr r3, [r3, #60]
+ sbcs r1, r6, r1
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ sbcs r1, r1, r2
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ sbcs r2, r1, r5
+ ldr r1, [sp, #20] @ 4-byte Reload
+ sbcs r5, r1, r7
+ ldr r1, [sp, #16] @ 4-byte Reload
+ sbcs r7, r1, r9
+ ldr r1, [sp, #4] @ 4-byte Reload
+ sbcs r4, r1, r10
+ ldr r1, [sp, #8] @ 4-byte Reload
+ sbcs r6, r12, r11
+ sbcs r1, r1, r3
+ ldr r3, [sp, #12] @ 4-byte Reload
+ sbc r3, r3, #0
+ tst r3, #1
+ bne .LBB248_2
+@ BB#1: @ %nocarry
+ stm r0, {r8, lr}
+ ldr r3, [sp, #36] @ 4-byte Reload
+ str r3, [r0, #8]
+ ldr r3, [sp, #32] @ 4-byte Reload
+ str r3, [r0, #12]
+ ldr r3, [sp, #28] @ 4-byte Reload
+ str r3, [r0, #16]
+ ldr r3, [sp, #24] @ 4-byte Reload
+ str r3, [r0, #20]
+ ldr r3, [sp, #52] @ 4-byte Reload
+ str r3, [r0, #24]
+ ldr r3, [sp, #40] @ 4-byte Reload
+ str r3, [r0, #28]
+ ldr r3, [sp, #48] @ 4-byte Reload
+ str r3, [r0, #32]
+ ldr r3, [sp, #56] @ 4-byte Reload
+ str r3, [r0, #36]
+ add r3, r0, #40
+ stm r3, {r2, r5, r7}
+ str r4, [r0, #52]
+ str r6, [r0, #56]
+ str r1, [r0, #60]
+.LBB248_2: @ %carry
+ add sp, sp, #60
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end248:
+ .size mcl_fp_add16L, .Lfunc_end248-mcl_fp_add16L
+ .cantunwind
+ .fnend
+
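+@ Addition variant that reduces via a signed conditional subtraction: the raw sum
+@ x + y is kept when (x + y) - p is negative, otherwise the subtracted value is
+@ stored. r0 = z, r1 = x, r2 = y, r3 = p (presumably the modulus).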
+ .globl mcl_fp_addNF16L
+ .align 2
+ .type mcl_fp_addNF16L,%function
+mcl_fp_addNF16L: @ @mcl_fp_addNF16L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #88
+ sub sp, sp, #88
+ mov r12, r0
+ ldm r1, {r0, r9}
+ ldr r8, [r1, #8]
+ ldr lr, [r1, #12]
+ ldm r2, {r4, r5, r6, r7}
+ adds r10, r4, r0
+ ldr r4, [r1, #20]
+ ldr r0, [r1, #24]
+ adcs r9, r5, r9
+ ldr r5, [r1, #16]
+ adcs r8, r6, r8
+ str r9, [sp, #4] @ 4-byte Spill
+ adcs r6, r7, lr
+ ldr r7, [r2, #16]
+ str r8, [sp, #8] @ 4-byte Spill
+ str r6, [sp, #16] @ 4-byte Spill
+ adcs r7, r7, r5
+ str r7, [sp, #52] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ adcs r7, r7, r4
+ str r7, [sp, #44] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ adcs r0, r7, r0
+ ldr r7, [r2, #28]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r1, #28]
+ adcs r0, r7, r0
+ ldr r7, [r2, #32]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [r1, #32]
+ adcs r0, r7, r0
+ ldr r7, [r2, #36]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [r1, #36]
+ adcs r0, r7, r0
+ ldr r7, [r2, #40]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r1, #40]
+ adcs r0, r7, r0
+ ldr r7, [r2, #44]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r1, #44]
+ adcs r0, r7, r0
+ ldr r7, [r2, #48]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [r1, #48]
+ adcs r0, r7, r0
+ ldr r7, [r2, #52]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [r1, #52]
+ adcs r0, r7, r0
+ ldr r7, [r2, #56]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [r1, #56]
+ adcs r0, r7, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [r1, #60]
+ ldr r1, [r2, #60]
+ adc r11, r1, r0
+ ldm r3, {r0, r7}
+ ldr r1, [r3, #8]
+ ldr r4, [r3, #12]
+ subs lr, r10, r0
+ ldr r0, [r3, #32]
+ sbcs r5, r9, r7
+ ldr r9, [sp, #44] @ 4-byte Reload
+ sbcs r7, r8, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ sbcs r8, r6, r4
+ ldr r4, [r3, #24]
+ ldr r6, [r3, #20]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [r3, #36]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [r3, #40]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [r3, #44]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r3, #48]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r3, #52]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r3, #56]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r3, #60]
+ str r0, [sp] @ 4-byte Spill
+ ldr r0, [r3, #28]
+ ldr r3, [r3, #16]
+ sbcs r1, r1, r3
+ ldr r3, [sp, #48] @ 4-byte Reload
+ sbcs r2, r9, r6
+ ldr r6, [sp, #12] @ 4-byte Reload
+ sbcs r3, r3, r4
+ ldr r4, [sp, #84] @ 4-byte Reload
+ sbcs r4, r4, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ sbcs r0, r0, r6
+ ldr r6, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ sbcs r0, r0, r6
+ ldr r6, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ sbcs r0, r0, r6
+ ldr r6, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ sbcs r0, r0, r6
+ ldr r6, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ sbcs r0, r0, r6
+ ldr r6, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ sbcs r0, r0, r6
+ ldr r6, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ sbcs r0, r0, r6
+ ldr r6, [sp, #4] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp] @ 4-byte Reload
+ sbc r0, r11, r0
+ cmp r0, #0
+ movlt lr, r10
+ movlt r5, r6
+ ldr r6, [sp, #28] @ 4-byte Reload
+ str lr, [r12]
+ str r5, [r12, #4]
+ ldr r5, [sp, #8] @ 4-byte Reload
+ ldr lr, [sp, #12] @ 4-byte Reload
+ movlt r7, r5
+ cmp r0, #0
+ ldr r5, [sp, #32] @ 4-byte Reload
+ str r7, [r12, #8]
+ ldr r7, [sp, #16] @ 4-byte Reload
+ movlt r2, r9
+ movlt r8, r7
+ ldr r7, [sp, #52] @ 4-byte Reload
+ str r8, [r12, #12]
+ movlt r1, r7
+ cmp r0, #0
+ ldr r7, [sp, #24] @ 4-byte Reload
+ str r1, [r12, #16]
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r2, [r12, #20]
+ ldr r2, [sp, #40] @ 4-byte Reload
+ movlt r3, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r3, [r12, #24]
+ ldr r3, [sp, #20] @ 4-byte Reload
+ movlt r4, r1
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r4, [r12, #28]
+ ldr r4, [sp, #36] @ 4-byte Reload
+ movlt lr, r1
+ ldr r1, [sp, #76] @ 4-byte Reload
+ cmp r0, #0
+ str lr, [r12, #32]
+ movlt r3, r1
+ ldr r1, [sp, #72] @ 4-byte Reload
+ str r3, [r12, #36]
+ movlt r7, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r7, [r12, #40]
+ movlt r6, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ cmp r0, #0
+ str r6, [r12, #44]
+ movlt r5, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r5, [r12, #48]
+ movlt r4, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r4, [r12, #52]
+ movlt r2, r1
+ cmp r0, #0
+ movlt r0, r11
+ str r2, [r12, #56]
+ str r0, [r12, #60]
+ add sp, sp, #88
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end249:
+ .size mcl_fp_addNF16L, .Lfunc_end249-mcl_fp_addNF16L
+ .cantunwind
+ .fnend
+
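+@ Modular subtraction: z = x - y; when the subtraction borrows, the 16-limb value
+@ at r3 (presumably the modulus p) is added back. r0 = z, r1 = x, r2 = y, r3 = p.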
+ .globl mcl_fp_sub16L
+ .align 2
+ .type mcl_fp_sub16L,%function
+mcl_fp_sub16L: @ @mcl_fp_sub16L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #84
+ sub sp, sp, #84
+ ldr r9, [r2]
+ ldmib r2, {r8, lr}
+ ldr r5, [r1]
+ ldr r12, [r2, #12]
+ ldmib r1, {r4, r6, r7}
+ subs r5, r5, r9
+ sbcs r4, r4, r8
+ str r5, [sp, #60] @ 4-byte Spill
+ ldr r5, [r2, #24]
+ sbcs r6, r6, lr
+ str r4, [sp, #64] @ 4-byte Spill
+ ldr r4, [r2, #20]
+ sbcs r7, r7, r12
+ str r6, [sp, #68] @ 4-byte Spill
+ ldr r6, [r2, #16]
+ str r7, [sp, #76] @ 4-byte Spill
+ ldr r7, [r1, #16]
+ sbcs r7, r7, r6
+ ldr r6, [r1, #28]
+ str r7, [sp, #72] @ 4-byte Spill
+ ldr r7, [r1, #20]
+ sbcs r7, r7, r4
+ str r7, [sp, #80] @ 4-byte Spill
+ ldr r7, [r1, #24]
+ sbcs r7, r7, r5
+ str r7, [sp, #56] @ 4-byte Spill
+ ldr r7, [r2, #28]
+ sbcs r11, r6, r7
+ ldr r7, [r2, #32]
+ ldr r6, [r1, #32]
+ sbcs r10, r6, r7
+ ldr r7, [r2, #36]
+ ldr r6, [r1, #36]
+ sbcs r8, r6, r7
+ ldr r7, [r2, #40]
+ ldr r6, [r1, #40]
+ str r8, [sp, #52] @ 4-byte Spill
+ sbcs r5, r6, r7
+ ldr r7, [r2, #44]
+ ldr r6, [r1, #44]
+ str r5, [sp, #48] @ 4-byte Spill
+ sbcs r4, r6, r7
+ ldr r6, [r2, #48]
+ ldr r7, [r1, #48]
+ str r4, [sp, #44] @ 4-byte Spill
+ sbcs lr, r7, r6
+ ldr r6, [r2, #52]
+ ldr r7, [r1, #52]
+ str lr, [sp, #40] @ 4-byte Spill
+ sbcs r9, r7, r6
+ ldr r6, [r2, #56]
+ ldr r7, [r1, #56]
+ ldr r2, [r2, #60]
+ ldr r1, [r1, #60]
+ sbcs r6, r7, r6
+ sbcs r12, r1, r2
+ ldr r1, [sp, #60] @ 4-byte Reload
+ mov r2, #0
+ str r6, [sp, #36] @ 4-byte Spill
+ sbc r2, r2, #0
+ str r12, [sp, #32] @ 4-byte Spill
+ tst r2, #1
+ str r1, [r0]
+ ldr r1, [sp, #64] @ 4-byte Reload
+ str r1, [r0, #4]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r1, [r0, #8]
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str r1, [r0, #12]
+ ldr r1, [sp, #72] @ 4-byte Reload
+ str r1, [r0, #16]
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r1, [r0, #20]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r1, [r0, #24]
+ str r11, [r0, #28]
+ str r10, [r0, #32]
+ str r8, [r0, #36]
+ str r5, [r0, #40]
+ str r4, [r0, #44]
+ str lr, [r0, #48]
+ str r9, [r0, #52]
+ str r6, [r0, #56]
+ str r12, [r0, #60]
+ beq .LBB250_2
+@ BB#1: @ %carry
+ ldr r2, [r3, #32]
+ ldr r8, [r3, #60]
+ str r11, [sp] @ 4-byte Spill
+ ldr r5, [r3]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r3, #36]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r3, #40]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [r3, #44]
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [r3, #48]
+ str r2, [sp, #20] @ 4-byte Spill
+ ldr r2, [r3, #52]
+ str r2, [sp, #24] @ 4-byte Spill
+ ldr r2, [r3, #56]
+ str r2, [sp, #28] @ 4-byte Spill
+ ldmib r3, {r4, r11, r12}
+ ldr r1, [sp, #60] @ 4-byte Reload
+ ldr r7, [sp, #76] @ 4-byte Reload
+ ldr lr, [r3, #20]
+ ldr r2, [sp, #80] @ 4-byte Reload
+ adds r5, r5, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ str r5, [r0]
+ adcs r4, r4, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r4, [r0, #4]
+ adcs r1, r11, r1
+ ldr r11, [r3, #24]
+ adcs r6, r12, r7
+ str r1, [r0, #8]
+ ldr r12, [r3, #28]
+ ldr r3, [r3, #16]
+ ldr r1, [sp, #72] @ 4-byte Reload
+ ldr r7, [sp, #44] @ 4-byte Reload
+ str r6, [r0, #12]
+ adcs r1, r3, r1
+ str r1, [r0, #16]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r3, lr, r2
+ ldr r2, [sp] @ 4-byte Reload
+ str r3, [r0, #20]
+ adcs r1, r11, r1
+ str r1, [r0, #24]
+ ldr r1, [sp, #4] @ 4-byte Reload
+ adcs r3, r12, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r3, [r0, #28]
+ ldr r3, [sp, #48] @ 4-byte Reload
+ adcs lr, r1, r10
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str lr, [r0, #32]
+ adcs r2, r1, r2
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r3, r1, r3
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r6, r1, r7
+ ldr r7, [sp, #40] @ 4-byte Reload
+ ldr r1, [sp, #20] @ 4-byte Reload
+ adcs r5, r1, r7
+ ldr r1, [sp, #24] @ 4-byte Reload
+ ldr r7, [sp, #36] @ 4-byte Reload
+ adcs r4, r1, r9
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r7, r1, r7
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adc r12, r8, r1
+ add r1, r0, #36
+ stm r1, {r2, r3, r6}
+ str r5, [r0, #48]
+ add r0, r0, #52
+ stm r0, {r4, r7, r12}
+.LBB250_2: @ %nocarry
+ add sp, sp, #84
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end250:
+ .size mcl_fp_sub16L, .Lfunc_end250-mcl_fp_sub16L
+ .cantunwind
+ .fnend
+
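+@ Subtraction variant: z = x - y, corrected by a conditional addition of p (r3)
+@ selected by the sign of the raw difference. r0 = z, r1 = x, r2 = y, r3 = p.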
+ .globl mcl_fp_subNF16L
+ .align 2
+ .type mcl_fp_subNF16L,%function
+mcl_fp_subNF16L: @ @mcl_fp_subNF16L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #92
+ sub sp, sp, #92
+ ldr r7, [r2, #32]
+ add r9, r2, #8
+ str r7, [sp, #68] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r7, [sp, #72] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ str r7, [sp, #76] @ 4-byte Spill
+ ldr r7, [r2, #44]
+ str r7, [sp, #80] @ 4-byte Spill
+ ldr r7, [r2, #48]
+ str r7, [sp, #84] @ 4-byte Spill
+ ldr r7, [r2, #52]
+ str r7, [sp, #88] @ 4-byte Spill
+ ldr r7, [r2, #56]
+ str r7, [sp, #56] @ 4-byte Spill
+ ldr r7, [r2, #60]
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [r1, #60]
+ str r7, [sp, #32] @ 4-byte Spill
+ ldr r7, [r1, #56]
+ str r7, [sp, #28] @ 4-byte Spill
+ ldr r7, [r1, #52]
+ str r7, [sp, #24] @ 4-byte Spill
+ ldr r7, [r1, #48]
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r1, #44]
+ str r7, [sp, #12] @ 4-byte Spill
+ ldm r2, {r10, r11}
+ ldm r9, {r5, r6, r7, r9}
+ ldr r4, [r2, #24]
+ ldr r2, [r2, #28]
+ str r4, [sp, #60] @ 4-byte Spill
+ str r2, [sp, #64] @ 4-byte Spill
+ ldm r1, {r2, r12, lr}
+ ldr r4, [r1, #12]
+ ldr r8, [r1, #40]
+ subs r2, r2, r10
+ str r2, [sp, #40] @ 4-byte Spill
+ sbcs r2, r12, r11
+ ldr r12, [r1, #36]
+ sbcs lr, lr, r5
+ str r2, [sp, #20] @ 4-byte Spill
+ ldr r5, [r1, #32]
+ ldr r2, [sp, #60] @ 4-byte Reload
+ sbcs r4, r4, r6
+ ldr r6, [r1, #16]
+ str lr, [sp] @ 4-byte Spill
+ str r4, [sp, #44] @ 4-byte Spill
+ sbcs r4, r6, r7
+ ldr r7, [r1, #20]
+ str r4, [sp, #52] @ 4-byte Spill
+ sbcs r4, r7, r9
+ ldr r7, [r1, #28]
+ ldr r1, [r1, #24]
+ str r4, [sp, #48] @ 4-byte Spill
+ sbcs r1, r1, r2
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ sbcs r1, r7, r1
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ sbcs r1, r5, r1
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ sbcs r1, r12, r1
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ sbcs r1, r8, r1
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ sbcs r1, r2, r1
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ sbcs r1, r2, r1
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #88] @ 4-byte Reload
+ sbcs r1, r2, r1
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ sbcs r1, r2, r1
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ sbc r2, r2, r1
+ ldr r1, [r3, #32]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [r3, #36]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [r3, #40]
+ str r1, [sp, #4] @ 4-byte Spill
+ ldr r1, [r3, #44]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [r3, #48]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [r3, #52]
+ str r1, [sp, #8] @ 4-byte Spill
+ ldr r1, [r3, #56]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [r3, #60]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldm r3, {r1, r4, r5, r6, r7, r8, r9, r10}
+ ldr r3, [sp, #40] @ 4-byte Reload
+ ldr r11, [sp, #20] @ 4-byte Reload
+ adds r1, r3, r1
+ adcs r3, r11, r4
+ ldr r4, [sp, #52] @ 4-byte Reload
+ adcs r12, lr, r5
+ ldr r5, [sp, #44] @ 4-byte Reload
+ adcs lr, r5, r6
+ ldr r5, [sp, #48] @ 4-byte Reload
+ ldr r6, [sp, #60] @ 4-byte Reload
+ adcs r4, r4, r7
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adcs r5, r5, r8
+ ldr r8, [sp, #88] @ 4-byte Reload
+ adcs r9, r6, r9
+ ldr r6, [sp, #12] @ 4-byte Reload
+ adcs r10, r7, r10
+ ldr r7, [sp, #68] @ 4-byte Reload
+ adcs r7, r7, r6
+ ldr r6, [sp, #16] @ 4-byte Reload
+ str r7, [sp, #12] @ 4-byte Spill
+ ldr r7, [sp, #72] @ 4-byte Reload
+ adcs r7, r7, r6
+ ldr r6, [sp, #4] @ 4-byte Reload
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [sp, #76] @ 4-byte Reload
+ adcs r6, r7, r6
+ ldr r7, [sp, #80] @ 4-byte Reload
+ str r6, [sp, #4] @ 4-byte Spill
+ ldr r6, [sp, #24] @ 4-byte Reload
+ adcs r7, r7, r6
+ ldr r6, [sp, #28] @ 4-byte Reload
+ str r7, [sp, #24] @ 4-byte Spill
+ ldr r7, [sp, #84] @ 4-byte Reload
+ adcs r7, r7, r6
+ ldr r6, [sp, #40] @ 4-byte Reload
+ str r7, [sp, #28] @ 4-byte Spill
+ ldr r7, [sp, #8] @ 4-byte Reload
+ adcs r7, r8, r7
+ ldr r8, [sp, #32] @ 4-byte Reload
+ str r7, [sp, #8] @ 4-byte Spill
+ ldr r7, [sp, #56] @ 4-byte Reload
+ adcs r7, r7, r8
+ str r7, [sp, #32] @ 4-byte Spill
+ ldr r7, [sp, #36] @ 4-byte Reload
+ adc r7, r2, r7
+ cmp r2, #0
+ movge r1, r6
+ movge r3, r11
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [sp, #4] @ 4-byte Reload
+ ldr r6, [sp, #24] @ 4-byte Reload
+ str r1, [r0]
+ ldr r1, [sp] @ 4-byte Reload
+ str r3, [r0, #4]
+ ldr r3, [sp, #8] @ 4-byte Reload
+ movge r12, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ cmp r2, #0
+ str r12, [r0, #8]
+ ldr r12, [sp, #12] @ 4-byte Reload
+ movge lr, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str lr, [r0, #12]
+ ldr lr, [sp, #16] @ 4-byte Reload
+ movge r4, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r4, [r0, #16]
+ ldr r4, [sp, #32] @ 4-byte Reload
+ movge r5, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ cmp r2, #0
+ str r5, [r0, #20]
+ ldr r5, [sp, #28] @ 4-byte Reload
+ movge r9, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ str r9, [r0, #24]
+ movge r10, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r10, [r0, #28]
+ movge r12, r1
+ ldr r1, [sp, #72] @ 4-byte Reload
+ cmp r2, #0
+ str r12, [r0, #32]
+ movge lr, r1
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str lr, [r0, #36]
+ movge r7, r1
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r7, [r0, #40]
+ movge r6, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ cmp r2, #0
+ str r6, [r0, #44]
+ movge r5, r1
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r5, [r0, #48]
+ movge r3, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r3, [r0, #52]
+ movge r4, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ cmp r2, #0
+ movge r1, r2
+ str r4, [r0, #56]
+ str r1, [r0, #60]
+ add sp, sp, #92
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end251:
+ .size mcl_fp_subNF16L, .Lfunc_end251-mcl_fp_subNF16L
+ .cantunwind
+ .fnend
+
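+@ Double-width addition: adds two 32-limb (1024-bit) values; the low 16 limbs are
+@ stored directly and the high 16 limbs get one conditional subtraction of p (r3).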
+ .globl mcl_fpDbl_add16L
+ .align 2
+ .type mcl_fpDbl_add16L,%function
+mcl_fpDbl_add16L: @ @mcl_fpDbl_add16L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #200
+ sub sp, sp, #200
+ ldm r1, {r7, r8, lr}
+ ldr r12, [r1, #12]
+ ldm r2, {r4, r5, r6, r9}
+ add r10, r1, #32
+ adds r4, r4, r7
+ str r4, [sp, #100] @ 4-byte Spill
+ ldr r4, [r2, #96]
+ str r4, [sp, #164] @ 4-byte Spill
+ ldr r4, [r2, #100]
+ str r4, [sp, #160] @ 4-byte Spill
+ ldr r4, [r2, #104]
+ str r4, [sp, #156] @ 4-byte Spill
+ ldr r4, [r2, #108]
+ str r4, [sp, #180] @ 4-byte Spill
+ ldr r4, [r2, #112]
+ str r4, [sp, #184] @ 4-byte Spill
+ ldr r4, [r2, #116]
+ str r4, [sp, #188] @ 4-byte Spill
+ ldr r4, [r2, #120]
+ str r4, [sp, #192] @ 4-byte Spill
+ ldr r4, [r2, #124]
+ str r4, [sp, #196] @ 4-byte Spill
+ adcs r4, r5, r8
+ adcs r7, r6, lr
+ str r4, [sp, #68] @ 4-byte Spill
+ add lr, r1, #16
+ str r7, [sp, #64] @ 4-byte Spill
+ adcs r7, r9, r12
+ str r7, [sp, #76] @ 4-byte Spill
+ ldr r7, [r2, #64]
+ str r7, [sp, #136] @ 4-byte Spill
+ ldr r7, [r2, #68]
+ str r7, [sp, #140] @ 4-byte Spill
+ ldr r7, [r2, #72]
+ str r7, [sp, #144] @ 4-byte Spill
+ ldr r7, [r2, #76]
+ str r7, [sp, #148] @ 4-byte Spill
+ ldr r7, [r2, #80]
+ str r7, [sp, #168] @ 4-byte Spill
+ ldr r7, [r2, #84]
+ str r7, [sp, #152] @ 4-byte Spill
+ ldr r7, [r2, #88]
+ str r7, [sp, #172] @ 4-byte Spill
+ ldr r7, [r2, #92]
+ str r7, [sp, #176] @ 4-byte Spill
+ ldr r7, [r2, #32]
+ str r7, [sp, #56] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r7, [sp, #60] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ str r7, [sp, #72] @ 4-byte Spill
+ ldr r7, [r2, #44]
+ str r7, [sp, #80] @ 4-byte Spill
+ ldr r7, [r2, #48]
+ str r7, [sp, #84] @ 4-byte Spill
+ ldr r7, [r2, #52]
+ str r7, [sp, #88] @ 4-byte Spill
+ ldr r7, [r2, #56]
+ str r7, [sp, #92] @ 4-byte Spill
+ ldr r7, [r2, #60]
+ str r7, [sp, #96] @ 4-byte Spill
+ ldr r7, [r2, #28]
+ str r7, [sp, #20] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ ldr r2, [r2, #16]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #96]
+ str r7, [sp, #12] @ 4-byte Spill
+ str r2, [sp, #104] @ 4-byte Spill
+ ldr r2, [r1, #100]
+ str r2, [sp, #108] @ 4-byte Spill
+ ldr r2, [r1, #104]
+ str r2, [sp, #112] @ 4-byte Spill
+ ldr r2, [r1, #108]
+ str r2, [sp, #116] @ 4-byte Spill
+ ldr r2, [r1, #112]
+ str r2, [sp, #120] @ 4-byte Spill
+ ldr r2, [r1, #116]
+ str r2, [sp, #124] @ 4-byte Spill
+ ldr r2, [r1, #120]
+ str r2, [sp, #128] @ 4-byte Spill
+ ldr r2, [r1, #124]
+ str r2, [sp, #132] @ 4-byte Spill
+ ldr r2, [r1, #64]
+ str r2, [sp, #24] @ 4-byte Spill
+ ldr r2, [r1, #68]
+ str r2, [sp, #28] @ 4-byte Spill
+ ldr r2, [r1, #72]
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r2, [r1, #76]
+ str r2, [sp, #36] @ 4-byte Spill
+ ldr r2, [r1, #80]
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [r1, #84]
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [r1, #88]
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r2, [r1, #92]
+ str r2, [sp, #52] @ 4-byte Spill
+ ldm r10, {r4, r5, r6, r8, r9, r10}
+ ldr r2, [r1, #56]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldm lr, {r1, r2, r12, lr}
+ ldr r11, [sp, #100] @ 4-byte Reload
+ ldr r7, [sp, #68] @ 4-byte Reload
+ str r11, [r0]
+ str r7, [r0, #4]
+ ldr r7, [sp, #64] @ 4-byte Reload
+ str r7, [r0, #8]
+ ldr r7, [sp, #8] @ 4-byte Reload
+ adcs r1, r7, r1
+ ldr r7, [sp, #76] @ 4-byte Reload
+ str r7, [r0, #12]
+ ldr r7, [sp, #12] @ 4-byte Reload
+ str r1, [r0, #16]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r2, r7, r2
+ ldr r7, [sp] @ 4-byte Reload
+ str r2, [r0, #20]
+ adcs r1, r1, r12
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [r0, #24]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r2, r2, lr
+ str r2, [r0, #28]
+ adcs r1, r1, r4
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r1, [r0, #32]
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r2, r2, r5
+ str r2, [r0, #36]
+ adcs r1, r1, r6
+ ldr r2, [sp, #80] @ 4-byte Reload
+ str r1, [r0, #40]
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r2, r2, r8
+ str r2, [r0, #44]
+ adcs r1, r1, r9
+ ldr r2, [sp, #88] @ 4-byte Reload
+ str r1, [r0, #48]
+ ldr r1, [sp, #92] @ 4-byte Reload
+ adcs r2, r2, r10
+ adcs r1, r1, r7
+ str r2, [r0, #52]
+ ldr r2, [sp, #96] @ 4-byte Reload
+ ldr r7, [sp, #4] @ 4-byte Reload
+ str r1, [r0, #56]
+ ldr r1, [sp, #136] @ 4-byte Reload
+ adcs r2, r2, r7
+ ldr r7, [sp, #24] @ 4-byte Reload
+ str r2, [r0, #60]
+ ldr r2, [sp, #28] @ 4-byte Reload
+ adcs r12, r1, r7
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r12, [sp, #92] @ 4-byte Spill
+ adcs r9, r1, r2
+ ldr r1, [sp, #144] @ 4-byte Reload
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r9, [sp, #96] @ 4-byte Spill
+ adcs r8, r1, r2
+ ldr r1, [sp, #148] @ 4-byte Reload
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r8, [sp, #100] @ 4-byte Spill
+ adcs r4, r1, r2
+ ldr r1, [sp, #168] @ 4-byte Reload
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r4, [sp, #136] @ 4-byte Spill
+ adcs r1, r1, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r1, [sp, #168] @ 4-byte Spill
+ ldr r1, [sp, #152] @ 4-byte Reload
+ adcs r10, r1, r2
+ ldr r1, [sp, #172] @ 4-byte Reload
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r10, [sp, #88] @ 4-byte Spill
+ adcs r1, r1, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r1, [sp, #172] @ 4-byte Spill
+ ldr r1, [sp, #176] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #104] @ 4-byte Reload
+ str r1, [sp, #176] @ 4-byte Spill
+ ldr r1, [sp, #164] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #108] @ 4-byte Reload
+ str r1, [sp, #164] @ 4-byte Spill
+ ldr r1, [sp, #160] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #112] @ 4-byte Reload
+ str r1, [sp, #160] @ 4-byte Spill
+ ldr r1, [sp, #156] @ 4-byte Reload
+ adcs r11, r1, r2
+ ldr r1, [sp, #180] @ 4-byte Reload
+ ldr r2, [sp, #116] @ 4-byte Reload
+ str r11, [sp, #140] @ 4-byte Spill
+ adcs r1, r1, r2
+ ldr r2, [sp, #120] @ 4-byte Reload
+ str r1, [sp, #180] @ 4-byte Spill
+ ldr r1, [sp, #184] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #124] @ 4-byte Reload
+ str r1, [sp, #184] @ 4-byte Spill
+ ldr r1, [sp, #188] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #128] @ 4-byte Reload
+ str r1, [sp, #188] @ 4-byte Spill
+ ldr r1, [sp, #192] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #132] @ 4-byte Reload
+ str r1, [sp, #192] @ 4-byte Spill
+ ldr r1, [sp, #196] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #196] @ 4-byte Spill
+ mov r1, #0
+ adc r1, r1, #0
+ str r1, [sp, #128] @ 4-byte Spill
+ ldm r3, {r2, r7}
+ ldr r1, [r3, #36]
+ ldr r6, [r3, #8]
+ ldr r5, [r3, #12]
+ str r1, [sp, #120] @ 4-byte Spill
+ ldr r1, [r3, #40]
+ subs r12, r12, r2
+ ldr r2, [sp, #168] @ 4-byte Reload
+ sbcs lr, r9, r7
+ sbcs r7, r8, r6
+ ldr r8, [r3, #32]
+ ldr r6, [r3, #24]
+ sbcs r9, r4, r5
+ ldr r5, [r3, #28]
+ str r1, [sp, #124] @ 4-byte Spill
+ ldr r1, [r3, #44]
+ str r1, [sp, #132] @ 4-byte Spill
+ ldr r1, [r3, #48]
+ str r1, [sp, #144] @ 4-byte Spill
+ ldr r1, [r3, #52]
+ str r1, [sp, #148] @ 4-byte Spill
+ ldr r1, [r3, #56]
+ str r1, [sp, #152] @ 4-byte Spill
+ ldr r1, [r3, #60]
+ str r1, [sp, #156] @ 4-byte Spill
+ ldr r1, [r3, #20]
+ ldr r3, [r3, #16]
+ sbcs r2, r2, r3
+ sbcs r3, r10, r1
+ ldr r1, [sp, #172] @ 4-byte Reload
+ sbcs r4, r1, r6
+ ldr r1, [sp, #176] @ 4-byte Reload
+ ldr r6, [sp, #120] @ 4-byte Reload
+ sbcs r5, r1, r5
+ ldr r1, [sp, #164] @ 4-byte Reload
+ sbcs r8, r1, r8
+ ldr r1, [sp, #160] @ 4-byte Reload
+ sbcs r10, r1, r6
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r6, [sp, #132] @ 4-byte Reload
+ sbcs r11, r11, r1
+ ldr r1, [sp, #180] @ 4-byte Reload
+ sbcs r1, r1, r6
+ ldr r6, [sp, #144] @ 4-byte Reload
+ str r1, [sp, #132] @ 4-byte Spill
+ ldr r1, [sp, #184] @ 4-byte Reload
+ sbcs r1, r1, r6
+ ldr r6, [sp, #148] @ 4-byte Reload
+ str r1, [sp, #144] @ 4-byte Spill
+ ldr r1, [sp, #188] @ 4-byte Reload
+ sbcs r1, r1, r6
+ ldr r6, [sp, #152] @ 4-byte Reload
+ str r1, [sp, #148] @ 4-byte Spill
+ ldr r1, [sp, #192] @ 4-byte Reload
+ sbcs r1, r1, r6
+ ldr r6, [sp, #156] @ 4-byte Reload
+ str r1, [sp, #152] @ 4-byte Spill
+ ldr r1, [sp, #196] @ 4-byte Reload
+ sbcs r1, r1, r6
+ ldr r6, [sp, #92] @ 4-byte Reload
+ str r1, [sp, #156] @ 4-byte Spill
+ ldr r1, [sp, #128] @ 4-byte Reload
+ sbc r1, r1, #0
+ ands r1, r1, #1
+ movne r12, r6
+ ldr r6, [sp, #96] @ 4-byte Reload
+ str r12, [r0, #64]
+ movne lr, r6
+ ldr r6, [sp, #100] @ 4-byte Reload
+ str lr, [r0, #68]
+ movne r7, r6
+ cmp r1, #0
+ str r7, [r0, #72]
+ ldr r7, [sp, #136] @ 4-byte Reload
+ movne r9, r7
+ ldr r7, [sp, #168] @ 4-byte Reload
+ str r9, [r0, #76]
+ movne r2, r7
+ str r2, [r0, #80]
+ ldr r2, [sp, #88] @ 4-byte Reload
+ movne r3, r2
+ ldr r2, [sp, #172] @ 4-byte Reload
+ cmp r1, #0
+ str r3, [r0, #84]
+ ldr r3, [sp, #132] @ 4-byte Reload
+ movne r4, r2
+ ldr r2, [sp, #176] @ 4-byte Reload
+ str r4, [r0, #88]
+ movne r5, r2
+ ldr r2, [sp, #164] @ 4-byte Reload
+ str r5, [r0, #92]
+ movne r8, r2
+ ldr r2, [sp, #160] @ 4-byte Reload
+ cmp r1, #0
+ str r8, [r0, #96]
+ movne r10, r2
+ ldr r2, [sp, #140] @ 4-byte Reload
+ str r10, [r0, #100]
+ movne r11, r2
+ ldr r2, [sp, #180] @ 4-byte Reload
+ str r11, [r0, #104]
+ movne r3, r2
+ ldr r2, [sp, #184] @ 4-byte Reload
+ cmp r1, #0
+ str r3, [r0, #108]
+ ldr r3, [sp, #144] @ 4-byte Reload
+ movne r3, r2
+ ldr r2, [sp, #188] @ 4-byte Reload
+ str r3, [r0, #112]
+ ldr r3, [sp, #148] @ 4-byte Reload
+ movne r3, r2
+ ldr r2, [sp, #192] @ 4-byte Reload
+ str r3, [r0, #116]
+ ldr r3, [sp, #152] @ 4-byte Reload
+ movne r3, r2
+ cmp r1, #0
+ ldr r1, [sp, #196] @ 4-byte Reload
+ ldr r2, [sp, #156] @ 4-byte Reload
+ str r3, [r0, #120]
+ movne r2, r1
+ str r2, [r0, #124]
+ add sp, sp, #200
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end252:
+ .size mcl_fpDbl_add16L, .Lfunc_end252-mcl_fpDbl_add16L
+ .cantunwind
+ .fnend
+
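+@ Double-width subtraction: subtracts two 32-limb values; the low 16 limbs are
+@ stored directly and p (r3) is conditionally added to the high 16 limbs when the
+@ subtraction borrows.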
+ .globl mcl_fpDbl_sub16L
+ .align 2
+ .type mcl_fpDbl_sub16L,%function
+mcl_fpDbl_sub16L: @ @mcl_fpDbl_sub16L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #200
+ sub sp, sp, #200
+ ldr r7, [r2, #96]
+ ldr r9, [r2]
+ add r10, r1, #32
+ str r7, [sp, #168] @ 4-byte Spill
+ ldr r7, [r2, #100]
+ str r7, [sp, #172] @ 4-byte Spill
+ ldr r7, [r2, #104]
+ str r7, [sp, #176] @ 4-byte Spill
+ ldr r7, [r2, #108]
+ str r7, [sp, #180] @ 4-byte Spill
+ ldr r7, [r2, #112]
+ str r7, [sp, #184] @ 4-byte Spill
+ ldr r7, [r2, #116]
+ str r7, [sp, #188] @ 4-byte Spill
+ ldr r7, [r2, #120]
+ str r7, [sp, #192] @ 4-byte Spill
+ ldr r7, [r2, #124]
+ str r7, [sp, #196] @ 4-byte Spill
+ ldr r7, [r2, #64]
+ str r7, [sp, #136] @ 4-byte Spill
+ ldr r7, [r2, #68]
+ str r7, [sp, #140] @ 4-byte Spill
+ ldr r7, [r2, #72]
+ str r7, [sp, #144] @ 4-byte Spill
+ ldr r7, [r2, #76]
+ str r7, [sp, #148] @ 4-byte Spill
+ ldr r7, [r2, #80]
+ str r7, [sp, #152] @ 4-byte Spill
+ ldr r7, [r2, #84]
+ str r7, [sp, #156] @ 4-byte Spill
+ ldr r7, [r2, #88]
+ str r7, [sp, #160] @ 4-byte Spill
+ ldr r7, [r2, #92]
+ str r7, [sp, #164] @ 4-byte Spill
+ ldr r7, [r2, #60]
+ str r7, [sp, #132] @ 4-byte Spill
+ ldr r7, [r2, #56]
+ str r7, [sp, #128] @ 4-byte Spill
+ ldmib r2, {r8, r12, lr}
+ ldm r1, {r4, r5, r6, r7}
+ subs r4, r4, r9
+ str r4, [sp, #36] @ 4-byte Spill
+ ldr r4, [r2, #52]
+ str r4, [sp, #92] @ 4-byte Spill
+ sbcs r4, r5, r8
+ sbcs r6, r6, r12
+ str r4, [sp, #32] @ 4-byte Spill
+ ldr r4, [r2, #48]
+ sbcs r7, r7, lr
+ str r6, [sp, #24] @ 4-byte Spill
+ ldr r6, [r2, #44]
+ add lr, r1, #16
+ str r7, [sp, #20] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ str r4, [sp, #88] @ 4-byte Spill
+ str r6, [sp, #84] @ 4-byte Spill
+ str r7, [sp, #80] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r7, [sp, #48] @ 4-byte Spill
+ ldr r7, [r2, #32]
+ str r7, [sp, #40] @ 4-byte Spill
+ ldr r7, [r2, #28]
+ str r7, [sp, #28] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ ldr r2, [r2, #16]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #96]
+ str r7, [sp, #12] @ 4-byte Spill
+ str r2, [sp, #96] @ 4-byte Spill
+ ldr r2, [r1, #100]
+ str r2, [sp, #100] @ 4-byte Spill
+ ldr r2, [r1, #104]
+ str r2, [sp, #104] @ 4-byte Spill
+ ldr r2, [r1, #108]
+ str r2, [sp, #108] @ 4-byte Spill
+ ldr r2, [r1, #112]
+ str r2, [sp, #112] @ 4-byte Spill
+ ldr r2, [r1, #116]
+ str r2, [sp, #116] @ 4-byte Spill
+ ldr r2, [r1, #120]
+ str r2, [sp, #120] @ 4-byte Spill
+ ldr r2, [r1, #124]
+ str r2, [sp, #124] @ 4-byte Spill
+ ldr r2, [r1, #64]
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [r1, #68]
+ str r2, [sp, #52] @ 4-byte Spill
+ ldr r2, [r1, #72]
+ str r2, [sp, #56] @ 4-byte Spill
+ ldr r2, [r1, #76]
+ str r2, [sp, #60] @ 4-byte Spill
+ ldr r2, [r1, #80]
+ str r2, [sp, #64] @ 4-byte Spill
+ ldr r2, [r1, #84]
+ str r2, [sp, #68] @ 4-byte Spill
+ ldr r2, [r1, #88]
+ str r2, [sp, #72] @ 4-byte Spill
+ ldr r2, [r1, #92]
+ str r2, [sp, #76] @ 4-byte Spill
+ ldm r10, {r4, r5, r6, r8, r9, r10}
+ ldr r2, [r1, #56]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldm lr, {r1, r2, r12, lr}
+ ldr r11, [sp, #36] @ 4-byte Reload
+ ldr r7, [sp, #32] @ 4-byte Reload
+ str r11, [r0]
+ str r7, [r0, #4]
+ ldr r7, [sp, #24] @ 4-byte Reload
+ add r11, r3, #12
+ str r7, [r0, #8]
+ ldr r7, [sp, #8] @ 4-byte Reload
+ sbcs r1, r1, r7
+ ldr r7, [sp, #20] @ 4-byte Reload
+ str r7, [r0, #12]
+ ldr r7, [sp, #12] @ 4-byte Reload
+ str r1, [r0, #16]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ sbcs r2, r2, r7
+ ldr r7, [sp] @ 4-byte Reload
+ str r2, [r0, #20]
+ sbcs r1, r12, r1
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [r0, #24]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ sbcs r2, lr, r2
+ str r2, [r0, #28]
+ sbcs r1, r4, r1
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r1, [r0, #32]
+ ldr r1, [sp, #80] @ 4-byte Reload
+ sbcs r2, r5, r2
+ ldr r5, [sp, #72] @ 4-byte Reload
+ str r2, [r0, #36]
+ sbcs r1, r6, r1
+ ldr r2, [sp, #84] @ 4-byte Reload
+ str r1, [r0, #40]
+ ldr r1, [sp, #88] @ 4-byte Reload
+ sbcs r2, r8, r2
+ str r2, [r0, #44]
+ sbcs r1, r9, r1
+ ldr r2, [sp, #92] @ 4-byte Reload
+ str r1, [r0, #48]
+ ldr r1, [sp, #128] @ 4-byte Reload
+ sbcs r2, r10, r2
+ sbcs r1, r7, r1
+ str r2, [r0, #52]
+ ldr r2, [sp, #132] @ 4-byte Reload
+ ldr r7, [sp, #4] @ 4-byte Reload
+ str r1, [r0, #56]
+ ldr r1, [sp, #136] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #44] @ 4-byte Reload
+ str r2, [r0, #60]
+ ldr r2, [sp, #52] @ 4-byte Reload
+ sbcs r9, r7, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ ldr r7, [sp, #96] @ 4-byte Reload
+ str r9, [sp, #80] @ 4-byte Spill
+ sbcs r1, r2, r1
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r1, [sp, #132] @ 4-byte Spill
+ ldr r1, [sp, #144] @ 4-byte Reload
+ sbcs r12, r2, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r12, [sp, #84] @ 4-byte Spill
+ sbcs lr, r2, r1
+ ldr r1, [sp, #152] @ 4-byte Reload
+ ldr r2, [sp, #64] @ 4-byte Reload
+ str lr, [sp, #88] @ 4-byte Spill
+ sbcs r4, r2, r1
+ ldr r1, [sp, #156] @ 4-byte Reload
+ ldr r2, [sp, #68] @ 4-byte Reload
+ str r4, [sp, #92] @ 4-byte Spill
+ sbcs r1, r2, r1
+ ldr r2, [sp, #160] @ 4-byte Reload
+ str r1, [sp, #156] @ 4-byte Spill
+ mov r1, #0
+ sbcs r2, r5, r2
+ ldr r5, [sp, #76] @ 4-byte Reload
+ str r2, [sp, #160] @ 4-byte Spill
+ ldr r2, [sp, #164] @ 4-byte Reload
+ sbcs r2, r5, r2
+ str r2, [sp, #164] @ 4-byte Spill
+ ldr r2, [sp, #168] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #100] @ 4-byte Reload
+ str r2, [sp, #168] @ 4-byte Spill
+ ldr r2, [sp, #172] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #104] @ 4-byte Reload
+ str r2, [sp, #172] @ 4-byte Spill
+ ldr r2, [sp, #176] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #108] @ 4-byte Reload
+ str r2, [sp, #176] @ 4-byte Spill
+ ldr r2, [sp, #180] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #112] @ 4-byte Reload
+ str r2, [sp, #180] @ 4-byte Spill
+ ldr r2, [sp, #184] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #116] @ 4-byte Reload
+ str r2, [sp, #184] @ 4-byte Spill
+ ldr r2, [sp, #188] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #120] @ 4-byte Reload
+ str r2, [sp, #188] @ 4-byte Spill
+ ldr r2, [sp, #192] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #124] @ 4-byte Reload
+ str r2, [sp, #192] @ 4-byte Spill
+ ldr r2, [sp, #196] @ 4-byte Reload
+ sbcs r2, r7, r2
+ sbc r1, r1, #0
+ str r2, [sp, #196] @ 4-byte Spill
+ str r1, [sp, #128] @ 4-byte Spill
+ ldr r1, [r3, #32]
+ str r1, [sp, #116] @ 4-byte Spill
+ ldr r1, [r3, #36]
+ str r1, [sp, #120] @ 4-byte Spill
+ ldr r1, [r3, #40]
+ str r1, [sp, #136] @ 4-byte Spill
+ ldr r1, [r3, #44]
+ str r1, [sp, #140] @ 4-byte Spill
+ ldr r1, [r3, #48]
+ str r1, [sp, #144] @ 4-byte Spill
+ ldr r1, [r3, #52]
+ str r1, [sp, #148] @ 4-byte Spill
+ ldr r1, [r3, #56]
+ str r1, [sp, #152] @ 4-byte Spill
+ ldr r1, [r3, #60]
+ str r1, [sp, #124] @ 4-byte Spill
+ ldm r3, {r2, r6, r7}
+ ldm r11, {r5, r8, r11}
+ ldr r1, [r3, #28]
+ ldr r10, [r3, #24]
+ str r1, [sp, #112] @ 4-byte Spill
+ adds r1, r9, r2
+ ldr r9, [sp, #132] @ 4-byte Reload
+ adcs r2, r9, r6
+ ldr r6, [sp, #164] @ 4-byte Reload
+ adcs r3, r12, r7
+ ldr r7, [sp, #112] @ 4-byte Reload
+ adcs r12, lr, r5
+ ldr r5, [sp, #160] @ 4-byte Reload
+ adcs lr, r4, r8
+ ldr r4, [sp, #156] @ 4-byte Reload
+ adcs r4, r4, r11
+ adcs r5, r5, r10
+ adcs r8, r6, r7
+ ldr r7, [sp, #168] @ 4-byte Reload
+ ldr r6, [sp, #116] @ 4-byte Reload
+ adcs r11, r7, r6
+ ldr r7, [sp, #172] @ 4-byte Reload
+ ldr r6, [sp, #120] @ 4-byte Reload
+ adcs r6, r7, r6
+ ldr r7, [sp, #176] @ 4-byte Reload
+ str r6, [sp, #120] @ 4-byte Spill
+ ldr r6, [sp, #136] @ 4-byte Reload
+ adcs r7, r7, r6
+ ldr r6, [sp, #140] @ 4-byte Reload
+ str r7, [sp, #136] @ 4-byte Spill
+ ldr r7, [sp, #180] @ 4-byte Reload
+ adcs r7, r7, r6
+ ldr r6, [sp, #144] @ 4-byte Reload
+ str r7, [sp, #140] @ 4-byte Spill
+ ldr r7, [sp, #184] @ 4-byte Reload
+ adcs r7, r7, r6
+ ldr r6, [sp, #148] @ 4-byte Reload
+ str r7, [sp, #144] @ 4-byte Spill
+ ldr r7, [sp, #188] @ 4-byte Reload
+ adcs r7, r7, r6
+ ldr r6, [sp, #152] @ 4-byte Reload
+ str r7, [sp, #148] @ 4-byte Spill
+ ldr r7, [sp, #192] @ 4-byte Reload
+ adcs r7, r7, r6
+ ldr r6, [sp, #124] @ 4-byte Reload
+ str r7, [sp, #152] @ 4-byte Spill
+ ldr r7, [sp, #196] @ 4-byte Reload
+ adc r7, r7, r6
+ ldr r6, [sp, #80] @ 4-byte Reload
+ str r7, [sp, #124] @ 4-byte Spill
+ ldr r7, [sp, #128] @ 4-byte Reload
+ ands r10, r7, #1
+ moveq r1, r6
+ moveq r2, r9
+ str r1, [r0, #64]
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r2, [r0, #68]
+ ldr r2, [sp, #120] @ 4-byte Reload
+ moveq r3, r1
+ ldr r1, [sp, #88] @ 4-byte Reload
+ cmp r10, #0
+ str r3, [r0, #72]
+ moveq r12, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r12, [r0, #76]
+ moveq lr, r1
+ ldr r1, [sp, #156] @ 4-byte Reload
+ str lr, [r0, #80]
+ moveq r4, r1
+ ldr r1, [sp, #160] @ 4-byte Reload
+ cmp r10, #0
+ str r4, [r0, #84]
+ moveq r5, r1
+ ldr r1, [sp, #164] @ 4-byte Reload
+ str r5, [r0, #88]
+ moveq r8, r1
+ ldr r1, [sp, #168] @ 4-byte Reload
+ str r8, [r0, #92]
+ moveq r11, r1
+ ldr r1, [sp, #172] @ 4-byte Reload
+ cmp r10, #0
+ str r11, [r0, #96]
+ moveq r2, r1
+ ldr r1, [sp, #176] @ 4-byte Reload
+ str r2, [r0, #100]
+ ldr r2, [sp, #136] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #180] @ 4-byte Reload
+ str r2, [r0, #104]
+ ldr r2, [sp, #140] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #184] @ 4-byte Reload
+ cmp r10, #0
+ str r2, [r0, #108]
+ ldr r2, [sp, #144] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #188] @ 4-byte Reload
+ str r2, [r0, #112]
+ ldr r2, [sp, #148] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #192] @ 4-byte Reload
+ str r2, [r0, #116]
+ ldr r2, [sp, #152] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #196] @ 4-byte Reload
+ cmp r10, #0
+ str r2, [r0, #120]
+ ldr r2, [sp, #124] @ 4-byte Reload
+ moveq r2, r1
+ str r2, [r0, #124]
+ add sp, sp, #200
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end253:
+ .size mcl_fpDbl_sub16L, .Lfunc_end253-mcl_fpDbl_sub16L
+ .cantunwind
+ .fnend
+
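+@ Local helper: multiplies the 17-limb (544-bit) value at r1 by the 32-bit scalar
+@ in r2 and stores the 18-limb product at r0.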
+ .align 2
+ .type .LmulPv544x32,%function
+.LmulPv544x32: @ @mulPv544x32
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r11, lr}
+ ldr r12, [r1]
+ ldmib r1, {r3, lr}
+ ldr r9, [r1, #12]
+ umull r4, r8, lr, r2
+ umull lr, r6, r12, r2
+ mov r5, r4
+ mov r7, r6
+ str lr, [r0]
+ umull lr, r12, r9, r2
+ umlal r7, r5, r3, r2
+ str r5, [r0, #8]
+ str r7, [r0, #4]
+ umull r5, r7, r3, r2
+ adds r3, r6, r5
+ adcs r3, r7, r4
+ adcs r3, r8, lr
+ str r3, [r0, #12]
+ ldr r3, [r1, #16]
+ umull r7, r6, r3, r2
+ adcs r3, r12, r7
+ str r3, [r0, #16]
+ ldr r3, [r1, #20]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #20]
+ ldr r3, [r1, #24]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #24]
+ ldr r3, [r1, #28]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #28]
+ ldr r3, [r1, #32]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #32]
+ ldr r3, [r1, #36]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #36]
+ ldr r3, [r1, #40]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #40]
+ ldr r3, [r1, #44]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #44]
+ ldr r3, [r1, #48]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #48]
+ ldr r3, [r1, #52]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #52]
+ ldr r3, [r1, #56]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #56]
+ ldr r3, [r1, #60]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #60]
+ ldr r1, [r1, #64]
+ umull r3, r7, r1, r2
+ adcs r1, r5, r3
+ adc r2, r7, #0
+ str r1, [r0, #64]
+ str r2, [r0, #68]
+ pop {r4, r5, r6, r7, r8, r9, r11, lr}
+ mov pc, lr
+.Lfunc_end254:
+ .size .LmulPv544x32, .Lfunc_end254-.LmulPv544x32
+ .cantunwind
+ .fnend
+
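+@ z = x * y for a 17-limb x and a single 32-bit word y: the product is computed by
+@ .LmulPv544x32 into a stack buffer and its 18 limbs are copied out to z (r0).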
+ .globl mcl_fp_mulUnitPre17L
+ .align 2
+ .type mcl_fp_mulUnitPre17L,%function
+mcl_fp_mulUnitPre17L: @ @mcl_fp_mulUnitPre17L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #100
+ sub sp, sp, #100
+ mov r4, r0
+ add r0, sp, #24
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #92]
+ add r11, sp, #48
+ add lr, sp, #24
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #88]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #84]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #80]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #76]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r11, {r5, r6, r7, r8, r9, r10, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ stm r4, {r0, r1, r2, r3, r12, lr}
+ add r0, r4, #24
+ stm r0, {r5, r6, r7, r8, r9, r10, r11}
+ ldr r0, [sp, #4] @ 4-byte Reload
+ str r0, [r4, #52]
+ ldr r0, [sp, #8] @ 4-byte Reload
+ str r0, [r4, #56]
+ ldr r0, [sp, #12] @ 4-byte Reload
+ str r0, [r4, #60]
+ ldr r0, [sp, #16] @ 4-byte Reload
+ str r0, [r4, #64]
+ ldr r0, [sp, #20] @ 4-byte Reload
+ str r0, [r4, #68]
+ add sp, sp, #100
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end255:
+ .size mcl_fp_mulUnitPre17L, .Lfunc_end255-mcl_fp_mulUnitPre17L
+ .cantunwind
+ .fnend
+
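+@ Full 17-limb x 17-limb multiplication producing a double-width product, built up
+@ one .LmulPv544x32 column per word of the second operand (r2).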
+ .globl mcl_fpDbl_mulPre17L
+ .align 2
+ .type mcl_fpDbl_mulPre17L,%function
+mcl_fpDbl_mulPre17L: @ @mcl_fpDbl_mulPre17L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #340
+ sub sp, sp, #340
+ .pad #1024
+ sub sp, sp, #1024
+ mov r9, r2
+ add r6, sp, #1024
+ mov r4, r0
+ str r1, [sp, #128] @ 4-byte Spill
+ mov r5, r1
+ ldr r2, [r9]
+ add r0, r6, #264
+ str r9, [sp, #124] @ 4-byte Spill
+ str r4, [sp, #132] @ 4-byte Spill
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1356]
+ ldr r1, [sp, #1292]
+ ldr r2, [r9, #4]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #1352]
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #1296]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #1348]
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #1300]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #1344]
+ str r1, [sp, #44] @ 4-byte Spill
+ mov r1, r5
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #1340]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #1336]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #1332]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #1328]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #1324]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #1320]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #1316]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #1312]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #1308]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #1304]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #1288]
+ str r0, [r4]
+ add r0, sp, #1216
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1284]
+ add lr, sp, #1216
+ ldr r10, [sp, #1256]
+ ldr r8, [sp, #1252]
+ ldr r7, [sp, #1248]
+ ldr r6, [sp, #1244]
+ ldr r5, [sp, #1240]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #1280]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1276]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1272]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1268]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1264]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1260]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #56] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #4]
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r9, #8]
+ add r9, sp, #1024
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r6, r0
+ ldr r6, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r6
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ add r0, r9, #120
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1212]
+ ldr r9, [sp, #56] @ 4-byte Reload
+ ldr r8, [sp, #1184]
+ ldr r7, [sp, #1180]
+ ldr r11, [sp, #1176]
+ ldr r5, [sp, #1172]
+ ldr lr, [sp, #1168]
+ ldr r10, [sp, #1164]
+ ldr r12, [sp, #1160]
+ ldr r1, [sp, #1148]
+ ldr r2, [sp, #1152]
+ ldr r3, [sp, #1156]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1208]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1204]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1200]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1196]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1192]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1188]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1144]
+ adds r0, r0, r9
+ str r0, [r4, #8]
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r5, r0
+ ldr r5, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r2, [r5, #12]
+ adcs r0, r11, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r6
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ add r0, sp, #1072
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1140]
+ add lr, sp, #1072
+ ldr r10, [sp, #1112]
+ ldr r9, [sp, #1108]
+ ldr r8, [sp, #1104]
+ ldr r7, [sp, #1100]
+ ldr r6, [sp, #1096]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1136]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1132]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1128]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1124]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1120]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1116]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #56] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #12]
+ ldr r0, [sp, #80] @ 4-byte Reload
+ ldr r4, [sp, #128] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #16]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ add r0, sp, #1000
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1068]
+ add r11, sp, #1024
+ add lr, sp, #1000
+ ldr r6, [sp, #1040]
+ ldr r5, [sp, #1036]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1064]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1060]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1056]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1052]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1048]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1044]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r11, {r9, r10, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #56] @ 4-byte Reload
+ ldr r8, [sp, #132] @ 4-byte Reload
+ adds r0, r0, r7
+ str r0, [r8, #16]
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r5, r0
+ ldr r5, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r2, [r5, #20]
+ adcs r0, r6, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ add r0, sp, #928
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #996]
+ add r11, sp, #952
+ add lr, sp, #928
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #992]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #988]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #984]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #980]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #976]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r11, {r6, r7, r8, r9, r10, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r4, [sp, #56] @ 4-byte Reload
+ adds r0, r0, r4
+ ldr r4, [sp, #132] @ 4-byte Reload
+ str r0, [r4, #20]
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #24]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r6, r0
+ ldr r6, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r6
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ add r0, sp, #856
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #924]
+ add r11, sp, #880
+ add lr, sp, #856
+ ldr r7, [sp, #896]
+ ldr r5, [sp, #892]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #920]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #916]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #912]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #908]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #904]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #900]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r11, {r9, r10, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r8, [sp, #56] @ 4-byte Reload
+ adds r0, r0, r8
+ str r0, [r4, #24]
+ ldr r0, [sp, #80] @ 4-byte Reload
+ ldr r4, [sp, #124] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r4, #28]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r5, r0
+ mov r5, r6
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ add r0, sp, #784
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #852]
+ add r10, sp, #808
+ add lr, sp, #784
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #848]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #844]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #840]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #836]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #832]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #828]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #824]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r10, {r6, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #56] @ 4-byte Reload
+ ldr r11, [sp, #132] @ 4-byte Reload
+ adds r0, r0, r7
+ str r0, [r11, #28]
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r4, #32]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ add r0, sp, #712
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #780]
+ add r8, sp, #748
+ add r11, sp, #736
+ add lr, sp, #712
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #776]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #772]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #768]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #764]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r8, {r4, r6, r7, r8}
+ ldm r11, {r9, r10, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r5, [sp, #56] @ 4-byte Reload
+ adds r0, r0, r5
+ ldr r5, [sp, #132] @ 4-byte Reload
+ str r0, [r5, #32]
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r6, r0
+ ldr r6, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ ldr r2, [r6, #36]
+ adcs r0, r7, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ add r0, sp, #640
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #708]
+ add r10, sp, #664
+ add lr, sp, #640
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #704]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #700]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #696]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #692]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #688]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #684]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #680]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r10, {r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #56] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r5, #36]
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r6, #40]
+ mov r6, r4
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ add r0, sp, #568
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #636]
+ add r11, sp, #592
+ add lr, sp, #568
+ ldr r7, [sp, #608]
+ ldr r4, [sp, #604]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #632]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #628]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #624]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #620]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #616]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #612]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r11, {r9, r10, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r8, [sp, #56] @ 4-byte Reload
+ adds r0, r0, r8
+ str r0, [r5, #40]
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ ldr r2, [r4, #44]
+ adcs r0, r7, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ add r0, sp, #496
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #564]
+ add r10, sp, #520
+ add lr, sp, #496
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #560]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #556]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #552]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #548]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #544]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #540]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm r10, {r6, r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #48] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r5, #44]
+ ldr r0, [sp, #100] @ 4-byte Reload
+ ldr r5, [sp, #128] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r4, #48]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r8, r0
+ mov r8, r4
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ add r0, sp, #424
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #492]
+ add lr, sp, #428
+ ldr r9, [sp, #460]
+ ldr r7, [sp, #456]
+ ldr r11, [sp, #452]
+ ldr r10, [sp, #448]
+ ldr r3, [sp, #424]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #488]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #484]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #480]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #476]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #472]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #468]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #464]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r12, lr}
+ ldr r6, [sp, #48] @ 4-byte Reload
+ ldr r4, [sp, #120] @ 4-byte Reload
+ adds r3, r3, r6
+ ldr r6, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ str r3, [r6, #48]
+ ldr r3, [r8, #52]
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r2, r0
+ mov r2, r3
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ add r0, sp, #352
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #420]
+ add r11, sp, #380
+ add r12, sp, #356
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #416]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #412]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #408]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #404]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #400]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #396]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r11, {r4, r9, r10, r11}
+ ldr r5, [sp, #376]
+ ldr lr, [sp, #352]
+ ldm r12, {r0, r1, r2, r3, r12}
+ ldr r7, [sp, #52] @ 4-byte Reload
+ adds r7, lr, r7
+ ldr lr, [r8, #56]
+ str r7, [r6, #52]
+ ldr r6, [sp, #120] @ 4-byte Reload
+ add r7, sp, #280
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r2, r0
+ mov r2, lr
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ mov r0, r7
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #348]
+ add r8, sp, #316
+ add r11, sp, #304
+ add lr, sp, #280
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #344]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #340]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #336]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #332]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #328]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r8, {r6, r7, r8}
+ ldm r11, {r9, r10, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r5, [sp, #52] @ 4-byte Reload
+ adds r0, r0, r5
+ ldr r5, [sp, #132] @ 4-byte Reload
+ str r0, [r5, #56]
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r8, r0
+ ldr r8, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r2, [r8, #60]
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ add r0, sp, #208
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #276]
+ add lr, sp, #228
+ add r12, sp, #212
+ ldr r6, [sp, #248]
+ ldr r9, [sp, #244]
+ ldr r4, [sp, #240]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #272]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #268]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #264]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #260]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #256]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #252]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm lr, {r10, r11, lr}
+ ldr r3, [sp, #208]
+ ldm r12, {r0, r1, r2, r12}
+ ldr r7, [sp, #88] @ 4-byte Reload
+ adds r3, r3, r7
+ str r3, [r5, #60]
+ ldr r5, [sp, #120] @ 4-byte Reload
+ ldr r3, [r8, #64]
+ adcs r8, r0, r5
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r5, r1, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r2, r0
+ mov r2, r3
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r4, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ add r0, sp, #136
+ bl .LmulPv544x32(PLT)
+ add r3, sp, #136
+ add r11, sp, #172
+ add lr, sp, #152
+ ldm r3, {r0, r1, r2, r3}
+ adds r7, r0, r8
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r6, r1, r5
+ adcs r5, r2, r0
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r4, r3, r0
+ ldr r0, [sp, #204]
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #200]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #196]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #192]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #188]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldm lr, {r0, r2, r3, r12, lr}
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r7, [r1, #64]
+ str r6, [r1, #68]
+ str r5, [r1, #72]
+ ldr r5, [sp, #44] @ 4-byte Reload
+ str r4, [r1, #76]
+ ldr r4, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [r1, #80]
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r2, r2, r4
+ str r2, [r1, #84]
+ ldr r2, [sp, #88] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [r1, #88]
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r2, r12, r2
+ str r2, [r1, #92]
+ ldr r2, [sp, #96] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [r1, #96]
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r2, r8, r2
+ str r2, [r1, #100]
+ ldr r2, [sp, #104] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [r1, #104]
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r2, r10, r2
+ str r2, [r1, #108]
+ ldr r2, [sp, #72] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [r1, #112]
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #76] @ 4-byte Reload
+ str r0, [r1, #116]
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #80] @ 4-byte Reload
+ str r0, [r1, #120]
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #84] @ 4-byte Reload
+ str r0, [r1, #124]
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #128] @ 4-byte Reload
+ str r0, [r1, #128]
+ adc r2, r2, #0
+ str r2, [r1, #132]
+ add sp, sp, #340
+ add sp, sp, #1024
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end256:
+ .size mcl_fpDbl_mulPre17L, .Lfunc_end256-mcl_fpDbl_mulPre17L
+ .cantunwind
+ .fnend
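+@ Note on the routine above (illustrative only, not part of the generated file):
+@ mcl_fpDbl_mulPre17L computes the full 1088-bit product of two 544-bit operands
+@ (17 x 32-bit limbs), writing 34 result words (offsets 0..132). Each call to
+@ .LmulPv544x32 multiplies the 17-limb operand by one 32-bit limb of the other
+@ operand; the adcs chains fold that row into the running partial sum and one
+@ result word is stored per row. A minimal C sketch of the same operand-scanning
+@ (schoolbook) product, with hypothetical names, would be:
+@
+@   #include <stdint.h>
+@   #define N 17  /* 17 x 32-bit limbs = 544 bits */
+@   /* z[0..2N-1] = x[0..N-1] * y[0..N-1], full double-width product, no reduction */
+@   static void fpDbl_mulPre17_sketch(uint32_t z[2 * N],
+@                                     const uint32_t x[N], const uint32_t y[N])
+@   {
+@       for (int i = 0; i < 2 * N; i++) z[i] = 0;
+@       for (int i = 0; i < N; i++) {          /* one row per limb of y */
+@           uint32_t carry = 0;
+@           for (int j = 0; j < N; j++) {
+@               /* 32x32 -> 64-bit partial product plus accumulator and carry */
+@               uint64_t t = (uint64_t)x[j] * y[i] + z[i + j] + carry;
+@               z[i + j] = (uint32_t)t;
+@               carry = (uint32_t)(t >> 32);
+@           }
+@           z[i + N] = carry;  /* top word of the row, cf. the final adc above */
+@       }
+@   }
+@
+@ The generated code reaches the same result row-wise rather than limb-wise;
+@ mcl_fpDbl_sqrPre17L below follows the identical pattern with both operands
+@ taken from the single input.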
+
+ .globl mcl_fpDbl_sqrPre17L
+ .align 2
+ .type mcl_fpDbl_sqrPre17L,%function
+mcl_fpDbl_sqrPre17L: @ @mcl_fpDbl_sqrPre17L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #332
+ sub sp, sp, #332
+ .pad #1024
+ sub sp, sp, #1024
+ mov r7, r1
+ mov r4, r0
+ add r0, sp, #1280
+ ldr r2, [r7]
+ str r7, [sp, #120] @ 4-byte Spill
+ str r4, [sp, #124] @ 4-byte Spill
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1348]
+ ldr r1, [sp, #1284]
+ ldr r2, [r7, #4]
+ add r11, sp, #1024
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #1344]
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #1288]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #1340]
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #1292]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #1336]
+ str r1, [sp, #40] @ 4-byte Spill
+ mov r1, r7
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #1332]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #1328]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #1324]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #1320]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #1316]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #1312]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #1308]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #1304]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #1300]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #1296]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #1280]
+ str r0, [r4]
+ add r0, r11, #184
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1276]
+ add r10, sp, #1232
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1272]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1268]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1264]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1260]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1256]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1252]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r10, {r5, r6, r8, r9, r10}
+ ldr r0, [sp, #1208]
+ ldr r11, [sp, #52] @ 4-byte Reload
+ ldr lr, [sp, #1228]
+ ldr r12, [sp, #1224]
+ ldr r1, [sp, #1212]
+ ldr r2, [sp, #1216]
+ ldr r3, [sp, #1220]
+ adds r0, r0, r11
+ str r0, [r4, #4]
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r7, #8]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r7
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ add r0, sp, #1136
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1204]
+ add r12, sp, #1136
+ ldr r6, [sp, #1176]
+ ldr r4, [sp, #1172]
+ ldr lr, [sp, #1168]
+ ldr r11, [sp, #1164]
+ ldr r10, [sp, #1160]
+ ldr r9, [sp, #1156]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1200]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1196]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1192]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1188]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1184]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1180]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r12, {r0, r1, r2, r3, r12}
+ ldr r5, [sp, #52] @ 4-byte Reload
+ ldr r8, [sp, #124] @ 4-byte Reload
+ adds r0, r0, r5
+ str r0, [r8, #8]
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r7, #12]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r4, r0
+ add r4, sp, #1024
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r7
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ add r0, r4, #40
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1132]
+ add r11, sp, #1088
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1128]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1124]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1120]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1116]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1112]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r11, {r5, r6, r8, r9, r10, r11}
+ ldr r0, [sp, #1064]
+ ldr r4, [sp, #52] @ 4-byte Reload
+ ldr lr, [sp, #1084]
+ ldr r12, [sp, #1080]
+ ldr r1, [sp, #1068]
+ ldr r2, [sp, #1072]
+ ldr r3, [sp, #1076]
+ adds r0, r0, r4
+ ldr r4, [sp, #124] @ 4-byte Reload
+ str r0, [r4, #12]
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r7, #16]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r7
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ add r0, sp, #992
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1060]
+ add lr, sp, #1012
+ add r12, sp, #992
+ ldr r6, [sp, #1032]
+ ldr r5, [sp, #1028]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1056]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1052]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1048]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1044]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1040]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1036]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm lr, {r9, r10, r11, lr}
+ ldm r12, {r0, r1, r2, r3, r12}
+ ldr r8, [sp, #52] @ 4-byte Reload
+ adds r0, r0, r8
+ str r0, [r4, #16]
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r7, #20]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r7
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ add r0, sp, #920
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #988]
+ add r10, sp, #944
+ add lr, sp, #920
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #984]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #980]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #976]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #972]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #968]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #964]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r10, {r5, r6, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #52] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #20]
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r7, #24]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r7
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ add r0, sp, #848
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #916]
+ add lr, sp, #868
+ add r12, sp, #848
+ ldr r6, [sp, #888]
+ ldr r5, [sp, #884]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #912]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #908]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #904]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #900]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #896]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #892]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm lr, {r9, r10, r11, lr}
+ ldm r12, {r0, r1, r2, r3, r12}
+ ldr r8, [sp, #52] @ 4-byte Reload
+ adds r0, r0, r8
+ str r0, [r4, #24]
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r7, #28]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r7
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ add r0, sp, #776
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #844]
+ add r10, sp, #800
+ add lr, sp, #776
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #840]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #836]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #832]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #828]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #824]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #820]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r10, {r5, r6, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #52] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #28]
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r7, #32]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r7
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ add r0, sp, #704
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #772]
+ add lr, sp, #724
+ add r12, sp, #704
+ ldr r6, [sp, #744]
+ ldr r5, [sp, #740]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #768]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #764]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #760]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #756]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #752]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #748]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm lr, {r9, r10, r11, lr}
+ ldm r12, {r0, r1, r2, r3, r12}
+ ldr r8, [sp, #52] @ 4-byte Reload
+ adds r0, r0, r8
+ str r0, [r4, #32]
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r7, #36]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r7
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ add r0, sp, #632
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #700]
+ add r10, sp, #656
+ add lr, sp, #632
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #696]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #692]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #688]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #684]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #680]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #676]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r10, {r5, r6, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #52] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #36]
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r7, #40]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r7
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ add r0, sp, #560
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #628]
+ add r7, sp, #596
+ add lr, sp, #580
+ add r12, sp, #560
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #624]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #620]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #616]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #612]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #608]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r7, {r5, r6, r7}
+ ldm lr, {r9, r10, r11, lr}
+ ldm r12, {r0, r1, r2, r3, r12}
+ ldr r8, [sp, #52] @ 4-byte Reload
+ adds r0, r0, r8
+ str r0, [r4, #40]
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ ldr r2, [r1, #44]
+ adc r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ add r0, sp, #488
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #556]
+ add r10, sp, #512
+ add lr, sp, #488
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #552]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #548]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #544]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #540]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #536]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm r10, {r5, r6, r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #44] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #44]
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r9, r0
+ ldr r9, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r2, [r9, #48]
+ adcs r0, r10, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ add r0, sp, #416
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #484]
+ add r10, sp, #444
+ add lr, sp, #420
+ mov r8, r4
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #480]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #476]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #472]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #468]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #464]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #460]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #456]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm r10, {r5, r6, r10}
+ ldr r11, [sp, #440]
+ ldr r12, [sp, #416]
+ ldm lr, {r0, r1, r2, r3, lr}
+ ldr r7, [sp, #44] @ 4-byte Reload
+ adds r7, r12, r7
+ str r7, [r4, #48]
+ ldr r7, [sp, #116] @ 4-byte Reload
+ mov r4, r9
+ add r9, sp, #344
+ ldr r12, [r4, #52]
+ adcs r7, r0, r7
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r2, r0
+ mov r2, r12
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r6, r0
+ mov r6, r4
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ mov r0, r9
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #412]
+ add r11, sp, #368
+ add r12, sp, #348
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #408]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #404]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #400]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #396]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #392]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #388]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r11, {r4, r5, r9, r10, r11}
+ ldr lr, [sp, #344]
+ ldm r12, {r0, r1, r2, r3, r12}
+ adds r7, lr, r7
+ str r7, [r8, #52]
+ mov r7, r6
+ ldr r6, [sp, #116] @ 4-byte Reload
+ add r8, sp, #272
+ ldr lr, [r7, #56]
+ adcs r0, r0, r6
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r2, r0
+ mov r2, lr
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r4, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ mov r0, r8
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #340]
+ add r8, sp, #308
+ add lr, sp, #292
+ add r12, sp, #272
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #336]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #332]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #328]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #324]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r8, {r5, r6, r7, r8}
+ ldm lr, {r9, r10, r11, lr}
+ ldm r12, {r0, r1, r2, r3, r12}
+ ldr r4, [sp, #48] @ 4-byte Reload
+ adds r0, r0, r4
+ ldr r4, [sp, #124] @ 4-byte Reload
+ str r0, [r4, #56]
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r6, r0
+ ldr r6, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r2, [r6, #60]
+ adcs r0, r7, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r6
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ add r0, sp, #200
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #268]
+ add r9, sp, #232
+ add lr, sp, #204
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #264]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #260]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #256]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #252]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #248]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #244]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r9, {r5, r8, r9}
+ ldr r10, [sp, #228]
+ ldr r12, [sp, #200]
+ ldm lr, {r0, r1, r2, r3, r11, lr}
+ ldr r7, [sp, #80] @ 4-byte Reload
+ adds r7, r12, r7
+ ldr r12, [r6, #64]
+ str r7, [r4, #60]
+ ldr r4, [sp, #116] @ 4-byte Reload
+ adcs r7, r0, r4
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r4, r1, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r0, r2, r0
+ mov r2, r12
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r6
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ add r0, sp, #128
+ bl .LmulPv544x32(PLT)
+ add r3, sp, #128
+ add r11, sp, #164
+ add lr, sp, #144
+ ldm r3, {r0, r1, r2, r3}
+ adds r7, r0, r7
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r6, r1, r4
+ adcs r5, r2, r0
+ ldr r0, [sp, #4] @ 4-byte Reload
+ adcs r4, r3, r0
+ ldr r0, [sp, #196]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #192]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #188]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #184]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #180]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldm lr, {r0, r2, r3, r12, lr}
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r7, [r1, #64]
+ str r6, [r1, #68]
+ str r5, [r1, #72]
+ ldr r5, [sp, #40] @ 4-byte Reload
+ str r4, [r1, #76]
+ ldr r4, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [r1, #80]
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r2, r2, r4
+ str r2, [r1, #84]
+ ldr r2, [sp, #80] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [r1, #88]
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r2, r12, r2
+ str r2, [r1, #92]
+ ldr r2, [sp, #88] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [r1, #96]
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r2, r8, r2
+ str r2, [r1, #100]
+ ldr r2, [sp, #96] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [r1, #104]
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r2, r10, r2
+ str r2, [r1, #108]
+ ldr r2, [sp, #64] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [r1, #112]
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #68] @ 4-byte Reload
+ str r0, [r1, #116]
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #72] @ 4-byte Reload
+ str r0, [r1, #120]
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #76] @ 4-byte Reload
+ str r0, [r1, #124]
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #120] @ 4-byte Reload
+ str r0, [r1, #128]
+ adc r2, r2, #0
+ str r2, [r1, #132]
+ add sp, sp, #332
+ add sp, sp, #1024
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end257:
+ .size mcl_fpDbl_sqrPre17L, .Lfunc_end257-mcl_fpDbl_sqrPre17L
+ .cantunwind
+ .fnend
+
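+@ mcl_fp_mont17L: Montgomery multiplication for Fp with 17 x 32-bit limbs (544 bits),
+@ presumably z = x*y*R^-1 mod p with arguments (z, x, y, p) in r0-r3. Each round calls
+@ .LmulPv544x32 for a 544-bit-by-32-bit product, and the word loaded from [r3, #-4]
+@ (likely -p^-1 mod 2^32) feeds `mul r2, r0, r5` to form the quotient for one
+@ reduction step, with the partial sums folded in through the adcs carry chains below.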
+ .globl mcl_fp_mont17L
+ .align 2
+ .type mcl_fp_mont17L,%function
+mcl_fp_mont17L: @ @mcl_fp_mont17L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #556
+ sub sp, sp, #556
+ .pad #2048
+ sub sp, sp, #2048
+ add r12, sp, #140
+ mov r4, r3
+ stm r12, {r1, r2, r3}
+ str r0, [sp, #96] @ 4-byte Spill
+ add r0, sp, #2528
+ ldr r5, [r3, #-4]
+ ldr r2, [r2]
+ str r5, [sp, #136] @ 4-byte Spill
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #2528]
+ ldr r1, [sp, #2532]
+ mul r2, r0, r5
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #2596]
+ str r1, [sp, #104] @ 4-byte Spill
+ ldr r1, [sp, #2536]
+ add r5, sp, #2048
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #2592]
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [sp, #2540]
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #2588]
+ str r1, [sp, #92] @ 4-byte Spill
+ mov r1, r4
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #2584]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #2580]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #2576]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #2572]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #2568]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #2564]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #2560]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #2556]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #2552]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #2548]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #2544]
+ str r0, [sp, #48] @ 4-byte Spill
+ add r0, r5, #408
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #2524]
+ ldr r1, [sp, #140] @ 4-byte Reload
+ ldr r4, [sp, #2484]
+ ldr r10, [sp, #2480]
+ ldr r6, [sp, #2476]
+ ldr r7, [sp, #2472]
+ ldr r11, [sp, #2456]
+ ldr r9, [sp, #2460]
+ ldr r5, [sp, #2464]
+ ldr r8, [sp, #2468]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #2520]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #2516]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2512]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2508]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #2504]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #2500]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #2496]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #2492]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #2488]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #144] @ 4-byte Reload
+ ldr r2, [r0, #4]
+ add r0, sp, #2384
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #80] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [sp, #20] @ 4-byte Reload
+ ldr r3, [sp, #2400]
+ ldr r12, [sp, #2404]
+ ldr lr, [sp, #2408]
+ adds r0, r11, r0
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r9, r0
+ ldr r9, [sp, #2424]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ ldr r11, [sp, #104] @ 4-byte Reload
+ adcs r0, r5, r0
+ ldr r5, [sp, #2416]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r8, r0
+ ldr r8, [sp, #2384]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r7, r0
+ ldr r7, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r6, r0
+ ldr r6, [sp, #2420]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r10, r0
+ ldr r10, [sp, #2428]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #2412]
+ adcs r1, r2, r1
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ mov r0, #0
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [sp, #108] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #112] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #116] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #120] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #124] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #128] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #68] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #132] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #2396]
+ adc r0, r0, #0
+ adds r8, r11, r8
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #2392]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #2452]
+ str r8, [sp, #24] @ 4-byte Spill
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #2448]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #2444]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #2440]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #2436]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #2432]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #2388]
+ adcs r0, r7, r0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #4] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r4
+ add r4, sp, #2048
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ mul r2, r8, r0
+ add r0, r4, #264
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #2380]
+ add r10, sp, #2320
+ ldr r7, [sp, #2340]
+ ldr r6, [sp, #2336]
+ ldr r4, [sp, #2312]
+ ldr r11, [sp, #2316]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #2376]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2372]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2368]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2364]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #2360]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #2356]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #2352]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #2348]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #2344]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r5, r8, r9, r10}
+ ldr r0, [sp, #144] @ 4-byte Reload
+ ldr r1, [sp, #140] @ 4-byte Reload
+ ldr r2, [r0, #8]
+ add r0, sp, #2240
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #2252]
+ ldr r3, [sp, #2256]
+ ldr r12, [sp, #2260]
+ ldr lr, [sp, #2264]
+ adds r0, r0, r4
+ ldr r4, [sp, #2268]
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r11, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #2272]
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #2240]
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #2280]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #2284]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #2276]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #2248]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r8, r11, r8
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #2308]
+ str r8, [sp, #36] @ 4-byte Spill
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #2304]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2300]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2296]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2292]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #2288]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #2244]
+ adcs r0, r7, r0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r4
+ add r4, sp, #2048
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ mul r2, r8, r0
+ add r0, r4, #120
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #2236]
+ add r10, sp, #2176
+ ldr r7, [sp, #2196]
+ ldr r6, [sp, #2192]
+ ldr r4, [sp, #2168]
+ ldr r11, [sp, #2172]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #2232]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2228]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2224]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2220]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #2216]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #2212]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #2208]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #2204]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #2200]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r5, r8, r9, r10}
+ ldr r0, [sp, #144] @ 4-byte Reload
+ ldr r1, [sp, #140] @ 4-byte Reload
+ ldr r2, [r0, #12]
+ add r0, sp, #2096
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #2108]
+ ldr r3, [sp, #2112]
+ ldr r12, [sp, #2116]
+ ldr lr, [sp, #2120]
+ adds r0, r0, r4
+ ldr r4, [sp, #2124]
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r11, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #2128]
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #2096]
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #2136]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #2140]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #2132]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #2104]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r8, r11, r8
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #2164]
+ str r8, [sp, #36] @ 4-byte Spill
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #2160]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2156]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2152]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2148]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #2144]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #2100]
+ adcs r0, r7, r0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r4
+ add r4, sp, #1024
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ mul r2, r8, r0
+ add r0, r4, #1000
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #2092]
+ add r10, sp, #2032
+ ldr r7, [sp, #2052]
+ ldr r6, [sp, #2048]
+ ldr r4, [sp, #2024]
+ ldr r11, [sp, #2028]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #2088]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2084]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2080]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2076]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #2072]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #2068]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #2064]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #2060]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #2056]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r5, r8, r9, r10}
+ ldr r0, [sp, #144] @ 4-byte Reload
+ ldr r1, [sp, #140] @ 4-byte Reload
+ ldr r2, [r0, #16]
+ add r0, sp, #1952
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #1964]
+ ldr r3, [sp, #1968]
+ ldr r12, [sp, #1972]
+ ldr lr, [sp, #1976]
+ adds r0, r0, r4
+ ldr r4, [sp, #1980]
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r11, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1984]
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1952]
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1992]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1996]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1988]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1960]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r8, r11, r8
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #2020]
+ str r8, [sp, #36] @ 4-byte Spill
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #2016]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2012]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2008]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2004]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #2000]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1956]
+ adcs r0, r7, r0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r4
+ add r4, sp, #1024
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ mul r2, r8, r0
+ add r0, r4, #856
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1948]
+ add r10, sp, #1888
+ ldr r7, [sp, #1908]
+ ldr r6, [sp, #1904]
+ ldr r4, [sp, #1880]
+ ldr r11, [sp, #1884]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1944]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1940]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1936]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1932]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1928]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1924]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1920]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1916]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1912]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r5, r8, r9, r10}
+ ldr r0, [sp, #144] @ 4-byte Reload
+ ldr r1, [sp, #140] @ 4-byte Reload
+ ldr r2, [r0, #20]
+ add r0, sp, #1808
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #1820]
+ ldr r3, [sp, #1824]
+ ldr r12, [sp, #1828]
+ ldr lr, [sp, #1832]
+ adds r0, r0, r4
+ ldr r4, [sp, #1836]
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r11, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1840]
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1808]
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1848]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1852]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1844]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1816]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r8, r11, r8
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #1876]
+ str r8, [sp, #36] @ 4-byte Spill
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1872]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1868]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1864]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1860]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1856]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1812]
+ adcs r0, r7, r0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r4
+ add r4, sp, #1024
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ mul r2, r8, r0
+ add r0, r4, #712
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1804]
+ add r10, sp, #1744
+ ldr r7, [sp, #1764]
+ ldr r6, [sp, #1760]
+ ldr r4, [sp, #1736]
+ ldr r11, [sp, #1740]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1800]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1796]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1792]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1788]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1784]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1780]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1776]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1772]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1768]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r5, r8, r9, r10}
+ ldr r0, [sp, #144] @ 4-byte Reload
+ ldr r1, [sp, #140] @ 4-byte Reload
+ ldr r2, [r0, #24]
+ add r0, sp, #1664
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #1676]
+ ldr r3, [sp, #1680]
+ ldr r12, [sp, #1684]
+ ldr lr, [sp, #1688]
+ adds r0, r0, r4
+ ldr r4, [sp, #1692]
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r11, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1696]
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1664]
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1704]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1708]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1700]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1672]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r8, r11, r8
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #1732]
+ str r8, [sp, #36] @ 4-byte Spill
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1728]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1724]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1720]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1716]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1712]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1668]
+ adcs r0, r7, r0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r4
+ add r4, sp, #1024
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ mul r2, r8, r0
+ add r0, r4, #568
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1660]
+ add r10, sp, #1600
+ ldr r7, [sp, #1620]
+ ldr r6, [sp, #1616]
+ ldr r4, [sp, #1592]
+ ldr r11, [sp, #1596]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1656]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1652]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1648]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1644]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1640]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1636]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1632]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1628]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1624]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r5, r8, r9, r10}
+ ldr r0, [sp, #144] @ 4-byte Reload
+ ldr r1, [sp, #140] @ 4-byte Reload
+ ldr r2, [r0, #28]
+ add r0, sp, #1520
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #1532]
+ ldr r3, [sp, #1536]
+ ldr r12, [sp, #1540]
+ ldr lr, [sp, #1544]
+ adds r0, r0, r4
+ ldr r4, [sp, #1548]
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r11, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1552]
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1520]
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1560]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1564]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1556]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1528]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r8, r11, r8
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #1588]
+ str r8, [sp, #36] @ 4-byte Spill
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1584]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1580]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1576]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1572]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1568]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1524]
+ adcs r0, r7, r0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r4
+ add r4, sp, #1024
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ mul r2, r8, r0
+ add r0, r4, #424
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1516]
+ add r10, sp, #1456
+ ldr r7, [sp, #1476]
+ ldr r6, [sp, #1472]
+ ldr r4, [sp, #1448]
+ ldr r11, [sp, #1452]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1512]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1508]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1504]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1500]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1496]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1492]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1488]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1484]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1480]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r5, r8, r9, r10}
+ ldr r0, [sp, #144] @ 4-byte Reload
+ ldr r1, [sp, #140] @ 4-byte Reload
+ ldr r2, [r0, #32]
+ add r0, sp, #1376
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #1388]
+ ldr r3, [sp, #1392]
+ ldr r12, [sp, #1396]
+ ldr lr, [sp, #1400]
+ adds r0, r0, r4
+ ldr r4, [sp, #1404]
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r11, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1408]
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1376]
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1416]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1420]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1412]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1384]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r8, r11, r8
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #1444]
+ str r8, [sp, #36] @ 4-byte Spill
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1440]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1436]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1432]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1428]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1424]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1380]
+ adcs r0, r7, r0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r4
+ add r4, sp, #1024
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ mul r2, r8, r0
+ add r0, r4, #280
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1372]
+ add r10, sp, #1312
+ ldr r7, [sp, #1332]
+ ldr r6, [sp, #1328]
+ ldr r4, [sp, #1304]
+ ldr r11, [sp, #1308]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1368]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1364]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1360]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1356]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1352]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1348]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1344]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1340]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1336]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r5, r8, r9, r10}
+ ldr r0, [sp, #144] @ 4-byte Reload
+ ldr r1, [sp, #140] @ 4-byte Reload
+ ldr r2, [r0, #36]
+ add r0, sp, #1232
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #1244]
+ ldr r3, [sp, #1248]
+ ldr r12, [sp, #1252]
+ ldr lr, [sp, #1256]
+ adds r0, r0, r4
+ ldr r4, [sp, #1260]
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r11, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1264]
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1232]
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1272]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1276]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1268]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1240]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r8, r11, r8
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #1300]
+ str r8, [sp, #36] @ 4-byte Spill
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1296]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1292]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1288]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1284]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1280]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1236]
+ adcs r0, r7, r0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r4
+ add r4, sp, #1024
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #136] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ mul r2, r8, r5
+ adcs r0, r0, r6
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ add r0, r4, #136
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1228]
+ ldr r1, [sp, #140] @ 4-byte Reload
+ ldr r11, [sp, #1184]
+ ldr r4, [sp, #1180]
+ ldr r6, [sp, #1176]
+ ldr r7, [sp, #1160]
+ ldr r8, [sp, #1164]
+ ldr r9, [sp, #1168]
+ ldr r10, [sp, #1172]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1224]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1220]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1216]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1212]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1208]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1204]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1200]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1196]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1192]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1188]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #144] @ 4-byte Reload
+ ldr r2, [r0, #40]
+ add r0, sp, #1088
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r1, [sp, #128] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ add lr, sp, #1104
+ adds r0, r0, r7
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r8
+ add r8, sp, #1088
+ adcs r1, r1, r9
+ str r1, [sp, #128] @ 4-byte Spill
+ ldr r1, [sp, #124] @ 4-byte Reload
+ adcs r1, r1, r10
+ str r1, [sp, #124] @ 4-byte Spill
+ ldr r1, [sp, #120] @ 4-byte Reload
+ adcs r1, r1, r6
+ str r1, [sp, #120] @ 4-byte Spill
+ ldr r1, [sp, #116] @ 4-byte Reload
+ adcs r1, r1, r4
+ str r1, [sp, #116] @ 4-byte Spill
+ ldr r1, [sp, #112] @ 4-byte Reload
+ adcs r1, r1, r11
+ str r1, [sp, #112] @ 4-byte Spill
+ ldr r1, [sp, #108] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #108] @ 4-byte Spill
+ ldr r1, [sp, #104] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #104] @ 4-byte Spill
+ ldr r1, [sp, #100] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [sp, #92] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #92] @ 4-byte Spill
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adc r1, r1, #0
+ str r1, [sp, #60] @ 4-byte Spill
+ ldm r8, {r4, r6, r8}
+ ldr r7, [sp, #1100]
+ ldr r10, [sp, #1140]
+ ldr r9, [sp, #1136]
+ adds r0, r0, r4
+ ldr r4, [sp, #1128]
+ mul r1, r0, r5
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #1156]
+ ldr r5, [sp, #1132]
+ str r1, [sp, #52] @ 4-byte Spill
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1152]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1148]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1144]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #128] @ 4-byte Reload
+ adcs r6, r11, r6
+ str r6, [sp, #128] @ 4-byte Spill
+ ldr r6, [sp, #124] @ 4-byte Reload
+ adcs r6, r6, r8
+ str r6, [sp, #124] @ 4-byte Spill
+ ldr r6, [sp, #120] @ 4-byte Reload
+ adcs r6, r6, r7
+ str r6, [sp, #120] @ 4-byte Spill
+ ldr r6, [sp, #116] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ add r0, sp, #1016
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1084]
+ add r10, sp, #1016
+ ldr r11, [sp, #1044]
+ ldr r4, [sp, #1040]
+ ldr r5, [sp, #1036]
+ ldr r6, [sp, #1032]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1080]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1076]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1072]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1068]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1064]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1060]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1056]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1052]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1048]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r7, r8, r9, r10}
+ ldr r0, [sp, #144] @ 4-byte Reload
+ ldr r1, [sp, #140] @ 4-byte Reload
+ ldr r2, [r0, #44]
+ add r0, sp, #944
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #132] @ 4-byte Reload
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r2, [sp, #16] @ 4-byte Reload
+ add lr, sp, #960
+ adds r0, r0, r7
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r8
+ add r8, sp, #944
+ adcs r1, r1, r9
+ str r1, [sp, #128] @ 4-byte Spill
+ ldr r1, [sp, #120] @ 4-byte Reload
+ adcs r1, r1, r10
+ add r10, sp, #984
+ str r1, [sp, #124] @ 4-byte Spill
+ ldr r1, [sp, #116] @ 4-byte Reload
+ adcs r1, r1, r6
+ str r1, [sp, #120] @ 4-byte Spill
+ ldr r1, [sp, #112] @ 4-byte Reload
+ adcs r1, r1, r5
+ str r1, [sp, #116] @ 4-byte Spill
+ ldr r1, [sp, #108] @ 4-byte Reload
+ adcs r1, r1, r4
+ str r1, [sp, #112] @ 4-byte Spill
+ ldr r1, [sp, #104] @ 4-byte Reload
+ adcs r1, r1, r11
+ str r1, [sp, #108] @ 4-byte Spill
+ ldr r1, [sp, #100] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #104] @ 4-byte Spill
+ ldr r1, [sp, #92] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #92] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adc r1, r1, #0
+ str r1, [sp, #32] @ 4-byte Spill
+ ldm r8, {r4, r6, r8}
+ ldr r7, [sp, #956]
+ adds r1, r0, r4
+ ldr r0, [sp, #136] @ 4-byte Reload
+ str r1, [sp, #132] @ 4-byte Spill
+ mul r2, r1, r0
+ ldr r0, [sp, #1012]
+ str r2, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1008]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1004]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1000]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r4, r5, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #128] @ 4-byte Reload
+ adcs r6, r11, r6
+ str r6, [sp, #80] @ 4-byte Spill
+ ldr r6, [sp, #124] @ 4-byte Reload
+ adcs r6, r6, r8
+ str r6, [sp, #76] @ 4-byte Spill
+ ldr r6, [sp, #120] @ 4-byte Reload
+ adcs r6, r6, r7
+ str r6, [sp, #72] @ 4-byte Spill
+ ldr r6, [sp, #116] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ add r0, sp, #872
+ bl .LmulPv544x32(PLT)
+ ldr r1, [sp, #940]
+ add r11, sp, #880
+ ldr r5, [sp, #900]
+ ldr r4, [sp, #896]
+ ldr r9, [sp, #872]
+ ldr r10, [sp, #876]
+ add r0, sp, #800
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #936]
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #932]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #928]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #924]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #920]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #916]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #912]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #908]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #904]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldm r11, {r6, r7, r8, r11}
+ ldr r1, [sp, #144] @ 4-byte Reload
+ ldr r2, [r1, #48]
+ ldr r1, [sp, #140] @ 4-byte Reload
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #132] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ add lr, sp, #804
+ adds r0, r0, r9
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #828
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #868]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #864]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #860]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #856]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #852]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #848]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r7, [sp, #800]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #132] @ 4-byte Reload
+ ldr r6, [sp, #80] @ 4-byte Reload
+ adds r7, r11, r7
+ adcs r0, r6, r0
+ str r7, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #728
+ bl .LmulPv544x32(PLT)
+ ldr r1, [sp, #796]
+ add r9, sp, #732
+ ldr r5, [sp, #756]
+ ldr r11, [sp, #752]
+ ldr r8, [sp, #748]
+ ldr r10, [sp, #728]
+ add r0, sp, #656
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #792]
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #788]
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #784]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #780]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #776]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #772]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #768]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #764]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #760]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldm r9, {r4, r6, r7, r9}
+ ldr r1, [sp, #144] @ 4-byte Reload
+ ldr r2, [r1, #52]
+ ldr r1, [sp, #140] @ 4-byte Reload
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ add lr, sp, #660
+ adds r0, r0, r10
+ add r10, sp, #684
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #724]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #720]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #716]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #712]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #708]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #704]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r7, [sp, #656]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #84] @ 4-byte Reload
+ ldr r6, [sp, #80] @ 4-byte Reload
+ adds r7, r11, r7
+ adcs r0, r6, r0
+ str r7, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #584
+ bl .LmulPv544x32(PLT)
+ ldr r1, [sp, #652]
+ add r9, sp, #588
+ ldr r5, [sp, #612]
+ ldr r11, [sp, #608]
+ ldr r8, [sp, #604]
+ ldr r10, [sp, #584]
+ add r0, sp, #512
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #648]
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #644]
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #640]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #636]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #632]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #628]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #624]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #620]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #616]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldm r9, {r4, r6, r7, r9}
+ ldr r1, [sp, #144] @ 4-byte Reload
+ ldr r2, [r1, #56]
+ ldr r1, [sp, #140] @ 4-byte Reload
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ add lr, sp, #516
+ adds r0, r0, r10
+ add r10, sp, #540
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #580]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #576]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #572]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #568]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #564]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #560]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r7, [sp, #512]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #84] @ 4-byte Reload
+ ldr r6, [sp, #80] @ 4-byte Reload
+ adds r7, r11, r7
+ adcs r0, r6, r0
+ str r7, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #440
+ bl .LmulPv544x32(PLT)
+ ldr r1, [sp, #508]
+ add r9, sp, #444
+ ldr r5, [sp, #468]
+ ldr r11, [sp, #464]
+ ldr r8, [sp, #460]
+ ldr r10, [sp, #440]
+ add r0, sp, #368
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #504]
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #500]
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #496]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #492]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #488]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #484]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #480]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #476]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #472]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldm r9, {r4, r6, r7, r9}
+ ldr r1, [sp, #144] @ 4-byte Reload
+ ldr r2, [r1, #60]
+ ldr r1, [sp, #140] @ 4-byte Reload
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ add lr, sp, #372
+ adds r0, r0, r10
+ add r10, sp, #396
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #436]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #432]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #428]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #424]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #420]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #416]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r7, [sp, #368]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #84] @ 4-byte Reload
+ ldr r6, [sp, #80] @ 4-byte Reload
+ adds r7, r11, r7
+ adcs r0, r6, r0
+ str r7, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #296
+ bl .LmulPv544x32(PLT)
+ ldr r1, [sp, #364]
+ add r11, sp, #312
+ add r7, sp, #300
+ ldr r9, [sp, #324]
+ add r0, sp, #224
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #360]
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #356]
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #352]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #348]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #344]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #340]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #336]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #332]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #328]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldm r11, {r4, r10, r11}
+ ldr r8, [sp, #296]
+ ldm r7, {r5, r6, r7}
+ ldr r1, [sp, #144] @ 4-byte Reload
+ ldr r2, [r1, #64]
+ ldr r1, [sp, #140] @ 4-byte Reload
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #80] @ 4-byte Reload
+ ldr r2, [sp, #16] @ 4-byte Reload
+ add lr, sp, #240
+ adds r0, r0, r8
+ ldr r8, [sp, #232]
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #268]
+ adcs r1, r1, r6
+ str r1, [sp, #144] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r1, r1, r7
+ ldr r7, [sp, #236]
+ str r1, [sp, #140] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r1, r1, r4
+ ldr r4, [sp, #224]
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r1, r1, r10
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r11
+ ldr r11, [sp, #228]
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r9
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #132] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #132] @ 4-byte Spill
+ ldr r1, [sp, #128] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #128] @ 4-byte Spill
+ ldr r1, [sp, #124] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #124] @ 4-byte Spill
+ ldr r1, [sp, #120] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #120] @ 4-byte Spill
+ ldr r1, [sp, #116] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #116] @ 4-byte Spill
+ ldr r1, [sp, #112] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r1, [sp, #112] @ 4-byte Spill
+ ldr r1, [sp, #108] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r1, [sp, #108] @ 4-byte Spill
+ ldr r1, [sp, #104] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r1, [sp, #104] @ 4-byte Spill
+ ldr r1, [sp, #100] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [sp, #92] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #92] @ 4-byte Spill
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adc r1, r1, #0
+ adds r9, r0, r4
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r4, [sp, #264]
+ str r1, [sp, #88] @ 4-byte Spill
+ mul r1, r9, r0
+ ldr r0, [sp, #292]
+ str r1, [sp, #68] @ 4-byte Spill
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #288]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #284]
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #280]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #276]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #272]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r10, [sp, #144] @ 4-byte Reload
+ ldr r6, [sp, #140] @ 4-byte Reload
+ adcs r11, r10, r11
+ adcs r10, r6, r8
+ ldr r6, [sp, #84] @ 4-byte Reload
+ adcs r7, r6, r7
+ ldr r6, [sp, #80] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #148] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r8, r0, r1
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #136] @ 4-byte Reload
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #144] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ add r0, sp, #152
+ bl .LmulPv544x32(PLT)
+ add r3, sp, #152
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r9, r0
+ adcs r4, r11, r1
+ ldr r0, [sp, #168]
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r6, r10, r2
+ str r4, [sp, #52] @ 4-byte Spill
+ adcs r9, r7, r3
+ mov r3, r5
+ str r6, [sp, #60] @ 4-byte Spill
+ str r9, [sp, #68] @ 4-byte Spill
+ adcs lr, r1, r0
+ ldr r0, [sp, #172]
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str lr, [sp, #72] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #176]
+ adcs r0, r1, r0
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #180]
+ adcs r0, r1, r0
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #184]
+ adcs r0, r1, r0
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #188]
+ adcs r0, r1, r0
+ ldr r1, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #192]
+ adcs r11, r1, r0
+ ldr r0, [sp, #196]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r11, [sp, #76] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #200]
+ adcs r0, r8, r0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #204]
+ adcs r0, r1, r0
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #208]
+ adcs r0, r1, r0
+ ldr r1, [sp, #136] @ 4-byte Reload
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #212]
+ adcs r0, r1, r0
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #216]
+ adcs r0, r1, r0
+ ldr r1, [sp, #144] @ 4-byte Reload
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #220]
+ adcs r0, r1, r0
+ str r0, [sp, #144] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldm r3, {r1, r2, r7}
+ ldr r0, [r3, #64]
+ ldr r5, [r3, #12]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r3, #36]
+ subs r12, r4, r1
+ ldr r1, [r3, #40]
+ sbcs r4, r6, r2
+ ldr r2, [sp, #100] @ 4-byte Reload
+ sbcs r6, r9, r7
+ ldr r7, [r3, #32]
+ ldr r9, [r3, #28]
+ sbcs r10, lr, r5
+ ldr r5, [r3, #16]
+ ldr lr, [r3, #24]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r3, #44]
+ sbcs r2, r2, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [r3, #48]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [r3, #52]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [r3, #56]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [r3, #60]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [r3, #20]
+ ldr r3, [sp, #104] @ 4-byte Reload
+ sbcs r3, r3, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ sbcs lr, r0, lr
+ ldr r0, [sp, #112] @ 4-byte Reload
+ sbcs r5, r0, r9
+ ldr r0, [sp, #116] @ 4-byte Reload
+ sbcs r8, r0, r7
+ ldr r0, [sp, #44] @ 4-byte Reload
+ ldr r7, [sp, #60] @ 4-byte Reload
+ sbcs r9, r11, r0
+ ldr r0, [sp, #120] @ 4-byte Reload
+ sbcs r11, r0, r1
+ ldr r0, [sp, #124] @ 4-byte Reload
+ ldr r1, [sp, #56] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #140] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #144] @ 4-byte Reload
+ sbcs r0, r0, r1
+ str r0, [sp, #148] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ sbc r0, r0, #0
+ ands r1, r0, #1
+ ldr r0, [sp, #52] @ 4-byte Reload
+ movne r4, r7
+ movne r12, r0
+ ldr r0, [sp, #96] @ 4-byte Reload
+ str r12, [r0]
+ str r4, [r0, #4]
+ ldr r4, [sp, #68] @ 4-byte Reload
+ movne r6, r4
+ cmp r1, #0
+ str r6, [r0, #8]
+ ldr r6, [sp, #72] @ 4-byte Reload
+ movne r10, r6
+ ldr r6, [sp, #100] @ 4-byte Reload
+ str r10, [r0, #12]
+ movne r2, r6
+ str r2, [r0, #16]
+ ldr r2, [sp, #104] @ 4-byte Reload
+ movne r3, r2
+ ldr r2, [sp, #108] @ 4-byte Reload
+ cmp r1, #0
+ str r3, [r0, #20]
+ ldr r3, [sp, #56] @ 4-byte Reload
+ movne lr, r2
+ ldr r2, [sp, #112] @ 4-byte Reload
+ str lr, [r0, #24]
+ movne r5, r2
+ ldr r2, [sp, #116] @ 4-byte Reload
+ str r5, [r0, #28]
+ movne r8, r2
+ ldr r2, [sp, #76] @ 4-byte Reload
+ cmp r1, #0
+ str r8, [r0, #32]
+ movne r9, r2
+ ldr r2, [sp, #120] @ 4-byte Reload
+ str r9, [r0, #36]
+ movne r11, r2
+ ldr r2, [sp, #124] @ 4-byte Reload
+ str r11, [r0, #40]
+ movne r3, r2
+ ldr r2, [sp, #128] @ 4-byte Reload
+ cmp r1, #0
+ str r3, [r0, #44]
+ ldr r3, [sp, #80] @ 4-byte Reload
+ movne r3, r2
+ ldr r2, [sp, #132] @ 4-byte Reload
+ str r3, [r0, #48]
+ ldr r3, [sp, #84] @ 4-byte Reload
+ movne r3, r2
+ ldr r2, [sp, #136] @ 4-byte Reload
+ str r3, [r0, #52]
+ ldr r3, [sp, #88] @ 4-byte Reload
+ movne r3, r2
+ cmp r1, #0
+ ldr r1, [sp, #140] @ 4-byte Reload
+ ldr r2, [sp, #92] @ 4-byte Reload
+ str r3, [r0, #56]
+ movne r2, r1
+ ldr r1, [sp, #144] @ 4-byte Reload
+ str r2, [r0, #60]
+ ldr r2, [sp, #148] @ 4-byte Reload
+ movne r2, r1
+ str r2, [r0, #64]
+ add sp, sp, #556
+ add sp, sp, #2048
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end258:
+ .size mcl_fp_mont17L, .Lfunc_end258-mcl_fp_mont17L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montNF17L
+ .align 2
+ .type mcl_fp_montNF17L,%function
+mcl_fp_montNF17L: @ @mcl_fp_montNF17L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #548
+ sub sp, sp, #548
+ .pad #2048
+ sub sp, sp, #2048
+ add r12, sp, #132
+ add r6, sp, #2048
+ mov r4, r3
+ stm r12, {r1, r2, r3}
+ str r0, [sp, #92] @ 4-byte Spill
+ add r0, r6, #472
+ ldr r5, [r3, #-4]
+ ldr r2, [r2]
+ str r5, [sp, #128] @ 4-byte Spill
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #2520]
+ ldr r1, [sp, #2524]
+ str r0, [sp, #72] @ 4-byte Spill
+ mul r2, r0, r5
+ ldr r0, [sp, #2588]
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [sp, #2528]
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #2584]
+ str r1, [sp, #96] @ 4-byte Spill
+ ldr r1, [sp, #2532]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #2580]
+ str r1, [sp, #88] @ 4-byte Spill
+ mov r1, r4
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #2576]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #2572]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #2568]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #2564]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #2560]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #2556]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #2552]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #2548]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #2544]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #2540]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2536]
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, sp, #2448
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #2516]
+ add r11, sp, #2448
+ ldr r9, [sp, #2476]
+ ldr r4, [sp, #2472]
+ ldr r7, [sp, #2468]
+ ldr r6, [sp, #2464]
+ add lr, sp, #2048
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #2512]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2508]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2504]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #2500]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #2496]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #2492]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #2488]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #2484]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #2480]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r8, r10, r11}
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r5, [sp, #2460]
+ ldr r2, [r0, #4]
+ add r0, lr, #328
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adds r0, r8, r0
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r10, r0
+ add r10, sp, #2416
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r4, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adc r0, r1, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #2444]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #2440]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #2436]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #2432]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #2428]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr r7, [sp, #2376]
+ ldr r6, [sp, #100] @ 4-byte Reload
+ ldr r0, [sp, #2380]
+ ldr r1, [sp, #2384]
+ ldr r2, [sp, #2388]
+ ldr r3, [sp, #2392]
+ ldr r12, [sp, #2396]
+ ldr lr, [sp, #2400]
+ ldr r4, [sp, #2404]
+ ldr r5, [sp, #2408]
+ ldr r11, [sp, #2412]
+ adds r7, r6, r7
+ ldr r6, [sp, #96] @ 4-byte Reload
+ str r7, [sp, #24] @ 4-byte Spill
+ adcs r0, r6, r0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #2304
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #2372]
+ add r11, sp, #2304
+ ldr r4, [sp, #2332]
+ ldr r5, [sp, #2328]
+ ldr r6, [sp, #2324]
+ ldr r7, [sp, #2320]
+ add lr, sp, #2048
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2368]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2364]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2360]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #2356]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #2352]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #2348]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #2344]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #2340]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #2336]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r2, [r0, #8]
+ add r0, lr, #184
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adds r0, r0, r8
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #2272
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #2300]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2296]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2292]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2288]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #2284]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr r7, [sp, #2232]
+ ldr r6, [sp, #124] @ 4-byte Reload
+ ldr r0, [sp, #2236]
+ ldr r1, [sp, #2240]
+ ldr r2, [sp, #2244]
+ ldr r3, [sp, #2248]
+ ldr r12, [sp, #2252]
+ ldr lr, [sp, #2256]
+ ldr r4, [sp, #2260]
+ ldr r5, [sp, #2264]
+ ldr r11, [sp, #2268]
+ adds r7, r6, r7
+ ldr r6, [sp, #120] @ 4-byte Reload
+ str r7, [sp, #32] @ 4-byte Spill
+ adcs r0, r6, r0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #2160
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #2228]
+ add r11, sp, #2160
+ ldr r4, [sp, #2188]
+ ldr r5, [sp, #2184]
+ ldr r6, [sp, #2180]
+ ldr r7, [sp, #2176]
+ add lr, sp, #2048
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2224]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2220]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2216]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #2212]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #2208]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #2204]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #2200]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #2196]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #2192]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r2, [r0, #12]
+ add r0, lr, #40
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adds r0, r0, r8
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #2128
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #2156]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2152]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2148]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2144]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #2140]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr r7, [sp, #2088]
+ ldr r6, [sp, #124] @ 4-byte Reload
+ ldr r0, [sp, #2092]
+ ldr r1, [sp, #2096]
+ ldr r2, [sp, #2100]
+ ldr r3, [sp, #2104]
+ ldr r12, [sp, #2108]
+ ldr lr, [sp, #2112]
+ ldr r4, [sp, #2116]
+ ldr r5, [sp, #2120]
+ ldr r11, [sp, #2124]
+ adds r7, r6, r7
+ ldr r6, [sp, #120] @ 4-byte Reload
+ str r7, [sp, #32] @ 4-byte Spill
+ adcs r0, r6, r0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #2016
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #2084]
+ add r11, sp, #2016
+ ldr r4, [sp, #2044]
+ ldr r5, [sp, #2040]
+ ldr r6, [sp, #2036]
+ ldr r7, [sp, #2032]
+ add lr, sp, #1024
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2080]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2076]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2072]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #2068]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #2064]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #2060]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #2056]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #2052]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #2048]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r2, [r0, #16]
+ add r0, lr, #920
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adds r0, r0, r8
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #1984
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #2012]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2008]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2004]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2000]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1996]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr r7, [sp, #1944]
+ ldr r6, [sp, #124] @ 4-byte Reload
+ ldr r0, [sp, #1948]
+ ldr r1, [sp, #1952]
+ ldr r2, [sp, #1956]
+ ldr r3, [sp, #1960]
+ ldr r12, [sp, #1964]
+ ldr lr, [sp, #1968]
+ ldr r4, [sp, #1972]
+ ldr r5, [sp, #1976]
+ ldr r11, [sp, #1980]
+ adds r7, r6, r7
+ ldr r6, [sp, #120] @ 4-byte Reload
+ str r7, [sp, #32] @ 4-byte Spill
+ adcs r0, r6, r0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #1872
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1940]
+ add r11, sp, #1872
+ ldr r4, [sp, #1900]
+ ldr r5, [sp, #1896]
+ ldr r6, [sp, #1892]
+ ldr r7, [sp, #1888]
+ add lr, sp, #1024
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1936]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1932]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1928]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1924]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1920]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1916]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1912]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1908]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1904]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r2, [r0, #20]
+ add r0, lr, #776
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adds r0, r0, r8
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #1840
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1868]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1864]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1860]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1856]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1852]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr r7, [sp, #1800]
+ ldr r6, [sp, #124] @ 4-byte Reload
+ ldr r0, [sp, #1804]
+ ldr r1, [sp, #1808]
+ ldr r2, [sp, #1812]
+ ldr r3, [sp, #1816]
+ ldr r12, [sp, #1820]
+ ldr lr, [sp, #1824]
+ ldr r4, [sp, #1828]
+ ldr r5, [sp, #1832]
+ ldr r11, [sp, #1836]
+ adds r7, r6, r7
+ ldr r6, [sp, #120] @ 4-byte Reload
+ str r7, [sp, #32] @ 4-byte Spill
+ adcs r0, r6, r0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #1728
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1796]
+ add r11, sp, #1728
+ ldr r4, [sp, #1756]
+ ldr r5, [sp, #1752]
+ ldr r6, [sp, #1748]
+ ldr r7, [sp, #1744]
+ add lr, sp, #1024
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1792]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1788]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1784]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1780]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1776]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1772]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1768]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1764]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1760]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r2, [r0, #24]
+ add r0, lr, #632
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adds r0, r0, r8
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #1696
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1724]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1720]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1716]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1712]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1708]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr r7, [sp, #1656]
+ ldr r6, [sp, #124] @ 4-byte Reload
+ ldr r0, [sp, #1660]
+ ldr r1, [sp, #1664]
+ ldr r2, [sp, #1668]
+ ldr r3, [sp, #1672]
+ ldr r12, [sp, #1676]
+ ldr lr, [sp, #1680]
+ ldr r4, [sp, #1684]
+ ldr r5, [sp, #1688]
+ ldr r11, [sp, #1692]
+ adds r7, r6, r7
+ ldr r6, [sp, #120] @ 4-byte Reload
+ str r7, [sp, #32] @ 4-byte Spill
+ adcs r0, r6, r0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #1584
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1652]
+ add r11, sp, #1584
+ ldr r4, [sp, #1612]
+ ldr r5, [sp, #1608]
+ ldr r6, [sp, #1604]
+ ldr r7, [sp, #1600]
+ add lr, sp, #1024
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1648]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1644]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1640]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1636]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1632]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1628]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1624]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1620]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1616]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r2, [r0, #28]
+ add r0, lr, #488
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adds r0, r0, r8
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #1552
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1580]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1576]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1572]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1568]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1564]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr r7, [sp, #1512]
+ ldr r6, [sp, #124] @ 4-byte Reload
+ ldr r0, [sp, #1516]
+ ldr r1, [sp, #1520]
+ ldr r2, [sp, #1524]
+ ldr r3, [sp, #1528]
+ ldr r12, [sp, #1532]
+ ldr lr, [sp, #1536]
+ ldr r4, [sp, #1540]
+ ldr r5, [sp, #1544]
+ ldr r11, [sp, #1548]
+ adds r7, r6, r7
+ ldr r6, [sp, #120] @ 4-byte Reload
+ str r7, [sp, #32] @ 4-byte Spill
+ adcs r0, r6, r0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #1440
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1508]
+ add r11, sp, #1440
+ ldr r4, [sp, #1468]
+ ldr r5, [sp, #1464]
+ ldr r6, [sp, #1460]
+ ldr r7, [sp, #1456]
+ add lr, sp, #1024
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1504]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1500]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1496]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1492]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1488]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1484]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1480]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1476]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1472]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r2, [r0, #32]
+ add r0, lr, #344
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adds r0, r0, r8
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #1408
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1436]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1432]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1428]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1424]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1420]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr r7, [sp, #1368]
+ ldr r6, [sp, #124] @ 4-byte Reload
+ ldr r0, [sp, #1372]
+ ldr r1, [sp, #1376]
+ ldr r2, [sp, #1380]
+ ldr r3, [sp, #1384]
+ ldr r12, [sp, #1388]
+ ldr lr, [sp, #1392]
+ ldr r4, [sp, #1396]
+ ldr r5, [sp, #1400]
+ ldr r11, [sp, #1404]
+ adds r7, r6, r7
+ ldr r6, [sp, #120] @ 4-byte Reload
+ str r7, [sp, #32] @ 4-byte Spill
+ adcs r0, r6, r0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #1296
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1364]
+ add r11, sp, #1296
+ ldr r4, [sp, #1324]
+ ldr r5, [sp, #1320]
+ ldr r6, [sp, #1316]
+ ldr r7, [sp, #1312]
+ add lr, sp, #1024
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1360]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1356]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1352]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1348]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1344]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1340]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1336]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1332]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1328]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r2, [r0, #36]
+ add r0, lr, #200
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adds r0, r0, r8
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #1264
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1292]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1288]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1284]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1280]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1276]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr r7, [sp, #1224]
+ ldr r6, [sp, #124] @ 4-byte Reload
+ ldr r0, [sp, #1228]
+ ldr r1, [sp, #1232]
+ ldr r2, [sp, #1236]
+ ldr r3, [sp, #1240]
+ ldr r12, [sp, #1244]
+ ldr lr, [sp, #1248]
+ ldr r4, [sp, #1252]
+ ldr r5, [sp, #1256]
+ ldr r11, [sp, #1260]
+ adds r7, r6, r7
+ ldr r6, [sp, #120] @ 4-byte Reload
+ str r7, [sp, #32] @ 4-byte Spill
+ adcs r0, r6, r0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ mul r2, r7, r5
+ adcs r0, r0, r11
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ add r0, sp, #1152
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1220]
+ add r11, sp, #1152
+ ldr r4, [sp, #1176]
+ ldr r6, [sp, #1172]
+ ldr r7, [sp, #1168]
+ add lr, sp, #1024
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1216]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1212]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1208]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1204]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1200]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1196]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1192]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1188]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1184]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1180]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r2, [r0, #40]
+ add r0, lr, #56
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adds r0, r0, r8
+ ldr r8, [sp, #1092]
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r2, r0, r9
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #1120
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #1084]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1088]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1080]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #56] @ 4-byte Spill
+ adds r0, r2, r4
+ mul r1, r0, r5
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #1148]
+ str r1, [sp, #48] @ 4-byte Spill
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1144]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1140]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1136]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r4, r5, r9, r10}
+ ldr r11, [sp, #120] @ 4-byte Reload
+ ldr r0, [sp, #1096]
+ ldr r1, [sp, #1100]
+ ldr r2, [sp, #1104]
+ ldr r3, [sp, #1108]
+ ldr r12, [sp, #1112]
+ ldr lr, [sp, #1116]
+ adcs r7, r11, r7
+ str r7, [sp, #120] @ 4-byte Spill
+ ldr r7, [sp, #116] @ 4-byte Reload
+ adcs r6, r7, r6
+ str r6, [sp, #116] @ 4-byte Spill
+ ldr r6, [sp, #112] @ 4-byte Reload
+ adcs r6, r6, r8
+ str r6, [sp, #112] @ 4-byte Spill
+ ldr r6, [sp, #108] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ add r0, sp, #1008
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1076]
+ add r11, sp, #1008
+ ldr r4, [sp, #1036]
+ ldr r5, [sp, #1032]
+ ldr r6, [sp, #1028]
+ ldr r7, [sp, #1024]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1072]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1068]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1064]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1060]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1056]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1052]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1048]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1044]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1040]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r2, [r0, #44]
+ add r0, sp, #936
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #124] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #952
+ adds r0, r0, r8
+ add r8, sp, #936
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r2, r0, r9
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #976
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #48] @ 4-byte Spill
+ ldm r8, {r4, r6, r7, r8}
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adds r1, r2, r4
+ mul r2, r1, r0
+ ldr r0, [sp, #1004]
+ str r1, [sp, #124] @ 4-byte Spill
+ str r2, [sp, #24] @ 4-byte Spill
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1000]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #996]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #992]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r10, {r4, r5, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #120] @ 4-byte Reload
+ adcs r6, r11, r6
+ str r6, [sp, #76] @ 4-byte Spill
+ ldr r6, [sp, #116] @ 4-byte Reload
+ adcs r6, r6, r7
+ str r6, [sp, #72] @ 4-byte Spill
+ ldr r6, [sp, #112] @ 4-byte Reload
+ adcs r6, r6, r8
+ str r6, [sp, #68] @ 4-byte Spill
+ ldr r6, [sp, #108] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ add r0, sp, #864
+ bl .LmulPv544x32(PLT)
+ ldr r1, [sp, #932]
+ ldr r5, [sp, #892]
+ ldr r7, [sp, #888]
+ ldr r4, [sp, #884]
+ ldr r9, [sp, #880]
+ ldr r8, [sp, #864]
+ ldr r11, [sp, #868]
+ ldr r10, [sp, #872]
+ ldr r6, [sp, #876]
+ add r0, sp, #792
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #928]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #924]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #920]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #916]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #912]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #908]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #904]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #900]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #896]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [sp, #136] @ 4-byte Reload
+ ldr r2, [r1, #48]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #124] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #796
+ adds r0, r0, r8
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #820
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #860]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #856]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #852]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #848]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #844]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #840]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r7, [sp, #792]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #124] @ 4-byte Reload
+ ldr r6, [sp, #76] @ 4-byte Reload
+ adds r7, r11, r7
+ adcs r0, r6, r0
+ str r7, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #720
+ bl .LmulPv544x32(PLT)
+ ldr r1, [sp, #788]
+ add r11, sp, #728
+ ldr r5, [sp, #748]
+ ldr r9, [sp, #744]
+ ldr r10, [sp, #720]
+ ldr r6, [sp, #724]
+ add r0, sp, #648
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #784]
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #780]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #776]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #772]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #768]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #764]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #760]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #756]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #752]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r7, r8, r11}
+ ldr r1, [sp, #136] @ 4-byte Reload
+ ldr r2, [r1, #52]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #652
+ adds r0, r0, r10
+ add r10, sp, #676
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #716]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #712]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #708]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #704]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #700]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #696]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r7, [sp, #648]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #80] @ 4-byte Reload
+ ldr r6, [sp, #76] @ 4-byte Reload
+ adds r7, r11, r7
+ adcs r0, r6, r0
+ str r7, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #576
+ bl .LmulPv544x32(PLT)
+ ldr r1, [sp, #644]
+ add r11, sp, #584
+ ldr r5, [sp, #604]
+ ldr r9, [sp, #600]
+ ldr r10, [sp, #576]
+ ldr r6, [sp, #580]
+ add r0, sp, #504
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #640]
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #636]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #632]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #628]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #624]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #620]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #616]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #612]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #608]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r7, r8, r11}
+ ldr r1, [sp, #136] @ 4-byte Reload
+ ldr r2, [r1, #56]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #508
+ adds r0, r0, r10
+ add r10, sp, #532
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #572]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #568]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #564]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #560]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #556]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #552]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r7, [sp, #504]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #80] @ 4-byte Reload
+ ldr r6, [sp, #76] @ 4-byte Reload
+ adds r7, r11, r7
+ adcs r0, r6, r0
+ str r7, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #432
+ bl .LmulPv544x32(PLT)
+ ldr r1, [sp, #500]
+ add r11, sp, #440
+ ldr r5, [sp, #460]
+ ldr r9, [sp, #456]
+ ldr r10, [sp, #432]
+ ldr r6, [sp, #436]
+ add r0, sp, #360
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #496]
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #492]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #488]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #484]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #480]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #476]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #472]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #468]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #464]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r7, r8, r11}
+ ldr r1, [sp, #136] @ 4-byte Reload
+ ldr r2, [r1, #60]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #364
+ adds r0, r0, r10
+ add r10, sp, #388
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #428]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #424]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #420]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #416]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #412]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #408]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r7, [sp, #360]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #80] @ 4-byte Reload
+ ldr r6, [sp, #76] @ 4-byte Reload
+ adds r7, r11, r7
+ adcs r0, r6, r0
+ str r7, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #288
+ bl .LmulPv544x32(PLT)
+ ldr r1, [sp, #356]
+ add r8, sp, #288
+ ldr r9, [sp, #316]
+ ldr r10, [sp, #312]
+ ldr r11, [sp, #308]
+ ldr r6, [sp, #304]
+ add r0, sp, #216
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #352]
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #348]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #344]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #340]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #336]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #332]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #328]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #324]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #320]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldm r8, {r4, r5, r8}
+ ldr r1, [sp, #136] @ 4-byte Reload
+ ldr r7, [sp, #300]
+ ldr r2, [r1, #64]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #232
+ adds r0, r0, r4
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r2, r0, r5
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r9
+ add r9, sp, #216
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #84] @ 4-byte Spill
+ ldm r9, {r4, r8, r9}
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r7, [sp, #228]
+ ldr r5, [sp, #260]
+ adds r11, r2, r4
+ ldr r4, [sp, #256]
+ mul r1, r11, r0
+ ldr r0, [sp, #284]
+ str r1, [sp, #64] @ 4-byte Spill
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #280]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #276]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #272]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #268]
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #264]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r10, [sp, #136] @ 4-byte Reload
+ ldr r6, [sp, #132] @ 4-byte Reload
+ adcs r8, r10, r8
+ ldr r10, [sp, #140] @ 4-byte Reload
+ adcs r9, r6, r9
+ ldr r6, [sp, #80] @ 4-byte Reload
+ adcs r7, r6, r7
+ ldr r6, [sp, #76] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #64] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r6, r0, r1
+ ldr r0, [sp, #88] @ 4-byte Reload
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r10
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ add r0, sp, #144
+ bl .LmulPv544x32(PLT)
+ add r3, sp, #144
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r11, r0
+ adcs r4, r8, r1
+ ldr r0, [sp, #160]
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r8, r9, r2
+ str r4, [sp, #52] @ 4-byte Spill
+ adcs r9, r7, r3
+ mov r3, r10
+ str r8, [sp, #60] @ 4-byte Spill
+ str r9, [sp, #64] @ 4-byte Spill
+ adcs r5, r1, r0
+ ldr r0, [sp, #164]
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r5, [sp, #68] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #168]
+ adcs lr, r1, r0
+ ldr r0, [sp, #172]
+ ldr r1, [sp, #72] @ 4-byte Reload
+ str lr, [sp, #48] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #176]
+ adcs r0, r1, r0
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #180]
+ adcs r0, r1, r0
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #184]
+ adcs r0, r1, r0
+ ldr r1, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #188]
+ adcs r0, r1, r0
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #192]
+ adcs r0, r1, r0
+ ldr r1, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #196]
+ adcs r0, r1, r0
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #200]
+ adcs r0, r6, r0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #204]
+ adcs r0, r1, r0
+ ldr r1, [sp, #136] @ 4-byte Reload
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #208]
+ adcs r0, r1, r0
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #212]
+ adc r1, r1, r0
+ str r1, [sp, #88] @ 4-byte Spill
+ ldm r3, {r0, r2, r7}
+ ldr r6, [r3, #12]
+ ldr r11, [r3, #36]
+ ldr r10, [r3, #32]
+ subs r12, r4, r0
+ ldr r0, [r3, #64]
+ sbcs r4, r8, r2
+ ldr r2, [sp, #96] @ 4-byte Reload
+ sbcs r8, r9, r7
+ ldr r7, [r3, #20]
+ sbcs r9, r5, r6
+ ldr r6, [r3, #24]
+ ldr r5, [r3, #28]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r3, #40]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [r3, #44]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r3, #48]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r3, #52]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [r3, #56]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [r3, #60]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r3, #16]
+ sbcs r2, r2, r0
+ ldr r0, [sp, #100] @ 4-byte Reload
+ sbcs r3, lr, r7
+ ldr r7, [sp, #56] @ 4-byte Reload
+ sbcs lr, r0, r6
+ ldr r0, [sp, #104] @ 4-byte Reload
+ sbcs r5, r0, r5
+ ldr r0, [sp, #108] @ 4-byte Reload
+ sbcs r6, r0, r10
+ ldr r0, [sp, #112] @ 4-byte Reload
+ sbcs r11, r0, r11
+ ldr r0, [sp, #116] @ 4-byte Reload
+ sbcs r0, r0, r7
+ ldr r7, [sp, #72] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ sbcs r0, r0, r7
+ ldr r7, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ sbcs r0, r0, r7
+ ldr r7, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ sbcs r0, r0, r7
+ ldr r7, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ sbcs r0, r0, r7
+ ldr r7, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ sbcs r0, r0, r7
+ ldr r7, [sp, #60] @ 4-byte Reload
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ sbc r10, r1, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ asr r1, r10, #31
+ cmp r1, #0
+ movlt r4, r7
+ movlt r12, r0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ str r12, [r0]
+ str r4, [r0, #4]
+ ldr r4, [sp, #64] @ 4-byte Reload
+ movlt r8, r4
+ ldr r4, [sp, #68] @ 4-byte Reload
+ cmp r1, #0
+ str r8, [r0, #8]
+ movlt r9, r4
+ ldr r4, [sp, #96] @ 4-byte Reload
+ str r9, [r0, #12]
+ movlt r2, r4
+ str r2, [r0, #16]
+ ldr r2, [sp, #48] @ 4-byte Reload
+ movlt r3, r2
+ ldr r2, [sp, #100] @ 4-byte Reload
+ cmp r1, #0
+ str r3, [r0, #20]
+ ldr r3, [sp, #56] @ 4-byte Reload
+ movlt lr, r2
+ ldr r2, [sp, #104] @ 4-byte Reload
+ str lr, [r0, #24]
+ movlt r5, r2
+ ldr r2, [sp, #108] @ 4-byte Reload
+ str r5, [r0, #28]
+ movlt r6, r2
+ ldr r2, [sp, #112] @ 4-byte Reload
+ cmp r1, #0
+ str r6, [r0, #32]
+ movlt r11, r2
+ ldr r2, [sp, #116] @ 4-byte Reload
+ str r11, [r0, #36]
+ movlt r3, r2
+ ldr r2, [sp, #120] @ 4-byte Reload
+ str r3, [r0, #40]
+ ldr r3, [sp, #72] @ 4-byte Reload
+ movlt r3, r2
+ ldr r2, [sp, #124] @ 4-byte Reload
+ cmp r1, #0
+ str r3, [r0, #44]
+ ldr r3, [sp, #76] @ 4-byte Reload
+ movlt r3, r2
+ ldr r2, [sp, #128] @ 4-byte Reload
+ str r3, [r0, #48]
+ ldr r3, [sp, #80] @ 4-byte Reload
+ movlt r3, r2
+ ldr r2, [sp, #132] @ 4-byte Reload
+ str r3, [r0, #52]
+ ldr r3, [sp, #84] @ 4-byte Reload
+ movlt r3, r2
+ cmp r1, #0
+ ldr r1, [sp, #136] @ 4-byte Reload
+ ldr r2, [sp, #140] @ 4-byte Reload
+ str r3, [r0, #56]
+ movlt r2, r1
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r2, [r0, #60]
+ movlt r10, r1
+ str r10, [r0, #64]
+ add sp, sp, #548
+ add sp, sp, #2048
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end259:
+ .size mcl_fp_montNF17L, .Lfunc_end259-mcl_fp_montNF17L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montRed17L
+ .align 2
+ .type mcl_fp_montRed17L,%function
+mcl_fp_montRed17L: @ @mcl_fp_montRed17L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #444
+ sub sp, sp, #444
+ .pad #1024
+ sub sp, sp, #1024
+ mov r3, r2
+ str r0, [sp, #212] @ 4-byte Spill
+ ldr r2, [r1, #4]
+ ldr r7, [r1]
+ ldr r0, [r3]
+ str r3, [sp, #236] @ 4-byte Spill
+ str r2, [sp, #116] @ 4-byte Spill
+ ldr r2, [r1, #8]
+ str r0, [sp, #200] @ 4-byte Spill
+ ldr r0, [r3, #4]
+ str r2, [sp, #112] @ 4-byte Spill
+ ldr r2, [r1, #12]
+ str r0, [sp, #196] @ 4-byte Spill
+ ldr r0, [r3, #8]
+ str r2, [sp, #108] @ 4-byte Spill
+ str r0, [sp, #192] @ 4-byte Spill
+ ldr r0, [r3, #12]
+ str r0, [sp, #176] @ 4-byte Spill
+ ldr r0, [r3, #16]
+ str r0, [sp, #180] @ 4-byte Spill
+ ldr r0, [r3, #20]
+ str r0, [sp, #184] @ 4-byte Spill
+ ldr r0, [r3, #24]
+ str r0, [sp, #188] @ 4-byte Spill
+ ldr r0, [r3, #-4]
+ str r0, [sp, #232] @ 4-byte Spill
+ mul r2, r7, r0
+ ldr r0, [r3, #60]
+ str r0, [sp, #204] @ 4-byte Spill
+ ldr r0, [r3, #64]
+ str r0, [sp, #208] @ 4-byte Spill
+ ldr r0, [r3, #28]
+ str r0, [sp, #148] @ 4-byte Spill
+ ldr r0, [r3, #36]
+ str r0, [sp, #152] @ 4-byte Spill
+ ldr r0, [r3, #40]
+ str r0, [sp, #156] @ 4-byte Spill
+ ldr r0, [r3, #44]
+ str r0, [sp, #160] @ 4-byte Spill
+ ldr r0, [r3, #48]
+ str r0, [sp, #164] @ 4-byte Spill
+ ldr r0, [r3, #52]
+ str r0, [sp, #168] @ 4-byte Spill
+ ldr r0, [r3, #56]
+ str r0, [sp, #172] @ 4-byte Spill
+ ldr r0, [r3, #32]
+ str r0, [sp, #144] @ 4-byte Spill
+ ldr r0, [r1, #128]
+ str r0, [sp, #224] @ 4-byte Spill
+ ldr r0, [r1, #132]
+ str r0, [sp, #228] @ 4-byte Spill
+ ldr r0, [r1, #96]
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [r1, #104]
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [r1, #108]
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [r1, #112]
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [r1, #116]
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [r1, #120]
+ str r0, [sp, #216] @ 4-byte Spill
+ ldr r0, [r1, #124]
+ str r0, [sp, #220] @ 4-byte Spill
+ ldr r0, [r1, #100]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [r1, #64]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r1, #68]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [r1, #72]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [r1, #76]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [r1, #80]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [r1, #84]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [r1, #88]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [r1, #92]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [r1, #32]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [r1, #36]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [r1, #40]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [r1, #44]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r1, #48]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r1, #52]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r1, #56]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r1, #60]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r1, #28]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [r1, #24]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [r1, #20]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r1, #16]
+ mov r1, r3
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #1392
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1460]
+ ldr r11, [sp, #1392]
+ ldr r1, [sp, #1400]
+ ldr r2, [sp, #1404]
+ ldr r3, [sp, #1408]
+ ldr r12, [sp, #1412]
+ ldr lr, [sp, #1416]
+ ldr r4, [sp, #1420]
+ ldr r5, [sp, #1424]
+ ldr r6, [sp, #1428]
+ ldr r8, [sp, #1432]
+ ldr r9, [sp, #1436]
+ ldr r10, [sp, #1440]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1456]
+ adds r7, r7, r11
+ ldr r7, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1452]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1448]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1444]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #1396]
+ adcs r7, r7, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #236] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ add r9, sp, #1024
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #140] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #216] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #216] @ 4-byte Spill
+ ldr r0, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #220] @ 4-byte Spill
+ ldr r0, [sp, #224] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #224] @ 4-byte Spill
+ ldr r0, [sp, #228] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #228] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #232] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, r9, #296
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1388]
+ ldr r9, [sp, #1320]
+ ldr r1, [sp, #1328]
+ ldr r2, [sp, #1332]
+ ldr r3, [sp, #1336]
+ ldr r12, [sp, #1340]
+ ldr r10, [sp, #1344]
+ ldr lr, [sp, #1348]
+ ldr r4, [sp, #1352]
+ ldr r5, [sp, #1356]
+ ldr r8, [sp, #1360]
+ ldr r11, [sp, #1364]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1384]
+ adds r7, r7, r9
+ ldr r7, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1380]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1376]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1372]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #1368]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #1324]
+ adcs r7, r7, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #4] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #232] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ mul r2, r7, r5
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r6
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #140] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #216] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #216] @ 4-byte Spill
+ ldr r0, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #220] @ 4-byte Spill
+ ldr r0, [sp, #224] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #224] @ 4-byte Spill
+ ldr r0, [sp, #228] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #228] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ add r0, sp, #1248
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1316]
+ add r10, sp, #1280
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1312]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1308]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1304]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1300]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1296]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r10, {r4, r6, r9, r10}
+ ldr r8, [sp, #1248]
+ ldr r0, [sp, #1252]
+ ldr r1, [sp, #1256]
+ ldr r2, [sp, #1260]
+ ldr r3, [sp, #1264]
+ ldr r12, [sp, #1268]
+ ldr lr, [sp, #1272]
+ ldr r11, [sp, #1276]
+ adds r7, r7, r8
+ ldr r7, [sp, #116] @ 4-byte Reload
+ adcs r7, r7, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r7, r5
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ add r9, sp, #1024
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #236] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #140] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #216] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #216] @ 4-byte Spill
+ ldr r0, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #220] @ 4-byte Spill
+ ldr r0, [sp, #224] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #224] @ 4-byte Spill
+ ldr r0, [sp, #228] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #228] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, r9, #152
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1244]
+ ldr r9, [sp, #1176]
+ ldr r1, [sp, #1184]
+ ldr r2, [sp, #1188]
+ ldr r3, [sp, #1192]
+ ldr r12, [sp, #1196]
+ ldr lr, [sp, #1200]
+ ldr r4, [sp, #1204]
+ ldr r5, [sp, #1208]
+ ldr r6, [sp, #1212]
+ ldr r8, [sp, #1216]
+ ldr r10, [sp, #1220]
+ ldr r11, [sp, #1224]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1240]
+ adds r7, r7, r9
+ ldr r7, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1236]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1232]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1228]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1180]
+ adcs r7, r7, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ str r7, [sp, #12] @ 4-byte Spill
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #236] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #232] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ mul r2, r7, r6
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #140] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #216] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #216] @ 4-byte Spill
+ ldr r0, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #220] @ 4-byte Spill
+ ldr r0, [sp, #224] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #224] @ 4-byte Spill
+ ldr r0, [sp, #228] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #228] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, sp, #1104
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1172]
+ ldr r4, [sp, #1104]
+ ldr r9, [sp, #12] @ 4-byte Reload
+ ldr r1, [sp, #1112]
+ ldr r2, [sp, #1116]
+ ldr r3, [sp, #1120]
+ ldr r12, [sp, #1124]
+ ldr r10, [sp, #1128]
+ ldr r11, [sp, #1132]
+ ldr lr, [sp, #1136]
+ ldr r7, [sp, #1140]
+ ldr r8, [sp, #1144]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1168]
+ adds r4, r9, r4
+ ldr r4, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1164]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1160]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1156]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1152]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1148]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #1108]
+ adcs r4, r4, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r4, r6
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ mov r7, r4
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r8
+ add r8, sp, #1024
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #140] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #216] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #216] @ 4-byte Spill
+ ldr r0, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #220] @ 4-byte Spill
+ ldr r0, [sp, #224] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #224] @ 4-byte Spill
+ ldr r0, [sp, #228] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #228] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, r8, #8
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1100]
+ ldr r8, [sp, #1032]
+ ldr r1, [sp, #1040]
+ ldr r2, [sp, #1044]
+ ldr r3, [sp, #1048]
+ ldr r12, [sp, #1052]
+ ldr lr, [sp, #1056]
+ ldr r4, [sp, #1060]
+ ldr r5, [sp, #1064]
+ ldr r6, [sp, #1068]
+ ldr r9, [sp, #1072]
+ ldr r10, [sp, #1076]
+ ldr r11, [sp, #1080]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1096]
+ adds r7, r7, r8
+ ldr r7, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1092]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1088]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1084]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1036]
+ adcs r7, r7, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ str r7, [sp, #20] @ 4-byte Spill
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #232] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ mul r2, r7, r5
+ adcs r0, r0, r6
+ ldr r6, [sp, #236] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r6
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #140] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #216] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #216] @ 4-byte Spill
+ ldr r0, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #220] @ 4-byte Spill
+ ldr r0, [sp, #224] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #224] @ 4-byte Spill
+ ldr r0, [sp, #228] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #228] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ add r0, sp, #960
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1028]
+ add lr, sp, #984
+ add r12, sp, #964
+ ldr r8, [sp, #1000]
+ ldr r7, [sp, #996]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1024]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1020]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1016]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1012]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1008]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1004]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm lr, {r10, r11, lr}
+ ldr r4, [sp, #960]
+ ldm r12, {r0, r1, r2, r3, r12}
+ ldr r9, [sp, #20] @ 4-byte Reload
+ adds r4, r9, r4
+ ldr r4, [sp, #116] @ 4-byte Reload
+ adcs r4, r4, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r4, r5
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ mov r7, r4
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r6
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #140] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #216] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #216] @ 4-byte Spill
+ ldr r0, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #220] @ 4-byte Spill
+ ldr r0, [sp, #224] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #224] @ 4-byte Spill
+ ldr r0, [sp, #228] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #228] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ add r0, sp, #888
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #956]
+ add r11, sp, #916
+ add lr, sp, #892
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #952]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #948]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #944]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #940]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r11, {r4, r5, r6, r9, r10, r11}
+ ldr r8, [sp, #888]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r7, r7, r8
+ ldr r7, [sp, #116] @ 4-byte Reload
+ adcs r7, r7, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ str r7, [sp, #28] @ 4-byte Spill
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #232] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ mul r2, r7, r5
+ ldr r7, [sp, #236] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r7
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #140] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #216] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #216] @ 4-byte Spill
+ ldr r0, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #220] @ 4-byte Spill
+ ldr r0, [sp, #224] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #224] @ 4-byte Spill
+ ldr r0, [sp, #228] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #228] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ add r0, sp, #816
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #884]
+ add lr, sp, #840
+ add r12, sp, #820
+ ldr r8, [sp, #856]
+ ldr r6, [sp, #852]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #880]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #876]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #872]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #868]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #864]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #860]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm lr, {r10, r11, lr}
+ ldr r4, [sp, #816]
+ ldm r12, {r0, r1, r2, r3, r12}
+ ldr r9, [sp, #28] @ 4-byte Reload
+ adds r4, r9, r4
+ ldr r4, [sp, #116] @ 4-byte Reload
+ adcs r4, r4, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r4, r5
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r11
+ mov r11, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r7
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #140] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #216] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #216] @ 4-byte Spill
+ ldr r0, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #220] @ 4-byte Spill
+ ldr r0, [sp, #224] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #224] @ 4-byte Spill
+ ldr r0, [sp, #228] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #228] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ add r0, sp, #744
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #812]
+ add r10, sp, #768
+ add lr, sp, #744
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #808]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #804]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #800]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #796]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldm r10, {r4, r5, r6, r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r11, r0
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r11, r0, r1
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #232] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r11, r5
+ adcs r0, r0, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #236] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #140] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #216] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #216] @ 4-byte Spill
+ ldr r0, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #220] @ 4-byte Spill
+ ldr r0, [sp, #224] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #224] @ 4-byte Spill
+ ldr r0, [sp, #228] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #228] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ add r0, sp, #672
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #740]
+ add r9, sp, #704
+ add r12, sp, #676
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #736]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #732]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #728]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #724]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #720]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldm r9, {r6, r7, r8, r9}
+ ldr r4, [sp, #672]
+ ldr lr, [sp, #700]
+ ldr r10, [sp, #696]
+ ldm r12, {r0, r1, r2, r3, r12}
+ adds r4, r11, r4
+ ldr r4, [sp, #116] @ 4-byte Reload
+ adcs r11, r4, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r4, [sp, #236] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r5
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r4
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #140] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #216] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #216] @ 4-byte Spill
+ ldr r0, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #220] @ 4-byte Spill
+ ldr r0, [sp, #224] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #224] @ 4-byte Spill
+ ldr r0, [sp, #228] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #228] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ add r0, sp, #600
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #668]
+ add r10, sp, #624
+ add lr, sp, #600
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #664]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #660]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #656]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #652]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #648]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldm r10, {r5, r6, r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r11, r0
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r11, r0, r1
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #232] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ mul r2, r11, r9
+ adcs r0, r0, r10
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r4
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #140] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #216] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #216] @ 4-byte Spill
+ ldr r0, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #220] @ 4-byte Spill
+ ldr r0, [sp, #224] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #224] @ 4-byte Spill
+ ldr r0, [sp, #228] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #228] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ add r0, sp, #528
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #596]
+ add r8, sp, #560
+ add r12, sp, #532
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #592]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #588]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #584]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #580]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #576]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldm r8, {r5, r6, r7, r8}
+ ldr r4, [sp, #528]
+ ldr lr, [sp, #556]
+ ldr r10, [sp, #552]
+ ldm r12, {r0, r1, r2, r3, r12}
+ adds r4, r11, r4
+ ldr r4, [sp, #116] @ 4-byte Reload
+ adcs r11, r4, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ mov r4, r9
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #140] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #236] @ 4-byte Reload
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #216] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #216] @ 4-byte Spill
+ ldr r0, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #220] @ 4-byte Spill
+ ldr r0, [sp, #224] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #224] @ 4-byte Spill
+ ldr r0, [sp, #228] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #228] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ add r0, sp, #456
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #524]
+ add r10, sp, #480
+ add lr, sp, #456
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #520]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #516]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #512]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #508]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #504]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldm r10, {r5, r6, r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r11, r0
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r11, r0, r1
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #236] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #72] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #140] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #216] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r6
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #224] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #228] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ add r0, sp, #384
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #452]
+ add r10, sp, #412
+ add lr, sp, #388
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #448]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #444]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #440]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #436]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #432]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r10, {r5, r7, r8, r9, r10}
+ ldr r4, [sp, #384]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r4, r11, r4
+ ldr r4, [sp, #116] @ 4-byte Reload
+ adcs r4, r4, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r11, r0, r1
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r1, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #228] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #224] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #232] @ 4-byte Reload
+ str r0, [sp, #220] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ mul r2, r4, r5
+ adcs r0, r0, r7
+ str r0, [sp, #216] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r6
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ add r0, sp, #312
+ bl .LmulPv544x32(PLT)
+ add r6, sp, #312
+ add r10, sp, #356
+ add lr, sp, #328
+ ldm r6, {r0, r1, r3, r6}
+ adds r0, r4, r0
+ adcs r7, r11, r1
+ mul r0, r7, r5
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #380]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #232] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #376]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldm r10, {r4, r5, r6, r8, r10}
+ ldr r9, [sp, #352]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #228] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #224] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #216] @ 4-byte Reload
+ adcs r11, r0, r3
+ ldr r0, [sp, #140] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #224] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #220] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #216] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #228] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r10, r0, r10
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r8, r0, r1
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r6, r0, r1
+ ldr r0, [sp, #100] @ 4-byte Reload
+ ldr r1, [sp, #236] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ add r0, sp, #240
+ bl .LmulPv544x32(PLT)
+ add r3, sp, #240
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r7, r0
+ ldr r0, [sp, #232] @ 4-byte Reload
+ adcs r9, r0, r1
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r1, [sp, #72] @ 4-byte Reload
+ str r9, [sp, #100] @ 4-byte Spill
+ adcs r12, r0, r2
+ ldr r0, [sp, #68] @ 4-byte Reload
+ str r12, [sp, #104] @ 4-byte Spill
+ adcs lr, r0, r3
+ ldr r0, [sp, #256]
+ str lr, [sp, #108] @ 4-byte Spill
+ adcs r4, r1, r0
+ ldr r0, [sp, #260]
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str r4, [sp, #112] @ 4-byte Spill
+ adcs r5, r1, r0
+ ldr r0, [sp, #264]
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r5, [sp, #116] @ 4-byte Spill
+ adcs r11, r11, r0
+ ldr r0, [sp, #268]
+ str r11, [sp, #120] @ 4-byte Spill
+ adcs r7, r1, r0
+ ldr r0, [sp, #272]
+ ldr r1, [sp, #224] @ 4-byte Reload
+ str r7, [sp, #124] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #220] @ 4-byte Reload
+ str r0, [sp, #224] @ 4-byte Spill
+ ldr r0, [sp, #276]
+ adcs r0, r1, r0
+ ldr r1, [sp, #216] @ 4-byte Reload
+ str r0, [sp, #220] @ 4-byte Spill
+ ldr r0, [sp, #280]
+ adcs r0, r1, r0
+ ldr r1, [sp, #136] @ 4-byte Reload
+ str r0, [sp, #216] @ 4-byte Spill
+ ldr r0, [sp, #284]
+ adcs r0, r1, r0
+ ldr r1, [sp, #228] @ 4-byte Reload
+ str r0, [sp, #232] @ 4-byte Spill
+ ldr r0, [sp, #288]
+ adcs r0, r1, r0
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #228] @ 4-byte Spill
+ ldr r0, [sp, #292]
+ adcs r0, r1, r0
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #236] @ 4-byte Spill
+ ldr r0, [sp, #296]
+ adcs r10, r10, r0
+ ldr r0, [sp, #300]
+ str r10, [sp, #136] @ 4-byte Spill
+ adcs r8, r8, r0
+ ldr r0, [sp, #304]
+ str r8, [sp, #140] @ 4-byte Spill
+ adcs r6, r6, r0
+ ldr r0, [sp, #308]
+ adcs r2, r1, r0
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #200] @ 4-byte Reload
+ subs r1, r9, r0
+ ldr r0, [sp, #196] @ 4-byte Reload
+ sbcs r3, r12, r0
+ ldr r0, [sp, #192] @ 4-byte Reload
+ sbcs r12, lr, r0
+ ldr r0, [sp, #176] @ 4-byte Reload
+ sbcs lr, r4, r0
+ ldr r0, [sp, #180] @ 4-byte Reload
+ sbcs r4, r5, r0
+ ldr r0, [sp, #184] @ 4-byte Reload
+ sbcs r5, r11, r0
+ ldr r0, [sp, #188] @ 4-byte Reload
+ ldr r11, [sp, #224] @ 4-byte Reload
+ sbcs r9, r7, r0
+ ldr r0, [sp, #148] @ 4-byte Reload
+ ldr r7, [sp, #220] @ 4-byte Reload
+ sbcs r0, r11, r0
+ ldr r11, [sp, #232] @ 4-byte Reload
+ str r0, [sp, #176] @ 4-byte Spill
+ ldr r0, [sp, #144] @ 4-byte Reload
+ sbcs r0, r7, r0
+ ldr r7, [sp, #216] @ 4-byte Reload
+ str r0, [sp, #180] @ 4-byte Spill
+ ldr r0, [sp, #152] @ 4-byte Reload
+ sbcs r0, r7, r0
+ ldr r7, [sp, #228] @ 4-byte Reload
+ str r0, [sp, #184] @ 4-byte Spill
+ ldr r0, [sp, #156] @ 4-byte Reload
+ sbcs r0, r11, r0
+ ldr r11, [sp, #236] @ 4-byte Reload
+ str r0, [sp, #188] @ 4-byte Spill
+ ldr r0, [sp, #160] @ 4-byte Reload
+ sbcs r0, r7, r0
+ str r0, [sp, #192] @ 4-byte Spill
+ ldr r0, [sp, #164] @ 4-byte Reload
+ sbcs r0, r11, r0
+ str r0, [sp, #196] @ 4-byte Spill
+ ldr r0, [sp, #168] @ 4-byte Reload
+ sbcs r0, r10, r0
+ mov r10, r6
+ str r0, [sp, #200] @ 4-byte Spill
+ ldr r0, [sp, #172] @ 4-byte Reload
+ sbcs r7, r8, r0
+ ldr r0, [sp, #204] @ 4-byte Reload
+ mov r8, r2
+ sbcs r11, r6, r0
+ ldr r0, [sp, #208] @ 4-byte Reload
+ sbcs r6, r2, r0
+ ldr r0, [sp, #132] @ 4-byte Reload
+ sbc r2, r0, #0
+ ldr r0, [sp, #100] @ 4-byte Reload
+ ands r2, r2, #1
+ movne r1, r0
+ ldr r0, [sp, #212] @ 4-byte Reload
+ str r1, [r0]
+ ldr r1, [sp, #104] @ 4-byte Reload
+ movne r3, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r3, [r0, #4]
+ ldr r3, [sp, #176] @ 4-byte Reload
+ movne r12, r1
+ ldr r1, [sp, #112] @ 4-byte Reload
+ cmp r2, #0
+ str r12, [r0, #8]
+ movne lr, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str lr, [r0, #12]
+ movne r4, r1
+ ldr r1, [sp, #120] @ 4-byte Reload
+ str r4, [r0, #16]
+ movne r5, r1
+ ldr r1, [sp, #124] @ 4-byte Reload
+ cmp r2, #0
+ str r5, [r0, #20]
+ movne r9, r1
+ ldr r1, [sp, #224] @ 4-byte Reload
+ str r9, [r0, #24]
+ movne r3, r1
+ ldr r1, [sp, #220] @ 4-byte Reload
+ str r3, [r0, #28]
+ ldr r3, [sp, #180] @ 4-byte Reload
+ movne r3, r1
+ ldr r1, [sp, #216] @ 4-byte Reload
+ cmp r2, #0
+ str r3, [r0, #32]
+ ldr r3, [sp, #184] @ 4-byte Reload
+ movne r3, r1
+ ldr r1, [sp, #232] @ 4-byte Reload
+ str r3, [r0, #36]
+ ldr r3, [sp, #188] @ 4-byte Reload
+ movne r3, r1
+ ldr r1, [sp, #228] @ 4-byte Reload
+ str r3, [r0, #40]
+ ldr r3, [sp, #192] @ 4-byte Reload
+ movne r3, r1
+ ldr r1, [sp, #236] @ 4-byte Reload
+ cmp r2, #0
+ str r3, [r0, #44]
+ ldr r3, [sp, #196] @ 4-byte Reload
+ movne r3, r1
+ ldr r1, [sp, #200] @ 4-byte Reload
+ str r3, [r0, #48]
+ ldr r3, [sp, #136] @ 4-byte Reload
+ movne r1, r3
+ str r1, [r0, #52]
+ ldr r1, [sp, #140] @ 4-byte Reload
+ movne r7, r1
+ cmp r2, #0
+ movne r11, r10
+ movne r6, r8
+ str r7, [r0, #56]
+ str r11, [r0, #60]
+ str r6, [r0, #64]
+ add sp, sp, #444
+ add sp, sp, #1024
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end260:
+ .size mcl_fp_montRed17L, .Lfunc_end260-mcl_fp_montRed17L
+ .cantunwind
+ .fnend
+
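+@ mcl_fp_addPre17L(r0 = z, r1 = x, r2 = y): based on the code below, this
+@ appears to add two 17-limb (17 x 32-bit = 544-bit) integers without any
+@ modular reduction, storing the 17-word sum at z and returning the final
+@ carry (0 or 1) in r0.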
+ .globl mcl_fp_addPre17L
+ .align 2
+ .type mcl_fp_addPre17L,%function
+mcl_fp_addPre17L: @ @mcl_fp_addPre17L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #76
+ sub sp, sp, #76
+ ldm r1, {r3, lr}
+ ldr r8, [r1, #8]
+ ldr r5, [r1, #12]
+ ldm r2, {r6, r7, r12}
+ ldr r4, [r2, #12]
+ add r10, r2, #16
+ adds r3, r6, r3
+ str r3, [sp, #32] @ 4-byte Spill
+ ldr r3, [r2, #64]
+ str r3, [sp, #72] @ 4-byte Spill
+ adcs r3, r7, lr
+ add lr, r1, #16
+ str r3, [sp, #28] @ 4-byte Spill
+ ldr r3, [r2, #32]
+ adcs r6, r12, r8
+ adcs r8, r4, r5
+ str r3, [sp, #36] @ 4-byte Spill
+ ldr r3, [r2, #36]
+ str r3, [sp, #40] @ 4-byte Spill
+ ldr r3, [r2, #40]
+ str r3, [sp, #44] @ 4-byte Spill
+ ldr r3, [r2, #44]
+ str r3, [sp, #48] @ 4-byte Spill
+ ldr r3, [r2, #48]
+ str r3, [sp, #52] @ 4-byte Spill
+ ldr r3, [r2, #52]
+ str r3, [sp, #56] @ 4-byte Spill
+ ldr r3, [r2, #56]
+ str r3, [sp, #60] @ 4-byte Spill
+ ldr r3, [r2, #60]
+ str r3, [sp, #64] @ 4-byte Spill
+ ldr r3, [r2, #28]
+ str r3, [sp, #24] @ 4-byte Spill
+ ldm r10, {r4, r5, r10}
+ ldr r2, [r1, #64]
+ ldr r11, [r1, #60]
+ str r2, [sp, #68] @ 4-byte Spill
+ ldr r2, [r1, #36]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #40]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r1, #44]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #48]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [r1, #52]
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [r1, #56]
+ str r2, [sp, #20] @ 4-byte Spill
+ ldm lr, {r1, r2, r3, r12, lr}
+ ldr r9, [sp, #32] @ 4-byte Reload
+ ldr r7, [sp, #28] @ 4-byte Reload
+ adcs r1, r4, r1
+ str r9, [r0]
+ str r7, [r0, #4]
+ str r6, [r0, #8]
+ str r8, [r0, #12]
+ ldr r7, [sp, #8] @ 4-byte Reload
+ ldr r6, [sp, #12] @ 4-byte Reload
+ ldr r4, [sp, #20] @ 4-byte Reload
+ adcs r2, r5, r2
+ str r1, [r0, #16]
+ ldr r5, [sp, #16] @ 4-byte Reload
+ adcs r1, r10, r3
+ str r2, [r0, #20]
+ ldr r2, [sp, #24] @ 4-byte Reload
+ ldr r3, [sp, #4] @ 4-byte Reload
+ str r1, [r0, #24]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r2, r2, r12
+ adcs r12, r1, lr
+ str r2, [r0, #28]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ ldr r2, [sp] @ 4-byte Reload
+ str r12, [r0, #32]
+ add r12, r0, #36
+ adcs r2, r1, r2
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r3, r1, r3
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r7, r1, r7
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r6, r1, r6
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r5, r1, r5
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r4, r1, r4
+ ldr r1, [sp, #64] @ 4-byte Reload
+ stm r12, {r2, r3, r7}
+ str r6, [r0, #48]
+ str r5, [r0, #52]
+ str r4, [r0, #56]
+ ldr r2, [sp, #68] @ 4-byte Reload
+ adcs r1, r1, r11
+ str r1, [r0, #60]
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [r0, #64]
+ mov r0, #0
+ adc r0, r0, #0
+ add sp, sp, #76
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end261:
+ .size mcl_fp_addPre17L, .Lfunc_end261-mcl_fp_addPre17L
+ .cantunwind
+ .fnend
+
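+@ mcl_fp_subPre17L(r0 = z, r1 = x, r2 = y): appears to compute the 17-limb
+@ (544-bit) difference x - y without modular reduction, storing the result
+@ at z and returning the final borrow (0 or 1) in r0.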
+ .globl mcl_fp_subPre17L
+ .align 2
+ .type mcl_fp_subPre17L,%function
+mcl_fp_subPre17L: @ @mcl_fp_subPre17L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #76
+ sub sp, sp, #76
+ ldm r2, {r3, lr}
+ ldr r8, [r2, #8]
+ ldr r5, [r2, #12]
+ ldm r1, {r6, r7, r12}
+ ldr r4, [r1, #12]
+ add r10, r2, #16
+ subs r3, r6, r3
+ str r3, [sp, #32] @ 4-byte Spill
+ ldr r3, [r2, #64]
+ str r3, [sp, #72] @ 4-byte Spill
+ sbcs r3, r7, lr
+ add lr, r1, #16
+ str r3, [sp, #28] @ 4-byte Spill
+ ldr r3, [r2, #32]
+ sbcs r6, r12, r8
+ sbcs r8, r4, r5
+ str r3, [sp, #36] @ 4-byte Spill
+ ldr r3, [r2, #36]
+ str r3, [sp, #40] @ 4-byte Spill
+ ldr r3, [r2, #40]
+ str r3, [sp, #44] @ 4-byte Spill
+ ldr r3, [r2, #44]
+ str r3, [sp, #48] @ 4-byte Spill
+ ldr r3, [r2, #48]
+ str r3, [sp, #52] @ 4-byte Spill
+ ldr r3, [r2, #52]
+ str r3, [sp, #56] @ 4-byte Spill
+ ldr r3, [r2, #56]
+ str r3, [sp, #60] @ 4-byte Spill
+ ldr r3, [r2, #60]
+ str r3, [sp, #64] @ 4-byte Spill
+ ldr r3, [r2, #28]
+ str r3, [sp, #24] @ 4-byte Spill
+ ldm r10, {r4, r5, r10}
+ ldr r2, [r1, #64]
+ ldr r11, [r1, #60]
+ str r2, [sp, #68] @ 4-byte Spill
+ ldr r2, [r1, #36]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #40]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r1, #44]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #48]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [r1, #52]
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [r1, #56]
+ str r2, [sp, #20] @ 4-byte Spill
+ ldm lr, {r1, r2, r3, r12, lr}
+ ldr r9, [sp, #32] @ 4-byte Reload
+ ldr r7, [sp, #28] @ 4-byte Reload
+ sbcs r1, r1, r4
+ str r9, [r0]
+ str r7, [r0, #4]
+ str r6, [r0, #8]
+ str r8, [r0, #12]
+ ldr r7, [sp, #8] @ 4-byte Reload
+ ldr r6, [sp, #12] @ 4-byte Reload
+ ldr r4, [sp, #20] @ 4-byte Reload
+ sbcs r2, r2, r5
+ str r1, [r0, #16]
+ ldr r5, [sp, #16] @ 4-byte Reload
+ sbcs r1, r3, r10
+ str r2, [r0, #20]
+ ldr r2, [sp, #24] @ 4-byte Reload
+ ldr r3, [sp, #4] @ 4-byte Reload
+ str r1, [r0, #24]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ sbcs r2, r12, r2
+ sbcs r12, lr, r1
+ str r2, [r0, #28]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ ldr r2, [sp] @ 4-byte Reload
+ str r12, [r0, #32]
+ add r12, r0, #36
+ sbcs r2, r2, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ sbcs r3, r3, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ sbcs r7, r7, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ sbcs r6, r6, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ sbcs r5, r5, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ sbcs r4, r4, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ stm r12, {r2, r3, r7}
+ str r6, [r0, #48]
+ str r5, [r0, #52]
+ str r4, [r0, #56]
+ ldr r2, [sp, #68] @ 4-byte Reload
+ sbcs r1, r11, r1
+ str r1, [r0, #60]
+ ldr r1, [sp, #72] @ 4-byte Reload
+ sbcs r1, r2, r1
+ str r1, [r0, #64]
+ mov r0, #0
+ sbc r0, r0, #0
+ and r0, r0, #1
+ add sp, sp, #76
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end262:
+ .size mcl_fp_subPre17L, .Lfunc_end262-mcl_fp_subPre17L
+ .cantunwind
+ .fnend
+
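+@ mcl_fp_shr1_17L(r0 = z, r1 = x): appears to shift the 17-limb (544-bit)
+@ value x right by one bit and store the result at z, propagating each
+@ limb's low bit into the next lower limb via rrx and orr ... lsl #31.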
+ .globl mcl_fp_shr1_17L
+ .align 2
+ .type mcl_fp_shr1_17L,%function
+mcl_fp_shr1_17L: @ @mcl_fp_shr1_17L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #20
+ sub sp, sp, #20
+ ldr r4, [r1, #4]
+ ldr r3, [r1, #8]
+ add r9, r1, #32
+ ldr r2, [r1, #12]
+ ldr r11, [r1]
+ lsr r7, r4, #1
+ lsr lr, r2, #1
+ lsrs r2, r2, #1
+ orr r10, r7, r3, lsl #31
+ ldr r7, [r1, #64]
+ rrx r12, r3
+ lsrs r3, r4, #1
+ add r4, r1, #16
+ rrx r11, r11
+ str r7, [sp, #16] @ 4-byte Spill
+ ldm r9, {r5, r7, r9}
+ ldr r6, [r1, #48]
+ ldr r8, [r1, #44]
+ str r6, [sp] @ 4-byte Spill
+ ldr r6, [r1, #52]
+ str r6, [sp, #4] @ 4-byte Spill
+ ldr r6, [r1, #56]
+ str r6, [sp, #8] @ 4-byte Spill
+ ldr r6, [r1, #60]
+ str r6, [sp, #12] @ 4-byte Spill
+ ldm r4, {r1, r2, r3, r4}
+ str r11, [r0]
+ stmib r0, {r10, r12}
+ orr r6, lr, r1, lsl #31
+ str r6, [r0, #12]
+ lsrs r6, r2, #1
+ rrx r1, r1
+ str r1, [r0, #16]
+ lsr r1, r2, #1
+ ldr r2, [sp, #4] @ 4-byte Reload
+ orr r1, r1, r3, lsl #31
+ str r1, [r0, #20]
+ lsrs r1, r4, #1
+ rrx r1, r3
+ ldr r3, [sp] @ 4-byte Reload
+ str r1, [r0, #24]
+ lsr r1, r4, #1
+ orr r1, r1, r5, lsl #31
+ str r1, [r0, #28]
+ lsrs r1, r7, #1
+ rrx r1, r5
+ str r1, [r0, #32]
+ lsr r1, r7, #1
+ orr r1, r1, r9, lsl #31
+ str r1, [r0, #36]
+ lsrs r1, r8, #1
+ rrx r1, r9
+ str r1, [r0, #40]
+ lsr r1, r8, #1
+ orr r1, r1, r3, lsl #31
+ str r1, [r0, #44]
+ lsrs r1, r2, #1
+ rrx r1, r3
+ ldr r3, [sp, #8] @ 4-byte Reload
+ str r1, [r0, #48]
+ lsr r1, r2, #1
+ ldr r2, [sp, #12] @ 4-byte Reload
+ orr r1, r1, r3, lsl #31
+ str r1, [r0, #52]
+ lsrs r1, r2, #1
+ rrx r1, r3
+ str r1, [r0, #56]
+ lsr r1, r2, #1
+ ldr r2, [sp, #16] @ 4-byte Reload
+ orr r1, r1, r2, lsl #31
+ str r1, [r0, #60]
+ lsr r1, r2, #1
+ str r1, [r0, #64]
+ add sp, sp, #20
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end263:
+ .size mcl_fp_shr1_17L, .Lfunc_end263-mcl_fp_shr1_17L
+ .cantunwind
+ .fnend
+
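+@ mcl_fp_add17L(r0 = z, r1 = x, r2 = y, r3 = p): appears to be a modular
+@ addition over 17 limbs: it computes x + y, then subtracts the modulus p
+@ and keeps the reduced value when that subtraction does not borrow
+@ (the %nocarry block below); otherwise the raw sum already written to z
+@ is left unchanged.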
+ .globl mcl_fp_add17L
+ .align 2
+ .type mcl_fp_add17L,%function
+mcl_fp_add17L: @ @mcl_fp_add17L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #64
+ sub sp, sp, #64
+ ldr r9, [r1]
+ ldmib r1, {r8, lr}
+ ldr r12, [r1, #12]
+ ldm r2, {r4, r5, r6, r7}
+ adds r9, r4, r9
+ ldr r4, [r1, #24]
+ adcs r5, r5, r8
+ mov r8, r9
+ adcs r6, r6, lr
+ str r5, [sp, #32] @ 4-byte Spill
+ ldr r5, [r1, #20]
+ str r8, [r0]
+ adcs r7, r7, r12
+ str r6, [sp, #28] @ 4-byte Spill
+ ldr r6, [r1, #16]
+ ldr lr, [sp, #32] @ 4-byte Reload
+ str r7, [sp, #24] @ 4-byte Spill
+ ldr r7, [r2, #16]
+ str lr, [r0, #4]
+ adcs r10, r7, r6
+ ldr r7, [r2, #20]
+ ldr r6, [r2, #28]
+ str r10, [sp, #4] @ 4-byte Spill
+ adcs r7, r7, r5
+ ldr r5, [r2, #44]
+ str r7, [sp, #20] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ ldr r9, [sp, #20] @ 4-byte Reload
+ adcs r7, r7, r4
+ ldr r4, [r2, #48]
+ str r7, [sp, #60] @ 4-byte Spill
+ ldr r7, [r1, #28]
+ adcs r7, r6, r7
+ ldr r6, [r2, #32]
+ str r7, [sp, #12] @ 4-byte Spill
+ ldr r7, [r1, #32]
+ adcs r7, r6, r7
+ ldr r6, [r2, #36]
+ str r7, [sp, #56] @ 4-byte Spill
+ ldr r7, [r1, #36]
+ adcs r7, r6, r7
+ ldr r6, [r2, #40]
+ str r7, [sp, #40] @ 4-byte Spill
+ ldr r7, [r1, #40]
+ adcs r7, r6, r7
+ ldr r6, [r1, #44]
+ str r7, [sp, #52] @ 4-byte Spill
+ adcs r7, r5, r6
+ ldr r5, [r1, #48]
+ ldr r6, [r2, #56]
+ str r7, [sp, #48] @ 4-byte Spill
+ ldr r7, [r2, #52]
+ adcs r11, r4, r5
+ ldr r4, [r1, #52]
+ ldr r5, [sp, #24] @ 4-byte Reload
+ str r11, [sp, #8] @ 4-byte Spill
+ adcs r7, r7, r4
+ ldr r4, [sp, #28] @ 4-byte Reload
+ str r7, [sp, #44] @ 4-byte Spill
+ ldr r7, [r1, #56]
+ str r4, [r0, #8]
+ str r5, [r0, #12]
+ str r10, [r0, #16]
+ str r9, [r0, #20]
+ ldr r10, [sp, #12] @ 4-byte Reload
+ adcs r12, r6, r7
+ ldr r7, [r1, #60]
+ ldr r6, [r2, #60]
+ ldr r1, [r1, #64]
+ ldr r2, [r2, #64]
+ adcs r6, r6, r7
+ adcs r2, r2, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r2, [sp, #36] @ 4-byte Spill
+ str r1, [r0, #24]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r10, [r0, #28]
+ str r2, [r0, #64]
+ mov r2, #0
+ str r1, [r0, #32]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r1, [r0, #36]
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r1, [r0, #40]
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r1, [r0, #44]
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r11, [r0, #48]
+ mov r11, r12
+ str r1, [r0, #52]
+ adc r1, r2, #0
+ str r12, [r0, #56]
+ str r6, [r0, #60]
+ mov r12, r6
+ str r1, [sp, #16] @ 4-byte Spill
+ ldm r3, {r6, r7}
+ ldr r1, [r3, #8]
+ ldr r2, [r3, #12]
+ subs r6, r8, r6
+ sbcs r7, lr, r7
+ str r6, [sp] @ 4-byte Spill
+ sbcs r1, r4, r1
+ str r7, [sp, #32] @ 4-byte Spill
+ str r1, [sp, #28] @ 4-byte Spill
+ sbcs r1, r5, r2
+ ldr r2, [sp, #4] @ 4-byte Reload
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [r3, #16]
+ sbcs r1, r2, r1
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r1, [sp, #4] @ 4-byte Spill
+ ldr r1, [r3, #20]
+ sbcs r9, r9, r1
+ ldr r1, [r3, #24]
+ sbcs r1, r2, r1
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [r3, #28]
+ sbcs r10, r10, r1
+ ldr r1, [r3, #32]
+ sbcs r1, r2, r1
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [r3, #36]
+ sbcs r1, r2, r1
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [r3, #40]
+ sbcs lr, r2, r1
+ ldr r1, [r3, #44]
+ ldr r2, [sp, #48] @ 4-byte Reload
+ sbcs r8, r2, r1
+ ldr r1, [r3, #48]
+ ldr r2, [sp, #8] @ 4-byte Reload
+ sbcs r4, r2, r1
+ ldr r1, [r3, #52]
+ ldr r2, [sp, #44] @ 4-byte Reload
+ sbcs r5, r2, r1
+ ldr r1, [r3, #56]
+ ldr r2, [sp, #36] @ 4-byte Reload
+ sbcs r7, r11, r1
+ ldr r1, [r3, #60]
+ sbcs r6, r12, r1
+ ldr r1, [r3, #64]
+ sbcs r1, r2, r1
+ ldr r2, [sp, #16] @ 4-byte Reload
+ sbc r2, r2, #0
+ tst r2, #1
+ bne .LBB264_2
+@ BB#1: @ %nocarry
+ ldr r2, [sp] @ 4-byte Reload
+ str r2, [r0]
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r2, [r0, #4]
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r2, [r0, #8]
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r2, [r0, #12]
+ ldr r2, [sp, #4] @ 4-byte Reload
+ str r2, [r0, #16]
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r9, [r0, #20]
+ str r2, [r0, #24]
+ str r10, [r0, #28]
+ str r1, [r0, #64]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r1, [r0, #32]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r1, [r0, #36]
+ add r1, r0, #48
+ str lr, [r0, #40]
+ str r8, [r0, #44]
+ stm r1, {r4, r5, r7}
+ str r6, [r0, #60]
+.LBB264_2: @ %carry
+ add sp, sp, #64
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end264:
+ .size mcl_fp_add17L, .Lfunc_end264-mcl_fp_add17L
+ .cantunwind
+ .fnend
+
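+@ mcl_fp_addNF17L(r0 = z, r1 = x, r2 = y, r3 = p): appears to be a
+@ branch-free variant of the modular addition: it computes both x + y and
+@ x + y - p, then selects between them with movlt based on the sign of the
+@ top word of the subtraction.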
+ .globl mcl_fp_addNF17L
+ .align 2
+ .type mcl_fp_addNF17L,%function
+mcl_fp_addNF17L: @ @mcl_fp_addNF17L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #96
+ sub sp, sp, #96
+ ldr r9, [r1]
+ ldmib r1, {r8, lr}
+ ldr r12, [r1, #12]
+ ldm r2, {r4, r5, r6, r7}
+ adds r11, r4, r9
+ ldr r4, [r1, #24]
+ adcs r10, r5, r8
+ ldr r5, [r1, #20]
+ str r11, [sp, #8] @ 4-byte Spill
+ adcs r8, r6, lr
+ ldr r6, [r1, #16]
+ str r10, [sp, #16] @ 4-byte Spill
+ adcs r9, r7, r12
+ ldr r7, [r2, #16]
+ str r8, [sp, #20] @ 4-byte Spill
+ str r9, [sp, #24] @ 4-byte Spill
+ adcs r7, r7, r6
+ ldr r6, [r2, #28]
+ str r7, [sp, #48] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ adcs lr, r7, r5
+ ldr r7, [r2, #24]
+ str lr, [sp, #4] @ 4-byte Spill
+ adcs r7, r7, r4
+ str r7, [sp, #60] @ 4-byte Spill
+ ldr r7, [r1, #28]
+ adcs r7, r6, r7
+ ldr r6, [r2, #32]
+ str r7, [sp, #56] @ 4-byte Spill
+ ldr r7, [r1, #32]
+ adcs r7, r6, r7
+ ldr r6, [r2, #36]
+ str r7, [sp, #52] @ 4-byte Spill
+ ldr r7, [r1, #36]
+ adcs r7, r6, r7
+ ldr r6, [r2, #40]
+ str r7, [sp, #72] @ 4-byte Spill
+ ldr r7, [r1, #40]
+ adcs r7, r6, r7
+ ldr r6, [r2, #44]
+ str r7, [sp, #68] @ 4-byte Spill
+ ldr r7, [r1, #44]
+ adcs r7, r6, r7
+ ldr r6, [r2, #48]
+ str r7, [sp, #64] @ 4-byte Spill
+ ldr r7, [r1, #48]
+ adcs r7, r6, r7
+ ldr r6, [r2, #52]
+ str r7, [sp, #84] @ 4-byte Spill
+ ldr r7, [r1, #52]
+ adcs r7, r6, r7
+ ldr r6, [r2, #56]
+ str r7, [sp, #80] @ 4-byte Spill
+ ldr r7, [r1, #56]
+ adcs r7, r6, r7
+ ldr r6, [r2, #60]
+ ldr r2, [r2, #64]
+ str r7, [sp, #76] @ 4-byte Spill
+ ldr r7, [r1, #60]
+ ldr r1, [r1, #64]
+ adcs r7, r6, r7
+ adc r1, r2, r1
+ str r7, [sp, #92] @ 4-byte Spill
+ str r1, [sp, #88] @ 4-byte Spill
+ ldm r3, {r1, r7}
+ ldr r6, [r3, #8]
+ ldr r5, [r3, #12]
+ ldr r2, [sp, #48] @ 4-byte Reload
+ subs r12, r11, r1
+ ldr r1, [r3, #64]
+ ldr r11, [r3, #36]
+ sbcs r4, r10, r7
+ ldr r10, [r3, #32]
+ ldr r7, [r3, #24]
+ sbcs r6, r8, r6
+ sbcs r9, r9, r5
+ ldr r5, [r3, #28]
+ str r1, [sp] @ 4-byte Spill
+ ldr r1, [r3, #40]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [r3, #44]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [r3, #48]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [r3, #52]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [r3, #56]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [r3, #60]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [r3, #20]
+ ldr r3, [r3, #16]
+ sbcs r2, r2, r3
+ sbcs r3, lr, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ sbcs lr, r1, r7
+ ldr r1, [sp, #56] @ 4-byte Reload
+ ldr r7, [sp, #12] @ 4-byte Reload
+ sbcs r5, r1, r5
+ ldr r1, [sp, #52] @ 4-byte Reload
+ sbcs r8, r1, r10
+ ldr r1, [sp, #72] @ 4-byte Reload
+ sbcs r11, r1, r11
+ ldr r1, [sp, #68] @ 4-byte Reload
+ sbcs r1, r1, r7
+ ldr r7, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ sbcs r1, r1, r7
+ ldr r7, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ sbcs r1, r1, r7
+ ldr r7, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ sbcs r1, r1, r7
+ ldr r7, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ sbcs r1, r1, r7
+ ldr r7, [sp, #44] @ 4-byte Reload
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #92] @ 4-byte Reload
+ sbcs r1, r1, r7
+ ldr r7, [sp] @ 4-byte Reload
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #88] @ 4-byte Reload
+ sbc r10, r1, r7
+ ldr r7, [sp, #8] @ 4-byte Reload
+ asr r1, r10, #31
+ cmp r1, #0
+ movlt r12, r7
+ ldr r7, [sp, #16] @ 4-byte Reload
+ str r12, [r0]
+ movlt r4, r7
+ str r4, [r0, #4]
+ ldr r4, [sp, #20] @ 4-byte Reload
+ movlt r6, r4
+ cmp r1, #0
+ str r6, [r0, #8]
+ ldr r6, [sp, #24] @ 4-byte Reload
+ movlt r9, r6
+ ldr r6, [sp, #48] @ 4-byte Reload
+ str r9, [r0, #12]
+ movlt r2, r6
+ str r2, [r0, #16]
+ ldr r2, [sp, #4] @ 4-byte Reload
+ movlt r3, r2
+ ldr r2, [sp, #60] @ 4-byte Reload
+ cmp r1, #0
+ str r3, [r0, #20]
+ ldr r3, [sp, #12] @ 4-byte Reload
+ movlt lr, r2
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str lr, [r0, #24]
+ movlt r5, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r5, [r0, #28]
+ movlt r8, r2
+ ldr r2, [sp, #72] @ 4-byte Reload
+ cmp r1, #0
+ str r8, [r0, #32]
+ movlt r11, r2
+ ldr r2, [sp, #68] @ 4-byte Reload
+ str r11, [r0, #36]
+ movlt r3, r2
+ ldr r2, [sp, #64] @ 4-byte Reload
+ str r3, [r0, #40]
+ ldr r3, [sp, #28] @ 4-byte Reload
+ movlt r3, r2
+ ldr r2, [sp, #84] @ 4-byte Reload
+ cmp r1, #0
+ str r3, [r0, #44]
+ ldr r3, [sp, #32] @ 4-byte Reload
+ movlt r3, r2
+ ldr r2, [sp, #80] @ 4-byte Reload
+ str r3, [r0, #48]
+ ldr r3, [sp, #36] @ 4-byte Reload
+ movlt r3, r2
+ ldr r2, [sp, #76] @ 4-byte Reload
+ str r3, [r0, #52]
+ ldr r3, [sp, #40] @ 4-byte Reload
+ movlt r3, r2
+ cmp r1, #0
+ ldr r1, [sp, #92] @ 4-byte Reload
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r3, [r0, #56]
+ movlt r2, r1
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r2, [r0, #60]
+ movlt r10, r1
+ str r10, [r0, #64]
+ add sp, sp, #96
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end265:
+ .size mcl_fp_addNF17L, .Lfunc_end265-mcl_fp_addNF17L
+ .cantunwind
+ .fnend
+
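+@ mcl_fp_sub17L(r0 = z, r1 = x, r2 = y, r3 = p): appears to be a modular
+@ subtraction over 17 limbs: it computes x - y and, when the subtraction
+@ borrows (the %carry block below), adds the modulus p back before the
+@ result at z is final.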
+ .globl mcl_fp_sub17L
+ .align 2
+ .type mcl_fp_sub17L,%function
+mcl_fp_sub17L: @ @mcl_fp_sub17L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #92
+ sub sp, sp, #92
+ ldm r2, {r8, r9, lr}
+ ldr r12, [r2, #12]
+ ldm r1, {r4, r5, r6, r7}
+ subs r4, r4, r8
+ sbcs r5, r5, r9
+ str r4, [sp, #68] @ 4-byte Spill
+ ldr r4, [r2, #24]
+ sbcs r6, r6, lr
+ str r5, [sp, #88] @ 4-byte Spill
+ ldr r5, [r2, #20]
+ sbcs r7, r7, r12
+ str r6, [sp, #84] @ 4-byte Spill
+ ldr r6, [r2, #16]
+ str r7, [sp, #80] @ 4-byte Spill
+ ldr r7, [r1, #16]
+ sbcs r7, r7, r6
+ ldr r6, [r1, #28]
+ str r7, [sp, #76] @ 4-byte Spill
+ ldr r7, [r1, #20]
+ sbcs r7, r7, r5
+ ldr r5, [r1, #44]
+ str r7, [sp, #72] @ 4-byte Spill
+ ldr r7, [r1, #24]
+ sbcs r11, r7, r4
+ ldr r7, [r2, #28]
+ ldr r4, [r2, #52]
+ sbcs r10, r6, r7
+ ldr r7, [r2, #32]
+ ldr r6, [r1, #32]
+ str r10, [sp, #60] @ 4-byte Spill
+ sbcs r9, r6, r7
+ ldr r7, [r2, #36]
+ ldr r6, [r1, #36]
+ str r9, [sp, #56] @ 4-byte Spill
+ sbcs r7, r6, r7
+ ldr r6, [r1, #40]
+ str r7, [sp, #64] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ sbcs r8, r6, r7
+ ldr r7, [r2, #44]
+ str r8, [sp, #52] @ 4-byte Spill
+ sbcs lr, r5, r7
+ ldr r7, [r2, #48]
+ ldr r5, [r1, #48]
+ str lr, [sp, #48] @ 4-byte Spill
+ sbcs r6, r5, r7
+ ldr r5, [r1, #52]
+ sbcs r7, r5, r4
+ ldr r4, [r2, #56]
+ ldr r5, [r1, #56]
+ str r7, [sp, #44] @ 4-byte Spill
+ sbcs r12, r5, r4
+ ldr r4, [r2, #60]
+ ldr r5, [r1, #60]
+ ldr r2, [r2, #64]
+ ldr r1, [r1, #64]
+ str r12, [sp, #40] @ 4-byte Spill
+ sbcs r4, r5, r4
+ ldr r5, [sp, #64] @ 4-byte Reload
+ sbcs r1, r1, r2
+ ldr r2, [sp, #68] @ 4-byte Reload
+ str r2, [r0]
+ ldr r2, [sp, #88] @ 4-byte Reload
+ str r2, [r0, #4]
+ ldr r2, [sp, #84] @ 4-byte Reload
+ str r2, [r0, #8]
+ ldr r2, [sp, #80] @ 4-byte Reload
+ str r2, [r0, #12]
+ ldr r2, [sp, #76] @ 4-byte Reload
+ str r2, [r0, #16]
+ ldr r2, [sp, #72] @ 4-byte Reload
+ str r2, [r0, #20]
+ add r2, r0, #36
+ str r11, [r0, #24]
+ str r10, [r0, #28]
+ str r1, [r0, #64]
+ str r9, [r0, #32]
+ stm r2, {r5, r8, lr}
+ add r2, r0, #48
+ stm r2, {r6, r7, r12}
+ mov r2, #0
+ str r4, [r0, #60]
+ sbc r2, r2, #0
+ tst r2, #1
+ beq .LBB266_2
+@ BB#1: @ %carry
+ ldr r2, [r3, #64]
+ mov r9, r4
+ str r2, [sp, #36] @ 4-byte Spill
+ ldm r3, {r4, r12}
+ ldr r2, [sp, #68] @ 4-byte Reload
+ str r6, [sp, #28] @ 4-byte Spill
+ ldr r7, [r3, #8]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [r3, #12]
+ ldr lr, [r3, #20]
+ adds r8, r4, r2
+ ldr r2, [r3, #32]
+ str r8, [r0]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r3, #36]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r3, #40]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r3, #44]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [r3, #48]
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [r3, #52]
+ str r2, [sp, #20] @ 4-byte Spill
+ ldr r2, [r3, #56]
+ str r2, [sp, #24] @ 4-byte Spill
+ ldr r2, [r3, #60]
+ str r2, [sp, #68] @ 4-byte Spill
+ ldr r2, [sp, #88] @ 4-byte Reload
+ adcs r6, r12, r2
+ ldr r2, [sp, #84] @ 4-byte Reload
+ adcs r7, r7, r2
+ ldr r2, [sp, #80] @ 4-byte Reload
+ adcs r4, r1, r2
+ ldr r2, [r3, #28]
+ ldr r1, [r3, #24]
+ ldr r3, [r3, #16]
+ stmib r0, {r6, r7}
+ ldr r7, [sp, #76] @ 4-byte Reload
+ str r4, [r0, #12]
+ ldr r6, [sp, #16] @ 4-byte Reload
+ ldr r4, [sp, #24] @ 4-byte Reload
+ adcs r3, r3, r7
+ ldr r7, [sp, #72] @ 4-byte Reload
+ str r3, [r0, #16]
+ ldr r3, [sp, #60] @ 4-byte Reload
+ adcs r7, lr, r7
+ adcs r1, r1, r11
+ str r7, [r0, #20]
+ ldr r7, [sp, #12] @ 4-byte Reload
+ adcs r3, r2, r3
+ str r1, [r0, #24]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ ldr r2, [sp] @ 4-byte Reload
+ str r3, [r0, #28]
+ ldr r3, [sp, #8] @ 4-byte Reload
+ adcs r12, r2, r1
+ ldr r1, [sp, #4] @ 4-byte Reload
+ str r12, [r0, #32]
+ add r12, r0, #36
+ adcs r2, r1, r5
+ ldr r1, [sp, #52] @ 4-byte Reload
+ ldr r5, [sp, #20] @ 4-byte Reload
+ adcs r3, r3, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r7, r7, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r6, r6, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r5, r5, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r4, r4, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ stm r12, {r2, r3, r7}
+ str r6, [r0, #48]
+ str r5, [r0, #52]
+ str r4, [r0, #56]
+ ldr r2, [sp, #32] @ 4-byte Reload
+ adcs r1, r1, r9
+ str r1, [r0, #60]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adc r1, r1, r2
+ str r1, [r0, #64]
+.LBB266_2: @ %nocarry
+ add sp, sp, #92
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end266:
+ .size mcl_fp_sub17L, .Lfunc_end266-mcl_fp_sub17L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_subNF17L
+ .align 2
+ .type mcl_fp_subNF17L,%function
+mcl_fp_subNF17L: @ @mcl_fp_subNF17L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #104
+ sub sp, sp, #104
+ mov r12, r0
+ ldr r0, [r2, #64]
+ ldr r11, [r2]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r1, #64]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r2, #32]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r2, #36]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r2, #40]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [r2, #44]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [r2, #48]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [r2, #52]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [r2, #56]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [r2, #60]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [r1, #60]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r1, #56]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r1, #52]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r1, #48]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldmib r2, {r5, r6, r7, r8, r9, r10}
+ ldr r0, [r2, #28]
+ ldr r2, [r1]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldmib r1, {r0, lr}
+ ldr r4, [r1, #12]
+ subs r2, r2, r11
+ add r11, r3, #8
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [r1, #44]
+ sbcs r0, r0, r5
+ ldr r5, [r1, #40]
+ str r0, [sp, #8] @ 4-byte Spill
+ sbcs r0, lr, r6
+ ldr r6, [r1, #36]
+ str r0, [sp, #48] @ 4-byte Spill
+ sbcs r0, r4, r7
+ ldr r7, [r1, #16]
+ str r0, [sp, #52] @ 4-byte Spill
+ sbcs r0, r7, r8
+ ldr r7, [r1, #20]
+ str r0, [sp, #56] @ 4-byte Spill
+ sbcs r0, r7, r9
+ ldr r7, [r1, #24]
+ str r0, [sp, #60] @ 4-byte Spill
+ sbcs r0, r7, r10
+ ldr r7, [r1, #32]
+ ldr r1, [r1, #28]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ sbcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ sbcs r0, r7, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ sbcs r0, r6, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ sbcs r0, r5, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ sbcs r0, r2, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ sbcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ sbcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ sbcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ sbcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ sbc r0, r1, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r3, #64]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r3, #36]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [r3, #40]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [r3, #44]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [r3, #48]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [r3, #52]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r3, #56]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r3, #60]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r3, #32]
+ str r0, [sp] @ 4-byte Spill
+ ldm r3, {r2, r7}
+ ldm r11, {r1, r4, r5, r6, r11}
+ ldr r8, [sp, #12] @ 4-byte Reload
+ ldr r10, [sp, #8] @ 4-byte Reload
+ ldr r0, [r3, #28]
+ adds r2, r8, r2
+ adcs r3, r10, r7
+ ldr r7, [sp, #48] @ 4-byte Reload
+ adcs lr, r7, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r4, r1, r4
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r5, r1, r5
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r6, r1, r6
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r7, r1, r11
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r9, r1, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp] @ 4-byte Reload
+ adcs r11, r0, r1
+ ldr r0, [sp, #76] @ 4-byte Reload
+ ldr r1, [sp, #4] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r1, r0, r1
+ str r1, [sp, #40] @ 4-byte Spill
+ asr r1, r0, #31
+ ldr r0, [sp, #48] @ 4-byte Reload
+ cmp r1, #0
+ movge r2, r8
+ movge r3, r10
+ str r2, [r12]
+ ldr r2, [sp, #4] @ 4-byte Reload
+ str r3, [r12, #4]
+ movge lr, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ cmp r1, #0
+ str lr, [r12, #8]
+ movge r4, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ str r4, [r12, #12]
+ movge r5, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ str r5, [r12, #16]
+ movge r6, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ cmp r1, #0
+ str r6, [r12, #20]
+ movge r7, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ str r7, [r12, #24]
+ movge r9, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ str r9, [r12, #28]
+ movge r11, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ cmp r1, #0
+ str r11, [r12, #32]
+ movge r2, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ str r2, [r12, #36]
+ ldr r2, [sp, #16] @ 4-byte Reload
+ movge r2, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ str r2, [r12, #40]
+ ldr r2, [sp, #20] @ 4-byte Reload
+ movge r2, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ cmp r1, #0
+ str r2, [r12, #44]
+ ldr r2, [sp, #24] @ 4-byte Reload
+ movge r2, r0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ str r2, [r12, #48]
+ ldr r2, [sp, #28] @ 4-byte Reload
+ movge r2, r0
+ ldr r0, [sp, #96] @ 4-byte Reload
+ str r2, [r12, #52]
+ ldr r2, [sp, #32] @ 4-byte Reload
+ movge r2, r0
+ ldr r0, [sp, #100] @ 4-byte Reload
+ cmp r1, #0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r2, [r12, #56]
+ movge r1, r0
+ ldr r0, [sp, #40] @ 4-byte Reload
+ str r1, [r12, #60]
+ ldr r1, [sp, #44] @ 4-byte Reload
+ movge r0, r1
+ str r0, [r12, #64]
+ add sp, sp, #104
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end267:
+ .size mcl_fp_subNF17L, .Lfunc_end267-mcl_fp_subNF17L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_add17L
+ .align 2
+ .type mcl_fpDbl_add17L,%function
+mcl_fpDbl_add17L: @ @mcl_fpDbl_add17L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #216
+ sub sp, sp, #216
+ ldm r1, {r7, r8, lr}
+ ldr r12, [r1, #12]
+ ldm r2, {r4, r5, r6, r9}
+ add r10, r1, #32
+ adds r4, r4, r7
+ str r4, [sp, #104] @ 4-byte Spill
+ ldr r4, [r2, #128]
+ str r4, [sp, #208] @ 4-byte Spill
+ ldr r4, [r2, #132]
+ str r4, [sp, #212] @ 4-byte Spill
+ adcs r4, r5, r8
+ adcs r7, r6, lr
+ str r4, [sp, #100] @ 4-byte Spill
+ add lr, r1, #16
+ str r7, [sp, #96] @ 4-byte Spill
+ ldr r7, [r2, #96]
+ str r7, [sp, #176] @ 4-byte Spill
+ ldr r7, [r2, #100]
+ str r7, [sp, #180] @ 4-byte Spill
+ ldr r7, [r2, #104]
+ str r7, [sp, #184] @ 4-byte Spill
+ ldr r7, [r2, #108]
+ str r7, [sp, #188] @ 4-byte Spill
+ ldr r7, [r2, #112]
+ str r7, [sp, #192] @ 4-byte Spill
+ ldr r7, [r2, #116]
+ str r7, [sp, #196] @ 4-byte Spill
+ ldr r7, [r2, #120]
+ str r7, [sp, #200] @ 4-byte Spill
+ ldr r7, [r2, #124]
+ str r7, [sp, #204] @ 4-byte Spill
+ adcs r7, r9, r12
+ str r7, [sp, #68] @ 4-byte Spill
+ ldr r7, [r2, #64]
+ str r7, [sp, #144] @ 4-byte Spill
+ ldr r7, [r2, #68]
+ str r7, [sp, #148] @ 4-byte Spill
+ ldr r7, [r2, #72]
+ str r7, [sp, #152] @ 4-byte Spill
+ ldr r7, [r2, #76]
+ str r7, [sp, #156] @ 4-byte Spill
+ ldr r7, [r2, #80]
+ str r7, [sp, #160] @ 4-byte Spill
+ ldr r7, [r2, #84]
+ str r7, [sp, #168] @ 4-byte Spill
+ ldr r7, [r2, #88]
+ str r7, [sp, #164] @ 4-byte Spill
+ ldr r7, [r2, #92]
+ str r7, [sp, #172] @ 4-byte Spill
+ ldr r7, [r2, #32]
+ str r7, [sp, #56] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r7, [sp, #60] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ str r7, [sp, #64] @ 4-byte Spill
+ ldr r7, [r2, #44]
+ str r7, [sp, #72] @ 4-byte Spill
+ ldr r7, [r2, #48]
+ str r7, [sp, #76] @ 4-byte Spill
+ ldr r7, [r2, #52]
+ str r7, [sp, #80] @ 4-byte Spill
+ ldr r7, [r2, #56]
+ str r7, [sp, #88] @ 4-byte Spill
+ ldr r7, [r2, #60]
+ str r7, [sp, #92] @ 4-byte Spill
+ ldr r7, [r2, #28]
+ str r7, [sp, #20] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ ldr r2, [r2, #16]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #128]
+ str r7, [sp, #12] @ 4-byte Spill
+ str r2, [sp, #136] @ 4-byte Spill
+ ldr r2, [r1, #132]
+ str r2, [sp, #140] @ 4-byte Spill
+ ldr r2, [r1, #96]
+ str r2, [sp, #108] @ 4-byte Spill
+ ldr r2, [r1, #104]
+ str r2, [sp, #112] @ 4-byte Spill
+ ldr r2, [r1, #108]
+ str r2, [sp, #116] @ 4-byte Spill
+ ldr r2, [r1, #112]
+ str r2, [sp, #120] @ 4-byte Spill
+ ldr r2, [r1, #116]
+ str r2, [sp, #124] @ 4-byte Spill
+ ldr r2, [r1, #120]
+ str r2, [sp, #128] @ 4-byte Spill
+ ldr r2, [r1, #124]
+ str r2, [sp, #132] @ 4-byte Spill
+ ldr r2, [r1, #100]
+ str r2, [sp, #84] @ 4-byte Spill
+ ldr r2, [r1, #64]
+ str r2, [sp, #24] @ 4-byte Spill
+ ldr r2, [r1, #68]
+ str r2, [sp, #28] @ 4-byte Spill
+ ldr r2, [r1, #72]
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r2, [r1, #76]
+ str r2, [sp, #36] @ 4-byte Spill
+ ldr r2, [r1, #80]
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [r1, #84]
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [r1, #88]
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r2, [r1, #92]
+ str r2, [sp, #52] @ 4-byte Spill
+ ldm r10, {r4, r5, r6, r8, r9, r10}
+ ldr r2, [r1, #56]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldm lr, {r1, r2, r12, lr}
+ ldr r11, [sp, #104] @ 4-byte Reload
+ ldr r7, [sp, #100] @ 4-byte Reload
+ str r11, [r0]
+ str r7, [r0, #4]
+ ldr r7, [sp, #96] @ 4-byte Reload
+ str r7, [r0, #8]
+ ldr r7, [sp, #8] @ 4-byte Reload
+ adcs r1, r7, r1
+ ldr r7, [sp, #68] @ 4-byte Reload
+ str r7, [r0, #12]
+ ldr r7, [sp, #12] @ 4-byte Reload
+ str r1, [r0, #16]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r2, r7, r2
+ ldr r7, [sp] @ 4-byte Reload
+ str r2, [r0, #20]
+ adcs r1, r1, r12
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [r0, #24]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r2, r2, lr
+ str r2, [r0, #28]
+ adcs r1, r1, r4
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r1, [r0, #32]
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r2, r2, r5
+ str r2, [r0, #36]
+ adcs r1, r1, r6
+ ldr r2, [sp, #72] @ 4-byte Reload
+ str r1, [r0, #40]
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r2, r2, r8
+ str r2, [r0, #44]
+ adcs r1, r1, r9
+ ldr r2, [sp, #80] @ 4-byte Reload
+ str r1, [r0, #48]
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adcs r2, r2, r10
+ adcs r1, r1, r7
+ str r2, [r0, #52]
+ ldr r2, [sp, #92] @ 4-byte Reload
+ ldr r7, [sp, #4] @ 4-byte Reload
+ str r1, [r0, #56]
+ ldr r1, [sp, #144] @ 4-byte Reload
+ adcs r2, r2, r7
+ ldr r7, [sp, #24] @ 4-byte Reload
+ str r2, [r0, #60]
+ ldr r2, [sp, #148] @ 4-byte Reload
+ adcs r1, r1, r7
+ ldr r7, [sp, #28] @ 4-byte Reload
+ str r1, [r0, #64]
+ ldr r1, [sp, #152] @ 4-byte Reload
+ adcs r12, r2, r7
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r12, [sp, #96] @ 4-byte Spill
+ adcs r9, r1, r2
+ ldr r1, [sp, #156] @ 4-byte Reload
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r9, [sp, #100] @ 4-byte Spill
+ adcs r8, r1, r2
+ ldr r1, [sp, #160] @ 4-byte Reload
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r8, [sp, #104] @ 4-byte Spill
+ adcs r4, r1, r2
+ ldr r1, [sp, #168] @ 4-byte Reload
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r4, [sp, #144] @ 4-byte Spill
+ adcs r1, r1, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r1, [sp, #168] @ 4-byte Spill
+ ldr r1, [sp, #164] @ 4-byte Reload
+ adcs lr, r1, r2
+ ldr r1, [sp, #172] @ 4-byte Reload
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str lr, [sp, #92] @ 4-byte Spill
+ adcs r1, r1, r2
+ ldr r2, [sp, #108] @ 4-byte Reload
+ str r1, [sp, #172] @ 4-byte Spill
+ ldr r1, [sp, #176] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #84] @ 4-byte Reload
+ str r1, [sp, #176] @ 4-byte Spill
+ ldr r1, [sp, #180] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #112] @ 4-byte Reload
+ str r1, [sp, #180] @ 4-byte Spill
+ ldr r1, [sp, #184] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #116] @ 4-byte Reload
+ str r1, [sp, #184] @ 4-byte Spill
+ ldr r1, [sp, #188] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #120] @ 4-byte Reload
+ str r1, [sp, #188] @ 4-byte Spill
+ ldr r1, [sp, #192] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #124] @ 4-byte Reload
+ str r1, [sp, #192] @ 4-byte Spill
+ ldr r1, [sp, #196] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #128] @ 4-byte Reload
+ str r1, [sp, #196] @ 4-byte Spill
+ ldr r1, [sp, #200] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #132] @ 4-byte Reload
+ str r1, [sp, #200] @ 4-byte Spill
+ ldr r1, [sp, #204] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #136] @ 4-byte Reload
+ str r1, [sp, #204] @ 4-byte Spill
+ ldr r1, [sp, #208] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #140] @ 4-byte Reload
+ str r1, [sp, #208] @ 4-byte Spill
+ ldr r1, [sp, #212] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #212] @ 4-byte Spill
+ mov r1, #0
+ adc r1, r1, #0
+ str r1, [sp, #140] @ 4-byte Spill
+ ldm r3, {r2, r7}
+ ldr r1, [r3, #64]
+ ldr r6, [r3, #8]
+ ldr r5, [r3, #12]
+ ldr r10, [r3, #36]
+ ldr r11, [r3, #40]
+ str r1, [sp, #164] @ 4-byte Spill
+ ldr r1, [r3, #44]
+ subs r12, r12, r2
+ sbcs r7, r9, r7
+ sbcs r6, r8, r6
+ add r8, r3, #20
+ sbcs r9, r4, r5
+ str r1, [sp, #136] @ 4-byte Spill
+ ldr r1, [r3, #48]
+ str r1, [sp, #148] @ 4-byte Spill
+ ldr r1, [r3, #52]
+ str r1, [sp, #152] @ 4-byte Spill
+ ldr r1, [r3, #56]
+ str r1, [sp, #156] @ 4-byte Spill
+ ldr r1, [r3, #60]
+ str r1, [sp, #160] @ 4-byte Spill
+ ldm r8, {r1, r4, r5, r8}
+ ldr r3, [r3, #16]
+ ldr r2, [sp, #168] @ 4-byte Reload
+ sbcs r2, r2, r3
+ sbcs r3, lr, r1
+ ldr r1, [sp, #172] @ 4-byte Reload
+ sbcs lr, r1, r4
+ ldr r1, [sp, #176] @ 4-byte Reload
+ sbcs r4, r1, r5
+ ldr r1, [sp, #180] @ 4-byte Reload
+ ldr r5, [sp, #136] @ 4-byte Reload
+ sbcs r8, r1, r8
+ ldr r1, [sp, #184] @ 4-byte Reload
+ sbcs r10, r1, r10
+ ldr r1, [sp, #188] @ 4-byte Reload
+ sbcs r11, r1, r11
+ ldr r1, [sp, #192] @ 4-byte Reload
+ sbcs r1, r1, r5
+ ldr r5, [sp, #148] @ 4-byte Reload
+ str r1, [sp, #136] @ 4-byte Spill
+ ldr r1, [sp, #196] @ 4-byte Reload
+ sbcs r1, r1, r5
+ ldr r5, [sp, #152] @ 4-byte Reload
+ str r1, [sp, #148] @ 4-byte Spill
+ ldr r1, [sp, #200] @ 4-byte Reload
+ sbcs r1, r1, r5
+ ldr r5, [sp, #156] @ 4-byte Reload
+ str r1, [sp, #152] @ 4-byte Spill
+ ldr r1, [sp, #204] @ 4-byte Reload
+ sbcs r1, r1, r5
+ ldr r5, [sp, #160] @ 4-byte Reload
+ str r1, [sp, #156] @ 4-byte Spill
+ ldr r1, [sp, #208] @ 4-byte Reload
+ sbcs r1, r1, r5
+ ldr r5, [sp, #164] @ 4-byte Reload
+ str r1, [sp, #160] @ 4-byte Spill
+ ldr r1, [sp, #212] @ 4-byte Reload
+ sbcs r1, r1, r5
+ ldr r5, [sp, #96] @ 4-byte Reload
+ str r1, [sp, #164] @ 4-byte Spill
+ ldr r1, [sp, #140] @ 4-byte Reload
+ sbc r1, r1, #0
+ ands r1, r1, #1
+ movne r12, r5
+ ldr r5, [sp, #100] @ 4-byte Reload
+ str r12, [r0, #68]
+ movne r7, r5
+ str r7, [r0, #72]
+ ldr r7, [sp, #104] @ 4-byte Reload
+ movne r6, r7
+ ldr r7, [sp, #144] @ 4-byte Reload
+ cmp r1, #0
+ str r6, [r0, #76]
+ movne r9, r7
+ ldr r7, [sp, #168] @ 4-byte Reload
+ str r9, [r0, #80]
+ movne r2, r7
+ str r2, [r0, #84]
+ ldr r2, [sp, #92] @ 4-byte Reload
+ movne r3, r2
+ ldr r2, [sp, #172] @ 4-byte Reload
+ cmp r1, #0
+ str r3, [r0, #88]
+ ldr r3, [sp, #136] @ 4-byte Reload
+ movne lr, r2
+ ldr r2, [sp, #176] @ 4-byte Reload
+ str lr, [r0, #92]
+ movne r4, r2
+ ldr r2, [sp, #180] @ 4-byte Reload
+ str r4, [r0, #96]
+ movne r8, r2
+ ldr r2, [sp, #184] @ 4-byte Reload
+ cmp r1, #0
+ str r8, [r0, #100]
+ movne r10, r2
+ ldr r2, [sp, #188] @ 4-byte Reload
+ str r10, [r0, #104]
+ movne r11, r2
+ ldr r2, [sp, #192] @ 4-byte Reload
+ str r11, [r0, #108]
+ movne r3, r2
+ ldr r2, [sp, #196] @ 4-byte Reload
+ cmp r1, #0
+ str r3, [r0, #112]
+ ldr r3, [sp, #148] @ 4-byte Reload
+ movne r3, r2
+ ldr r2, [sp, #200] @ 4-byte Reload
+ str r3, [r0, #116]
+ ldr r3, [sp, #152] @ 4-byte Reload
+ movne r3, r2
+ ldr r2, [sp, #204] @ 4-byte Reload
+ str r3, [r0, #120]
+ ldr r3, [sp, #156] @ 4-byte Reload
+ movne r3, r2
+ cmp r1, #0
+ ldr r1, [sp, #208] @ 4-byte Reload
+ ldr r2, [sp, #160] @ 4-byte Reload
+ str r3, [r0, #124]
+ ldr r3, [sp, #164] @ 4-byte Reload
+ movne r2, r1
+ ldr r1, [sp, #212] @ 4-byte Reload
+ str r2, [r0, #128]
+ movne r3, r1
+ str r3, [r0, #132]
+ add sp, sp, #216
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end268:
+ .size mcl_fpDbl_add17L, .Lfunc_end268-mcl_fpDbl_add17L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_sub17L
+ .align 2
+ .type mcl_fpDbl_sub17L,%function
+mcl_fpDbl_sub17L: @ @mcl_fpDbl_sub17L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #216
+ sub sp, sp, #216
+ ldr r7, [r2, #128]
+ add r10, r1, #32
+ str r7, [sp, #208] @ 4-byte Spill
+ ldr r7, [r2, #132]
+ str r7, [sp, #212] @ 4-byte Spill
+ ldr r7, [r2, #96]
+ str r7, [sp, #188] @ 4-byte Spill
+ ldr r7, [r2, #104]
+ str r7, [sp, #164] @ 4-byte Spill
+ ldr r7, [r2, #108]
+ str r7, [sp, #168] @ 4-byte Spill
+ ldr r7, [r2, #112]
+ str r7, [sp, #192] @ 4-byte Spill
+ ldr r7, [r2, #116]
+ str r7, [sp, #196] @ 4-byte Spill
+ ldr r7, [r2, #120]
+ str r7, [sp, #200] @ 4-byte Spill
+ ldr r7, [r2, #124]
+ str r7, [sp, #204] @ 4-byte Spill
+ ldr r7, [r2, #100]
+ str r7, [sp, #156] @ 4-byte Spill
+ ldr r7, [r2, #64]
+ str r7, [sp, #144] @ 4-byte Spill
+ ldr r7, [r2, #68]
+ str r7, [sp, #148] @ 4-byte Spill
+ ldr r7, [r2, #72]
+ str r7, [sp, #152] @ 4-byte Spill
+ ldr r7, [r2, #76]
+ str r7, [sp, #160] @ 4-byte Spill
+ ldr r7, [r2, #80]
+ str r7, [sp, #172] @ 4-byte Spill
+ ldr r7, [r2, #84]
+ str r7, [sp, #176] @ 4-byte Spill
+ ldr r7, [r2, #88]
+ str r7, [sp, #180] @ 4-byte Spill
+ ldr r7, [r2, #92]
+ str r7, [sp, #184] @ 4-byte Spill
+ ldr r7, [r2, #60]
+ str r7, [sp, #140] @ 4-byte Spill
+ ldm r2, {r6, r8, r12, lr}
+ ldm r1, {r4, r5, r7, r9}
+ subs r4, r4, r6
+ str r4, [sp, #36] @ 4-byte Spill
+ ldr r4, [r2, #56]
+ str r4, [sp, #128] @ 4-byte Spill
+ sbcs r4, r5, r8
+ sbcs r7, r7, r12
+ str r4, [sp, #32] @ 4-byte Spill
+ ldr r4, [r2, #52]
+ str r7, [sp, #28] @ 4-byte Spill
+ ldr r7, [r2, #48]
+ str r4, [sp, #96] @ 4-byte Spill
+ str r7, [sp, #88] @ 4-byte Spill
+ sbcs r7, r9, lr
+ add lr, r1, #16
+ str r7, [sp, #24] @ 4-byte Spill
+ ldr r7, [r2, #44]
+ str r7, [sp, #84] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ str r7, [sp, #80] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r7, [sp, #76] @ 4-byte Spill
+ ldr r7, [r2, #32]
+ str r7, [sp, #40] @ 4-byte Spill
+ ldr r7, [r2, #28]
+ str r7, [sp, #20] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ ldr r2, [r2, #16]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #128]
+ str r7, [sp, #12] @ 4-byte Spill
+ str r2, [sp, #132] @ 4-byte Spill
+ ldr r2, [r1, #132]
+ str r2, [sp, #136] @ 4-byte Spill
+ ldr r2, [r1, #96]
+ str r2, [sp, #100] @ 4-byte Spill
+ ldr r2, [r1, #104]
+ str r2, [sp, #104] @ 4-byte Spill
+ ldr r2, [r1, #108]
+ str r2, [sp, #108] @ 4-byte Spill
+ ldr r2, [r1, #112]
+ str r2, [sp, #112] @ 4-byte Spill
+ ldr r2, [r1, #116]
+ str r2, [sp, #116] @ 4-byte Spill
+ ldr r2, [r1, #120]
+ str r2, [sp, #120] @ 4-byte Spill
+ ldr r2, [r1, #124]
+ str r2, [sp, #124] @ 4-byte Spill
+ ldr r2, [r1, #100]
+ str r2, [sp, #92] @ 4-byte Spill
+ ldr r2, [r1, #64]
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [r1, #68]
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r2, [r1, #72]
+ str r2, [sp, #52] @ 4-byte Spill
+ ldr r2, [r1, #76]
+ str r2, [sp, #56] @ 4-byte Spill
+ ldr r2, [r1, #80]
+ str r2, [sp, #60] @ 4-byte Spill
+ ldr r2, [r1, #84]
+ str r2, [sp, #64] @ 4-byte Spill
+ ldr r2, [r1, #88]
+ str r2, [sp, #68] @ 4-byte Spill
+ ldr r2, [r1, #92]
+ str r2, [sp, #72] @ 4-byte Spill
+ ldm r10, {r4, r5, r6, r8, r9, r10}
+ ldr r2, [r1, #56]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldm lr, {r1, r2, r12, lr}
+ ldr r11, [sp, #36] @ 4-byte Reload
+ ldr r7, [sp, #32] @ 4-byte Reload
+ str r11, [r0]
+ str r7, [r0, #4]
+ ldr r7, [sp, #28] @ 4-byte Reload
+ str r7, [r0, #8]
+ ldr r7, [sp, #8] @ 4-byte Reload
+ sbcs r1, r1, r7
+ ldr r7, [sp, #24] @ 4-byte Reload
+ str r7, [r0, #12]
+ ldr r7, [sp, #12] @ 4-byte Reload
+ str r1, [r0, #16]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ sbcs r2, r2, r7
+ ldr r7, [sp] @ 4-byte Reload
+ str r2, [r0, #20]
+ sbcs r1, r12, r1
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [r0, #24]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ sbcs r2, lr, r2
+ add lr, r3, #8
+ str r2, [r0, #28]
+ sbcs r1, r4, r1
+ ldr r2, [sp, #76] @ 4-byte Reload
+ str r1, [r0, #32]
+ ldr r1, [sp, #80] @ 4-byte Reload
+ sbcs r2, r5, r2
+ str r2, [r0, #36]
+ sbcs r1, r6, r1
+ ldr r2, [sp, #84] @ 4-byte Reload
+ str r1, [r0, #40]
+ ldr r1, [sp, #88] @ 4-byte Reload
+ sbcs r2, r8, r2
+ sbcs r1, r9, r1
+ str r2, [r0, #44]
+ ldr r2, [sp, #96] @ 4-byte Reload
+ add r9, r3, #20
+ str r1, [r0, #48]
+ ldr r1, [sp, #128] @ 4-byte Reload
+ sbcs r2, r10, r2
+ sbcs r1, r7, r1
+ str r2, [r0, #52]
+ ldr r2, [sp, #140] @ 4-byte Reload
+ ldr r7, [sp, #4] @ 4-byte Reload
+ str r1, [r0, #56]
+ ldr r1, [sp, #144] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #44] @ 4-byte Reload
+ str r2, [r0, #60]
+ ldr r2, [sp, #148] @ 4-byte Reload
+ sbcs r1, r7, r1
+ ldr r7, [sp, #48] @ 4-byte Reload
+ str r1, [r0, #64]
+ ldr r1, [sp, #152] @ 4-byte Reload
+ sbcs r5, r7, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ ldr r7, [sp, #100] @ 4-byte Reload
+ sbcs r10, r2, r1
+ ldr r1, [sp, #160] @ 4-byte Reload
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r10, [sp, #96] @ 4-byte Spill
+ sbcs r1, r2, r1
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r1, [sp, #160] @ 4-byte Spill
+ ldr r1, [sp, #172] @ 4-byte Reload
+ sbcs r1, r2, r1
+ ldr r2, [sp, #64] @ 4-byte Reload
+ str r1, [sp, #172] @ 4-byte Spill
+ ldr r1, [sp, #176] @ 4-byte Reload
+ sbcs r1, r2, r1
+ ldr r2, [sp, #68] @ 4-byte Reload
+ str r1, [sp, #176] @ 4-byte Spill
+ ldr r1, [sp, #180] @ 4-byte Reload
+ sbcs r1, r2, r1
+ ldr r2, [sp, #72] @ 4-byte Reload
+ str r1, [sp, #180] @ 4-byte Spill
+ ldr r1, [sp, #184] @ 4-byte Reload
+ sbcs r1, r2, r1
+ ldr r2, [sp, #188] @ 4-byte Reload
+ str r1, [sp, #184] @ 4-byte Spill
+ mov r1, #0
+ sbcs r2, r7, r2
+ ldr r7, [sp, #92] @ 4-byte Reload
+ str r2, [sp, #188] @ 4-byte Spill
+ ldr r2, [sp, #156] @ 4-byte Reload
+ sbcs r11, r7, r2
+ ldr r2, [sp, #164] @ 4-byte Reload
+ ldr r7, [sp, #104] @ 4-byte Reload
+ str r11, [sp, #128] @ 4-byte Spill
+ sbcs r2, r7, r2
+ ldr r7, [sp, #108] @ 4-byte Reload
+ str r2, [sp, #164] @ 4-byte Spill
+ ldr r2, [sp, #168] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #112] @ 4-byte Reload
+ str r2, [sp, #168] @ 4-byte Spill
+ ldr r2, [sp, #192] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #116] @ 4-byte Reload
+ str r2, [sp, #192] @ 4-byte Spill
+ ldr r2, [sp, #196] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #120] @ 4-byte Reload
+ str r2, [sp, #196] @ 4-byte Spill
+ ldr r2, [sp, #200] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #124] @ 4-byte Reload
+ str r2, [sp, #200] @ 4-byte Spill
+ ldr r2, [sp, #204] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #132] @ 4-byte Reload
+ str r2, [sp, #204] @ 4-byte Spill
+ ldr r2, [sp, #208] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #136] @ 4-byte Reload
+ str r2, [sp, #208] @ 4-byte Spill
+ ldr r2, [sp, #212] @ 4-byte Reload
+ sbcs r2, r7, r2
+ sbc r1, r1, #0
+ str r2, [sp, #212] @ 4-byte Spill
+ str r1, [sp, #124] @ 4-byte Spill
+ ldr r1, [r3, #64]
+ str r1, [sp, #156] @ 4-byte Spill
+ ldr r1, [r3, #36]
+ str r1, [sp, #120] @ 4-byte Spill
+ ldr r1, [r3, #40]
+ str r1, [sp, #132] @ 4-byte Spill
+ ldr r1, [r3, #44]
+ str r1, [sp, #136] @ 4-byte Spill
+ ldr r1, [r3, #48]
+ str r1, [sp, #140] @ 4-byte Spill
+ ldr r1, [r3, #52]
+ str r1, [sp, #144] @ 4-byte Spill
+ ldr r1, [r3, #56]
+ str r1, [sp, #148] @ 4-byte Spill
+ ldr r1, [r3, #60]
+ str r1, [sp, #152] @ 4-byte Spill
+ ldr r1, [r3, #32]
+ str r1, [sp, #116] @ 4-byte Spill
+ ldm r3, {r2, r7}
+ ldm lr, {r6, r12, lr}
+ ldm r9, {r4, r8, r9}
+ ldr r3, [sp, #160] @ 4-byte Reload
+ adds r1, r5, r2
+ adcs r2, r10, r7
+ ldr r7, [sp, #164] @ 4-byte Reload
+ adcs r3, r3, r6
+ ldr r6, [sp, #172] @ 4-byte Reload
+ adcs r12, r6, r12
+ ldr r6, [sp, #176] @ 4-byte Reload
+ adcs lr, r6, lr
+ ldr r6, [sp, #180] @ 4-byte Reload
+ adcs r4, r6, r4
+ ldr r6, [sp, #184] @ 4-byte Reload
+ adcs r8, r6, r8
+ ldr r6, [sp, #188] @ 4-byte Reload
+ adcs r9, r6, r9
+ ldr r6, [sp, #116] @ 4-byte Reload
+ adcs r10, r11, r6
+ ldr r6, [sp, #120] @ 4-byte Reload
+ ldr r11, [sp, #156] @ 4-byte Reload
+ adcs r7, r7, r6
+ ldr r6, [sp, #132] @ 4-byte Reload
+ str r7, [sp, #120] @ 4-byte Spill
+ ldr r7, [sp, #168] @ 4-byte Reload
+ adcs r7, r7, r6
+ ldr r6, [sp, #136] @ 4-byte Reload
+ str r7, [sp, #132] @ 4-byte Spill
+ ldr r7, [sp, #192] @ 4-byte Reload
+ adcs r7, r7, r6
+ ldr r6, [sp, #140] @ 4-byte Reload
+ str r7, [sp, #136] @ 4-byte Spill
+ ldr r7, [sp, #196] @ 4-byte Reload
+ adcs r7, r7, r6
+ ldr r6, [sp, #144] @ 4-byte Reload
+ str r7, [sp, #140] @ 4-byte Spill
+ ldr r7, [sp, #200] @ 4-byte Reload
+ adcs r7, r7, r6
+ ldr r6, [sp, #148] @ 4-byte Reload
+ str r7, [sp, #144] @ 4-byte Spill
+ ldr r7, [sp, #204] @ 4-byte Reload
+ adcs r7, r7, r6
+ ldr r6, [sp, #152] @ 4-byte Reload
+ str r7, [sp, #148] @ 4-byte Spill
+ ldr r7, [sp, #208] @ 4-byte Reload
+ adcs r7, r7, r6
+ str r7, [sp, #152] @ 4-byte Spill
+ ldr r7, [sp, #212] @ 4-byte Reload
+ adc r7, r7, r11
+ str r7, [sp, #156] @ 4-byte Spill
+ ldr r7, [sp, #124] @ 4-byte Reload
+ ands r7, r7, #1
+ moveq r1, r5
+ str r1, [r0, #68]
+ ldr r1, [sp, #96] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #160] @ 4-byte Reload
+ str r2, [r0, #72]
+ ldr r2, [sp, #120] @ 4-byte Reload
+ moveq r3, r1
+ ldr r1, [sp, #172] @ 4-byte Reload
+ cmp r7, #0
+ str r3, [r0, #76]
+ ldr r3, [sp, #156] @ 4-byte Reload
+ moveq r12, r1
+ ldr r1, [sp, #176] @ 4-byte Reload
+ str r12, [r0, #80]
+ moveq lr, r1
+ ldr r1, [sp, #180] @ 4-byte Reload
+ str lr, [r0, #84]
+ moveq r4, r1
+ ldr r1, [sp, #184] @ 4-byte Reload
+ cmp r7, #0
+ str r4, [r0, #88]
+ moveq r8, r1
+ ldr r1, [sp, #188] @ 4-byte Reload
+ str r8, [r0, #92]
+ moveq r9, r1
+ ldr r1, [sp, #128] @ 4-byte Reload
+ str r9, [r0, #96]
+ moveq r10, r1
+ ldr r1, [sp, #164] @ 4-byte Reload
+ cmp r7, #0
+ str r10, [r0, #100]
+ moveq r2, r1
+ ldr r1, [sp, #168] @ 4-byte Reload
+ str r2, [r0, #104]
+ ldr r2, [sp, #132] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #192] @ 4-byte Reload
+ str r2, [r0, #108]
+ ldr r2, [sp, #136] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #196] @ 4-byte Reload
+ cmp r7, #0
+ str r2, [r0, #112]
+ ldr r2, [sp, #140] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #200] @ 4-byte Reload
+ str r2, [r0, #116]
+ ldr r2, [sp, #144] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #204] @ 4-byte Reload
+ str r2, [r0, #120]
+ ldr r2, [sp, #148] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #208] @ 4-byte Reload
+ cmp r7, #0
+ str r2, [r0, #124]
+ ldr r2, [sp, #152] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #212] @ 4-byte Reload
+ str r2, [r0, #128]
+ moveq r3, r1
+ str r3, [r0, #132]
+ add sp, sp, #216
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end269:
+ .size mcl_fpDbl_sub17L, .Lfunc_end269-mcl_fpDbl_sub17L
+ .cantunwind
+ .fnend
+
+
+ .section ".note.GNU-stack","",%progbits
+ .eabi_attribute 30, 2 @ Tag_ABI_optimization_goals
diff --git a/vendor/github.com/tangerine-network/mcl/src/asm/low_arm.s b/vendor/github.com/tangerine-network/mcl/src/asm/low_arm.s
new file mode 100644
index 000000000..1ed2a1233
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/src/asm/low_arm.s
@@ -0,0 +1,154 @@
+ .arch armv7-a
+
+ .align 2
+ .global mcl_fp_addPre64
+mcl_fp_addPre64:
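+@ 64-bit (2-limb) add: r0 = destination, r1/r2 = source operands.
+@ The sum is stored at r0; the final carry is not returned.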
+ ldm r1, {r3, r12}
+ ldm r2, {r1, r2}
+ adds r1, r1, r3
+ adc r2, r2, r12
+ stm r0, {r1, r2}
+ bx lr
+
+
+ .align 2
+ .global mcl_fp_addPre96
+mcl_fp_addPre96:
+ push {r4, lr}
+ ldm r1, {r1, r3, r12}
+ ldm r2, {r2, r4, lr}
+ adds r1, r1, r2
+ adcs r3, r3, r4
+ adc r12, r12, lr
+ stm r0, {r1, r3, r12}
+ pop {r4, lr}
+ bx lr
+
+@ slower variant of mcl_fp_addPre96, kept for reference
+ .align 2
+ .global mcl_fp_addPre96_2
+mcl_fp_addPre96_2:
+ ldr r3, [r1], #4
+ ldr r12, [r2], #4
+ adds r3, r3, r12
+ str r3, [r0], #4
+
+ ldm r1, {r1, r3}
+ ldm r2, {r2, r12}
+ adcs r1, r1, r2
+ adcs r3, r3, r12
+ stm r0, {r1, r3}
+ bx lr
+
+ .globl mcl_fp_addPre128
+ .align 2
+mcl_fp_addPre128:
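+@ 128-bit (4-limb) add, two limbs per step using write-back (r1!/r2!/r0!) addressing.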
+ push {r4, lr}
+ ldm r1!, {r3, r4}
+ ldm r2!, {r12, lr}
+ adds r3, r3, r12
+ adcs r4, r4, lr
+ stm r0!, {r3, r4}
+ ldm r1, {r3, r4}
+ ldm r2, {r12, lr}
+ adcs r3, r3, r12
+ adcs r4, r4, lr
+ stm r0, {r3, r4}
+ pop {r4, lr}
+ bx lr
+
+ @ performs about the same as mcl_fp_addPre128
+ .globl mcl_fp_addPre128_2
+ .align 2
+mcl_fp_addPre128_2:
+ push {r4, r5, r6, lr}
+ ldm r1, {r1, r3, r4, r5}
+ ldm r2, {r2, r6, r12, lr}
+ adds r1, r1, r2
+ adcs r3, r3, r6
+ adcs r4, r4, r12
+ adcs r5, r5, lr
+ stm r0, {r1, r3, r4, r5}
+ pop {r4, r5, r6, lr}
+ bx lr
+
+ .globl mcl_fp_addPre160
+ .align 2
+mcl_fp_addPre160:
+ push {r4, lr}
+ ldm r1!, {r3, r4}
+ ldm r2!, {r12, lr}
+ adds r3, r3, r12
+ adcs r4, r4, lr
+ stm r0!, {r3, r4}
+ ldm r1, {r1, r3, r4}
+ ldm r2, {r2, r12, lr}
+ adcs r1, r1, r2
+ adcs r3, r3, r12
+ adcs r4, r4, lr
+ stm r0, {r1, r3, r4}
+ pop {r4, lr}
+ bx lr
+
+ .globl mcl_fp_addPre192
+ .align 2
+mcl_fp_addPre192:
+ push {r4, r5, r6, lr}
+ ldm r1!, {r3, r4, r5}
+ ldm r2!, {r6, r12, lr}
+ adds r3, r3, r6
+ adcs r4, r4, r12
+ adcs r5, r5, lr
+ stm r0!, {r3, r4, r5}
+
+ ldm r1, {r3, r4, r5}
+ ldm r2, {r6, r12, lr}
+ adcs r3, r3, r6
+ adcs r4, r4, r12
+ adcs r5, r5, lr
+ stm r0, {r3, r4, r5}
+ pop {r4, r5, r6, lr}
+ bx lr
+
+ .globl mcl_fp_addPre224
+ .align 2
+mcl_fp_addPre224:
+ push {r4, r5, r6, lr}
+ ldm r1!, {r3, r4, r5}
+ ldm r2!, {r6, r12, lr}
+ adds r3, r3, r6
+ adcs r4, r4, r12
+ adcs r5, r5, lr
+ stm r0!, {r3, r4, r5}
+
+ ldm r1, {r1, r3, r4, r5}
+ ldm r2, {r2, r6, r12, lr}
+ adcs r1, r1, r2
+ adcs r3, r3, r6
+ adcs r4, r4, r12
+ adcs r5, r5, lr
+ stm r0, {r1, r3, r4, r5}
+ pop {r4, r5, r6, lr}
+ bx lr
+
+ .globl mcl_fp_addPre256
+ .align 2
+mcl_fp_addPre256:
+ push {r4, r5, r6, r7, r8, lr}
+ ldm r1!, {r3, r4, r5, r6}
+ ldm r2!, {r7, r8, r12, lr}
+ adds r3, r3, r7
+ adcs r4, r4, r8
+ adcs r5, r5, r12
+ adcs r6, r6, lr
+ stm r0!, {r3, r4, r5, r6}
+
+ ldm r1, {r3, r4, r5, r6}
+ ldm r2, {r7, r8, r12, lr}
+ adcs r3, r3, r7
+ adcs r4, r4, r8
+ adcs r5, r5, r12
+ adcs r6, r6, lr
+ stm r0, {r3, r4, r5, r6}
+ pop {r4, r5, r6, r7, r8, lr}
+ bx lr
diff --git a/vendor/github.com/tangerine-network/mcl/src/asm/low_x86-64.asm b/vendor/github.com/tangerine-network/mcl/src/asm/low_x86-64.asm
new file mode 100644
index 000000000..b09b9dcd3
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/src/asm/low_x86-64.asm
@@ -0,0 +1,153 @@
+
+; first four integer argument registers:
+;   Linux/SysV: rdi rsi rdx rcx
+;   Win64:      rcx rdx r8  r9
+
+%ifdef _WIN64
+ %define p1org rcx
+ %define p2org rdx
+ %define p3org r8
+ %define p4org r9
+%else
+ %define p1org rdi
+ %define p2org rsi
+ %define p3org rdx
+ %define p4org rcx
+%endif
+
+%imacro proc 1
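+; proc <name>: export <name> as a global symbol and start its body.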
+global %1
+%1:
+%endmacro
+
+segment .text
+
+%imacro addPre 1
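+; addPre N: add (N+1) 64-bit limbs of [p3org] to [p2org], store the result at
+; [p1org], propagate the carry between limbs, and return the final carry in eax.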
+ mov rax, [p2org]
+ add rax, [p3org]
+ mov [p1org], rax
+%assign i 1
+%rep %1
+ mov rax, [p2org + i * 8]
+ adc rax, [p3org + i * 8]
+ mov [p1org + i * 8], rax
+%assign i (i+1)
+%endrep
+ setc al
+ movzx eax, al
+ ret
+%endmacro
+
+%imacro subNC 1
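+; subNC N: subtract (N+1) 64-bit limbs of [p3org] from [p2org], store the result
+; at [p1org], propagate the borrow between limbs, and return the final borrow in eax.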
+ mov rax, [p2org]
+ sub rax, [p3org]
+ mov [p1org], rax
+%assign i 1
+%rep %1
+ mov rax, [p2org + i * 8]
+ sbb rax, [p3org + i * 8]
+ mov [p1org + i * 8], rax
+%assign i (i+1)
+%endrep
+ setc al
+ movzx eax, al
+ ret
+%endmacro
+
+proc mcl_fp_addPre64
+ addPre 0
+proc mcl_fp_addPre128
+ addPre 1
+proc mcl_fp_addPre192
+ addPre 2
+proc mcl_fp_addPre256
+ addPre 3
+proc mcl_fp_addPre320
+ addPre 4
+proc mcl_fp_addPre384
+ addPre 5
+proc mcl_fp_addPre448
+ addPre 6
+proc mcl_fp_addPre512
+ addPre 7
+proc mcl_fp_addPre576
+ addPre 8
+proc mcl_fp_addPre640
+ addPre 9
+proc mcl_fp_addPre704
+ addPre 10
+proc mcl_fp_addPre768
+ addPre 11
+proc mcl_fp_addPre832
+ addPre 12
+proc mcl_fp_addPre896
+ addPre 13
+proc mcl_fp_addPre960
+ addPre 14
+proc mcl_fp_addPre1024
+ addPre 15
+proc mcl_fp_addPre1088
+ addPre 16
+proc mcl_fp_addPre1152
+ addPre 17
+proc mcl_fp_addPre1216
+ addPre 18
+proc mcl_fp_addPre1280
+ addPre 19
+proc mcl_fp_addPre1344
+ addPre 20
+proc mcl_fp_addPre1408
+ addPre 21
+proc mcl_fp_addPre1472
+ addPre 22
+proc mcl_fp_addPre1536
+ addPre 23
+
+proc mcl_fp_subNC64
+ subNC 0
+proc mcl_fp_subNC128
+ subNC 1
+proc mcl_fp_subNC192
+ subNC 2
+proc mcl_fp_subNC256
+ subNC 3
+proc mcl_fp_subNC320
+ subNC 4
+proc mcl_fp_subNC384
+ subNC 5
+proc mcl_fp_subNC448
+ subNC 6
+proc mcl_fp_subNC512
+ subNC 7
+proc mcl_fp_subNC576
+ subNC 8
+proc mcl_fp_subNC640
+ subNC 9
+proc mcl_fp_subNC704
+ subNC 10
+proc mcl_fp_subNC768
+ subNC 11
+proc mcl_fp_subNC832
+ subNC 12
+proc mcl_fp_subNC896
+ subNC 13
+proc mcl_fp_subNC960
+ subNC 14
+proc mcl_fp_subNC1024
+ subNC 15
+proc mcl_fp_subNC1088
+ subNC 16
+proc mcl_fp_subNC1152
+ subNC 17
+proc mcl_fp_subNC1216
+ subNC 18
+proc mcl_fp_subNC1280
+ subNC 19
+proc mcl_fp_subNC1344
+ subNC 20
+proc mcl_fp_subNC1408
+ subNC 21
+proc mcl_fp_subNC1472
+ subNC 22
+proc mcl_fp_subNC1536
+ subNC 23
+
diff --git a/vendor/github.com/tangerine-network/mcl/src/asm/low_x86.asm b/vendor/github.com/tangerine-network/mcl/src/asm/low_x86.asm
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/src/asm/low_x86.asm
diff --git a/vendor/github.com/tangerine-network/mcl/src/asm/x86-64.bmi2.s b/vendor/github.com/tangerine-network/mcl/src/asm/x86-64.bmi2.s
new file mode 100644
index 000000000..e12174ac6
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/src/asm/x86-64.bmi2.s
@@ -0,0 +1,14155 @@
+ .text
+ .file "<stdin>"
+ .globl makeNIST_P192Lbmi2
+ .align 16, 0x90
+ .type makeNIST_P192Lbmi2,@function
+makeNIST_P192Lbmi2: # @makeNIST_P192Lbmi2
+# BB#0:
+ movq $-1, %rax
+ movq $-2, %rdx
+ movq $-1, %rcx
+ retq
+.Lfunc_end0:
+ .size makeNIST_P192Lbmi2, .Lfunc_end0-makeNIST_P192Lbmi2
+
+ .globl mcl_fpDbl_mod_NIST_P192Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mod_NIST_P192Lbmi2,@function
+mcl_fpDbl_mod_NIST_P192Lbmi2: # @mcl_fpDbl_mod_NIST_P192Lbmi2
+# BB#0:
+ pushq %r14
+ pushq %rbx
+ movq 16(%rsi), %r10
+ movq 24(%rsi), %r8
+ movq 40(%rsi), %r9
+ movq 8(%rsi), %rax
+ addq %r9, %rax
+ adcq $0, %r10
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ movq 32(%rsi), %r11
+ movq (%rsi), %r14
+ addq %r8, %r14
+ adcq %r11, %rax
+ adcq %r9, %r10
+ adcq $0, %rcx
+ addq %r9, %r14
+ adcq %r8, %rax
+ adcq %r11, %r10
+ adcq $0, %rcx
+ addq %rcx, %r14
+ adcq %rax, %rcx
+ adcq $0, %r10
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %r14, %rsi
+ addq $1, %rsi
+ movq %rcx, %rdx
+ adcq $1, %rdx
+ movq %r10, %rbx
+ adcq $0, %rbx
+ adcq $-1, %rax
+ andl $1, %eax
+ cmovneq %r14, %rsi
+ movq %rsi, (%rdi)
+ testb %al, %al
+ cmovneq %rcx, %rdx
+ movq %rdx, 8(%rdi)
+ cmovneq %r10, %rbx
+ movq %rbx, 16(%rdi)
+ popq %rbx
+ popq %r14
+ retq
+.Lfunc_end1:
+ .size mcl_fpDbl_mod_NIST_P192Lbmi2, .Lfunc_end1-mcl_fpDbl_mod_NIST_P192Lbmi2
+
+ .globl mcl_fp_sqr_NIST_P192Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sqr_NIST_P192Lbmi2,@function
+mcl_fp_sqr_NIST_P192Lbmi2: # @mcl_fp_sqr_NIST_P192Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, -8(%rsp) # 8-byte Spill
+ movq 16(%rsi), %r8
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ movq %r8, %rdx
+ mulxq %rsi, %r14, %rbx
+ movq %rbx, -16(%rsp) # 8-byte Spill
+ movq %rsi, %rdx
+ mulxq %rsi, %r13, %r15
+ movq %rsi, %rdx
+ mulxq %rcx, %r12, %rsi
+ addq %rsi, %r13
+ adcq %r14, %r15
+ adcq $0, %rbx
+ movq %rcx, %rdx
+ mulxq %rcx, %r9, %rax
+ addq %r12, %rax
+ movq %r8, %rdx
+ mulxq %rcx, %rbp, %r11
+ adcq %rbp, %rsi
+ movq %r11, %r10
+ adcq $0, %r10
+ addq %r12, %rax
+ adcq %r13, %rsi
+ adcq %r15, %r10
+ adcq $0, %rbx
+ movq %r8, %rdx
+ mulxq %r8, %rcx, %rdi
+ addq %r14, %r11
+ adcq -16(%rsp), %rcx # 8-byte Folded Reload
+ adcq $0, %rdi
+ addq %rbp, %rsi
+ adcq %r10, %r11
+ adcq %rbx, %rcx
+ adcq $0, %rdi
+ addq %rdi, %rax
+ adcq $0, %rsi
+ sbbq %rdx, %rdx
+ andl $1, %edx
+ addq %r11, %r9
+ adcq %rcx, %rax
+ adcq %rdi, %rsi
+ adcq $0, %rdx
+ addq %rdi, %r9
+ adcq %r11, %rax
+ adcq %rcx, %rsi
+ adcq $0, %rdx
+ addq %rdx, %r9
+ adcq %rax, %rdx
+ adcq $0, %rsi
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %r9, %rcx
+ addq $1, %rcx
+ movq %rdx, %rdi
+ adcq $1, %rdi
+ movq %rsi, %rbp
+ adcq $0, %rbp
+ adcq $-1, %rax
+ andl $1, %eax
+ cmovneq %r9, %rcx
+ movq -8(%rsp), %rbx # 8-byte Reload
+ movq %rcx, (%rbx)
+ testb %al, %al
+ cmovneq %rdx, %rdi
+ movq %rdi, 8(%rbx)
+ cmovneq %rsi, %rbp
+ movq %rbp, 16(%rbx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end2:
+ .size mcl_fp_sqr_NIST_P192Lbmi2, .Lfunc_end2-mcl_fp_sqr_NIST_P192Lbmi2
+
+ .globl mcl_fp_mulNIST_P192Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulNIST_P192Lbmi2,@function
+mcl_fp_mulNIST_P192Lbmi2: # @mcl_fp_mulNIST_P192Lbmi2
+# BB#0:
+ pushq %r14
+ pushq %rbx
+ subq $56, %rsp
+ movq %rdi, %r14
+ leaq 8(%rsp), %rdi
+ callq mcl_fpDbl_mulPre3Lbmi2@PLT
+ movq 24(%rsp), %r9
+ movq 32(%rsp), %r8
+ movq 48(%rsp), %rdi
+ movq 16(%rsp), %rbx
+ addq %rdi, %rbx
+ adcq $0, %r9
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ movq 40(%rsp), %rsi
+ movq 8(%rsp), %rdx
+ addq %r8, %rdx
+ adcq %rsi, %rbx
+ adcq %rdi, %r9
+ adcq $0, %rcx
+ addq %rdi, %rdx
+ adcq %r8, %rbx
+ adcq %rsi, %r9
+ adcq $0, %rcx
+ addq %rcx, %rdx
+ adcq %rbx, %rcx
+ adcq $0, %r9
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq %rdx, %rdi
+ addq $1, %rdi
+ movq %rcx, %rbx
+ adcq $1, %rbx
+ movq %r9, %rax
+ adcq $0, %rax
+ adcq $-1, %rsi
+ andl $1, %esi
+ cmovneq %rdx, %rdi
+ movq %rdi, (%r14)
+ testb %sil, %sil
+ cmovneq %rcx, %rbx
+ movq %rbx, 8(%r14)
+ cmovneq %r9, %rax
+ movq %rax, 16(%r14)
+ addq $56, %rsp
+ popq %rbx
+ popq %r14
+ retq
+.Lfunc_end3:
+ .size mcl_fp_mulNIST_P192Lbmi2, .Lfunc_end3-mcl_fp_mulNIST_P192Lbmi2
+
+ .globl mcl_fpDbl_mod_NIST_P521Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mod_NIST_P521Lbmi2,@function
+mcl_fpDbl_mod_NIST_P521Lbmi2: # @mcl_fpDbl_mod_NIST_P521Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 120(%rsi), %r9
+ movq 128(%rsi), %r14
+ movq %r14, %r8
+ shldq $55, %r9, %r8
+ movq 112(%rsi), %r10
+ shldq $55, %r10, %r9
+ movq 104(%rsi), %r11
+ shldq $55, %r11, %r10
+ movq 96(%rsi), %r15
+ shldq $55, %r15, %r11
+ movq 88(%rsi), %r12
+ shldq $55, %r12, %r15
+ movq 80(%rsi), %rcx
+ shldq $55, %rcx, %r12
+ movq 64(%rsi), %rbx
+ movq 72(%rsi), %rax
+ shldq $55, %rax, %rcx
+ shrq $9, %r14
+ shldq $55, %rbx, %rax
+ andl $511, %ebx # imm = 0x1FF
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rcx
+ adcq 16(%rsi), %r12
+ adcq 24(%rsi), %r15
+ adcq 32(%rsi), %r11
+ adcq 40(%rsi), %r10
+ adcq 48(%rsi), %r9
+ adcq 56(%rsi), %r8
+ adcq %r14, %rbx
+ movq %rbx, %rsi
+ shrq $9, %rsi
+ andl $1, %esi
+ addq %rax, %rsi
+ adcq $0, %rcx
+ adcq $0, %r12
+ adcq $0, %r15
+ adcq $0, %r11
+ adcq $0, %r10
+ adcq $0, %r9
+ adcq $0, %r8
+ adcq $0, %rbx
+ movq %rsi, %rax
+ andq %r12, %rax
+ andq %r15, %rax
+ andq %r11, %rax
+ andq %r10, %rax
+ andq %r9, %rax
+ andq %r8, %rax
+ movq %rbx, %rdx
+ orq $-512, %rdx # imm = 0xFFFFFFFFFFFFFE00
+ andq %rax, %rdx
+ andq %rcx, %rdx
+ cmpq $-1, %rdx
+ je .LBB4_1
+# BB#3: # %nonzero
+ movq %rsi, (%rdi)
+ movq %rcx, 8(%rdi)
+ movq %r12, 16(%rdi)
+ movq %r15, 24(%rdi)
+ movq %r11, 32(%rdi)
+ movq %r10, 40(%rdi)
+ movq %r9, 48(%rdi)
+ movq %r8, 56(%rdi)
+ andl $511, %ebx # imm = 0x1FF
+ movq %rbx, 64(%rdi)
+ jmp .LBB4_2
+.LBB4_1: # %zero
+ movq $0, 64(%rdi)
+ movq $0, 56(%rdi)
+ movq $0, 48(%rdi)
+ movq $0, 40(%rdi)
+ movq $0, 32(%rdi)
+ movq $0, 24(%rdi)
+ movq $0, 16(%rdi)
+ movq $0, 8(%rdi)
+ movq $0, (%rdi)
+.LBB4_2: # %zero
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end4:
+ .size mcl_fpDbl_mod_NIST_P521Lbmi2, .Lfunc_end4-mcl_fpDbl_mod_NIST_P521Lbmi2
+
+ .globl mcl_fp_mulUnitPre1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre1Lbmi2,@function
+mcl_fp_mulUnitPre1Lbmi2: # @mcl_fp_mulUnitPre1Lbmi2
+# BB#0:
+ mulxq (%rsi), %rcx, %rax
+ movq %rcx, (%rdi)
+ movq %rax, 8(%rdi)
+ retq
+.Lfunc_end5:
+ .size mcl_fp_mulUnitPre1Lbmi2, .Lfunc_end5-mcl_fp_mulUnitPre1Lbmi2
+
+ .globl mcl_fpDbl_mulPre1Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre1Lbmi2,@function
+mcl_fpDbl_mulPre1Lbmi2: # @mcl_fpDbl_mulPre1Lbmi2
+# BB#0:
+ movq (%rdx), %rdx
+ mulxq (%rsi), %rcx, %rax
+ movq %rcx, (%rdi)
+ movq %rax, 8(%rdi)
+ retq
+.Lfunc_end6:
+ .size mcl_fpDbl_mulPre1Lbmi2, .Lfunc_end6-mcl_fpDbl_mulPre1Lbmi2
+
+ .globl mcl_fpDbl_sqrPre1Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre1Lbmi2,@function
+mcl_fpDbl_sqrPre1Lbmi2: # @mcl_fpDbl_sqrPre1Lbmi2
+# BB#0:
+ movq (%rsi), %rdx
+ mulxq %rdx, %rcx, %rax
+ movq %rcx, (%rdi)
+ movq %rax, 8(%rdi)
+ retq
+.Lfunc_end7:
+ .size mcl_fpDbl_sqrPre1Lbmi2, .Lfunc_end7-mcl_fpDbl_sqrPre1Lbmi2
+
+ .globl mcl_fp_mont1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont1Lbmi2,@function
+mcl_fp_mont1Lbmi2: # @mcl_fp_mont1Lbmi2
+# BB#0:
+ movq %rdx, %rax
+ movq (%rsi), %rdx
+ mulxq (%rax), %rsi, %r8
+ movq -8(%rcx), %rdx
+ imulq %rsi, %rdx
+ movq (%rcx), %rcx
+ mulxq %rcx, %rdx, %rax
+ addq %rsi, %rdx
+ adcq %r8, %rax
+ sbbq %rdx, %rdx
+ andl $1, %edx
+ movq %rax, %rsi
+ subq %rcx, %rsi
+ sbbq $0, %rdx
+ testb $1, %dl
+ cmovneq %rax, %rsi
+ movq %rsi, (%rdi)
+ retq
+.Lfunc_end8:
+ .size mcl_fp_mont1Lbmi2, .Lfunc_end8-mcl_fp_mont1Lbmi2
+
+ .globl mcl_fp_montNF1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF1Lbmi2,@function
+mcl_fp_montNF1Lbmi2: # @mcl_fp_montNF1Lbmi2
+# BB#0:
+ movq %rdx, %rax
+ movq (%rsi), %rdx
+ mulxq (%rax), %rsi, %r8
+ movq -8(%rcx), %rdx
+ imulq %rsi, %rdx
+ movq (%rcx), %rcx
+ mulxq %rcx, %rdx, %rax
+ addq %rsi, %rdx
+ adcq %r8, %rax
+ movq %rax, %rdx
+ subq %rcx, %rdx
+ cmovsq %rax, %rdx
+ movq %rdx, (%rdi)
+ retq
+.Lfunc_end9:
+ .size mcl_fp_montNF1Lbmi2, .Lfunc_end9-mcl_fp_montNF1Lbmi2
+
+ .globl mcl_fp_montRed1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed1Lbmi2,@function
+mcl_fp_montRed1Lbmi2: # @mcl_fp_montRed1Lbmi2
+# BB#0:
+ movq (%rsi), %rcx
+ movq -8(%rdx), %rax
+ imulq %rcx, %rax
+ movq (%rdx), %r8
+ movq %rax, %rdx
+ mulxq %r8, %rax, %rdx
+ addq %rcx, %rax
+ adcq 8(%rsi), %rdx
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rdx, %rcx
+ subq %r8, %rcx
+ sbbq $0, %rax
+ testb $1, %al
+ cmovneq %rdx, %rcx
+ movq %rcx, (%rdi)
+ retq
+.Lfunc_end10:
+ .size mcl_fp_montRed1Lbmi2, .Lfunc_end10-mcl_fp_montRed1Lbmi2
+
+ .globl mcl_fp_addPre1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre1Lbmi2,@function
+mcl_fp_addPre1Lbmi2: # @mcl_fp_addPre1Lbmi2
+# BB#0:
+ movq (%rdx), %rax
+ addq (%rsi), %rax
+ movq %rax, (%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end11:
+ .size mcl_fp_addPre1Lbmi2, .Lfunc_end11-mcl_fp_addPre1Lbmi2
+
+ .globl mcl_fp_subPre1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre1Lbmi2,@function
+mcl_fp_subPre1Lbmi2: # @mcl_fp_subPre1Lbmi2
+# BB#0:
+ movq (%rsi), %rcx
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ movq %rcx, (%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end12:
+ .size mcl_fp_subPre1Lbmi2, .Lfunc_end12-mcl_fp_subPre1Lbmi2
+
+ .globl mcl_fp_shr1_1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_1Lbmi2,@function
+mcl_fp_shr1_1Lbmi2: # @mcl_fp_shr1_1Lbmi2
+# BB#0:
+ movq (%rsi), %rax
+ shrq %rax
+ movq %rax, (%rdi)
+ retq
+.Lfunc_end13:
+ .size mcl_fp_shr1_1Lbmi2, .Lfunc_end13-mcl_fp_shr1_1Lbmi2
+
+ .globl mcl_fp_add1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add1Lbmi2,@function
+mcl_fp_add1Lbmi2: # @mcl_fp_add1Lbmi2
+# BB#0:
+ movq (%rdx), %rax
+ addq (%rsi), %rax
+ movq %rax, (%rdi)
+ sbbq %rdx, %rdx
+ andl $1, %edx
+ subq (%rcx), %rax
+ sbbq $0, %rdx
+ testb $1, %dl
+ jne .LBB14_2
+# BB#1: # %nocarry
+ movq %rax, (%rdi)
+.LBB14_2: # %carry
+ retq
+.Lfunc_end14:
+ .size mcl_fp_add1Lbmi2, .Lfunc_end14-mcl_fp_add1Lbmi2
+
+ .globl mcl_fp_addNF1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF1Lbmi2,@function
+mcl_fp_addNF1Lbmi2: # @mcl_fp_addNF1Lbmi2
+# BB#0:
+ movq (%rdx), %rax
+ addq (%rsi), %rax
+ movq %rax, %rdx
+ subq (%rcx), %rdx
+ cmovsq %rax, %rdx
+ movq %rdx, (%rdi)
+ retq
+.Lfunc_end15:
+ .size mcl_fp_addNF1Lbmi2, .Lfunc_end15-mcl_fp_addNF1Lbmi2
+
+ .globl mcl_fp_sub1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub1Lbmi2,@function
+mcl_fp_sub1Lbmi2: # @mcl_fp_sub1Lbmi2
+# BB#0:
+ movq (%rsi), %rax
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ movq %rax, (%rdi)
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB16_2
+# BB#1: # %nocarry
+ retq
+.LBB16_2: # %carry
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ retq
+.Lfunc_end16:
+ .size mcl_fp_sub1Lbmi2, .Lfunc_end16-mcl_fp_sub1Lbmi2
+
+ .globl mcl_fp_subNF1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF1Lbmi2,@function
+mcl_fp_subNF1Lbmi2: # @mcl_fp_subNF1Lbmi2
+# BB#0:
+ movq (%rsi), %rax
+ subq (%rdx), %rax
+ movq %rax, %rdx
+ sarq $63, %rdx
+ andq (%rcx), %rdx
+ addq %rax, %rdx
+ movq %rdx, (%rdi)
+ retq
+.Lfunc_end17:
+ .size mcl_fp_subNF1Lbmi2, .Lfunc_end17-mcl_fp_subNF1Lbmi2
+
+ .globl mcl_fpDbl_add1Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add1Lbmi2,@function
+mcl_fpDbl_add1Lbmi2: # @mcl_fpDbl_add1Lbmi2
+# BB#0:
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ movq %rax, (%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rdx, %rsi
+ subq (%rcx), %rsi
+ sbbq $0, %rax
+ testb $1, %al
+ cmovneq %rdx, %rsi
+ movq %rsi, 8(%rdi)
+ retq
+.Lfunc_end18:
+ .size mcl_fpDbl_add1Lbmi2, .Lfunc_end18-mcl_fpDbl_add1Lbmi2
+
+ .globl mcl_fpDbl_sub1Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub1Lbmi2,@function
+mcl_fpDbl_sub1Lbmi2: # @mcl_fpDbl_sub1Lbmi2
+# BB#0:
+ movq (%rsi), %rax
+ movq 8(%rsi), %r8
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r8
+ movq %rax, (%rdi)
+ movl $0, %eax
+ sbbq $0, %rax
+ testb $1, %al
+ cmovneq (%rcx), %rsi
+ addq %r8, %rsi
+ movq %rsi, 8(%rdi)
+ retq
+.Lfunc_end19:
+ .size mcl_fpDbl_sub1Lbmi2, .Lfunc_end19-mcl_fpDbl_sub1Lbmi2
+
+ .globl mcl_fp_mulUnitPre2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre2Lbmi2,@function
+mcl_fp_mulUnitPre2Lbmi2: # @mcl_fp_mulUnitPre2Lbmi2
+# BB#0:
+ mulxq 8(%rsi), %rax, %rcx
+ mulxq (%rsi), %rdx, %rsi
+ movq %rdx, (%rdi)
+ addq %rax, %rsi
+ movq %rsi, 8(%rdi)
+ adcq $0, %rcx
+ movq %rcx, 16(%rdi)
+ retq
+.Lfunc_end20:
+ .size mcl_fp_mulUnitPre2Lbmi2, .Lfunc_end20-mcl_fp_mulUnitPre2Lbmi2
+
+ .globl mcl_fpDbl_mulPre2Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre2Lbmi2,@function
+mcl_fpDbl_mulPre2Lbmi2: # @mcl_fpDbl_mulPre2Lbmi2
+# BB#0:
+ movq %rdx, %r10
+ movq (%rsi), %r11
+ movq 8(%rsi), %r8
+ movq (%r10), %rsi
+ movq %r11, %rdx
+ mulxq %rsi, %rdx, %r9
+ movq %rdx, (%rdi)
+ movq %r8, %rdx
+ mulxq %rsi, %rsi, %rax
+ addq %r9, %rsi
+ adcq $0, %rax
+ movq 8(%r10), %rcx
+ movq %r11, %rdx
+ mulxq %rcx, %rdx, %r9
+ addq %rsi, %rdx
+ movq %rdx, 8(%rdi)
+ movq %r8, %rdx
+ mulxq %rcx, %rdx, %rcx
+ adcq %rax, %rdx
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %r9, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %rcx, %rax
+ movq %rax, 24(%rdi)
+ retq
+.Lfunc_end21:
+ .size mcl_fpDbl_mulPre2Lbmi2, .Lfunc_end21-mcl_fpDbl_mulPre2Lbmi2
+
+ .globl mcl_fpDbl_sqrPre2Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre2Lbmi2,@function
+mcl_fpDbl_sqrPre2Lbmi2: # @mcl_fpDbl_sqrPre2Lbmi2
+# BB#0:
+ movq (%rsi), %rax
+ movq 8(%rsi), %rcx
+ movq %rax, %rdx
+ mulxq %rax, %rdx, %rsi
+ movq %rdx, (%rdi)
+ movq %rcx, %rdx
+ mulxq %rax, %rdx, %r8
+ addq %rdx, %rsi
+ movq %r8, %rax
+ adcq $0, %rax
+ addq %rdx, %rsi
+ movq %rsi, 8(%rdi)
+ movq %rcx, %rdx
+ mulxq %rcx, %rdx, %rcx
+ adcq %rax, %rdx
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %r8, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %rcx, %rax
+ movq %rax, 24(%rdi)
+ retq
+.Lfunc_end22:
+ .size mcl_fpDbl_sqrPre2Lbmi2, .Lfunc_end22-mcl_fpDbl_sqrPre2Lbmi2
+
+ .globl mcl_fp_mont2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont2Lbmi2,@function
+mcl_fp_mont2Lbmi2: # @mcl_fp_mont2Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq (%rsi), %r8
+ movq 8(%rsi), %r9
+ movq (%rdx), %rax
+ movq 8(%rdx), %r11
+ movq %r9, %rdx
+ mulxq %rax, %r10, %r13
+ movq %r8, %rdx
+ mulxq %rax, %r14, %rsi
+ addq %r10, %rsi
+ adcq $0, %r13
+ movq -8(%rcx), %rbp
+ movq (%rcx), %r10
+ movq %r14, %rdx
+ imulq %rbp, %rdx
+ movq 8(%rcx), %r15
+ mulxq %r15, %r12, %rcx
+ mulxq %r10, %rdx, %rbx
+ addq %r12, %rbx
+ adcq $0, %rcx
+ addq %r14, %rdx
+ adcq %rsi, %rbx
+ adcq %r13, %rcx
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq %r11, %rdx
+ mulxq %r9, %r9, %r14
+ movq %r11, %rdx
+ mulxq %r8, %r8, %rax
+ addq %r9, %rax
+ adcq $0, %r14
+ addq %rbx, %r8
+ adcq %rcx, %rax
+ adcq %rsi, %r14
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ imulq %r8, %rbp
+ movq %rbp, %rdx
+ mulxq %r15, %rcx, %rbx
+ movq %rbp, %rdx
+ mulxq %r10, %rdx, %rbp
+ addq %rcx, %rbp
+ adcq $0, %rbx
+ addq %r8, %rdx
+ adcq %rax, %rbp
+ adcq %r14, %rbx
+ adcq $0, %rsi
+ movq %rbp, %rax
+ subq %r10, %rax
+ movq %rbx, %rcx
+ sbbq %r15, %rcx
+ sbbq $0, %rsi
+ andl $1, %esi
+ cmovneq %rbx, %rcx
+ testb %sil, %sil
+ cmovneq %rbp, %rax
+ movq %rax, (%rdi)
+ movq %rcx, 8(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end23:
+ .size mcl_fp_mont2Lbmi2, .Lfunc_end23-mcl_fp_mont2Lbmi2
+
+ .globl mcl_fp_montNF2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF2Lbmi2,@function
+mcl_fp_montNF2Lbmi2: # @mcl_fp_montNF2Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq (%rsi), %r8
+ movq 8(%rsi), %r9
+ movq (%rdx), %rax
+ movq 8(%rdx), %r11
+ movq %r9, %rdx
+ mulxq %rax, %r10, %rsi
+ movq %r8, %rdx
+ mulxq %rax, %r15, %r13
+ addq %r10, %r13
+ adcq $0, %rsi
+ movq -8(%rcx), %rbp
+ movq (%rcx), %r10
+ movq %r15, %rdx
+ imulq %rbp, %rdx
+ movq 8(%rcx), %r14
+ mulxq %r10, %rcx, %r12
+ addq %r15, %rcx
+ mulxq %r14, %rbx, %rcx
+ adcq %r13, %rbx
+ adcq $0, %rsi
+ addq %r12, %rbx
+ adcq %rcx, %rsi
+ movq %r11, %rdx
+ mulxq %r9, %r9, %rcx
+ movq %r11, %rdx
+ mulxq %r8, %r8, %rax
+ addq %r9, %rax
+ adcq $0, %rcx
+ addq %rbx, %r8
+ adcq %rsi, %rax
+ adcq $0, %rcx
+ imulq %r8, %rbp
+ movq %rbp, %rdx
+ mulxq %r14, %rbx, %rsi
+ movq %rbp, %rdx
+ mulxq %r10, %rbp, %rdx
+ addq %r8, %rbp
+ adcq %rax, %rbx
+ adcq $0, %rcx
+ addq %rdx, %rbx
+ adcq %rsi, %rcx
+ movq %rbx, %rax
+ subq %r10, %rax
+ movq %rcx, %rdx
+ sbbq %r14, %rdx
+ cmovsq %rbx, %rax
+ movq %rax, (%rdi)
+ cmovsq %rcx, %rdx
+ movq %rdx, 8(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end24:
+ .size mcl_fp_montNF2Lbmi2, .Lfunc_end24-mcl_fp_montNF2Lbmi2
+
+ .globl mcl_fp_montRed2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed2Lbmi2,@function
+mcl_fp_montRed2Lbmi2: # @mcl_fp_montRed2Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq -8(%rdx), %r15
+ movq (%rdx), %r8
+ movq (%rsi), %r10
+ movq %r10, %rcx
+ imulq %r15, %rcx
+ movq 8(%rdx), %r9
+ movq %rcx, %rdx
+ mulxq %r9, %r11, %r14
+ movq %rcx, %rdx
+ mulxq %r8, %rcx, %rax
+ addq %r11, %rax
+ adcq $0, %r14
+ movq 24(%rsi), %r11
+ addq %r10, %rcx
+ adcq 8(%rsi), %rax
+ adcq 16(%rsi), %r14
+ adcq $0, %r11
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ imulq %rax, %r15
+ movq %r15, %rdx
+ mulxq %r9, %r10, %rbx
+ movq %r15, %rdx
+ mulxq %r8, %rsi, %rdx
+ addq %r10, %rdx
+ adcq $0, %rbx
+ addq %rax, %rsi
+ adcq %r14, %rdx
+ adcq %r11, %rbx
+ adcq $0, %rcx
+ movq %rdx, %rax
+ subq %r8, %rax
+ movq %rbx, %rsi
+ sbbq %r9, %rsi
+ sbbq $0, %rcx
+ andl $1, %ecx
+ cmovneq %rbx, %rsi
+ testb %cl, %cl
+ cmovneq %rdx, %rax
+ movq %rax, (%rdi)
+ movq %rsi, 8(%rdi)
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end25:
+ .size mcl_fp_montRed2Lbmi2, .Lfunc_end25-mcl_fp_montRed2Lbmi2
+
+ .globl mcl_fp_addPre2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre2Lbmi2,@function
+mcl_fp_addPre2Lbmi2: # @mcl_fp_addPre2Lbmi2
+# BB#0:
+ movq (%rdx), %rax
+ movq 8(%rdx), %rcx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rcx
+ movq %rax, (%rdi)
+ movq %rcx, 8(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end26:
+ .size mcl_fp_addPre2Lbmi2, .Lfunc_end26-mcl_fp_addPre2Lbmi2
+
+ .globl mcl_fp_subPre2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre2Lbmi2,@function
+mcl_fp_subPre2Lbmi2: # @mcl_fp_subPre2Lbmi2
+# BB#0:
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ sbbq 8(%rdx), %rsi
+ movq %rcx, (%rdi)
+ movq %rsi, 8(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end27:
+ .size mcl_fp_subPre2Lbmi2, .Lfunc_end27-mcl_fp_subPre2Lbmi2
+
+ .globl mcl_fp_shr1_2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_2Lbmi2,@function
+mcl_fp_shr1_2Lbmi2: # @mcl_fp_shr1_2Lbmi2
+# BB#0:
+ movq (%rsi), %rax
+ movq 8(%rsi), %rcx
+ shrdq $1, %rcx, %rax
+ movq %rax, (%rdi)
+ shrq %rcx
+ movq %rcx, 8(%rdi)
+ retq
+.Lfunc_end28:
+ .size mcl_fp_shr1_2Lbmi2, .Lfunc_end28-mcl_fp_shr1_2Lbmi2
+
+ .globl mcl_fp_add2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add2Lbmi2,@function
+mcl_fp_add2Lbmi2: # @mcl_fp_add2Lbmi2
+# BB#0:
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB29_2
+# BB#1: # %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+.LBB29_2: # %carry
+ retq
+.Lfunc_end29:
+ .size mcl_fp_add2Lbmi2, .Lfunc_end29-mcl_fp_add2Lbmi2
+
+ .globl mcl_fp_addNF2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF2Lbmi2,@function
+mcl_fp_addNF2Lbmi2: # @mcl_fp_addNF2Lbmi2
+# BB#0:
+ movq (%rdx), %rax
+ movq 8(%rdx), %r8
+ addq (%rsi), %rax
+ adcq 8(%rsi), %r8
+ movq %rax, %rsi
+ subq (%rcx), %rsi
+ movq %r8, %rdx
+ sbbq 8(%rcx), %rdx
+ testq %rdx, %rdx
+ cmovsq %rax, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r8, %rdx
+ movq %rdx, 8(%rdi)
+ retq
+.Lfunc_end30:
+ .size mcl_fp_addNF2Lbmi2, .Lfunc_end30-mcl_fp_addNF2Lbmi2
+
+ .globl mcl_fp_sub2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub2Lbmi2,@function
+mcl_fp_sub2Lbmi2: # @mcl_fp_sub2Lbmi2
+# BB#0:
+ movq (%rsi), %rax
+ movq 8(%rsi), %r8
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r8
+ movq %rax, (%rdi)
+ movq %r8, 8(%rdi)
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB31_2
+# BB#1: # %nocarry
+ retq
+.LBB31_2: # %carry
+ movq 8(%rcx), %rdx
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %r8, %rdx
+ movq %rdx, 8(%rdi)
+ retq
+.Lfunc_end31:
+ .size mcl_fp_sub2Lbmi2, .Lfunc_end31-mcl_fp_sub2Lbmi2
+
+ .globl mcl_fp_subNF2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF2Lbmi2,@function
+mcl_fp_subNF2Lbmi2: # @mcl_fp_subNF2Lbmi2
+# BB#0:
+ movq (%rsi), %r8
+ movq 8(%rsi), %rsi
+ subq (%rdx), %r8
+ sbbq 8(%rdx), %rsi
+ movq %rsi, %rdx
+ sarq $63, %rdx
+ movq 8(%rcx), %rax
+ andq %rdx, %rax
+ andq (%rcx), %rdx
+ addq %r8, %rdx
+ movq %rdx, (%rdi)
+ adcq %rsi, %rax
+ movq %rax, 8(%rdi)
+ retq
+.Lfunc_end32:
+ .size mcl_fp_subNF2Lbmi2, .Lfunc_end32-mcl_fp_subNF2Lbmi2
+
+ .globl mcl_fpDbl_add2Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add2Lbmi2,@function
+mcl_fpDbl_add2Lbmi2: # @mcl_fpDbl_add2Lbmi2
+# BB#0:
+ movq 24(%rdx), %r8
+ movq 24(%rsi), %r9
+ movq 16(%rdx), %r10
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r10
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ adcq %r8, %r9
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %r10, %rdx
+ subq (%rcx), %rdx
+ movq %r9, %rsi
+ sbbq 8(%rcx), %rsi
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %r10, %rdx
+ movq %rdx, 16(%rdi)
+ testb %al, %al
+ cmovneq %r9, %rsi
+ movq %rsi, 24(%rdi)
+ retq
+.Lfunc_end33:
+ .size mcl_fpDbl_add2Lbmi2, .Lfunc_end33-mcl_fpDbl_add2Lbmi2
+
+ .globl mcl_fpDbl_sub2Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub2Lbmi2,@function
+mcl_fpDbl_sub2Lbmi2: # @mcl_fpDbl_sub2Lbmi2
+# BB#0:
+ movq 24(%rdx), %r8
+ movq 24(%rsi), %r9
+ movq 16(%rsi), %r10
+ movq (%rsi), %r11
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %r11
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %r10
+ movq %r11, (%rdi)
+ movq %rsi, 8(%rdi)
+ sbbq %r8, %r9
+ movl $0, %edx
+ sbbq $0, %rdx
+ andl $1, %edx
+ movq (%rcx), %rsi
+ cmoveq %rax, %rsi
+ testb %dl, %dl
+ cmovneq 8(%rcx), %rax
+ addq %r10, %rsi
+ movq %rsi, 16(%rdi)
+ adcq %r9, %rax
+ movq %rax, 24(%rdi)
+ retq
+.Lfunc_end34:
+ .size mcl_fpDbl_sub2Lbmi2, .Lfunc_end34-mcl_fpDbl_sub2Lbmi2
+
+ .globl mcl_fp_mulUnitPre3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre3Lbmi2,@function
+mcl_fp_mulUnitPre3Lbmi2: # @mcl_fp_mulUnitPre3Lbmi2
+# BB#0:
+ mulxq 16(%rsi), %r8, %rcx
+ mulxq 8(%rsi), %r9, %rax
+ mulxq (%rsi), %rdx, %rsi
+ movq %rdx, (%rdi)
+ addq %r9, %rsi
+ movq %rsi, 8(%rdi)
+ adcq %r8, %rax
+ movq %rax, 16(%rdi)
+ adcq $0, %rcx
+ movq %rcx, 24(%rdi)
+ retq
+.Lfunc_end35:
+ .size mcl_fp_mulUnitPre3Lbmi2, .Lfunc_end35-mcl_fp_mulUnitPre3Lbmi2
+
+ .globl mcl_fpDbl_mulPre3Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre3Lbmi2,@function
+mcl_fpDbl_mulPre3Lbmi2: # @mcl_fpDbl_mulPre3Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq %rdx, %r9
+ movq (%rsi), %r10
+ movq 8(%rsi), %r8
+ movq (%r9), %rax
+ movq %r10, %rdx
+ mulxq %rax, %rdx, %r14
+ movq 16(%rsi), %r11
+ movq %rdx, (%rdi)
+ movq %r11, %rdx
+ mulxq %rax, %rsi, %rbx
+ movq %r8, %rdx
+ mulxq %rax, %rax, %rcx
+ addq %r14, %rax
+ adcq %rsi, %rcx
+ adcq $0, %rbx
+ movq 8(%r9), %rsi
+ movq %r10, %rdx
+ mulxq %rsi, %rdx, %r14
+ addq %rax, %rdx
+ movq %rdx, 8(%rdi)
+ movq %r11, %rdx
+ mulxq %rsi, %rax, %r15
+ movq %r8, %rdx
+ mulxq %rsi, %rsi, %rdx
+ adcq %rcx, %rsi
+ adcq %rbx, %rax
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %r14, %rsi
+ adcq %rdx, %rax
+ adcq %r15, %rcx
+ movq 16(%r9), %rbx
+ movq %r10, %rdx
+ mulxq %rbx, %rdx, %r9
+ addq %rsi, %rdx
+ movq %rdx, 16(%rdi)
+ movq %r11, %rdx
+ mulxq %rbx, %rsi, %r10
+ movq %r8, %rdx
+ mulxq %rbx, %rbx, %rdx
+ adcq %rax, %rbx
+ adcq %rcx, %rsi
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %r9, %rbx
+ movq %rbx, 24(%rdi)
+ adcq %rdx, %rsi
+ movq %rsi, 32(%rdi)
+ adcq %r10, %rax
+ movq %rax, 40(%rdi)
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end36:
+ .size mcl_fpDbl_mulPre3Lbmi2, .Lfunc_end36-mcl_fpDbl_mulPre3Lbmi2
+
+ .globl mcl_fpDbl_sqrPre3Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre3Lbmi2,@function
+mcl_fpDbl_sqrPre3Lbmi2: # @mcl_fpDbl_sqrPre3Lbmi2
+# BB#0:
+ pushq %r14
+ pushq %rbx
+ movq 16(%rsi), %r10
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ movq %rcx, %rdx
+ mulxq %rcx, %rdx, %rax
+ movq %rdx, (%rdi)
+ movq %r10, %rdx
+ mulxq %rcx, %r11, %r8
+ movq %rsi, %rdx
+ mulxq %rcx, %rdx, %r14
+ addq %rdx, %rax
+ movq %r14, %rbx
+ adcq %r11, %rbx
+ movq %r8, %rcx
+ adcq $0, %rcx
+ addq %rdx, %rax
+ movq %rax, 8(%rdi)
+ movq %r10, %rdx
+ mulxq %rsi, %rax, %r9
+ movq %rsi, %rdx
+ mulxq %rsi, %rsi, %rdx
+ adcq %rbx, %rsi
+ adcq %rax, %rcx
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ addq %r14, %rsi
+ adcq %rdx, %rcx
+ adcq %r9, %rbx
+ addq %r11, %rsi
+ movq %rsi, 16(%rdi)
+ movq %r10, %rdx
+ mulxq %r10, %rsi, %rdx
+ adcq %rax, %rcx
+ adcq %rbx, %rsi
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %r8, %rcx
+ movq %rcx, 24(%rdi)
+ adcq %r9, %rsi
+ movq %rsi, 32(%rdi)
+ adcq %rdx, %rax
+ movq %rax, 40(%rdi)
+ popq %rbx
+ popq %r14
+ retq
+.Lfunc_end37:
+ .size mcl_fpDbl_sqrPre3Lbmi2, .Lfunc_end37-mcl_fpDbl_sqrPre3Lbmi2
+
+ .globl mcl_fp_mont3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont3Lbmi2,@function
+mcl_fp_mont3Lbmi2: # @mcl_fp_mont3Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %r15
+ movq %r15, -32(%rsp) # 8-byte Spill
+ movq %rdi, -24(%rsp) # 8-byte Spill
+ movq 16(%rsi), %rdi
+ movq %rdi, -56(%rsp) # 8-byte Spill
+ movq (%r15), %rax
+ movq %rdi, %rdx
+ mulxq %rax, %r14, %r11
+ movq (%rsi), %r12
+ movq %r12, -48(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rdx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ mulxq %rax, %rbx, %r8
+ movq %r12, %rdx
+ mulxq %rax, %r9, %rdi
+ addq %rbx, %rdi
+ adcq %r14, %r8
+ adcq $0, %r11
+ movq -8(%rcx), %r13
+ movq (%rcx), %rbx
+ movq %rbx, -8(%rsp) # 8-byte Spill
+ movq %r9, %rdx
+ imulq %r13, %rdx
+ movq 8(%rcx), %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ mulxq %rax, %rax, %r10
+ mulxq %rbx, %rsi, %rbx
+ addq %rax, %rbx
+ movq 16(%rcx), %rbp
+ mulxq %rbp, %rcx, %rax
+ movq %rbp, %r14
+ adcq %r10, %rcx
+ adcq $0, %rax
+ addq %r9, %rsi
+ adcq %rdi, %rbx
+ movq 8(%r15), %rdx
+ adcq %r8, %rcx
+ adcq %r11, %rax
+ sbbq %r9, %r9
+ andl $1, %r9d
+ movq -56(%rsp), %r15 # 8-byte Reload
+ mulxq %r15, %r11, %rdi
+ mulxq -16(%rsp), %r10, %rsi # 8-byte Folded Reload
+ mulxq %r12, %r8, %rbp
+ addq %r10, %rbp
+ adcq %r11, %rsi
+ adcq $0, %rdi
+ addq %rbx, %r8
+ adcq %rcx, %rbp
+ adcq %rax, %rsi
+ adcq %r9, %rdi
+ sbbq %r11, %r11
+ andl $1, %r11d
+ movq %r8, %rdx
+ imulq %r13, %rdx
+ mulxq %r14, %r9, %rcx
+ movq %r14, %r12
+ movq -40(%rsp), %r14 # 8-byte Reload
+ mulxq %r14, %r10, %rax
+ mulxq -8(%rsp), %rdx, %rbx # 8-byte Folded Reload
+ addq %r10, %rbx
+ adcq %r9, %rax
+ adcq $0, %rcx
+ addq %r8, %rdx
+ adcq %rbp, %rbx
+ adcq %rsi, %rax
+ adcq %rdi, %rcx
+ adcq $0, %r11
+ movq -32(%rsp), %rdx # 8-byte Reload
+ movq 16(%rdx), %rdx
+ mulxq %r15, %r9, %rsi
+ mulxq -16(%rsp), %r10, %r15 # 8-byte Folded Reload
+ mulxq -48(%rsp), %r8, %rdi # 8-byte Folded Reload
+ addq %r10, %rdi
+ adcq %r9, %r15
+ adcq $0, %rsi
+ addq %rbx, %r8
+ adcq %rax, %rdi
+ adcq %rcx, %r15
+ adcq %r11, %rsi
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ imulq %r8, %r13
+ movq %r13, %rdx
+ mulxq %r12, %r9, %rbp
+ movq %r13, %rdx
+ mulxq %r14, %r10, %rax
+ movq %r13, %rdx
+ movq -8(%rsp), %rcx # 8-byte Reload
+ mulxq %rcx, %r11, %rdx
+ addq %r10, %rdx
+ adcq %r9, %rax
+ adcq $0, %rbp
+ addq %r8, %r11
+ adcq %rdi, %rdx
+ adcq %r15, %rax
+ adcq %rsi, %rbp
+ adcq $0, %rbx
+ movq %rdx, %rsi
+ subq %rcx, %rsi
+ movq %rax, %rdi
+ sbbq %r14, %rdi
+ movq %rbp, %rcx
+ sbbq %r12, %rcx
+ sbbq $0, %rbx
+ andl $1, %ebx
+ cmovneq %rbp, %rcx
+ testb %bl, %bl
+ cmovneq %rdx, %rsi
+ movq -24(%rsp), %rdx # 8-byte Reload
+ movq %rsi, (%rdx)
+ cmovneq %rax, %rdi
+ movq %rdi, 8(%rdx)
+ movq %rcx, 16(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end38:
+ .size mcl_fp_mont3Lbmi2, .Lfunc_end38-mcl_fp_mont3Lbmi2
+
+ .globl mcl_fp_montNF3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF3Lbmi2,@function
+mcl_fp_montNF3Lbmi2: # @mcl_fp_montNF3Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq %rdx, %r10
+ movq %r10, -16(%rsp) # 8-byte Spill
+ movq %rdi, -8(%rsp) # 8-byte Spill
+ movq (%rsi), %rcx
+ movq %rcx, -24(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rdi
+ movq %rdi, -32(%rsp) # 8-byte Spill
+ movq (%r10), %rax
+ movq %rdi, %rdx
+ mulxq %rax, %rbx, %r14
+ movq %rcx, %rdx
+ mulxq %rax, %r15, %r12
+ movq 16(%rsi), %r11
+ addq %rbx, %r12
+ movq %r11, %rdx
+ mulxq %rax, %rsi, %rbx
+ adcq %r14, %rsi
+ adcq $0, %rbx
+ movq -8(%r8), %r9
+ movq (%r8), %r14
+ movq %r15, %rdx
+ imulq %r9, %rdx
+ mulxq %r14, %rbp, %r13
+ addq %r15, %rbp
+ movq 8(%r8), %r15
+ mulxq %r15, %rdi, %rbp
+ adcq %r12, %rdi
+ movq 16(%r8), %r12
+ mulxq %r12, %rax, %r8
+ adcq %rsi, %rax
+ adcq $0, %rbx
+ addq %r13, %rdi
+ movq 8(%r10), %rdx
+ adcq %rbp, %rax
+ adcq %r8, %rbx
+ movq -32(%rsp), %r10 # 8-byte Reload
+ mulxq %r10, %rsi, %r8
+ mulxq %rcx, %r13, %rbp
+ addq %rsi, %rbp
+ mulxq %r11, %rcx, %rsi
+ adcq %r8, %rcx
+ adcq $0, %rsi
+ addq %rdi, %r13
+ adcq %rax, %rbp
+ adcq %rbx, %rcx
+ adcq $0, %rsi
+ movq %r13, %rdx
+ imulq %r9, %rdx
+ mulxq %r14, %rdi, %rbx
+ addq %r13, %rdi
+ mulxq %r15, %rax, %rdi
+ adcq %rbp, %rax
+ mulxq %r12, %rbp, %rdx
+ adcq %rcx, %rbp
+ adcq $0, %rsi
+ addq %rbx, %rax
+ adcq %rdi, %rbp
+ adcq %rdx, %rsi
+ movq -16(%rsp), %rcx # 8-byte Reload
+ movq 16(%rcx), %rdx
+ mulxq %r10, %rbx, %r8
+ mulxq -24(%rsp), %r10, %rdi # 8-byte Folded Reload
+ addq %rbx, %rdi
+ mulxq %r11, %rcx, %rbx
+ adcq %r8, %rcx
+ adcq $0, %rbx
+ addq %rax, %r10
+ adcq %rbp, %rdi
+ adcq %rsi, %rcx
+ adcq $0, %rbx
+ imulq %r10, %r9
+ movq %r9, %rdx
+ mulxq %r14, %rdx, %r8
+ addq %r10, %rdx
+ movq %r9, %rdx
+ mulxq %r12, %rbp, %rsi
+ movq %r9, %rdx
+ mulxq %r15, %rax, %rdx
+ adcq %rdi, %rax
+ adcq %rcx, %rbp
+ adcq $0, %rbx
+ addq %r8, %rax
+ adcq %rdx, %rbp
+ adcq %rsi, %rbx
+ movq %rax, %rcx
+ subq %r14, %rcx
+ movq %rbp, %rdx
+ sbbq %r15, %rdx
+ movq %rbx, %rsi
+ sbbq %r12, %rsi
+ movq %rsi, %rdi
+ sarq $63, %rdi
+ cmovsq %rax, %rcx
+ movq -8(%rsp), %rax # 8-byte Reload
+ movq %rcx, (%rax)
+ cmovsq %rbp, %rdx
+ movq %rdx, 8(%rax)
+ cmovsq %rbx, %rsi
+ movq %rsi, 16(%rax)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end39:
+ .size mcl_fp_montNF3Lbmi2, .Lfunc_end39-mcl_fp_montNF3Lbmi2
+
+ .globl mcl_fp_montRed3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed3Lbmi2,@function
+mcl_fp_montRed3Lbmi2: # @mcl_fp_montRed3Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rdi, -8(%rsp) # 8-byte Spill
+ movq -8(%rcx), %r15
+ movq (%rcx), %r9
+ movq (%rsi), %rbx
+ movq %rbx, %rdx
+ imulq %r15, %rdx
+ movq 16(%rcx), %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ mulxq %rax, %r14, %r11
+ movq %rax, %rbp
+ movq 8(%rcx), %r10
+ mulxq %r10, %rax, %r13
+ mulxq %r9, %rdx, %rcx
+ addq %rax, %rcx
+ adcq %r14, %r13
+ adcq $0, %r11
+ movq 40(%rsi), %r14
+ movq 32(%rsi), %r12
+ addq %rbx, %rdx
+ adcq 8(%rsi), %rcx
+ adcq 16(%rsi), %r13
+ adcq 24(%rsi), %r11
+ adcq $0, %r12
+ adcq $0, %r14
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq %rcx, %rdx
+ imulq %r15, %rdx
+ mulxq %rbp, %rbp, %rdi
+ mulxq %r10, %r8, %rbx
+ mulxq %r9, %rdx, %rax
+ addq %r8, %rax
+ adcq %rbp, %rbx
+ adcq $0, %rdi
+ addq %rcx, %rdx
+ adcq %r13, %rax
+ adcq %r11, %rbx
+ adcq %r12, %rdi
+ adcq $0, %r14
+ adcq $0, %rsi
+ imulq %rax, %r15
+ movq %r15, %rdx
+ movq -16(%rsp), %r13 # 8-byte Reload
+ mulxq %r13, %r8, %rcx
+ movq %r15, %rdx
+ mulxq %r10, %r11, %r12
+ movq %r15, %rdx
+ mulxq %r9, %r15, %rdx
+ addq %r11, %rdx
+ adcq %r8, %r12
+ adcq $0, %rcx
+ addq %rax, %r15
+ adcq %rbx, %rdx
+ adcq %rdi, %r12
+ adcq %r14, %rcx
+ adcq $0, %rsi
+ movq %rdx, %rax
+ subq %r9, %rax
+ movq %r12, %rdi
+ sbbq %r10, %rdi
+ movq %rcx, %rbp
+ sbbq %r13, %rbp
+ sbbq $0, %rsi
+ andl $1, %esi
+ cmovneq %rcx, %rbp
+ testb %sil, %sil
+ cmovneq %rdx, %rax
+ movq -8(%rsp), %rcx # 8-byte Reload
+ movq %rax, (%rcx)
+ cmovneq %r12, %rdi
+ movq %rdi, 8(%rcx)
+ movq %rbp, 16(%rcx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end40:
+ .size mcl_fp_montRed3Lbmi2, .Lfunc_end40-mcl_fp_montRed3Lbmi2
+
+ .globl mcl_fp_addPre3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre3Lbmi2,@function
+mcl_fp_addPre3Lbmi2: # @mcl_fp_addPre3Lbmi2
+# BB#0:
+ movq 16(%rdx), %rax
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rax
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end41:
+ .size mcl_fp_addPre3Lbmi2, .Lfunc_end41-mcl_fp_addPre3Lbmi2
+
+ .globl mcl_fp_subPre3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre3Lbmi2,@function
+mcl_fp_subPre3Lbmi2: # @mcl_fp_subPre3Lbmi2
+# BB#0:
+ movq 16(%rsi), %r8
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %r8
+ movq %rcx, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %r8, 16(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end42:
+ .size mcl_fp_subPre3Lbmi2, .Lfunc_end42-mcl_fp_subPre3Lbmi2
+
+ .globl mcl_fp_shr1_3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_3Lbmi2,@function
+mcl_fp_shr1_3Lbmi2: # @mcl_fp_shr1_3Lbmi2
+# BB#0:
+ movq 16(%rsi), %rax
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rdx
+ shrdq $1, %rdx, %rcx
+ movq %rcx, (%rdi)
+ shrdq $1, %rax, %rdx
+ movq %rdx, 8(%rdi)
+ shrq %rax
+ movq %rax, 16(%rdi)
+ retq
+.Lfunc_end43:
+ .size mcl_fp_shr1_3Lbmi2, .Lfunc_end43-mcl_fp_shr1_3Lbmi2
+
+ .globl mcl_fp_add3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add3Lbmi2,@function
+mcl_fp_add3Lbmi2: # @mcl_fp_add3Lbmi2
+# BB#0:
+ movq 16(%rdx), %r8
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r8
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r8, 16(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB44_2
+# BB#1: # %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r8, 16(%rdi)
+.LBB44_2: # %carry
+ retq
+.Lfunc_end44:
+ .size mcl_fp_add3Lbmi2, .Lfunc_end44-mcl_fp_add3Lbmi2
+
+ .globl mcl_fp_addNF3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF3Lbmi2,@function
+mcl_fp_addNF3Lbmi2: # @mcl_fp_addNF3Lbmi2
+# BB#0:
+ movq 16(%rdx), %r8
+ movq (%rdx), %r10
+ movq 8(%rdx), %r9
+ addq (%rsi), %r10
+ adcq 8(%rsi), %r9
+ adcq 16(%rsi), %r8
+ movq %r10, %rsi
+ subq (%rcx), %rsi
+ movq %r9, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r8, %rax
+ sbbq 16(%rcx), %rax
+ movq %rax, %rcx
+ sarq $63, %rcx
+ cmovsq %r10, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r9, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r8, %rax
+ movq %rax, 16(%rdi)
+ retq
+.Lfunc_end45:
+ .size mcl_fp_addNF3Lbmi2, .Lfunc_end45-mcl_fp_addNF3Lbmi2
+
+ .globl mcl_fp_sub3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub3Lbmi2,@function
+mcl_fp_sub3Lbmi2: # @mcl_fp_sub3Lbmi2
+# BB#0:
+ movq 16(%rsi), %r8
+ movq (%rsi), %rax
+ movq 8(%rsi), %r9
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r9
+ sbbq 16(%rdx), %r8
+ movq %rax, (%rdi)
+ movq %r9, 8(%rdi)
+ movq %r8, 16(%rdi)
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB46_2
+# BB#1: # %nocarry
+ retq
+.LBB46_2: # %carry
+ movq 8(%rcx), %rdx
+ movq 16(%rcx), %rsi
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %r9, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r8, %rsi
+ movq %rsi, 16(%rdi)
+ retq
+.Lfunc_end46:
+ .size mcl_fp_sub3Lbmi2, .Lfunc_end46-mcl_fp_sub3Lbmi2
+
+ .globl mcl_fp_subNF3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF3Lbmi2,@function
+mcl_fp_subNF3Lbmi2: # @mcl_fp_subNF3Lbmi2
+# BB#0:
+ movq 16(%rsi), %r10
+ movq (%rsi), %r8
+ movq 8(%rsi), %r9
+ subq (%rdx), %r8
+ sbbq 8(%rdx), %r9
+ sbbq 16(%rdx), %r10
+ movq %r10, %rdx
+ sarq $63, %rdx
+ movq %rdx, %rsi
+ shldq $1, %r10, %rsi
+ andq (%rcx), %rsi
+ movq 16(%rcx), %rax
+ andq %rdx, %rax
+ andq 8(%rcx), %rdx
+ addq %r8, %rsi
+ movq %rsi, (%rdi)
+ adcq %r9, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r10, %rax
+ movq %rax, 16(%rdi)
+ retq
+.Lfunc_end47:
+ .size mcl_fp_subNF3Lbmi2, .Lfunc_end47-mcl_fp_subNF3Lbmi2
+
+ .globl mcl_fpDbl_add3Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add3Lbmi2,@function
+mcl_fpDbl_add3Lbmi2: # @mcl_fpDbl_add3Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq 40(%rdx), %r10
+ movq 40(%rsi), %r8
+ movq 32(%rdx), %r11
+ movq 24(%rdx), %r14
+ movq 24(%rsi), %r15
+ movq 32(%rsi), %r9
+ movq 16(%rdx), %rbx
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rbx
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rbx, 16(%rdi)
+ adcq %r14, %r15
+ adcq %r11, %r9
+ adcq %r10, %r8
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %r15, %rdx
+ subq (%rcx), %rdx
+ movq %r9, %rsi
+ sbbq 8(%rcx), %rsi
+ movq %r8, %rbx
+ sbbq 16(%rcx), %rbx
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %r15, %rdx
+ movq %rdx, 24(%rdi)
+ testb %al, %al
+ cmovneq %r9, %rsi
+ movq %rsi, 32(%rdi)
+ cmovneq %r8, %rbx
+ movq %rbx, 40(%rdi)
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end48:
+ .size mcl_fpDbl_add3Lbmi2, .Lfunc_end48-mcl_fpDbl_add3Lbmi2
+
+ .globl mcl_fpDbl_sub3Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub3Lbmi2,@function
+mcl_fpDbl_sub3Lbmi2: # @mcl_fpDbl_sub3Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 40(%rdx), %r10
+ movq 40(%rsi), %r8
+ movq 32(%rsi), %r9
+ movq 24(%rsi), %r11
+ movq 16(%rsi), %r14
+ movq (%rsi), %rbx
+ movq 8(%rsi), %rax
+ xorl %esi, %esi
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %rax
+ movq 24(%rdx), %r15
+ movq 32(%rdx), %r12
+ sbbq 16(%rdx), %r14
+ movq %rbx, (%rdi)
+ movq %rax, 8(%rdi)
+ movq %r14, 16(%rdi)
+ sbbq %r15, %r11
+ sbbq %r12, %r9
+ sbbq %r10, %r8
+ movl $0, %eax
+ sbbq $0, %rax
+ andl $1, %eax
+ movq (%rcx), %rdx
+ cmoveq %rsi, %rdx
+ testb %al, %al
+ movq 16(%rcx), %rax
+ cmoveq %rsi, %rax
+ cmovneq 8(%rcx), %rsi
+ addq %r11, %rdx
+ movq %rdx, 24(%rdi)
+ adcq %r9, %rsi
+ movq %rsi, 32(%rdi)
+ adcq %r8, %rax
+ movq %rax, 40(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end49:
+ .size mcl_fpDbl_sub3Lbmi2, .Lfunc_end49-mcl_fpDbl_sub3Lbmi2
+
+ .globl mcl_fp_mulUnitPre4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre4Lbmi2,@function
+mcl_fp_mulUnitPre4Lbmi2: # @mcl_fp_mulUnitPre4Lbmi2
+# BB#0:
+ mulxq 24(%rsi), %r8, %r11
+ mulxq 16(%rsi), %r9, %rax
+ mulxq 8(%rsi), %r10, %rcx
+ mulxq (%rsi), %rdx, %rsi
+ movq %rdx, (%rdi)
+ addq %r10, %rsi
+ movq %rsi, 8(%rdi)
+ adcq %r9, %rcx
+ movq %rcx, 16(%rdi)
+ adcq %r8, %rax
+ movq %rax, 24(%rdi)
+ adcq $0, %r11
+ movq %r11, 32(%rdi)
+ retq
+.Lfunc_end50:
+ .size mcl_fp_mulUnitPre4Lbmi2, .Lfunc_end50-mcl_fp_mulUnitPre4Lbmi2
+
+ .globl mcl_fpDbl_mulPre4Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre4Lbmi2,@function
+mcl_fpDbl_mulPre4Lbmi2: # @mcl_fpDbl_mulPre4Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq (%rsi), %r14
+ movq 8(%rsi), %r10
+ movq (%rdx), %rcx
+ movq %rdx, %rbp
+ movq %r14, %rdx
+ mulxq %rcx, %rdx, %r15
+ movq 24(%rsi), %r11
+ movq 16(%rsi), %r9
+ movq %rdx, (%rdi)
+ movq %r10, %rdx
+ mulxq %rcx, %rbx, %r12
+ addq %r15, %rbx
+ movq %r9, %rdx
+ mulxq %rcx, %r13, %r15
+ adcq %r12, %r13
+ movq %r11, %rdx
+ mulxq %rcx, %rcx, %r12
+ adcq %r15, %rcx
+ adcq $0, %r12
+ movq 8(%rbp), %rax
+ movq %r14, %rdx
+ mulxq %rax, %r8, %rdx
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ addq %rbx, %r8
+ movq %r10, %rdx
+ mulxq %rax, %r15, %rdx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ adcq %r13, %r15
+ movq %r9, %rdx
+ mulxq %rax, %rbx, %r13
+ adcq %rcx, %rbx
+ movq %r11, %rdx
+ mulxq %rax, %rcx, %rax
+ adcq %r12, %rcx
+ sbbq %r12, %r12
+ andl $1, %r12d
+ addq -8(%rsp), %r15 # 8-byte Folded Reload
+ adcq -16(%rsp), %rbx # 8-byte Folded Reload
+ adcq %r13, %rcx
+ movq %r8, 8(%rdi)
+ adcq %rax, %r12
+ movq %rbp, %r13
+ movq 16(%r13), %rax
+ movq %r14, %rdx
+ mulxq %rax, %rdx, %r8
+ addq %r15, %rdx
+ movq %rdx, 16(%rdi)
+ movq %r10, %rdx
+ mulxq %rax, %rbp, %r10
+ adcq %rbx, %rbp
+ movq %r11, %rdx
+ mulxq %rax, %r14, %r11
+ movq %r9, %rdx
+ mulxq %rax, %r15, %rdx
+ adcq %rcx, %r15
+ adcq %r12, %r14
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %r8, %rbp
+ adcq %r10, %r15
+ adcq %rdx, %r14
+ adcq %r11, %rcx
+ movq 24(%r13), %rdx
+ mulxq 24(%rsi), %rbx, %r8
+ mulxq (%rsi), %rax, %r9
+ addq %rbp, %rax
+ mulxq 16(%rsi), %rbp, %r10
+ mulxq 8(%rsi), %rsi, %rdx
+ movq %rax, 24(%rdi)
+ adcq %r15, %rsi
+ adcq %r14, %rbp
+ adcq %rcx, %rbx
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %r9, %rsi
+ movq %rsi, 32(%rdi)
+ adcq %rdx, %rbp
+ movq %rbp, 40(%rdi)
+ adcq %r10, %rbx
+ movq %rbx, 48(%rdi)
+ adcq %r8, %rax
+ movq %rax, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end51:
+ .size mcl_fpDbl_mulPre4Lbmi2, .Lfunc_end51-mcl_fpDbl_mulPre4Lbmi2
+
+ .globl mcl_fpDbl_sqrPre4Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre4Lbmi2,@function
+mcl_fpDbl_sqrPre4Lbmi2: # @mcl_fpDbl_sqrPre4Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 24(%rsi), %r8
+ movq 16(%rsi), %r9
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rax
+ movq %rcx, %rdx
+ mulxq %rcx, %rdx, %r11
+ movq %rdx, (%rdi)
+ movq %r9, %rdx
+ mulxq %rcx, %rbp, %r10
+ movq %rbp, -16(%rsp) # 8-byte Spill
+ movq %r10, -8(%rsp) # 8-byte Spill
+ movq %rax, %rdx
+ mulxq %rcx, %r12, %r15
+ addq %r12, %r11
+ movq %r15, %rbx
+ adcq %rbp, %rbx
+ movq %r8, %rdx
+ mulxq %rcx, %rcx, %r13
+ adcq %r10, %rcx
+ adcq $0, %r13
+ addq %r12, %r11
+ movq %rax, %rdx
+ mulxq %rax, %rbp, %r12
+ adcq %rbx, %rbp
+ movq %r8, %rdx
+ mulxq %rax, %r10, %rbx
+ movq %r9, %rdx
+ mulxq %rax, %r14, %rdx
+ adcq %r14, %rcx
+ adcq %r13, %r10
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %r15, %rbp
+ adcq %r12, %rcx
+ adcq %rdx, %r10
+ movq %rdx, %r12
+ adcq %rbx, %rax
+ movq %r11, 8(%rdi)
+ addq -16(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 16(%rdi)
+ movq %r8, %rdx
+ mulxq %r9, %r11, %r8
+ movq %r9, %rdx
+ mulxq %r9, %r15, %rdx
+ adcq %r14, %rcx
+ adcq %r10, %r15
+ adcq %rax, %r11
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq -8(%rsp), %rcx # 8-byte Folded Reload
+ adcq %r12, %r15
+ adcq %rdx, %r11
+ adcq %r8, %rax
+ movq 24(%rsi), %rdx
+ mulxq 16(%rsi), %rbx, %r8
+ mulxq 8(%rsi), %rbp, %r9
+ mulxq (%rsi), %rsi, %r10
+ addq %rcx, %rsi
+ movq %rsi, 24(%rdi)
+ adcq %r15, %rbp
+ adcq %r11, %rbx
+ mulxq %rdx, %rdx, %rcx
+ adcq %rax, %rdx
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %r10, %rbp
+ movq %rbp, 32(%rdi)
+ adcq %r9, %rbx
+ movq %rbx, 40(%rdi)
+ adcq %r8, %rdx
+ movq %rdx, 48(%rdi)
+ adcq %rcx, %rax
+ movq %rax, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end52:
+ .size mcl_fpDbl_sqrPre4Lbmi2, .Lfunc_end52-mcl_fpDbl_sqrPre4Lbmi2
+
+ .globl mcl_fp_mont4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont4Lbmi2,@function
+mcl_fp_mont4Lbmi2: # @mcl_fp_mont4Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rdi, -88(%rsp) # 8-byte Spill
+ movq 24(%rsi), %rdi
+ movq %rdi, -40(%rsp) # 8-byte Spill
+ movq (%rdx), %rax
+ movq %rdi, %rdx
+ mulxq %rax, %r10, %r15
+ movq 16(%rsi), %rdx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ mulxq %rax, %rbx, %r11
+ movq (%rsi), %rdi
+ movq %rdi, -56(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rdx
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ mulxq %rax, %rbp, %r14
+ movq %rdi, %rdx
+ mulxq %rax, %r13, %r12
+ addq %rbp, %r12
+ adcq %rbx, %r14
+ adcq %r10, %r11
+ adcq $0, %r15
+ movq -8(%rcx), %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq (%rcx), %rdi
+ movq %rdi, -24(%rsp) # 8-byte Spill
+ movq %r13, %rdx
+ imulq %rax, %rdx
+ movq 24(%rcx), %rsi
+ movq %rsi, -72(%rsp) # 8-byte Spill
+ movq 16(%rcx), %rbp
+ movq %rbp, -8(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rax
+ movq %rax, -80(%rsp) # 8-byte Spill
+ mulxq %rsi, %r10, %r8
+ mulxq %rbp, %r9, %rbx
+ mulxq %rax, %rsi, %rcx
+ mulxq %rdi, %rdx, %rbp
+ addq %rsi, %rbp
+ adcq %r9, %rcx
+ adcq %r10, %rbx
+ adcq $0, %r8
+ addq %r13, %rdx
+ adcq %r12, %rbp
+ adcq %r14, %rcx
+ adcq %r11, %rbx
+ adcq %r15, %r8
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq -32(%rsp), %rdx # 8-byte Reload
+ movq 8(%rdx), %rdx
+ mulxq -40(%rsp), %r12, %r14 # 8-byte Folded Reload
+ mulxq -48(%rsp), %r15, %r11 # 8-byte Folded Reload
+ mulxq -64(%rsp), %r9, %rdi # 8-byte Folded Reload
+ mulxq -56(%rsp), %r10, %rsi # 8-byte Folded Reload
+ addq %r9, %rsi
+ adcq %r15, %rdi
+ adcq %r12, %r11
+ adcq $0, %r14
+ addq %rbp, %r10
+ adcq %rcx, %rsi
+ adcq %rbx, %rdi
+ adcq %r8, %r11
+ adcq %rax, %r14
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ movq %r10, %rdx
+ imulq -16(%rsp), %rdx # 8-byte Folded Reload
+ mulxq -72(%rsp), %r15, %r9 # 8-byte Folded Reload
+ mulxq -8(%rsp), %r12, %r8 # 8-byte Folded Reload
+ movq -80(%rsp), %r13 # 8-byte Reload
+ mulxq %r13, %rbp, %rcx
+ mulxq -24(%rsp), %rdx, %rax # 8-byte Folded Reload
+ addq %rbp, %rax
+ adcq %r12, %rcx
+ adcq %r15, %r8
+ adcq $0, %r9
+ addq %r10, %rdx
+ adcq %rsi, %rax
+ adcq %rdi, %rcx
+ adcq %r11, %r8
+ adcq %r14, %r9
+ adcq $0, %rbx
+ movq -32(%rsp), %rdx # 8-byte Reload
+ movq 16(%rdx), %rdx
+ mulxq -40(%rsp), %r15, %r11 # 8-byte Folded Reload
+ mulxq -48(%rsp), %r12, %r14 # 8-byte Folded Reload
+ mulxq -64(%rsp), %rsi, %rbp # 8-byte Folded Reload
+ mulxq -56(%rsp), %r10, %rdi # 8-byte Folded Reload
+ addq %rsi, %rdi
+ adcq %r12, %rbp
+ adcq %r15, %r14
+ adcq $0, %r11
+ addq %rax, %r10
+ adcq %rcx, %rdi
+ adcq %r8, %rbp
+ adcq %r9, %r14
+ adcq %rbx, %r11
+ sbbq %rbx, %rbx
+ movq %r10, %rdx
+ imulq -16(%rsp), %rdx # 8-byte Folded Reload
+ mulxq %r13, %rcx, %rsi
+ mulxq -24(%rsp), %r8, %rax # 8-byte Folded Reload
+ addq %rcx, %rax
+ mulxq -8(%rsp), %rcx, %r15 # 8-byte Folded Reload
+ adcq %rsi, %rcx
+ movq -72(%rsp), %r13 # 8-byte Reload
+ mulxq %r13, %r9, %rsi
+ adcq %r15, %r9
+ adcq $0, %rsi
+ andl $1, %ebx
+ addq %r10, %r8
+ adcq %rdi, %rax
+ adcq %rbp, %rcx
+ adcq %r14, %r9
+ adcq %r11, %rsi
+ adcq $0, %rbx
+ movq -32(%rsp), %rdx # 8-byte Reload
+ movq 24(%rdx), %rdx
+ mulxq -40(%rsp), %r11, %r8 # 8-byte Folded Reload
+ mulxq -48(%rsp), %r15, %rdi # 8-byte Folded Reload
+ mulxq -64(%rsp), %r12, %r14 # 8-byte Folded Reload
+ mulxq -56(%rsp), %r10, %rbp # 8-byte Folded Reload
+ addq %r12, %rbp
+ adcq %r15, %r14
+ adcq %r11, %rdi
+ adcq $0, %r8
+ addq %rax, %r10
+ adcq %rcx, %rbp
+ adcq %r9, %r14
+ adcq %rsi, %rdi
+ adcq %rbx, %r8
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq -16(%rsp), %rdx # 8-byte Reload
+ imulq %r10, %rdx
+ mulxq %r13, %rcx, %rsi
+ movq %rcx, -16(%rsp) # 8-byte Spill
+ mulxq -8(%rsp), %r11, %rbx # 8-byte Folded Reload
+ mulxq -80(%rsp), %r15, %rcx # 8-byte Folded Reload
+ movq -24(%rsp), %r9 # 8-byte Reload
+ mulxq %r9, %r12, %r13
+ addq %r15, %r13
+ adcq %r11, %rcx
+ adcq -16(%rsp), %rbx # 8-byte Folded Reload
+ adcq $0, %rsi
+ addq %r10, %r12
+ adcq %rbp, %r13
+ adcq %r14, %rcx
+ adcq %rdi, %rbx
+ adcq %r8, %rsi
+ adcq $0, %rax
+ movq %r13, %rdi
+ subq %r9, %rdi
+ movq %rcx, %rbp
+ sbbq -80(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbx, %r8
+ sbbq -8(%rsp), %r8 # 8-byte Folded Reload
+ movq %rsi, %rdx
+ sbbq -72(%rsp), %rdx # 8-byte Folded Reload
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %rsi, %rdx
+ testb %al, %al
+ cmovneq %r13, %rdi
+ movq -88(%rsp), %rax # 8-byte Reload
+ movq %rdi, (%rax)
+ cmovneq %rcx, %rbp
+ movq %rbp, 8(%rax)
+ cmovneq %rbx, %r8
+ movq %r8, 16(%rax)
+ movq %rdx, 24(%rax)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end53:
+ .size mcl_fp_mont4Lbmi2, .Lfunc_end53-mcl_fp_mont4Lbmi2
+
+ .globl mcl_fp_montNF4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF4Lbmi2,@function
+mcl_fp_montNF4Lbmi2: # @mcl_fp_montNF4Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rdi, -80(%rsp) # 8-byte Spill
+ movq (%rsi), %rdi
+ movq %rdi, -64(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rbp
+ movq %rbp, -72(%rsp) # 8-byte Spill
+ movq (%rdx), %rax
+ movq %rdx, %r15
+ movq %rbp, %rdx
+ mulxq %rax, %rbp, %r9
+ movq %rdi, %rdx
+ mulxq %rax, %r12, %rbx
+ movq 16(%rsi), %rdx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ addq %rbp, %rbx
+ mulxq %rax, %r14, %rbp
+ adcq %r9, %r14
+ movq 24(%rsi), %rdx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ mulxq %rax, %r8, %rdi
+ adcq %rbp, %r8
+ adcq $0, %rdi
+ movq -8(%rcx), %r13
+ movq (%rcx), %rax
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq %r12, %rdx
+ imulq %r13, %rdx
+ mulxq %rax, %rax, %r11
+ addq %r12, %rax
+ movq 8(%rcx), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ mulxq %rax, %rbp, %r10
+ adcq %rbx, %rbp
+ movq 16(%rcx), %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ mulxq %rax, %rsi, %rbx
+ adcq %r14, %rsi
+ movq 24(%rcx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ mulxq %rax, %rcx, %rdx
+ adcq %r8, %rcx
+ adcq $0, %rdi
+ addq %r11, %rbp
+ adcq %r10, %rsi
+ adcq %rbx, %rcx
+ adcq %rdx, %rdi
+ movq 8(%r15), %rdx
+ movq -72(%rsp), %r12 # 8-byte Reload
+ mulxq %r12, %rbx, %r9
+ movq -64(%rsp), %r15 # 8-byte Reload
+ mulxq %r15, %r10, %r11
+ addq %rbx, %r11
+ mulxq -48(%rsp), %rax, %r8 # 8-byte Folded Reload
+ adcq %r9, %rax
+ mulxq -16(%rsp), %r9, %rbx # 8-byte Folded Reload
+ adcq %r8, %r9
+ adcq $0, %rbx
+ addq %rbp, %r10
+ adcq %rsi, %r11
+ adcq %rcx, %rax
+ adcq %rdi, %r9
+ adcq $0, %rbx
+ movq %r10, %rdx
+ imulq %r13, %rdx
+ movq -56(%rsp), %r14 # 8-byte Reload
+ mulxq %r14, %rcx, %r8
+ addq %r10, %rcx
+ mulxq -24(%rsp), %r10, %rdi # 8-byte Folded Reload
+ adcq %r11, %r10
+ mulxq -40(%rsp), %rcx, %rsi # 8-byte Folded Reload
+ adcq %rax, %rcx
+ mulxq -8(%rsp), %rax, %rdx # 8-byte Folded Reload
+ adcq %r9, %rax
+ adcq $0, %rbx
+ addq %r8, %r10
+ adcq %rdi, %rcx
+ adcq %rsi, %rax
+ adcq %rdx, %rbx
+ movq -32(%rsp), %rdx # 8-byte Reload
+ movq 16(%rdx), %rdx
+ mulxq %r12, %rsi, %r8
+ mulxq %r15, %r11, %rbp
+ addq %rsi, %rbp
+ movq -48(%rsp), %r12 # 8-byte Reload
+ mulxq %r12, %rdi, %r9
+ adcq %r8, %rdi
+ mulxq -16(%rsp), %r8, %rsi # 8-byte Folded Reload
+ adcq %r9, %r8
+ adcq $0, %rsi
+ addq %r10, %r11
+ adcq %rcx, %rbp
+ adcq %rax, %rdi
+ adcq %rbx, %r8
+ adcq $0, %rsi
+ movq %r11, %rdx
+ imulq %r13, %rdx
+ mulxq %r14, %rax, %r10
+ addq %r11, %rax
+ movq -24(%rsp), %r14 # 8-byte Reload
+ mulxq %r14, %r9, %rbx
+ adcq %rbp, %r9
+ movq -40(%rsp), %r15 # 8-byte Reload
+ mulxq %r15, %rax, %rbp
+ adcq %rdi, %rax
+ mulxq -8(%rsp), %rcx, %rdx # 8-byte Folded Reload
+ adcq %r8, %rcx
+ adcq $0, %rsi
+ addq %r10, %r9
+ adcq %rbx, %rax
+ adcq %rbp, %rcx
+ adcq %rdx, %rsi
+ movq -32(%rsp), %rdx # 8-byte Reload
+ movq 24(%rdx), %rdx
+ mulxq -72(%rsp), %rbx, %r8 # 8-byte Folded Reload
+ mulxq -64(%rsp), %r11, %rbp # 8-byte Folded Reload
+ addq %rbx, %rbp
+ mulxq %r12, %rdi, %r10
+ adcq %r8, %rdi
+ mulxq -16(%rsp), %r8, %rbx # 8-byte Folded Reload
+ adcq %r10, %r8
+ adcq $0, %rbx
+ addq %r9, %r11
+ adcq %rax, %rbp
+ adcq %rcx, %rdi
+ adcq %rsi, %r8
+ adcq $0, %rbx
+ imulq %r11, %r13
+ movq %r13, %rdx
+ movq -56(%rsp), %r12 # 8-byte Reload
+ mulxq %r12, %rcx, %r9
+ addq %r11, %rcx
+ movq %r13, %rdx
+ mulxq %r14, %r11, %r10
+ adcq %rbp, %r11
+ movq %r13, %rdx
+ movq %r15, %rsi
+ mulxq %rsi, %rax, %rcx
+ adcq %rdi, %rax
+ movq %r13, %rdx
+ movq -8(%rsp), %rbp # 8-byte Reload
+ mulxq %rbp, %r15, %rdx
+ adcq %r8, %r15
+ adcq $0, %rbx
+ addq %r9, %r11
+ adcq %r10, %rax
+ adcq %rcx, %r15
+ adcq %rdx, %rbx
+ movq %r11, %rcx
+ subq %r12, %rcx
+ movq %rax, %rdx
+ sbbq %r14, %rdx
+ movq %r15, %rdi
+ sbbq %rsi, %rdi
+ movq %rbx, %rsi
+ sbbq %rbp, %rsi
+ cmovsq %r11, %rcx
+ movq -80(%rsp), %rbp # 8-byte Reload
+ movq %rcx, (%rbp)
+ cmovsq %rax, %rdx
+ movq %rdx, 8(%rbp)
+ cmovsq %r15, %rdi
+ movq %rdi, 16(%rbp)
+ cmovsq %rbx, %rsi
+ movq %rsi, 24(%rbp)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end54:
+ .size mcl_fp_montNF4Lbmi2, .Lfunc_end54-mcl_fp_montNF4Lbmi2
+
+ .globl mcl_fp_montRed4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed4Lbmi2,@function
+mcl_fp_montRed4Lbmi2: # @mcl_fp_montRed4Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rdi, -56(%rsp) # 8-byte Spill
+ movq -8(%rcx), %r13
+ movq (%rcx), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq (%rsi), %r10
+ movq %r10, %rdx
+ imulq %r13, %rdx
+ movq 24(%rcx), %rdi
+ movq %rdi, -48(%rsp) # 8-byte Spill
+ mulxq %rdi, %r9, %r15
+ movq %rdi, %r14
+ movq 16(%rcx), %rdi
+ movq %rdi, -8(%rsp) # 8-byte Spill
+ mulxq %rdi, %rdi, %rbx
+ movq 8(%rcx), %rcx
+ movq %rcx, -16(%rsp) # 8-byte Spill
+ mulxq %rcx, %rcx, %r8
+ mulxq %rax, %rdx, %rbp
+ addq %rcx, %rbp
+ adcq %rdi, %r8
+ adcq %r9, %rbx
+ adcq $0, %r15
+ movq 56(%rsi), %r11
+ movq 48(%rsi), %rcx
+ addq %r10, %rdx
+ movq 40(%rsi), %r12
+ adcq 8(%rsi), %rbp
+ adcq 16(%rsi), %r8
+ adcq 24(%rsi), %rbx
+ adcq 32(%rsi), %r15
+ adcq $0, %r12
+ adcq $0, %rcx
+ movq %rcx, -32(%rsp) # 8-byte Spill
+ adcq $0, %r11
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq %rbp, %rdx
+ imulq %r13, %rdx
+ mulxq %r14, %rax, %r9
+ movq %rax, -40(%rsp) # 8-byte Spill
+ mulxq -8(%rsp), %r14, %rdi # 8-byte Folded Reload
+ mulxq -16(%rsp), %r10, %rcx # 8-byte Folded Reload
+ mulxq -24(%rsp), %rdx, %rax # 8-byte Folded Reload
+ addq %r10, %rax
+ adcq %r14, %rcx
+ adcq -40(%rsp), %rdi # 8-byte Folded Reload
+ adcq $0, %r9
+ addq %rbp, %rdx
+ adcq %r8, %rax
+ adcq %rbx, %rcx
+ adcq %r15, %rdi
+ adcq %r12, %r9
+ adcq $0, -32(%rsp) # 8-byte Folded Spill
+ adcq $0, %r11
+ movq %r11, -40(%rsp) # 8-byte Spill
+ adcq $0, %rsi
+ movq %rax, %rdx
+ imulq %r13, %rdx
+ movq -48(%rsp), %r15 # 8-byte Reload
+ mulxq %r15, %rbp, %r8
+ movq %rbp, -64(%rsp) # 8-byte Spill
+ movq -8(%rsp), %r11 # 8-byte Reload
+ mulxq %r11, %rbx, %r10
+ movq %rbx, -72(%rsp) # 8-byte Spill
+ mulxq -16(%rsp), %r12, %rbp # 8-byte Folded Reload
+ movq -24(%rsp), %r14 # 8-byte Reload
+ mulxq %r14, %rdx, %rbx
+ addq %r12, %rbx
+ adcq -72(%rsp), %rbp # 8-byte Folded Reload
+ adcq -64(%rsp), %r10 # 8-byte Folded Reload
+ adcq $0, %r8
+ addq %rax, %rdx
+ adcq %rcx, %rbx
+ adcq %rdi, %rbp
+ adcq %r9, %r10
+ adcq -32(%rsp), %r8 # 8-byte Folded Reload
+ adcq $0, -40(%rsp) # 8-byte Folded Spill
+ adcq $0, %rsi
+ imulq %rbx, %r13
+ movq %r13, %rdx
+ mulxq %r15, %rax, %rdi
+ movq %rax, -32(%rsp) # 8-byte Spill
+ movq %r13, %rdx
+ mulxq %r11, %r9, %rax
+ movq %r13, %rdx
+ movq -16(%rsp), %r11 # 8-byte Reload
+ mulxq %r11, %r12, %rcx
+ movq %r13, %rdx
+ mulxq %r14, %r15, %r13
+ addq %r12, %r13
+ adcq %r9, %rcx
+ adcq -32(%rsp), %rax # 8-byte Folded Reload
+ adcq $0, %rdi
+ addq %rbx, %r15
+ adcq %rbp, %r13
+ adcq %r10, %rcx
+ adcq %r8, %rax
+ adcq -40(%rsp), %rdi # 8-byte Folded Reload
+ adcq $0, %rsi
+ movq %r13, %rdx
+ subq %r14, %rdx
+ movq %rcx, %rbp
+ sbbq %r11, %rbp
+ movq %rax, %r8
+ sbbq -8(%rsp), %r8 # 8-byte Folded Reload
+ movq %rdi, %rbx
+ sbbq -48(%rsp), %rbx # 8-byte Folded Reload
+ sbbq $0, %rsi
+ andl $1, %esi
+ cmovneq %rdi, %rbx
+ testb %sil, %sil
+ cmovneq %r13, %rdx
+ movq -56(%rsp), %rsi # 8-byte Reload
+ movq %rdx, (%rsi)
+ cmovneq %rcx, %rbp
+ movq %rbp, 8(%rsi)
+ cmovneq %rax, %r8
+ movq %r8, 16(%rsi)
+ movq %rbx, 24(%rsi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end55:
+ .size mcl_fp_montRed4Lbmi2, .Lfunc_end55-mcl_fp_montRed4Lbmi2
+
+ .globl mcl_fp_addPre4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre4Lbmi2,@function
+mcl_fp_addPre4Lbmi2: # @mcl_fp_addPre4Lbmi2
+# BB#0:
+ movq 24(%rdx), %r8
+ movq 24(%rsi), %r9
+ movq 16(%rdx), %rax
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rax
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ adcq %r8, %r9
+ movq %r9, 24(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end56:
+ .size mcl_fp_addPre4Lbmi2, .Lfunc_end56-mcl_fp_addPre4Lbmi2
+
+ .globl mcl_fp_subPre4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre4Lbmi2,@function
+mcl_fp_subPre4Lbmi2: # @mcl_fp_subPre4Lbmi2
+# BB#0:
+ movq 24(%rdx), %r8
+ movq 24(%rsi), %r9
+ movq 16(%rsi), %r10
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %r10
+ movq %rcx, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %r10, 16(%rdi)
+ sbbq %r8, %r9
+ movq %r9, 24(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end57:
+ .size mcl_fp_subPre4Lbmi2, .Lfunc_end57-mcl_fp_subPre4Lbmi2
+
+ .globl mcl_fp_shr1_4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_4Lbmi2,@function
+mcl_fp_shr1_4Lbmi2: # @mcl_fp_shr1_4Lbmi2
+# BB#0:
+ movq 24(%rsi), %rax
+ movq 16(%rsi), %rcx
+ movq (%rsi), %rdx
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rdx
+ movq %rdx, (%rdi)
+ shrdq $1, %rcx, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rax, %rcx
+ movq %rcx, 16(%rdi)
+ shrq %rax
+ movq %rax, 24(%rdi)
+ retq
+.Lfunc_end58:
+ .size mcl_fp_shr1_4Lbmi2, .Lfunc_end58-mcl_fp_shr1_4Lbmi2
+
+ .globl mcl_fp_add4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add4Lbmi2,@function
+mcl_fp_add4Lbmi2: # @mcl_fp_add4Lbmi2
+# BB#0:
+ movq 24(%rdx), %r10
+ movq 24(%rsi), %r8
+ movq 16(%rdx), %r9
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r9
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r9, 16(%rdi)
+ adcq %r10, %r8
+ movq %r8, 24(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r9
+ sbbq 24(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB59_2
+# BB#1: # %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r9, 16(%rdi)
+ movq %r8, 24(%rdi)
+.LBB59_2: # %carry
+ retq
+.Lfunc_end59:
+ .size mcl_fp_add4Lbmi2, .Lfunc_end59-mcl_fp_add4Lbmi2
+
+ .globl mcl_fp_addNF4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF4Lbmi2,@function
+mcl_fp_addNF4Lbmi2: # @mcl_fp_addNF4Lbmi2
+# BB#0:
+ pushq %rbx
+ movq 24(%rdx), %r8
+ movq 16(%rdx), %r9
+ movq (%rdx), %r11
+ movq 8(%rdx), %r10
+ addq (%rsi), %r11
+ adcq 8(%rsi), %r10
+ adcq 16(%rsi), %r9
+ adcq 24(%rsi), %r8
+ movq %r11, %rsi
+ subq (%rcx), %rsi
+ movq %r10, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r9, %rax
+ sbbq 16(%rcx), %rax
+ movq %r8, %rbx
+ sbbq 24(%rcx), %rbx
+ testq %rbx, %rbx
+ cmovsq %r11, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r10, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r9, %rax
+ movq %rax, 16(%rdi)
+ cmovsq %r8, %rbx
+ movq %rbx, 24(%rdi)
+ popq %rbx
+ retq
+.Lfunc_end60:
+ .size mcl_fp_addNF4Lbmi2, .Lfunc_end60-mcl_fp_addNF4Lbmi2
+
+ .globl mcl_fp_sub4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub4Lbmi2,@function
+mcl_fp_sub4Lbmi2: # @mcl_fp_sub4Lbmi2
+# BB#0:
+ movq 24(%rdx), %r10
+ movq 24(%rsi), %r8
+ movq 16(%rsi), %r9
+ movq (%rsi), %rax
+ movq 8(%rsi), %r11
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r11
+ sbbq 16(%rdx), %r9
+ movq %rax, (%rdi)
+ movq %r11, 8(%rdi)
+ movq %r9, 16(%rdi)
+ sbbq %r10, %r8
+ movq %r8, 24(%rdi)
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB61_2
+# BB#1: # %nocarry
+ retq
+.LBB61_2: # %carry
+ movq 24(%rcx), %r10
+ movq 8(%rcx), %rsi
+ movq 16(%rcx), %rdx
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %r11, %rsi
+ movq %rsi, 8(%rdi)
+ adcq %r9, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r8, %r10
+ movq %r10, 24(%rdi)
+ retq
+.Lfunc_end61:
+ .size mcl_fp_sub4Lbmi2, .Lfunc_end61-mcl_fp_sub4Lbmi2
+
+ .globl mcl_fp_subNF4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF4Lbmi2,@function
+mcl_fp_subNF4Lbmi2: # @mcl_fp_subNF4Lbmi2
+# BB#0:
+ pushq %rbx
+ movq 24(%rsi), %r11
+ movq 16(%rsi), %r8
+ movq (%rsi), %r9
+ movq 8(%rsi), %r10
+ subq (%rdx), %r9
+ sbbq 8(%rdx), %r10
+ sbbq 16(%rdx), %r8
+ sbbq 24(%rdx), %r11
+ movq %r11, %rdx
+ sarq $63, %rdx
+ movq 24(%rcx), %rsi
+ andq %rdx, %rsi
+ movq 16(%rcx), %rax
+ andq %rdx, %rax
+ movq 8(%rcx), %rbx
+ andq %rdx, %rbx
+ andq (%rcx), %rdx
+ addq %r9, %rdx
+ movq %rdx, (%rdi)
+ adcq %r10, %rbx
+ movq %rbx, 8(%rdi)
+ adcq %r8, %rax
+ movq %rax, 16(%rdi)
+ adcq %r11, %rsi
+ movq %rsi, 24(%rdi)
+ popq %rbx
+ retq
+.Lfunc_end62:
+ .size mcl_fp_subNF4Lbmi2, .Lfunc_end62-mcl_fp_subNF4Lbmi2
+
+ .globl mcl_fpDbl_add4Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add4Lbmi2,@function
+mcl_fpDbl_add4Lbmi2: # @mcl_fpDbl_add4Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r9
+ movq 56(%rsi), %r8
+ movq 48(%rdx), %r10
+ movq 48(%rsi), %r12
+ movq 40(%rdx), %r11
+ movq 32(%rdx), %r14
+ movq 24(%rdx), %r15
+ movq 16(%rdx), %rbx
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rbx
+ movq 40(%rsi), %r13
+ movq 24(%rsi), %rbp
+ movq 32(%rsi), %rsi
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rbx, 16(%rdi)
+ adcq %r15, %rbp
+ movq %rbp, 24(%rdi)
+ adcq %r14, %rsi
+ adcq %r11, %r13
+ adcq %r10, %r12
+ adcq %r9, %r8
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rsi, %rdx
+ subq (%rcx), %rdx
+ movq %r13, %rbp
+ sbbq 8(%rcx), %rbp
+ movq %r12, %rbx
+ sbbq 16(%rcx), %rbx
+ movq %r8, %r9
+ sbbq 24(%rcx), %r9
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %rsi, %rdx
+ movq %rdx, 32(%rdi)
+ testb %al, %al
+ cmovneq %r13, %rbp
+ movq %rbp, 40(%rdi)
+ cmovneq %r12, %rbx
+ movq %rbx, 48(%rdi)
+ cmovneq %r8, %r9
+ movq %r9, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end63:
+ .size mcl_fpDbl_add4Lbmi2, .Lfunc_end63-mcl_fpDbl_add4Lbmi2
+
+ .globl mcl_fpDbl_sub4Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub4Lbmi2,@function
+mcl_fpDbl_sub4Lbmi2: # @mcl_fpDbl_sub4Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r9
+ movq 56(%rsi), %r8
+ movq 48(%rdx), %r10
+ movq 24(%rdx), %r11
+ movq (%rsi), %rbx
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ movq %rbx, (%rdi)
+ movq 8(%rsi), %rbx
+ sbbq 8(%rdx), %rbx
+ movq %rbx, 8(%rdi)
+ movq 16(%rsi), %rbx
+ sbbq 16(%rdx), %rbx
+ movq %rbx, 16(%rdi)
+ movq 24(%rsi), %rbx
+ sbbq %r11, %rbx
+ movq 40(%rdx), %r11
+ movq 32(%rdx), %rdx
+ movq %rbx, 24(%rdi)
+ movq 32(%rsi), %r12
+ sbbq %rdx, %r12
+ movq 48(%rsi), %r14
+ movq 40(%rsi), %r15
+ sbbq %r11, %r15
+ sbbq %r10, %r14
+ sbbq %r9, %r8
+ movl $0, %edx
+ sbbq $0, %rdx
+ andl $1, %edx
+ movq (%rcx), %rsi
+ cmoveq %rax, %rsi
+ testb %dl, %dl
+ movq 16(%rcx), %rdx
+ cmoveq %rax, %rdx
+ movq 24(%rcx), %rbx
+ cmoveq %rax, %rbx
+ cmovneq 8(%rcx), %rax
+ addq %r12, %rsi
+ movq %rsi, 32(%rdi)
+ adcq %r15, %rax
+ movq %rax, 40(%rdi)
+ adcq %r14, %rdx
+ movq %rdx, 48(%rdi)
+ adcq %r8, %rbx
+ movq %rbx, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end64:
+ .size mcl_fpDbl_sub4Lbmi2, .Lfunc_end64-mcl_fpDbl_sub4Lbmi2
+
+ .globl mcl_fp_mulUnitPre5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre5Lbmi2,@function
+mcl_fp_mulUnitPre5Lbmi2: # @mcl_fp_mulUnitPre5Lbmi2
+# BB#0:
+ pushq %r14
+ pushq %rbx
+ mulxq 32(%rsi), %r8, %r11
+ mulxq 24(%rsi), %r9, %rax
+ mulxq 16(%rsi), %r10, %rcx
+ mulxq 8(%rsi), %r14, %rbx
+ mulxq (%rsi), %rdx, %rsi
+ movq %rdx, (%rdi)
+ addq %r14, %rsi
+ movq %rsi, 8(%rdi)
+ adcq %r10, %rbx
+ movq %rbx, 16(%rdi)
+ adcq %r9, %rcx
+ movq %rcx, 24(%rdi)
+ adcq %r8, %rax
+ movq %rax, 32(%rdi)
+ adcq $0, %r11
+ movq %r11, 40(%rdi)
+ popq %rbx
+ popq %r14
+ retq
+.Lfunc_end65:
+ .size mcl_fp_mulUnitPre5Lbmi2, .Lfunc_end65-mcl_fp_mulUnitPre5Lbmi2
+
+ .globl mcl_fpDbl_mulPre5Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre5Lbmi2,@function
+mcl_fpDbl_mulPre5Lbmi2: # @mcl_fpDbl_mulPre5Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rdi, -8(%rsp) # 8-byte Spill
+ movq (%rsi), %r11
+ movq 8(%rsi), %r10
+ movq (%rdx), %rcx
+ movq %r10, %rdx
+ mulxq %rcx, %rax, %r14
+ movq %r11, %rdx
+ mulxq %rcx, %rdx, %rbx
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq 24(%rsi), %rbp
+ movq %rbp, -16(%rsp) # 8-byte Spill
+ movq 16(%rsi), %r15
+ addq %rax, %rbx
+ movq %r15, %rdx
+ mulxq %rcx, %rax, %r13
+ adcq %r14, %rax
+ movq %rbp, %rdx
+ mulxq %rcx, %r8, %r12
+ adcq %r13, %r8
+ movq 32(%rsi), %r14
+ movq %r14, %rdx
+ mulxq %rcx, %r9, %r13
+ adcq %r12, %r9
+ movq -24(%rsp), %rcx # 8-byte Reload
+ movq %rcx, (%rdi)
+ adcq $0, %r13
+ movq -48(%rsp), %rdi # 8-byte Reload
+ movq 8(%rdi), %rbp
+ movq %r11, %rdx
+ mulxq %rbp, %r12, %r11
+ addq %rbx, %r12
+ movq %r10, %rdx
+ mulxq %rbp, %rbx, %rcx
+ movq %rcx, -24(%rsp) # 8-byte Spill
+ adcq %rax, %rbx
+ movq %r15, %rdx
+ mulxq %rbp, %rcx, %r10
+ adcq %r8, %rcx
+ movq -16(%rsp), %rdx # 8-byte Reload
+ mulxq %rbp, %rax, %r8
+ adcq %r9, %rax
+ movq %r14, %rdx
+ mulxq %rbp, %r15, %rdx
+ adcq %r13, %r15
+ sbbq %r14, %r14
+ andl $1, %r14d
+ addq %r11, %rbx
+ movq -8(%rsp), %rbp # 8-byte Reload
+ movq %r12, 8(%rbp)
+ adcq -24(%rsp), %rcx # 8-byte Folded Reload
+ adcq %r10, %rax
+ adcq %r8, %r15
+ adcq %rdx, %r14
+ movq (%rsi), %rdx
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq 8(%rsi), %r8
+ movq %r8, -16(%rsp) # 8-byte Spill
+ movq 16(%rdi), %rbp
+ mulxq %rbp, %r12, %rdx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ addq %rbx, %r12
+ movq %r8, %rdx
+ mulxq %rbp, %rbx, %rdx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ adcq %rcx, %rbx
+ movq 16(%rsi), %r11
+ movq %r11, %rdx
+ mulxq %rbp, %rcx, %rdx
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ adcq %rax, %rcx
+ movq 24(%rsi), %r13
+ movq %r13, %rdx
+ mulxq %rbp, %r9, %r10
+ adcq %r15, %r9
+ movq 32(%rsi), %r15
+ movq %r15, %rdx
+ mulxq %rbp, %r8, %rdx
+ adcq %r14, %r8
+ sbbq %r14, %r14
+ andl $1, %r14d
+ addq -32(%rsp), %rbx # 8-byte Folded Reload
+ adcq -40(%rsp), %rcx # 8-byte Folded Reload
+ adcq -56(%rsp), %r9 # 8-byte Folded Reload
+ adcq %r10, %r8
+ adcq %rdx, %r14
+ movq -8(%rsp), %r10 # 8-byte Reload
+ movq %r12, 16(%r10)
+ movq %rdi, %rbp
+ movq 24(%rbp), %rax
+ movq -24(%rsp), %rdx # 8-byte Reload
+ mulxq %rax, %r12, %rdi
+ addq %rbx, %r12
+ movq -16(%rsp), %rdx # 8-byte Reload
+ mulxq %rax, %rbx, %rdx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ adcq %rcx, %rbx
+ movq %r11, %rdx
+ mulxq %rax, %rcx, %r11
+ adcq %r9, %rcx
+ movq %r13, %rdx
+ mulxq %rax, %r13, %r9
+ adcq %r8, %r13
+ movq %r15, %rdx
+ mulxq %rax, %r8, %rdx
+ adcq %r14, %r8
+ sbbq %r14, %r14
+ andl $1, %r14d
+ addq %rdi, %rbx
+ movq %r12, 24(%r10)
+ movq %r10, %rdi
+ adcq -16(%rsp), %rcx # 8-byte Folded Reload
+ adcq %r11, %r13
+ adcq %r9, %r8
+ adcq %rdx, %r14
+ movq 32(%rbp), %rdx
+ mulxq 8(%rsi), %rax, %r9
+ mulxq (%rsi), %rbp, %r10
+ addq %rbx, %rbp
+ adcq %rcx, %rax
+ mulxq 16(%rsi), %rbx, %r15
+ adcq %r13, %rbx
+ mulxq 32(%rsi), %rcx, %r11
+ mulxq 24(%rsi), %rsi, %rdx
+ movq %rbp, 32(%rdi)
+ adcq %r8, %rsi
+ adcq %r14, %rcx
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ addq %r10, %rax
+ movq %rax, 40(%rdi)
+ adcq %r9, %rbx
+ movq %rbx, 48(%rdi)
+ adcq %r15, %rsi
+ movq %rsi, 56(%rdi)
+ adcq %rdx, %rcx
+ movq %rcx, 64(%rdi)
+ adcq %r11, %rbp
+ movq %rbp, 72(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end66:
+ .size mcl_fpDbl_mulPre5Lbmi2, .Lfunc_end66-mcl_fpDbl_mulPre5Lbmi2
+
+ .globl mcl_fpDbl_sqrPre5Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre5Lbmi2,@function
+mcl_fpDbl_sqrPre5Lbmi2: # @mcl_fpDbl_sqrPre5Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 16(%rsi), %r11
+ movq (%rsi), %rax
+ movq 8(%rsi), %rcx
+ movq %r11, %rdx
+ mulxq %rax, %rbx, %r15
+ movq 32(%rsi), %r9
+ movq %r9, -8(%rsp) # 8-byte Spill
+ movq 24(%rsi), %r13
+ movq %rcx, %rdx
+ mulxq %rax, %r12, %rbp
+ movq %rbp, -16(%rsp) # 8-byte Spill
+ movq %rax, %rdx
+ mulxq %rax, %rdx, %r14
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ addq %r12, %r14
+ adcq %rbp, %rbx
+ movq %r13, %rdx
+ mulxq %rax, %r8, %r10
+ adcq %r15, %r8
+ movq %r9, %rdx
+ mulxq %rax, %rbp, %r15
+ adcq %r10, %rbp
+ movq -24(%rsp), %rax # 8-byte Reload
+ movq %rax, (%rdi)
+ adcq $0, %r15
+ addq %r12, %r14
+ movq %rcx, %rdx
+ mulxq %rcx, %rax, %rdx
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ adcq %rbx, %rax
+ movq %r11, %rdx
+ mulxq %rcx, %rbx, %r10
+ adcq %r8, %rbx
+ movq %r13, %rdx
+ mulxq %rcx, %r13, %r8
+ adcq %rbp, %r13
+ movq %r9, %rdx
+ mulxq %rcx, %r12, %rcx
+ adcq %r15, %r12
+ sbbq %r15, %r15
+ andl $1, %r15d
+ addq -16(%rsp), %rax # 8-byte Folded Reload
+ movq %r14, 8(%rdi)
+ adcq -24(%rsp), %rbx # 8-byte Folded Reload
+ adcq %r10, %r13
+ adcq %r8, %r12
+ adcq %rcx, %r15
+ movq (%rsi), %r9
+ movq 8(%rsi), %r10
+ movq %r9, %rdx
+ mulxq %r11, %rbp, %rcx
+ movq %rcx, -16(%rsp) # 8-byte Spill
+ addq %rax, %rbp
+ movq %r10, %rdx
+ mulxq %r11, %rax, %r8
+ adcq %rbx, %rax
+ movq %r11, %rdx
+ mulxq %r11, %r14, %rcx
+ movq %rcx, -24(%rsp) # 8-byte Spill
+ adcq %r13, %r14
+ movq 24(%rsi), %rcx
+ movq %rcx, %rdx
+ mulxq %r11, %rbx, %r13
+ adcq %r12, %rbx
+ movq -8(%rsp), %rdx # 8-byte Reload
+ mulxq %r11, %r12, %rdx
+ adcq %r15, %r12
+ sbbq %r15, %r15
+ andl $1, %r15d
+ addq -16(%rsp), %rax # 8-byte Folded Reload
+ adcq %r8, %r14
+ movq %rbp, 16(%rdi)
+ adcq -24(%rsp), %rbx # 8-byte Folded Reload
+ adcq %r13, %r12
+ adcq %rdx, %r15
+ movq %r10, %rdx
+ mulxq %rcx, %r10, %rdx
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movq %r9, %rdx
+ mulxq %rcx, %r13, %rdx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ addq %rax, %r13
+ movq 16(%rsi), %r8
+ movq 32(%rsi), %rax
+ adcq %r14, %r10
+ movq %r8, %rdx
+ mulxq %rcx, %r9, %r14
+ adcq %rbx, %r9
+ movq %rcx, %rdx
+ mulxq %rcx, %r11, %rbp
+ adcq %r12, %r11
+ movq %rax, %rdx
+ mulxq %rcx, %r12, %rdx
+ adcq %r15, %r12
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ addq -16(%rsp), %r10 # 8-byte Folded Reload
+ movq %r13, 24(%rdi)
+ adcq -8(%rsp), %r9 # 8-byte Folded Reload
+ adcq %r14, %r11
+ adcq %rbp, %r12
+ adcq %rdx, %rbx
+ movq %rax, %rdx
+ mulxq 24(%rsi), %rbp, %r14
+ movq %rax, %rdx
+ mulxq (%rsi), %rcx, %r15
+ addq %r10, %rcx
+ movq %rax, %rdx
+ mulxq 8(%rsi), %rsi, %r10
+ movq %rcx, 32(%rdi)
+ adcq %r9, %rsi
+ movq %r8, %rdx
+ mulxq %rax, %rcx, %r8
+ adcq %r11, %rcx
+ adcq %r12, %rbp
+ movq %rax, %rdx
+ mulxq %rax, %rdx, %rax
+ adcq %rbx, %rdx
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ addq %r15, %rsi
+ movq %rsi, 40(%rdi)
+ adcq %r10, %rcx
+ movq %rcx, 48(%rdi)
+ adcq %r8, %rbp
+ movq %rbp, 56(%rdi)
+ adcq %r14, %rdx
+ movq %rdx, 64(%rdi)
+ adcq %rax, %rbx
+ movq %rbx, 72(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end67:
+ .size mcl_fpDbl_sqrPre5Lbmi2, .Lfunc_end67-mcl_fpDbl_sqrPre5Lbmi2
+
+ .globl mcl_fp_mont5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont5Lbmi2,@function
+mcl_fp_mont5Lbmi2: # @mcl_fp_mont5Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rdi, -112(%rsp) # 8-byte Spill
+ movq 32(%rsi), %rdi
+ movq %rdi, -64(%rsp) # 8-byte Spill
+ movq (%rdx), %rax
+ movq %rdi, %rdx
+ mulxq %rax, %r10, %rbx
+ movq 24(%rsi), %rdx
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ mulxq %rax, %r12, %r14
+ movq 16(%rsi), %rdx
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ mulxq %rax, %r13, %r11
+ movq (%rsi), %rbp
+ movq %rbp, -88(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rdx
+ movq %rdx, -96(%rsp) # 8-byte Spill
+ mulxq %rax, %rdi, %r9
+ movq %rbp, %rdx
+ mulxq %rax, %r15, %r8
+ addq %rdi, %r8
+ adcq %r13, %r9
+ adcq %r12, %r11
+ adcq %r10, %r14
+ adcq $0, %rbx
+ movq %rbx, -104(%rsp) # 8-byte Spill
+ movq -8(%rcx), %rax
+ movq %rax, -48(%rsp) # 8-byte Spill
+ movq %r15, %rdx
+ imulq %rax, %rdx
+ movq (%rcx), %rsi
+ movq %rsi, -32(%rsp) # 8-byte Spill
+ movq 32(%rcx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ mulxq %rax, %rax, %r12
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq 24(%rcx), %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ mulxq %rax, %r13, %r10
+ movq 8(%rcx), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ mulxq %rax, %rdi, %rbp
+ mulxq %rsi, %rax, %rbx
+ addq %rdi, %rbx
+ movq 16(%rcx), %rcx
+ movq %rcx, -40(%rsp) # 8-byte Spill
+ mulxq %rcx, %rdi, %rcx
+ adcq %rbp, %rdi
+ adcq %r13, %rcx
+ adcq -120(%rsp), %r10 # 8-byte Folded Reload
+ adcq $0, %r12
+ addq %r15, %rax
+ adcq %r8, %rbx
+ adcq %r9, %rdi
+ adcq %r11, %rcx
+ adcq %r14, %r10
+ adcq -104(%rsp), %r12 # 8-byte Folded Reload
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ movq -56(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rdx
+ mulxq -64(%rsp), %rax, %r14 # 8-byte Folded Reload
+ movq %rax, -104(%rsp) # 8-byte Spill
+ mulxq -72(%rsp), %rax, %r15 # 8-byte Folded Reload
+ movq %rax, -120(%rsp) # 8-byte Spill
+ mulxq -80(%rsp), %r13, %r9 # 8-byte Folded Reload
+ mulxq -96(%rsp), %r8, %rsi # 8-byte Folded Reload
+ mulxq -88(%rsp), %r11, %rax # 8-byte Folded Reload
+ addq %r8, %rax
+ adcq %r13, %rsi
+ adcq -120(%rsp), %r9 # 8-byte Folded Reload
+ adcq -104(%rsp), %r15 # 8-byte Folded Reload
+ adcq $0, %r14
+ addq %rbx, %r11
+ adcq %rdi, %rax
+ adcq %rcx, %rsi
+ adcq %r10, %r9
+ adcq %r12, %r15
+ adcq %rbp, %r14
+ sbbq %r12, %r12
+ andl $1, %r12d
+ movq %r11, %rdx
+ imulq -48(%rsp), %rdx # 8-byte Folded Reload
+ mulxq -8(%rsp), %rcx, %r10 # 8-byte Folded Reload
+ movq %rcx, -104(%rsp) # 8-byte Spill
+ mulxq -16(%rsp), %rcx, %rdi # 8-byte Folded Reload
+ movq %rcx, -120(%rsp) # 8-byte Spill
+ mulxq -40(%rsp), %r13, %rcx # 8-byte Folded Reload
+ mulxq -24(%rsp), %r8, %rbx # 8-byte Folded Reload
+ mulxq -32(%rsp), %rdx, %rbp # 8-byte Folded Reload
+ addq %r8, %rbp
+ adcq %r13, %rbx
+ adcq -120(%rsp), %rcx # 8-byte Folded Reload
+ adcq -104(%rsp), %rdi # 8-byte Folded Reload
+ adcq $0, %r10
+ addq %r11, %rdx
+ adcq %rax, %rbp
+ adcq %rsi, %rbx
+ adcq %r9, %rcx
+ adcq %r15, %rdi
+ adcq %r14, %r10
+ adcq $0, %r12
+ movq -56(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rdx
+ mulxq -64(%rsp), %rax, %r15 # 8-byte Folded Reload
+ movq %rax, -104(%rsp) # 8-byte Spill
+ mulxq -72(%rsp), %rax, %r11 # 8-byte Folded Reload
+ movq %rax, -120(%rsp) # 8-byte Spill
+ mulxq -80(%rsp), %r13, %r9 # 8-byte Folded Reload
+ mulxq -96(%rsp), %rsi, %r8 # 8-byte Folded Reload
+ mulxq -88(%rsp), %r14, %rax # 8-byte Folded Reload
+ addq %rsi, %rax
+ adcq %r13, %r8
+ adcq -120(%rsp), %r9 # 8-byte Folded Reload
+ adcq -104(%rsp), %r11 # 8-byte Folded Reload
+ adcq $0, %r15
+ addq %rbp, %r14
+ adcq %rbx, %rax
+ adcq %rcx, %r8
+ adcq %rdi, %r9
+ adcq %r10, %r11
+ adcq %r12, %r15
+ sbbq %r13, %r13
+ andl $1, %r13d
+ movq %r14, %rdx
+ imulq -48(%rsp), %rdx # 8-byte Folded Reload
+ mulxq -8(%rsp), %rcx, %r12 # 8-byte Folded Reload
+ movq %rcx, -104(%rsp) # 8-byte Spill
+ mulxq -16(%rsp), %rcx, %r10 # 8-byte Folded Reload
+ movq %rcx, -120(%rsp) # 8-byte Spill
+ mulxq -40(%rsp), %rdi, %rsi # 8-byte Folded Reload
+ mulxq -24(%rsp), %rcx, %rbx # 8-byte Folded Reload
+ mulxq -32(%rsp), %rdx, %rbp # 8-byte Folded Reload
+ addq %rcx, %rbp
+ adcq %rdi, %rbx
+ adcq -120(%rsp), %rsi # 8-byte Folded Reload
+ adcq -104(%rsp), %r10 # 8-byte Folded Reload
+ adcq $0, %r12
+ addq %r14, %rdx
+ adcq %rax, %rbp
+ adcq %r8, %rbx
+ adcq %r9, %rsi
+ adcq %r11, %r10
+ adcq %r15, %r12
+ adcq $0, %r13
+ movq -56(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rdx
+ mulxq -64(%rsp), %rcx, %rax # 8-byte Folded Reload
+ movq %rcx, -120(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ mulxq -72(%rsp), %r11, %r14 # 8-byte Folded Reload
+ mulxq -80(%rsp), %r8, %r9 # 8-byte Folded Reload
+ mulxq -96(%rsp), %rax, %rdi # 8-byte Folded Reload
+ mulxq -88(%rsp), %r15, %rcx # 8-byte Folded Reload
+ addq %rax, %rcx
+ adcq %r8, %rdi
+ adcq %r11, %r9
+ adcq -120(%rsp), %r14 # 8-byte Folded Reload
+ movq -104(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ addq %rbp, %r15
+ adcq %rbx, %rcx
+ adcq %rsi, %rdi
+ adcq %r10, %r9
+ adcq %r12, %r14
+ adcq %r13, %rax
+ movq %rax, -104(%rsp) # 8-byte Spill
+ sbbq %r12, %r12
+ andl $1, %r12d
+ movq %r15, %rdx
+ imulq -48(%rsp), %rdx # 8-byte Folded Reload
+ mulxq -8(%rsp), %rax, %rbp # 8-byte Folded Reload
+ movq %rax, -120(%rsp) # 8-byte Spill
+ mulxq -16(%rsp), %r13, %r10 # 8-byte Folded Reload
+ mulxq -40(%rsp), %rbx, %r8 # 8-byte Folded Reload
+ mulxq -24(%rsp), %rsi, %r11 # 8-byte Folded Reload
+ mulxq -32(%rsp), %rdx, %rax # 8-byte Folded Reload
+ addq %rsi, %rax
+ adcq %rbx, %r11
+ adcq %r13, %r8
+ adcq -120(%rsp), %r10 # 8-byte Folded Reload
+ adcq $0, %rbp
+ addq %r15, %rdx
+ adcq %rcx, %rax
+ adcq %rdi, %r11
+ adcq %r9, %r8
+ adcq %r14, %r10
+ adcq -104(%rsp), %rbp # 8-byte Folded Reload
+ adcq $0, %r12
+ movq -56(%rsp), %rcx # 8-byte Reload
+ movq 32(%rcx), %rdx
+ mulxq -64(%rsp), %rcx, %r14 # 8-byte Folded Reload
+ movq %rcx, -56(%rsp) # 8-byte Spill
+ mulxq -72(%rsp), %rcx, %rbx # 8-byte Folded Reload
+ movq %rcx, -64(%rsp) # 8-byte Spill
+ mulxq -80(%rsp), %rsi, %r15 # 8-byte Folded Reload
+ mulxq -96(%rsp), %rcx, %r9 # 8-byte Folded Reload
+ mulxq -88(%rsp), %r13, %rdi # 8-byte Folded Reload
+ addq %rcx, %rdi
+ adcq %rsi, %r9
+ adcq -64(%rsp), %r15 # 8-byte Folded Reload
+ adcq -56(%rsp), %rbx # 8-byte Folded Reload
+ adcq $0, %r14
+ addq %rax, %r13
+ adcq %r11, %rdi
+ adcq %r8, %r9
+ adcq %r10, %r15
+ adcq %rbp, %rbx
+ adcq %r12, %r14
+ sbbq %rax, %rax
+ movq -48(%rsp), %rdx # 8-byte Reload
+ imulq %r13, %rdx
+ mulxq -32(%rsp), %r10, %rcx # 8-byte Folded Reload
+ mulxq -24(%rsp), %r8, %rsi # 8-byte Folded Reload
+ addq %rcx, %r8
+ mulxq -40(%rsp), %rbp, %r11 # 8-byte Folded Reload
+ adcq %rsi, %rbp
+ mulxq -16(%rsp), %rcx, %r12 # 8-byte Folded Reload
+ adcq %r11, %rcx
+ mulxq -8(%rsp), %rsi, %r11 # 8-byte Folded Reload
+ adcq %r12, %rsi
+ adcq $0, %r11
+ andl $1, %eax
+ addq %r13, %r10
+ adcq %rdi, %r8
+ adcq %r9, %rbp
+ adcq %r15, %rcx
+ adcq %rbx, %rsi
+ adcq %r14, %r11
+ adcq $0, %rax
+ movq %r8, %rdi
+ subq -32(%rsp), %rdi # 8-byte Folded Reload
+ movq %rbp, %rbx
+ sbbq -24(%rsp), %rbx # 8-byte Folded Reload
+ movq %rcx, %r9
+ sbbq -40(%rsp), %r9 # 8-byte Folded Reload
+ movq %rsi, %rdx
+ sbbq -16(%rsp), %rdx # 8-byte Folded Reload
+ movq %r11, %r10
+ sbbq -8(%rsp), %r10 # 8-byte Folded Reload
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %rsi, %rdx
+ testb %al, %al
+ cmovneq %r8, %rdi
+ movq -112(%rsp), %rax # 8-byte Reload
+ movq %rdi, (%rax)
+ cmovneq %rbp, %rbx
+ movq %rbx, 8(%rax)
+ cmovneq %rcx, %r9
+ movq %r9, 16(%rax)
+ movq %rdx, 24(%rax)
+ cmovneq %r11, %r10
+ movq %r10, 32(%rax)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end68:
+ .size mcl_fp_mont5Lbmi2, .Lfunc_end68-mcl_fp_mont5Lbmi2
+
+ .globl mcl_fp_montNF5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF5Lbmi2,@function
+mcl_fp_montNF5Lbmi2: # @mcl_fp_montNF5Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq %rdi, -104(%rsp) # 8-byte Spill
+ movq (%rsi), %r13
+ movq %r13, -64(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rbp
+ movq %rbp, -24(%rsp) # 8-byte Spill
+ movq (%rdx), %rax
+ movq %rbp, %rdx
+ mulxq %rax, %rbp, %r9
+ movq %r13, %rdx
+ mulxq %rax, %r8, %r10
+ movq 16(%rsi), %rdx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ addq %rbp, %r10
+ mulxq %rax, %rbp, %rbx
+ adcq %r9, %rbp
+ movq 24(%rsi), %rdx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ mulxq %rax, %r15, %r9
+ adcq %rbx, %r15
+ movq 32(%rsi), %rdx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ mulxq %rax, %rax, %r11
+ adcq %r9, %rax
+ adcq $0, %r11
+ movq -8(%rcx), %rsi
+ movq %rsi, -72(%rsp) # 8-byte Spill
+ movq %r8, %rdx
+ imulq %rsi, %rdx
+ movq (%rcx), %rsi
+ movq %rsi, -88(%rsp) # 8-byte Spill
+ mulxq %rsi, %rbx, %r14
+ addq %r8, %rbx
+ movq 8(%rcx), %rsi
+ movq %rsi, -80(%rsp) # 8-byte Spill
+ mulxq %rsi, %rbx, %r12
+ adcq %r10, %rbx
+ movq 16(%rcx), %rsi
+ movq %rsi, -96(%rsp) # 8-byte Spill
+ mulxq %rsi, %r10, %rdi
+ adcq %rbp, %r10
+ movq 24(%rcx), %rsi
+ movq %rsi, -56(%rsp) # 8-byte Spill
+ mulxq %rsi, %r9, %rbp
+ adcq %r15, %r9
+ movq 32(%rcx), %rcx
+ movq %rcx, -8(%rsp) # 8-byte Spill
+ mulxq %rcx, %r8, %rcx
+ adcq %rax, %r8
+ adcq $0, %r11
+ addq %r14, %rbx
+ adcq %r12, %r10
+ adcq %rdi, %r9
+ adcq %rbp, %r8
+ adcq %rcx, %r11
+ movq -16(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rdx
+ mulxq -24(%rsp), %rcx, %rsi # 8-byte Folded Reload
+ mulxq %r13, %r14, %rax
+ addq %rcx, %rax
+ mulxq -32(%rsp), %rcx, %rdi # 8-byte Folded Reload
+ adcq %rsi, %rcx
+ mulxq -40(%rsp), %rsi, %r15 # 8-byte Folded Reload
+ adcq %rdi, %rsi
+ mulxq -48(%rsp), %rdi, %rbp # 8-byte Folded Reload
+ adcq %r15, %rdi
+ adcq $0, %rbp
+ addq %rbx, %r14
+ adcq %r10, %rax
+ adcq %r9, %rcx
+ adcq %r8, %rsi
+ adcq %r11, %rdi
+ adcq $0, %rbp
+ movq %r14, %rdx
+ movq -72(%rsp), %r12 # 8-byte Reload
+ imulq %r12, %rdx
+ mulxq -88(%rsp), %rbx, %r15 # 8-byte Folded Reload
+ addq %r14, %rbx
+ movq -80(%rsp), %r13 # 8-byte Reload
+ mulxq %r13, %r8, %rbx
+ adcq %rax, %r8
+ mulxq -96(%rsp), %r9, %rax # 8-byte Folded Reload
+ adcq %rcx, %r9
+ mulxq -56(%rsp), %r10, %rcx # 8-byte Folded Reload
+ adcq %rsi, %r10
+ mulxq -8(%rsp), %r11, %rdx # 8-byte Folded Reload
+ adcq %rdi, %r11
+ adcq $0, %rbp
+ addq %r15, %r8
+ adcq %rbx, %r9
+ adcq %rax, %r10
+ adcq %rcx, %r11
+ adcq %rdx, %rbp
+ movq -16(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rdx
+ mulxq -24(%rsp), %rcx, %rax # 8-byte Folded Reload
+ mulxq -64(%rsp), %r14, %rsi # 8-byte Folded Reload
+ addq %rcx, %rsi
+ mulxq -32(%rsp), %rbx, %rcx # 8-byte Folded Reload
+ adcq %rax, %rbx
+ mulxq -40(%rsp), %rdi, %r15 # 8-byte Folded Reload
+ adcq %rcx, %rdi
+ mulxq -48(%rsp), %rcx, %rax # 8-byte Folded Reload
+ adcq %r15, %rcx
+ adcq $0, %rax
+ addq %r8, %r14
+ adcq %r9, %rsi
+ adcq %r10, %rbx
+ adcq %r11, %rdi
+ adcq %rbp, %rcx
+ adcq $0, %rax
+ movq %r14, %rdx
+ imulq %r12, %rdx
+ movq -88(%rsp), %r12 # 8-byte Reload
+ mulxq %r12, %rbp, %r15
+ addq %r14, %rbp
+ mulxq %r13, %r8, %rbp
+ adcq %rsi, %r8
+ movq -96(%rsp), %r13 # 8-byte Reload
+ mulxq %r13, %r9, %rsi
+ adcq %rbx, %r9
+ mulxq -56(%rsp), %r10, %rbx # 8-byte Folded Reload
+ adcq %rdi, %r10
+ mulxq -8(%rsp), %r11, %rdx # 8-byte Folded Reload
+ adcq %rcx, %r11
+ adcq $0, %rax
+ addq %r15, %r8
+ adcq %rbp, %r9
+ adcq %rsi, %r10
+ adcq %rbx, %r11
+ adcq %rdx, %rax
+ movq -16(%rsp), %rcx # 8-byte Reload
+ movq 24(%rcx), %rdx
+ mulxq -24(%rsp), %rdi, %rsi # 8-byte Folded Reload
+ mulxq -64(%rsp), %r14, %rcx # 8-byte Folded Reload
+ addq %rdi, %rcx
+ mulxq -32(%rsp), %rbx, %rdi # 8-byte Folded Reload
+ adcq %rsi, %rbx
+ mulxq -40(%rsp), %rsi, %r15 # 8-byte Folded Reload
+ adcq %rdi, %rsi
+ mulxq -48(%rsp), %rdi, %rbp # 8-byte Folded Reload
+ adcq %r15, %rdi
+ adcq $0, %rbp
+ addq %r8, %r14
+ adcq %r9, %rcx
+ adcq %r10, %rbx
+ adcq %r11, %rsi
+ adcq %rax, %rdi
+ adcq $0, %rbp
+ movq %r14, %rdx
+ imulq -72(%rsp), %rdx # 8-byte Folded Reload
+ mulxq %r12, %rax, %r11
+ addq %r14, %rax
+ mulxq -80(%rsp), %r8, %r14 # 8-byte Folded Reload
+ adcq %rcx, %r8
+ mulxq %r13, %r9, %rax
+ adcq %rbx, %r9
+ movq -56(%rsp), %r12 # 8-byte Reload
+ mulxq %r12, %r10, %rbx
+ adcq %rsi, %r10
+ mulxq -8(%rsp), %rcx, %rdx # 8-byte Folded Reload
+ adcq %rdi, %rcx
+ adcq $0, %rbp
+ addq %r11, %r8
+ adcq %r14, %r9
+ adcq %rax, %r10
+ adcq %rbx, %rcx
+ adcq %rdx, %rbp
+ movq -16(%rsp), %rax # 8-byte Reload
+ movq 32(%rax), %rdx
+ mulxq -24(%rsp), %rdi, %rbx # 8-byte Folded Reload
+ mulxq -64(%rsp), %r14, %rsi # 8-byte Folded Reload
+ addq %rdi, %rsi
+ mulxq -32(%rsp), %rdi, %rax # 8-byte Folded Reload
+ adcq %rbx, %rdi
+ mulxq -40(%rsp), %rbx, %r15 # 8-byte Folded Reload
+ adcq %rax, %rbx
+ mulxq -48(%rsp), %r11, %rax # 8-byte Folded Reload
+ adcq %r15, %r11
+ adcq $0, %rax
+ addq %r8, %r14
+ adcq %r9, %rsi
+ adcq %r10, %rdi
+ adcq %rcx, %rbx
+ adcq %rbp, %r11
+ adcq $0, %rax
+ movq -72(%rsp), %rdx # 8-byte Reload
+ imulq %r14, %rdx
+ movq -88(%rsp), %r10 # 8-byte Reload
+ mulxq %r10, %rcx, %rbp
+ movq %rbp, -16(%rsp) # 8-byte Spill
+ addq %r14, %rcx
+ movq -80(%rsp), %r9 # 8-byte Reload
+ mulxq %r9, %r14, %rcx
+ movq %rcx, -24(%rsp) # 8-byte Spill
+ adcq %rsi, %r14
+ movq %r13, %r8
+ mulxq %r8, %r15, %r13
+ adcq %rdi, %r15
+ mulxq %r12, %rbp, %rcx
+ adcq %rbx, %rbp
+ movq -8(%rsp), %rbx # 8-byte Reload
+ mulxq %rbx, %r12, %rdx
+ adcq %r11, %r12
+ adcq $0, %rax
+ addq -16(%rsp), %r14 # 8-byte Folded Reload
+ adcq -24(%rsp), %r15 # 8-byte Folded Reload
+ adcq %r13, %rbp
+ adcq %rcx, %r12
+ adcq %rdx, %rax
+ movq %r14, %rcx
+ subq %r10, %rcx
+ movq %r15, %rsi
+ sbbq %r9, %rsi
+ movq %rbp, %rdi
+ sbbq %r8, %rdi
+ movq %r12, %r8
+ sbbq -56(%rsp), %r8 # 8-byte Folded Reload
+ movq %rax, %rdx
+ sbbq %rbx, %rdx
+ movq %rdx, %rbx
+ sarq $63, %rbx
+ cmovsq %r14, %rcx
+ movq -104(%rsp), %rbx # 8-byte Reload
+ movq %rcx, (%rbx)
+ cmovsq %r15, %rsi
+ movq %rsi, 8(%rbx)
+ cmovsq %rbp, %rdi
+ movq %rdi, 16(%rbx)
+ cmovsq %r12, %r8
+ movq %r8, 24(%rbx)
+ cmovsq %rax, %rdx
+ movq %rdx, 32(%rbx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end69:
+ .size mcl_fp_montNF5Lbmi2, .Lfunc_end69-mcl_fp_montNF5Lbmi2
+
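+# mcl_fp_montRed5Lbmi2: Montgomery reduction of a 10-limb input (at %rsi) to a
+# 5-limb result; uses the modulus pointed to by %rdx and what appears to be the
+# Montgomery constant stored just below it (-8(p)), ending with a conditional
+# subtraction of the modulus.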
+ .globl mcl_fp_montRed5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed5Lbmi2,@function
+mcl_fp_montRed5Lbmi2: # @mcl_fp_montRed5Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rdi, -80(%rsp) # 8-byte Spill
+ movq -8(%rcx), %rax
+ movq %rax, -64(%rsp) # 8-byte Spill
+ movq (%rcx), %rbx
+ movq %rbx, -8(%rsp) # 8-byte Spill
+ movq (%rsi), %r9
+ movq %r9, %rdx
+ imulq %rax, %rdx
+ movq %rax, %r15
+ movq 32(%rcx), %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ mulxq %rax, %r8, %r13
+ movq 24(%rcx), %r12
+ movq %r12, -32(%rsp) # 8-byte Spill
+ mulxq %r12, %r10, %r14
+ movq 16(%rcx), %rax
+ movq %rax, -48(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, -16(%rsp) # 8-byte Spill
+ mulxq %rax, %rdi, %rbp
+ mulxq %rcx, %rax, %r11
+ mulxq %rbx, %rdx, %rcx
+ addq %rax, %rcx
+ adcq %rdi, %r11
+ adcq %r10, %rbp
+ adcq %r8, %r14
+ adcq $0, %r13
+ addq %r9, %rdx
+ movq 72(%rsi), %rax
+ movq 64(%rsi), %rdx
+ adcq 8(%rsi), %rcx
+ adcq 16(%rsi), %r11
+ adcq 24(%rsi), %rbp
+ adcq 32(%rsi), %r14
+ adcq 40(%rsi), %r13
+ movq 56(%rsi), %rdi
+ movq 48(%rsi), %rsi
+ adcq $0, %rsi
+ movq %rsi, -88(%rsp) # 8-byte Spill
+ adcq $0, %rdi
+ movq %rdi, -72(%rsp) # 8-byte Spill
+ adcq $0, %rdx
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ adcq $0, %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq %rcx, %rdx
+ imulq %r15, %rdx
+ mulxq -40(%rsp), %rax, %r15 # 8-byte Folded Reload
+ movq %rax, -96(%rsp) # 8-byte Spill
+ mulxq %r12, %rax, %r10
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq -48(%rsp), %r12 # 8-byte Reload
+ mulxq %r12, %rbx, %r8
+ mulxq -16(%rsp), %r9, %rdi # 8-byte Folded Reload
+ mulxq -8(%rsp), %rdx, %rax # 8-byte Folded Reload
+ addq %r9, %rax
+ adcq %rbx, %rdi
+ adcq -104(%rsp), %r8 # 8-byte Folded Reload
+ adcq -96(%rsp), %r10 # 8-byte Folded Reload
+ adcq $0, %r15
+ addq %rcx, %rdx
+ adcq %r11, %rax
+ adcq %rbp, %rdi
+ adcq %r14, %r8
+ adcq %r13, %r10
+ adcq -88(%rsp), %r15 # 8-byte Folded Reload
+ adcq $0, -72(%rsp) # 8-byte Folded Spill
+ adcq $0, -56(%rsp) # 8-byte Folded Spill
+ adcq $0, -24(%rsp) # 8-byte Folded Spill
+ adcq $0, %rsi
+ movq %rax, %rdx
+ imulq -64(%rsp), %rdx # 8-byte Folded Reload
+ mulxq -40(%rsp), %rcx, %r13 # 8-byte Folded Reload
+ movq %rcx, -88(%rsp) # 8-byte Spill
+ mulxq -32(%rsp), %rcx, %r14 # 8-byte Folded Reload
+ movq %rcx, -96(%rsp) # 8-byte Spill
+ mulxq %r12, %r11, %rbx
+ mulxq -16(%rsp), %r9, %rbp # 8-byte Folded Reload
+ mulxq -8(%rsp), %rdx, %rcx # 8-byte Folded Reload
+ addq %r9, %rcx
+ adcq %r11, %rbp
+ adcq -96(%rsp), %rbx # 8-byte Folded Reload
+ adcq -88(%rsp), %r14 # 8-byte Folded Reload
+ adcq $0, %r13
+ addq %rax, %rdx
+ adcq %rdi, %rcx
+ adcq %r8, %rbp
+ adcq %r10, %rbx
+ adcq %r15, %r14
+ adcq -72(%rsp), %r13 # 8-byte Folded Reload
+ adcq $0, -56(%rsp) # 8-byte Folded Spill
+ adcq $0, -24(%rsp) # 8-byte Folded Spill
+ adcq $0, %rsi
+ movq %rcx, %rdx
+ imulq -64(%rsp), %rdx # 8-byte Folded Reload
+ movq -40(%rsp), %r9 # 8-byte Reload
+ mulxq %r9, %rax, %r12
+ movq %rax, -72(%rsp) # 8-byte Spill
+ mulxq -32(%rsp), %rax, %r10 # 8-byte Folded Reload
+ movq %rax, -88(%rsp) # 8-byte Spill
+ mulxq -48(%rsp), %r8, %r11 # 8-byte Folded Reload
+ mulxq -16(%rsp), %rdi, %r15 # 8-byte Folded Reload
+ mulxq -8(%rsp), %rdx, %rax # 8-byte Folded Reload
+ addq %rdi, %rax
+ adcq %r8, %r15
+ adcq -88(%rsp), %r11 # 8-byte Folded Reload
+ adcq -72(%rsp), %r10 # 8-byte Folded Reload
+ adcq $0, %r12
+ addq %rcx, %rdx
+ adcq %rbp, %rax
+ adcq %rbx, %r15
+ adcq %r14, %r11
+ adcq %r13, %r10
+ adcq -56(%rsp), %r12 # 8-byte Folded Reload
+ adcq $0, -24(%rsp) # 8-byte Folded Spill
+ adcq $0, %rsi
+ movq -64(%rsp), %rdx # 8-byte Reload
+ imulq %rax, %rdx
+ mulxq %r9, %rdi, %rcx
+ movq %rdi, -56(%rsp) # 8-byte Spill
+ mulxq -32(%rsp), %rbp, %rdi # 8-byte Folded Reload
+ movq %rbp, -64(%rsp) # 8-byte Spill
+ mulxq -48(%rsp), %r13, %rbp # 8-byte Folded Reload
+ mulxq -8(%rsp), %r8, %r9 # 8-byte Folded Reload
+ movq -16(%rsp), %r14 # 8-byte Reload
+ mulxq %r14, %rbx, %rdx
+ addq %r9, %rbx
+ adcq %r13, %rdx
+ adcq -64(%rsp), %rbp # 8-byte Folded Reload
+ adcq -56(%rsp), %rdi # 8-byte Folded Reload
+ adcq $0, %rcx
+ addq %rax, %r8
+ adcq %r15, %rbx
+ adcq %r11, %rdx
+ adcq %r10, %rbp
+ adcq %r12, %rdi
+ adcq -24(%rsp), %rcx # 8-byte Folded Reload
+ adcq $0, %rsi
+ movq %rbx, %rax
+ subq -8(%rsp), %rax # 8-byte Folded Reload
+ movq %rdx, %r8
+ sbbq %r14, %r8
+ movq %rbp, %r9
+ sbbq -48(%rsp), %r9 # 8-byte Folded Reload
+ movq %rdi, %r10
+ sbbq -32(%rsp), %r10 # 8-byte Folded Reload
+ movq %rcx, %r11
+ sbbq -40(%rsp), %r11 # 8-byte Folded Reload
+ sbbq $0, %rsi
+ andl $1, %esi
+ cmovneq %rcx, %r11
+ testb %sil, %sil
+ cmovneq %rbx, %rax
+ movq -80(%rsp), %rcx # 8-byte Reload
+ movq %rax, (%rcx)
+ cmovneq %rdx, %r8
+ movq %r8, 8(%rcx)
+ cmovneq %rbp, %r9
+ movq %r9, 16(%rcx)
+ cmovneq %rdi, %r10
+ movq %r10, 24(%rcx)
+ movq %r11, 32(%rcx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end70:
+ .size mcl_fp_montRed5Lbmi2, .Lfunc_end70-mcl_fp_montRed5Lbmi2
+
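+# mcl_fp_addPre5Lbmi2: plain 5-limb addition with no modular reduction; the
+# carry-out is returned in %rax.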
+ .globl mcl_fp_addPre5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre5Lbmi2,@function
+mcl_fp_addPre5Lbmi2: # @mcl_fp_addPre5Lbmi2
+# BB#0:
+ movq 32(%rdx), %r8
+ movq 24(%rdx), %r9
+ movq 24(%rsi), %r11
+ movq 32(%rsi), %r10
+ movq 16(%rdx), %rcx
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rcx
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rcx, 16(%rdi)
+ adcq %r9, %r11
+ movq %r11, 24(%rdi)
+ adcq %r8, %r10
+ movq %r10, 32(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end71:
+ .size mcl_fp_addPre5Lbmi2, .Lfunc_end71-mcl_fp_addPre5Lbmi2
+
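+# mcl_fp_subPre5Lbmi2: plain 5-limb subtraction with no reduction; the borrow
+# is returned in %rax.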
+ .globl mcl_fp_subPre5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre5Lbmi2,@function
+mcl_fp_subPre5Lbmi2: # @mcl_fp_subPre5Lbmi2
+# BB#0:
+ pushq %rbx
+ movq 32(%rsi), %r10
+ movq 24(%rdx), %r8
+ movq 32(%rdx), %r9
+ movq 24(%rsi), %r11
+ movq 16(%rsi), %rcx
+ movq (%rsi), %rbx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %rcx
+ movq %rbx, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %rcx, 16(%rdi)
+ sbbq %r8, %r11
+ movq %r11, 24(%rdi)
+ sbbq %r9, %r10
+ movq %r10, 32(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ popq %rbx
+ retq
+.Lfunc_end72:
+ .size mcl_fp_subPre5Lbmi2, .Lfunc_end72-mcl_fp_subPre5Lbmi2
+
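+# mcl_fp_shr1_5Lbmi2: right shift of a 5-limb value by one bit, propagating
+# bits across limb boundaries with shrd.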
+ .globl mcl_fp_shr1_5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_5Lbmi2,@function
+mcl_fp_shr1_5Lbmi2: # @mcl_fp_shr1_5Lbmi2
+# BB#0:
+ movq 32(%rsi), %r8
+ movq 24(%rsi), %rcx
+ movq 16(%rsi), %rdx
+ movq (%rsi), %rax
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rax
+ movq %rax, (%rdi)
+ shrdq $1, %rdx, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rcx, %rdx
+ movq %rdx, 16(%rdi)
+ shrdq $1, %r8, %rcx
+ movq %rcx, 24(%rdi)
+ shrq %r8
+ movq %r8, 32(%rdi)
+ retq
+.Lfunc_end73:
+ .size mcl_fp_shr1_5Lbmi2, .Lfunc_end73-mcl_fp_shr1_5Lbmi2
+
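+# mcl_fp_add5Lbmi2: modular addition; the raw sum is stored first, then the
+# modulus at %rcx is subtracted and the reduced value overwrites the result
+# only on the "nocarry" path (no borrow from the trial subtraction).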
+ .globl mcl_fp_add5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add5Lbmi2,@function
+mcl_fp_add5Lbmi2: # @mcl_fp_add5Lbmi2
+# BB#0:
+ pushq %rbx
+ movq 32(%rdx), %r11
+ movq 24(%rdx), %rbx
+ movq 24(%rsi), %r9
+ movq 32(%rsi), %r8
+ movq 16(%rdx), %r10
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r10
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r10, 16(%rdi)
+ adcq %rbx, %r9
+ movq %r9, 24(%rdi)
+ adcq %r11, %r8
+ movq %r8, 32(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r10
+ sbbq 24(%rcx), %r9
+ sbbq 32(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB74_2
+# BB#1: # %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r10, 16(%rdi)
+ movq %r9, 24(%rdi)
+ movq %r8, 32(%rdi)
+.LBB74_2: # %carry
+ popq %rbx
+ retq
+.Lfunc_end74:
+ .size mcl_fp_add5Lbmi2, .Lfunc_end74-mcl_fp_add5Lbmi2
+
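+# mcl_fp_addNF5Lbmi2: modular addition; subtracts the modulus and selects the
+# reduced or unreduced sum with a signed cmovs, avoiding a branch.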
+ .globl mcl_fp_addNF5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF5Lbmi2,@function
+mcl_fp_addNF5Lbmi2: # @mcl_fp_addNF5Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq 32(%rdx), %r8
+ movq 24(%rdx), %r9
+ movq 16(%rdx), %r10
+ movq (%rdx), %r14
+ movq 8(%rdx), %r11
+ addq (%rsi), %r14
+ adcq 8(%rsi), %r11
+ adcq 16(%rsi), %r10
+ adcq 24(%rsi), %r9
+ adcq 32(%rsi), %r8
+ movq %r14, %rsi
+ subq (%rcx), %rsi
+ movq %r11, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r10, %rbx
+ sbbq 16(%rcx), %rbx
+ movq %r9, %r15
+ sbbq 24(%rcx), %r15
+ movq %r8, %rax
+ sbbq 32(%rcx), %rax
+ movq %rax, %rcx
+ sarq $63, %rcx
+ cmovsq %r14, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r11, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r10, %rbx
+ movq %rbx, 16(%rdi)
+ cmovsq %r9, %r15
+ movq %r15, 24(%rdi)
+ cmovsq %r8, %rax
+ movq %rax, 32(%rdi)
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end75:
+ .size mcl_fp_addNF5Lbmi2, .Lfunc_end75-mcl_fp_addNF5Lbmi2
+
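+# mcl_fp_sub5Lbmi2: modular subtraction; if the subtraction borrows, the
+# modulus at %rcx is added back on the "carry" path.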
+ .globl mcl_fp_sub5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub5Lbmi2,@function
+mcl_fp_sub5Lbmi2: # @mcl_fp_sub5Lbmi2
+# BB#0:
+ pushq %r14
+ pushq %rbx
+ movq 32(%rsi), %r8
+ movq 24(%rdx), %r11
+ movq 32(%rdx), %r14
+ movq 24(%rsi), %r9
+ movq 16(%rsi), %r10
+ movq (%rsi), %rax
+ movq 8(%rsi), %rsi
+ xorl %ebx, %ebx
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %r10
+ movq %rax, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %r10, 16(%rdi)
+ sbbq %r11, %r9
+ movq %r9, 24(%rdi)
+ sbbq %r14, %r8
+ movq %r8, 32(%rdi)
+ sbbq $0, %rbx
+ testb $1, %bl
+ je .LBB76_2
+# BB#1: # %carry
+ movq 32(%rcx), %r11
+ movq 24(%rcx), %r14
+ movq 8(%rcx), %rdx
+ movq 16(%rcx), %rbx
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %rsi, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r10, %rbx
+ movq %rbx, 16(%rdi)
+ adcq %r9, %r14
+ movq %r14, 24(%rdi)
+ adcq %r8, %r11
+ movq %r11, 32(%rdi)
+.LBB76_2: # %nocarry
+ popq %rbx
+ popq %r14
+ retq
+.Lfunc_end76:
+ .size mcl_fp_sub5Lbmi2, .Lfunc_end76-mcl_fp_sub5Lbmi2
+
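+# mcl_fp_subNF5Lbmi2: branchless modular subtraction; the sign of the raw
+# difference is turned into a mask that adds back either zero or the modulus.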
+ .globl mcl_fp_subNF5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF5Lbmi2,@function
+mcl_fp_subNF5Lbmi2: # @mcl_fp_subNF5Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq 32(%rsi), %r11
+ movq 24(%rsi), %r8
+ movq 16(%rsi), %r9
+ movq (%rsi), %r10
+ movq 8(%rsi), %r14
+ subq (%rdx), %r10
+ sbbq 8(%rdx), %r14
+ sbbq 16(%rdx), %r9
+ sbbq 24(%rdx), %r8
+ sbbq 32(%rdx), %r11
+ movq %r11, %rax
+ sarq $63, %rax
+ movq %rax, %rdx
+ shldq $1, %r11, %rdx
+ movq 8(%rcx), %rbx
+ andq %rdx, %rbx
+ andq (%rcx), %rdx
+ movq 32(%rcx), %r15
+ andq %rax, %r15
+ rorxq $63, %rax, %rsi
+ andq 24(%rcx), %rax
+ andq 16(%rcx), %rsi
+ addq %r10, %rdx
+ movq %rdx, (%rdi)
+ adcq %r14, %rbx
+ movq %rbx, 8(%rdi)
+ adcq %r9, %rsi
+ movq %rsi, 16(%rdi)
+ adcq %r8, %rax
+ movq %rax, 24(%rdi)
+ adcq %r11, %r15
+ movq %r15, 32(%rdi)
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end77:
+ .size mcl_fp_subNF5Lbmi2, .Lfunc_end77-mcl_fp_subNF5Lbmi2
+
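+# mcl_fpDbl_add5Lbmi2: addition of two 10-limb (double-width) values; the low
+# five limbs are stored as-is and the high half is conditionally reduced by the
+# modulus at %rcx.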
+ .globl mcl_fpDbl_add5Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add5Lbmi2,@function
+mcl_fpDbl_add5Lbmi2: # @mcl_fpDbl_add5Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 72(%rdx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq 64(%rdx), %r11
+ movq 56(%rdx), %r14
+ movq 48(%rdx), %r15
+ movq 24(%rsi), %rbp
+ movq 32(%rsi), %r13
+ movq 16(%rdx), %r12
+ movq (%rdx), %rbx
+ movq 8(%rdx), %rax
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %rax
+ adcq 16(%rsi), %r12
+ adcq 24(%rdx), %rbp
+ adcq 32(%rdx), %r13
+ movq 40(%rdx), %r9
+ movq %rbx, (%rdi)
+ movq 72(%rsi), %r8
+ movq %rax, 8(%rdi)
+ movq 64(%rsi), %r10
+ movq %r12, 16(%rdi)
+ movq 56(%rsi), %r12
+ movq %rbp, 24(%rdi)
+ movq 48(%rsi), %rbp
+ movq 40(%rsi), %rbx
+ movq %r13, 32(%rdi)
+ adcq %r9, %rbx
+ adcq %r15, %rbp
+ adcq %r14, %r12
+ adcq %r11, %r10
+ adcq -8(%rsp), %r8 # 8-byte Folded Reload
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq %rbx, %rax
+ subq (%rcx), %rax
+ movq %rbp, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r12, %r9
+ sbbq 16(%rcx), %r9
+ movq %r10, %r11
+ sbbq 24(%rcx), %r11
+ movq %r8, %r14
+ sbbq 32(%rcx), %r14
+ sbbq $0, %rsi
+ andl $1, %esi
+ cmovneq %rbx, %rax
+ movq %rax, 40(%rdi)
+ testb %sil, %sil
+ cmovneq %rbp, %rdx
+ movq %rdx, 48(%rdi)
+ cmovneq %r12, %r9
+ movq %r9, 56(%rdi)
+ cmovneq %r10, %r11
+ movq %r11, 64(%rdi)
+ cmovneq %r8, %r14
+ movq %r14, 72(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end78:
+ .size mcl_fpDbl_add5Lbmi2, .Lfunc_end78-mcl_fpDbl_add5Lbmi2
+
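+# mcl_fpDbl_sub5Lbmi2: subtraction of two 10-limb values; on borrow the modulus
+# at %rcx is added back into the high half.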
+ .globl mcl_fpDbl_sub5Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub5Lbmi2,@function
+mcl_fpDbl_sub5Lbmi2: # @mcl_fpDbl_sub5Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 72(%rdx), %r9
+ movq 64(%rdx), %r10
+ movq 56(%rdx), %r14
+ movq 16(%rsi), %r8
+ movq (%rsi), %r15
+ movq 8(%rsi), %r11
+ xorl %eax, %eax
+ subq (%rdx), %r15
+ sbbq 8(%rdx), %r11
+ sbbq 16(%rdx), %r8
+ movq 24(%rsi), %r12
+ sbbq 24(%rdx), %r12
+ movq %r15, (%rdi)
+ movq 32(%rsi), %rbx
+ sbbq 32(%rdx), %rbx
+ movq %r11, 8(%rdi)
+ movq 48(%rdx), %r15
+ movq 40(%rdx), %rdx
+ movq %r8, 16(%rdi)
+ movq 72(%rsi), %r8
+ movq %r12, 24(%rdi)
+ movq 64(%rsi), %r11
+ movq %rbx, 32(%rdi)
+ movq 40(%rsi), %rbp
+ sbbq %rdx, %rbp
+ movq 56(%rsi), %r12
+ movq 48(%rsi), %r13
+ sbbq %r15, %r13
+ sbbq %r14, %r12
+ sbbq %r10, %r11
+ sbbq %r9, %r8
+ movl $0, %edx
+ sbbq $0, %rdx
+ andl $1, %edx
+ movq (%rcx), %rsi
+ cmoveq %rax, %rsi
+ testb %dl, %dl
+ movq 16(%rcx), %rdx
+ cmoveq %rax, %rdx
+ movq 8(%rcx), %rbx
+ cmoveq %rax, %rbx
+ movq 32(%rcx), %r9
+ cmoveq %rax, %r9
+ cmovneq 24(%rcx), %rax
+ addq %rbp, %rsi
+ movq %rsi, 40(%rdi)
+ adcq %r13, %rbx
+ movq %rbx, 48(%rdi)
+ adcq %r12, %rdx
+ movq %rdx, 56(%rdi)
+ adcq %r11, %rax
+ movq %rax, 64(%rdi)
+ adcq %r8, %r9
+ movq %r9, 72(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end79:
+ .size mcl_fpDbl_sub5Lbmi2, .Lfunc_end79-mcl_fpDbl_sub5Lbmi2
+
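+# mcl_fp_mulUnitPre6Lbmi2: multiplies a 6-limb value by the single 64-bit word
+# passed in %rdx (via mulx) and stores the 7-limb product; no reduction.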
+ .globl mcl_fp_mulUnitPre6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre6Lbmi2,@function
+mcl_fp_mulUnitPre6Lbmi2: # @mcl_fp_mulUnitPre6Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ mulxq 40(%rsi), %r8, %r11
+ mulxq 32(%rsi), %r9, %r12
+ mulxq 24(%rsi), %r10, %rcx
+ mulxq 16(%rsi), %r14, %rbx
+ mulxq 8(%rsi), %r15, %rax
+ mulxq (%rsi), %rdx, %rsi
+ movq %rdx, (%rdi)
+ addq %r15, %rsi
+ movq %rsi, 8(%rdi)
+ adcq %r14, %rax
+ movq %rax, 16(%rdi)
+ adcq %r10, %rbx
+ movq %rbx, 24(%rdi)
+ adcq %r9, %rcx
+ movq %rcx, 32(%rdi)
+ adcq %r8, %r12
+ movq %r12, 40(%rdi)
+ adcq $0, %r11
+ movq %r11, 48(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end80:
+ .size mcl_fp_mulUnitPre6Lbmi2, .Lfunc_end80-mcl_fp_mulUnitPre6Lbmi2
+
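+# mcl_fpDbl_mulPre6Lbmi2: full 6x6-limb schoolbook multiplication producing a
+# 12-limb product; no reduction.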
+ .globl mcl_fpDbl_mulPre6Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre6Lbmi2,@function
+mcl_fpDbl_mulPre6Lbmi2: # @mcl_fpDbl_mulPre6Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %r11
+ movq %r11, -16(%rsp) # 8-byte Spill
+ movq %rdi, -8(%rsp) # 8-byte Spill
+ movq (%rsi), %r15
+ movq 8(%rsi), %rcx
+ movq %rcx, -24(%rsp) # 8-byte Spill
+ movq (%r11), %rax
+ movq %rcx, %rdx
+ mulxq %rax, %rcx, %r14
+ movq %r15, %rdx
+ mulxq %rax, %rdx, %rbp
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq 24(%rsi), %rbx
+ movq %rbx, -32(%rsp) # 8-byte Spill
+ movq 16(%rsi), %rdx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ addq %rcx, %rbp
+ mulxq %rax, %rcx, %r12
+ adcq %r14, %rcx
+ movq %rbx, %rdx
+ mulxq %rax, %rbx, %r14
+ adcq %r12, %rbx
+ movq 32(%rsi), %r12
+ movq %r12, %rdx
+ mulxq %rax, %r8, %r13
+ adcq %r14, %r8
+ movq 40(%rsi), %r14
+ movq %r14, %rdx
+ mulxq %rax, %r9, %r10
+ adcq %r13, %r9
+ movq -48(%rsp), %rax # 8-byte Reload
+ movq %rax, (%rdi)
+ adcq $0, %r10
+ movq 8(%r11), %rdi
+ movq %r15, %rdx
+ mulxq %rdi, %r13, %rax
+ movq %rax, -48(%rsp) # 8-byte Spill
+ addq %rbp, %r13
+ movq -24(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %rbp, %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ adcq %rcx, %rbp
+ movq -40(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %rax, %r11
+ adcq %rbx, %rax
+ movq -32(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %rbx, %rcx
+ movq %rcx, -32(%rsp) # 8-byte Spill
+ adcq %r8, %rbx
+ movq %r12, %rdx
+ mulxq %rdi, %rcx, %r8
+ adcq %r9, %rcx
+ movq %r14, %rdx
+ mulxq %rdi, %r12, %rdx
+ adcq %r10, %r12
+ sbbq %r15, %r15
+ andl $1, %r15d
+ addq -48(%rsp), %rbp # 8-byte Folded Reload
+ adcq -24(%rsp), %rax # 8-byte Folded Reload
+ adcq %r11, %rbx
+ movq -8(%rsp), %rdi # 8-byte Reload
+ movq %r13, 8(%rdi)
+ adcq -32(%rsp), %rcx # 8-byte Folded Reload
+ adcq %r8, %r12
+ adcq %rdx, %r15
+ movq (%rsi), %rdx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq 8(%rsi), %r8
+ movq %r8, -24(%rsp) # 8-byte Spill
+ movq -16(%rsp), %r14 # 8-byte Reload
+ movq 16(%r14), %rdi
+ mulxq %rdi, %r13, %rdx
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ addq %rbp, %r13
+ movq %r8, %rdx
+ mulxq %rdi, %r8, %rdx
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ adcq %rax, %r8
+ movq 16(%rsi), %rdx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ mulxq %rdi, %r11, %rax
+ movq %rax, -88(%rsp) # 8-byte Spill
+ adcq %rbx, %r11
+ movq 24(%rsi), %rdx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ mulxq %rdi, %rax, %rbx
+ adcq %rcx, %rax
+ movq 32(%rsi), %rdx
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ mulxq %rdi, %r10, %rcx
+ adcq %r12, %r10
+ movq 40(%rsi), %rdx
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ mulxq %rdi, %r9, %rdx
+ adcq %r15, %r9
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ addq -72(%rsp), %r8 # 8-byte Folded Reload
+ adcq -80(%rsp), %r11 # 8-byte Folded Reload
+ adcq -88(%rsp), %rax # 8-byte Folded Reload
+ adcq %rbx, %r10
+ adcq %rcx, %r9
+ adcq %rdx, %rbp
+ movq -8(%rsp), %rcx # 8-byte Reload
+ movq %r13, 16(%rcx)
+ movq 24(%r14), %rdi
+ movq -32(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %r12, %rcx
+ movq %rcx, -32(%rsp) # 8-byte Spill
+ addq %r8, %r12
+ movq -24(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %rbx, %rcx
+ movq %rcx, -24(%rsp) # 8-byte Spill
+ adcq %r11, %rbx
+ movq -40(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %rcx, %r11
+ adcq %rax, %rcx
+ movq -48(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %r14, %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ adcq %r10, %r14
+ movq -56(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %r8, %rax
+ adcq %r9, %r8
+ movq -64(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %r13, %rdx
+ adcq %rbp, %r13
+ sbbq %r15, %r15
+ andl $1, %r15d
+ addq -32(%rsp), %rbx # 8-byte Folded Reload
+ adcq -24(%rsp), %rcx # 8-byte Folded Reload
+ adcq %r11, %r14
+ movq -8(%rsp), %rdi # 8-byte Reload
+ movq %r12, 24(%rdi)
+ adcq -40(%rsp), %r8 # 8-byte Folded Reload
+ adcq %rax, %r13
+ adcq %rdx, %r15
+ movq (%rsi), %rdx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rbp
+ movq %rbp, -24(%rsp) # 8-byte Spill
+ movq -16(%rsp), %rax # 8-byte Reload
+ movq 32(%rax), %rdi
+ mulxq %rdi, %r12, %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ addq %rbx, %r12
+ movq %rbp, %rdx
+ mulxq %rdi, %rbx, %rax
+ movq %rax, -48(%rsp) # 8-byte Spill
+ adcq %rcx, %rbx
+ movq 16(%rsi), %r11
+ movq %r11, %rdx
+ mulxq %rdi, %rax, %rcx
+ movq %rcx, -56(%rsp) # 8-byte Spill
+ adcq %r14, %rax
+ movq 24(%rsi), %r14
+ movq %r14, %rdx
+ mulxq %rdi, %rbp, %rcx
+ movq %rcx, -64(%rsp) # 8-byte Spill
+ adcq %r8, %rbp
+ movq 32(%rsi), %r8
+ movq %r8, %rdx
+ mulxq %rdi, %rcx, %r10
+ adcq %r13, %rcx
+ movq 40(%rsi), %r13
+ movq %r13, %rdx
+ mulxq %rdi, %r9, %rdx
+ adcq %r15, %r9
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ addq -40(%rsp), %rbx # 8-byte Folded Reload
+ adcq -48(%rsp), %rax # 8-byte Folded Reload
+ adcq -56(%rsp), %rbp # 8-byte Folded Reload
+ adcq -64(%rsp), %rcx # 8-byte Folded Reload
+ adcq %r10, %r9
+ adcq %rdx, %rsi
+ movq -8(%rsp), %r10 # 8-byte Reload
+ movq %r12, 32(%r10)
+ movq -16(%rsp), %rdx # 8-byte Reload
+ movq 40(%rdx), %rdi
+ movq -32(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %r15, %rdx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ addq %rbx, %r15
+ movq -24(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %rbx, %r12
+ adcq %rax, %rbx
+ movq %r11, %rdx
+ mulxq %rdi, %rax, %r11
+ adcq %rbp, %rax
+ movq %r14, %rdx
+ mulxq %rdi, %rbp, %r14
+ adcq %rcx, %rbp
+ movq %r8, %rdx
+ mulxq %rdi, %rcx, %r8
+ adcq %r9, %rcx
+ movq %r13, %rdx
+ mulxq %rdi, %rdi, %r9
+ adcq %rsi, %rdi
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ addq -16(%rsp), %rbx # 8-byte Folded Reload
+ movq %r15, 40(%r10)
+ movq %rbx, 48(%r10)
+ adcq %r12, %rax
+ movq %rax, 56(%r10)
+ adcq %r11, %rbp
+ movq %rbp, 64(%r10)
+ adcq %r14, %rcx
+ movq %rcx, 72(%r10)
+ adcq %r8, %rdi
+ movq %rdi, 80(%r10)
+ adcq %r9, %rsi
+ movq %rsi, 88(%r10)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end81:
+ .size mcl_fpDbl_mulPre6Lbmi2, .Lfunc_end81-mcl_fpDbl_mulPre6Lbmi2
+
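+# mcl_fpDbl_sqrPre6Lbmi2: full squaring of a 6-limb value into a 12-limb
+# result; no reduction.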
+ .globl mcl_fpDbl_sqrPre6Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre6Lbmi2,@function
+mcl_fpDbl_sqrPre6Lbmi2: # @mcl_fpDbl_sqrPre6Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, %r9
+ movq %r9, -8(%rsp) # 8-byte Spill
+ movq 16(%rsi), %rdx
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rax
+ mulxq %rcx, %r10, %r8
+ movq 24(%rsi), %rbp
+ movq %rbp, -32(%rsp) # 8-byte Spill
+ movq %rax, %rdx
+ mulxq %rcx, %r11, %rbx
+ movq %rbx, -16(%rsp) # 8-byte Spill
+ movq %rcx, %rdx
+ mulxq %rcx, %rdx, %r14
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ addq %r11, %r14
+ adcq %rbx, %r10
+ movq %rbp, %rdx
+ mulxq %rcx, %r15, %rbp
+ adcq %r8, %r15
+ movq 32(%rsi), %rbx
+ movq %rbx, %rdx
+ mulxq %rcx, %r8, %r13
+ adcq %rbp, %r8
+ movq 40(%rsi), %rdi
+ movq %rdi, %rdx
+ mulxq %rcx, %rcx, %r12
+ adcq %r13, %rcx
+ movq -40(%rsp), %rdx # 8-byte Reload
+ movq %rdx, (%r9)
+ adcq $0, %r12
+ addq %r11, %r14
+ movq %rax, %rdx
+ mulxq %rax, %rbp, %rdx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ adcq %r10, %rbp
+ movq -24(%rsp), %rdx # 8-byte Reload
+ mulxq %rax, %r13, %r10
+ adcq %r15, %r13
+ movq -32(%rsp), %rdx # 8-byte Reload
+ mulxq %rax, %r15, %rdx
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ adcq %r8, %r15
+ movq %rbx, %rdx
+ mulxq %rax, %rbx, %r8
+ adcq %rcx, %rbx
+ movq %rdi, %rdx
+ mulxq %rax, %r11, %rax
+ adcq %r12, %r11
+ sbbq %r12, %r12
+ andl $1, %r12d
+ addq -16(%rsp), %rbp # 8-byte Folded Reload
+ adcq -40(%rsp), %r13 # 8-byte Folded Reload
+ movq %r14, 8(%r9)
+ adcq %r10, %r15
+ adcq -24(%rsp), %rbx # 8-byte Folded Reload
+ adcq %r8, %r11
+ adcq %rax, %r12
+ movq (%rsi), %rdx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rdi
+ movq %rdi, -24(%rsp) # 8-byte Spill
+ movq 16(%rsi), %rcx
+ mulxq %rcx, %rax, %rdx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ addq %rbp, %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq %rdi, %rdx
+ mulxq %rcx, %rbp, %rax
+ movq %rax, -56(%rsp) # 8-byte Spill
+ adcq %r13, %rbp
+ movq %rcx, %rdx
+ mulxq %rcx, %r13, %rax
+ movq %rax, -64(%rsp) # 8-byte Spill
+ adcq %r15, %r13
+ movq 24(%rsi), %rax
+ movq %rax, %rdx
+ mulxq %rcx, %r8, %rdi
+ movq %rdi, -40(%rsp) # 8-byte Spill
+ adcq %r8, %rbx
+ movq 32(%rsi), %r10
+ movq %r10, %rdx
+ mulxq %rcx, %r14, %r15
+ adcq %r11, %r14
+ movq 40(%rsi), %r11
+ movq %r11, %rdx
+ mulxq %rcx, %r9, %rdx
+ adcq %r12, %r9
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq -48(%rsp), %rbp # 8-byte Folded Reload
+ adcq -56(%rsp), %r13 # 8-byte Folded Reload
+ adcq -64(%rsp), %rbx # 8-byte Folded Reload
+ adcq %rdi, %r14
+ adcq %r15, %r9
+ adcq %rdx, %rcx
+ movq -32(%rsp), %rdx # 8-byte Reload
+ mulxq %rax, %rdi, %rdx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ addq %rbp, %rdi
+ movq -24(%rsp), %rdx # 8-byte Reload
+ mulxq %rax, %r15, %rbp
+ adcq %r13, %r15
+ adcq %r8, %rbx
+ movq %rax, %rdx
+ mulxq %rax, %r8, %rdx
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ adcq %r14, %r8
+ movq %r10, %rdx
+ mulxq %rax, %r12, %r10
+ adcq %r9, %r12
+ movq %r11, %rdx
+ mulxq %rax, %r13, %rax
+ adcq %rcx, %r13
+ sbbq %r9, %r9
+ andl $1, %r9d
+ addq -32(%rsp), %r15 # 8-byte Folded Reload
+ adcq %rbp, %rbx
+ movq -8(%rsp), %rdx # 8-byte Reload
+ movq -16(%rsp), %rbp # 8-byte Reload
+ movq %rbp, 16(%rdx)
+ movq %rdi, 24(%rdx)
+ adcq -40(%rsp), %r8 # 8-byte Folded Reload
+ adcq -24(%rsp), %r12 # 8-byte Folded Reload
+ adcq %r10, %r13
+ adcq %rax, %r9
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rdi
+ movq %rdi, -24(%rsp) # 8-byte Spill
+ movq 32(%rsi), %rax
+ movq %rcx, %rdx
+ mulxq %rax, %rdx, %rbp
+ movq %rbp, -40(%rsp) # 8-byte Spill
+ addq %r15, %rdx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq %rdi, %rdx
+ mulxq %rax, %r15, %rdx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ adcq %rbx, %r15
+ movq 16(%rsi), %r10
+ movq %r10, %rdx
+ mulxq %rax, %r14, %rbx
+ adcq %r8, %r14
+ movq 24(%rsi), %r8
+ movq %r8, %rdx
+ mulxq %rax, %rbp, %rdi
+ adcq %r12, %rbp
+ movq %rax, %rdx
+ mulxq %rax, %r11, %r12
+ adcq %r13, %r11
+ movq 40(%rsi), %rsi
+ movq %rsi, %rdx
+ mulxq %rax, %r13, %rdx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ adcq %r13, %r9
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq -40(%rsp), %r15 # 8-byte Folded Reload
+ adcq -48(%rsp), %r14 # 8-byte Folded Reload
+ adcq %rbx, %rbp
+ adcq %rdi, %r11
+ adcq %r12, %r9
+ adcq %rdx, %rax
+ movq %rcx, %rdx
+ mulxq %rsi, %r12, %rcx
+ addq %r15, %r12
+ movq -24(%rsp), %rdx # 8-byte Reload
+ mulxq %rsi, %rdi, %r15
+ adcq %r14, %rdi
+ movq %r10, %rdx
+ mulxq %rsi, %rbx, %r10
+ adcq %rbp, %rbx
+ movq %r8, %rdx
+ mulxq %rsi, %rbp, %r8
+ adcq %r11, %rbp
+ adcq %r13, %r9
+ movq %rsi, %rdx
+ mulxq %rsi, %rsi, %r11
+ adcq %rax, %rsi
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %rcx, %rdi
+ movq -8(%rsp), %rdx # 8-byte Reload
+ movq -16(%rsp), %rcx # 8-byte Reload
+ movq %rcx, 32(%rdx)
+ movq %r12, 40(%rdx)
+ movq %rdi, 48(%rdx)
+ adcq %r15, %rbx
+ movq %rbx, 56(%rdx)
+ adcq %r10, %rbp
+ movq %rbp, 64(%rdx)
+ adcq %r8, %r9
+ movq %r9, 72(%rdx)
+ adcq -32(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 80(%rdx)
+ adcq %r11, %rax
+ movq %rax, 88(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end82:
+ .size mcl_fpDbl_sqrPre6Lbmi2, .Lfunc_end82-mcl_fpDbl_sqrPre6Lbmi2
+
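+# mcl_fp_mont6Lbmi2: 6-limb (384-bit) Montgomery multiplication using BMI2
+# mulx, finishing with a carry-based conditional subtraction of the modulus.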
+ .globl mcl_fp_mont6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont6Lbmi2,@function
+mcl_fp_mont6Lbmi2: # @mcl_fp_mont6Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $32, %rsp
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rdi, -104(%rsp) # 8-byte Spill
+ movq 40(%rsi), %rdi
+ movq %rdi, -40(%rsp) # 8-byte Spill
+ movq (%rdx), %rax
+ movq %rdi, %rdx
+ mulxq %rax, %r11, %r14
+ movq 32(%rsi), %rdx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ mulxq %rax, %r15, %rbx
+ movq 24(%rsi), %rdx
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq 16(%rsi), %rdi
+ movq %rdi, -72(%rsp) # 8-byte Spill
+ movq (%rsi), %rbp
+ movq %rbp, -56(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rsi
+ movq %rsi, -64(%rsp) # 8-byte Spill
+ mulxq %rax, %r8, %r12
+ movq %rdi, %rdx
+ mulxq %rax, %r9, %r10
+ movq %rsi, %rdx
+ mulxq %rax, %rdi, %r13
+ movq %rbp, %rdx
+ mulxq %rax, %rdx, %rbp
+ movq %rdx, -96(%rsp) # 8-byte Spill
+ addq %rdi, %rbp
+ adcq %r9, %r13
+ adcq %r8, %r10
+ adcq %r15, %r12
+ adcq %r11, %rbx
+ movq %rbx, %rdi
+ adcq $0, %r14
+ movq %r14, -88(%rsp) # 8-byte Spill
+ movq -8(%rcx), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ imulq %rax, %rdx
+ movq (%rcx), %rsi
+ movq %rsi, (%rsp) # 8-byte Spill
+ movq 40(%rcx), %rax
+ movq %rax, 24(%rsp) # 8-byte Spill
+ mulxq %rax, %rax, %r9
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq 16(%rcx), %rax
+ movq %rax, 16(%rsp) # 8-byte Spill
+ mulxq %rax, %r8, %r11
+ movq 8(%rcx), %rax
+ movq %rax, 8(%rsp) # 8-byte Spill
+ mulxq %rax, %rax, %r14
+ mulxq %rsi, %r15, %rsi
+ addq %rax, %rsi
+ adcq %r8, %r14
+ movq 24(%rcx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ mulxq %rax, %rbx, %r8
+ adcq %r11, %rbx
+ movq 32(%rcx), %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ mulxq %rax, %rax, %rcx
+ adcq %r8, %rax
+ adcq -112(%rsp), %rcx # 8-byte Folded Reload
+ adcq $0, %r9
+ addq -96(%rsp), %r15 # 8-byte Folded Reload
+ adcq %rbp, %rsi
+ adcq %r13, %r14
+ adcq %r10, %rbx
+ adcq %r12, %rax
+ adcq %rdi, %rcx
+ adcq -88(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, -96(%rsp) # 8-byte Spill
+ sbbq %r12, %r12
+ andl $1, %r12d
+ movq -32(%rsp), %rdx # 8-byte Reload
+ movq 8(%rdx), %rdx
+ mulxq -40(%rsp), %rdi, %rbp # 8-byte Folded Reload
+ movq %rdi, -112(%rsp) # 8-byte Spill
+ movq %rbp, -88(%rsp) # 8-byte Spill
+ mulxq -48(%rsp), %rdi, %r13 # 8-byte Folded Reload
+ movq %rdi, -120(%rsp) # 8-byte Spill
+ mulxq -80(%rsp), %rdi, %r15 # 8-byte Folded Reload
+ movq %rdi, -128(%rsp) # 8-byte Spill
+ mulxq -64(%rsp), %r8, %rdi # 8-byte Folded Reload
+ mulxq -56(%rsp), %rbp, %r10 # 8-byte Folded Reload
+ addq %r8, %r10
+ mulxq -72(%rsp), %r9, %r11 # 8-byte Folded Reload
+ adcq %rdi, %r9
+ adcq -128(%rsp), %r11 # 8-byte Folded Reload
+ adcq -120(%rsp), %r15 # 8-byte Folded Reload
+ adcq -112(%rsp), %r13 # 8-byte Folded Reload
+ movq -88(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq %rsi, %rbp
+ adcq %r14, %r10
+ adcq %rbx, %r9
+ adcq %rax, %r11
+ adcq %rcx, %r15
+ adcq -96(%rsp), %r13 # 8-byte Folded Reload
+ adcq %r12, %rdx
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %rbp, %rdx
+ imulq -24(%rsp), %rdx # 8-byte Folded Reload
+ mulxq 24(%rsp), %rax, %r12 # 8-byte Folded Reload
+ movq %rax, -112(%rsp) # 8-byte Spill
+ mulxq -16(%rsp), %rax, %r14 # 8-byte Folded Reload
+ movq %rax, -120(%rsp) # 8-byte Spill
+ mulxq 8(%rsp), %rcx, %rsi # 8-byte Folded Reload
+ mulxq (%rsp), %rax, %r8 # 8-byte Folded Reload
+ addq %rcx, %r8
+ mulxq 16(%rsp), %rdi, %rbx # 8-byte Folded Reload
+ adcq %rsi, %rdi
+ mulxq -8(%rsp), %rcx, %rsi # 8-byte Folded Reload
+ adcq %rbx, %rcx
+ adcq -120(%rsp), %rsi # 8-byte Folded Reload
+ adcq -112(%rsp), %r14 # 8-byte Folded Reload
+ adcq $0, %r12
+ addq %rbp, %rax
+ adcq %r10, %r8
+ adcq %r9, %rdi
+ adcq %r11, %rcx
+ adcq %r15, %rsi
+ adcq %r13, %r14
+ adcq -88(%rsp), %r12 # 8-byte Folded Reload
+ adcq $0, -96(%rsp) # 8-byte Folded Spill
+ movq -32(%rsp), %rdx # 8-byte Reload
+ movq 16(%rdx), %rdx
+ mulxq -40(%rsp), %rbp, %rax # 8-byte Folded Reload
+ movq %rbp, -112(%rsp) # 8-byte Spill
+ movq %rax, -88(%rsp) # 8-byte Spill
+ mulxq -48(%rsp), %rax, %r13 # 8-byte Folded Reload
+ movq %rax, -120(%rsp) # 8-byte Spill
+ mulxq -80(%rsp), %rbp, %r15 # 8-byte Folded Reload
+ mulxq -64(%rsp), %r9, %rbx # 8-byte Folded Reload
+ mulxq -56(%rsp), %rax, %r11 # 8-byte Folded Reload
+ addq %r9, %r11
+ mulxq -72(%rsp), %r9, %r10 # 8-byte Folded Reload
+ adcq %rbx, %r9
+ adcq %rbp, %r10
+ adcq -120(%rsp), %r15 # 8-byte Folded Reload
+ adcq -112(%rsp), %r13 # 8-byte Folded Reload
+ movq -88(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq %r8, %rax
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ adcq %rdi, %r11
+ adcq %rcx, %r9
+ adcq %rsi, %r10
+ adcq %r14, %r15
+ adcq %r12, %r13
+ adcq -96(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %rbp, %rdx
+ imulq -24(%rsp), %rdx # 8-byte Folded Reload
+ mulxq 24(%rsp), %rax, %r8 # 8-byte Folded Reload
+ movq %rax, -120(%rsp) # 8-byte Spill
+ mulxq -16(%rsp), %r12, %r14 # 8-byte Folded Reload
+ mulxq 8(%rsp), %rcx, %rsi # 8-byte Folded Reload
+ mulxq (%rsp), %rax, %rbx # 8-byte Folded Reload
+ addq %rcx, %rbx
+ mulxq 16(%rsp), %rbp, %rdi # 8-byte Folded Reload
+ adcq %rsi, %rbp
+ mulxq -8(%rsp), %rcx, %rsi # 8-byte Folded Reload
+ adcq %rdi, %rcx
+ adcq %r12, %rsi
+ adcq -120(%rsp), %r14 # 8-byte Folded Reload
+ adcq $0, %r8
+ addq -112(%rsp), %rax # 8-byte Folded Reload
+ adcq %r11, %rbx
+ adcq %r9, %rbp
+ adcq %r10, %rcx
+ adcq %r15, %rsi
+ adcq %r13, %r14
+ adcq -88(%rsp), %r8 # 8-byte Folded Reload
+ movq %r8, -112(%rsp) # 8-byte Spill
+ movq -96(%rsp), %r13 # 8-byte Reload
+ adcq $0, %r13
+ movq -32(%rsp), %rdx # 8-byte Reload
+ movq 24(%rdx), %rdx
+ mulxq -40(%rsp), %rdi, %rax # 8-byte Folded Reload
+ movq %rdi, -96(%rsp) # 8-byte Spill
+ movq %rax, -88(%rsp) # 8-byte Spill
+ mulxq -48(%rsp), %rdi, %rax # 8-byte Folded Reload
+ movq %rdi, -120(%rsp) # 8-byte Spill
+ mulxq -80(%rsp), %r15, %r12 # 8-byte Folded Reload
+ mulxq -64(%rsp), %r8, %r11 # 8-byte Folded Reload
+ mulxq -56(%rsp), %r10, %rdi # 8-byte Folded Reload
+ addq %r8, %rdi
+ mulxq -72(%rsp), %r8, %r9 # 8-byte Folded Reload
+ adcq %r11, %r8
+ adcq %r15, %r9
+ adcq -120(%rsp), %r12 # 8-byte Folded Reload
+ adcq -96(%rsp), %rax # 8-byte Folded Reload
+ movq -88(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq %rbx, %r10
+ adcq %rbp, %rdi
+ adcq %rcx, %r8
+ adcq %rsi, %r9
+ adcq %r14, %r12
+ adcq -112(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -96(%rsp) # 8-byte Spill
+ adcq %r13, %rdx
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, %r14
+ movq %r10, %rdx
+ imulq -24(%rsp), %rdx # 8-byte Folded Reload
+ mulxq 24(%rsp), %rax, %r13 # 8-byte Folded Reload
+ movq %rax, -112(%rsp) # 8-byte Spill
+ mulxq -16(%rsp), %rax, %r11 # 8-byte Folded Reload
+ movq %rax, -120(%rsp) # 8-byte Spill
+ mulxq 8(%rsp), %rbp, %rsi # 8-byte Folded Reload
+ mulxq (%rsp), %rcx, %rbx # 8-byte Folded Reload
+ addq %rbp, %rbx
+ mulxq 16(%rsp), %rbp, %rax # 8-byte Folded Reload
+ adcq %rsi, %rbp
+ mulxq -8(%rsp), %rsi, %r15 # 8-byte Folded Reload
+ adcq %rax, %rsi
+ adcq -120(%rsp), %r15 # 8-byte Folded Reload
+ adcq -112(%rsp), %r11 # 8-byte Folded Reload
+ adcq $0, %r13
+ addq %r10, %rcx
+ adcq %rdi, %rbx
+ adcq %r8, %rbp
+ adcq %r9, %rsi
+ adcq %r12, %r15
+ adcq -96(%rsp), %r11 # 8-byte Folded Reload
+ adcq -88(%rsp), %r13 # 8-byte Folded Reload
+ movq %r14, %rdi
+ adcq $0, %rdi
+ movq -32(%rsp), %rcx # 8-byte Reload
+ movq 32(%rcx), %rdx
+ mulxq -40(%rsp), %rcx, %rax # 8-byte Folded Reload
+ movq %rcx, -96(%rsp) # 8-byte Spill
+ movq %rax, -88(%rsp) # 8-byte Spill
+ mulxq -48(%rsp), %rax, %r12 # 8-byte Folded Reload
+ movq %rax, -112(%rsp) # 8-byte Spill
+ mulxq -80(%rsp), %rax, %r14 # 8-byte Folded Reload
+ movq %rax, -120(%rsp) # 8-byte Spill
+ mulxq -64(%rsp), %rcx, %r9 # 8-byte Folded Reload
+ mulxq -56(%rsp), %rax, %r8 # 8-byte Folded Reload
+ addq %rcx, %r8
+ mulxq -72(%rsp), %rcx, %r10 # 8-byte Folded Reload
+ adcq %r9, %rcx
+ adcq -120(%rsp), %r10 # 8-byte Folded Reload
+ adcq -112(%rsp), %r14 # 8-byte Folded Reload
+ adcq -96(%rsp), %r12 # 8-byte Folded Reload
+ movq -88(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq %rbx, %rax
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rax, %rbx
+ adcq %rbp, %r8
+ adcq %rsi, %rcx
+ adcq %r15, %r10
+ adcq %r11, %r14
+ adcq %r13, %r12
+ adcq %rdi, %rdx
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %rbx, %rdx
+ imulq -24(%rsp), %rdx # 8-byte Folded Reload
+ mulxq 24(%rsp), %rax, %r15 # 8-byte Folded Reload
+ movq %rax, -120(%rsp) # 8-byte Spill
+ mulxq -16(%rsp), %r13, %r11 # 8-byte Folded Reload
+ mulxq 8(%rsp), %rsi, %rax # 8-byte Folded Reload
+ mulxq (%rsp), %rdi, %rbx # 8-byte Folded Reload
+ addq %rsi, %rbx
+ mulxq 16(%rsp), %rbp, %r9 # 8-byte Folded Reload
+ adcq %rax, %rbp
+ mulxq -8(%rsp), %rax, %rsi # 8-byte Folded Reload
+ adcq %r9, %rax
+ adcq %r13, %rsi
+ adcq -120(%rsp), %r11 # 8-byte Folded Reload
+ adcq $0, %r15
+ addq -112(%rsp), %rdi # 8-byte Folded Reload
+ adcq %r8, %rbx
+ adcq %rcx, %rbp
+ adcq %r10, %rax
+ adcq %r14, %rsi
+ adcq %r12, %r11
+ adcq -88(%rsp), %r15 # 8-byte Folded Reload
+ movq -96(%rsp), %r8 # 8-byte Reload
+ adcq $0, %r8
+ movq -32(%rsp), %rcx # 8-byte Reload
+ movq 40(%rcx), %rdx
+ mulxq -40(%rsp), %rdi, %rcx # 8-byte Folded Reload
+ movq %rdi, -88(%rsp) # 8-byte Spill
+ movq %rcx, -32(%rsp) # 8-byte Spill
+ mulxq -48(%rsp), %rdi, %rcx # 8-byte Folded Reload
+ movq %rdi, -48(%rsp) # 8-byte Spill
+ movq %rcx, -40(%rsp) # 8-byte Spill
+ mulxq -80(%rsp), %rcx, %r14 # 8-byte Folded Reload
+ movq %rcx, -80(%rsp) # 8-byte Spill
+ mulxq -72(%rsp), %rdi, %r12 # 8-byte Folded Reload
+ mulxq -64(%rsp), %rcx, %r10 # 8-byte Folded Reload
+ mulxq -56(%rsp), %r13, %r9 # 8-byte Folded Reload
+ addq %rcx, %r9
+ adcq %rdi, %r10
+ adcq -80(%rsp), %r12 # 8-byte Folded Reload
+ adcq -48(%rsp), %r14 # 8-byte Folded Reload
+ movq -40(%rsp), %rdx # 8-byte Reload
+ adcq -88(%rsp), %rdx # 8-byte Folded Reload
+ movq -32(%rsp), %rcx # 8-byte Reload
+ adcq $0, %rcx
+ addq %rbx, %r13
+ adcq %rbp, %r9
+ adcq %rax, %r10
+ adcq %rsi, %r12
+ adcq %r11, %r14
+ adcq %r15, %rdx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ adcq %r8, %rcx
+ movq %rcx, -32(%rsp) # 8-byte Spill
+ sbbq %rcx, %rcx
+ movq -24(%rsp), %rdx # 8-byte Reload
+ imulq %r13, %rdx
+ mulxq (%rsp), %r11, %rax # 8-byte Folded Reload
+ mulxq 8(%rsp), %rdi, %rbx # 8-byte Folded Reload
+ addq %rax, %rdi
+ mulxq 16(%rsp), %rsi, %rax # 8-byte Folded Reload
+ adcq %rbx, %rsi
+ mulxq -8(%rsp), %rbx, %rbp # 8-byte Folded Reload
+ adcq %rax, %rbx
+ mulxq -16(%rsp), %rax, %r15 # 8-byte Folded Reload
+ adcq %rbp, %rax
+ mulxq 24(%rsp), %rbp, %rdx # 8-byte Folded Reload
+ adcq %r15, %rbp
+ adcq $0, %rdx
+ andl $1, %ecx
+ addq %r13, %r11
+ adcq %r9, %rdi
+ adcq %r10, %rsi
+ adcq %r12, %rbx
+ adcq %r14, %rax
+ adcq -40(%rsp), %rbp # 8-byte Folded Reload
+ adcq -32(%rsp), %rdx # 8-byte Folded Reload
+ adcq $0, %rcx
+ movq %rdi, %r8
+ subq (%rsp), %r8 # 8-byte Folded Reload
+ movq %rsi, %r9
+ sbbq 8(%rsp), %r9 # 8-byte Folded Reload
+ movq %rbx, %r10
+ sbbq 16(%rsp), %r10 # 8-byte Folded Reload
+ movq %rax, %r11
+ sbbq -8(%rsp), %r11 # 8-byte Folded Reload
+ movq %rbp, %r14
+ sbbq -16(%rsp), %r14 # 8-byte Folded Reload
+ movq %rdx, %r15
+ sbbq 24(%rsp), %r15 # 8-byte Folded Reload
+ sbbq $0, %rcx
+ andl $1, %ecx
+ cmovneq %rax, %r11
+ testb %cl, %cl
+ cmovneq %rdi, %r8
+ movq -104(%rsp), %rax # 8-byte Reload
+ movq %r8, (%rax)
+ cmovneq %rsi, %r9
+ movq %r9, 8(%rax)
+ cmovneq %rbx, %r10
+ movq %r10, 16(%rax)
+ movq %r11, 24(%rax)
+ cmovneq %rbp, %r14
+ movq %r14, 32(%rax)
+ cmovneq %rdx, %r15
+ movq %r15, 40(%rax)
+ addq $32, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end83:
+ .size mcl_fp_mont6Lbmi2, .Lfunc_end83-mcl_fp_mont6Lbmi2
+
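+# mcl_fp_montNF6Lbmi2: 6-limb Montgomery multiplication; like the 5-limb "NF"
+# routine it selects the final result with a signed compare (sarq $63 / cmovs)
+# rather than the carry-based path of mcl_fp_mont6Lbmi2.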
+ .globl mcl_fp_montNF6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF6Lbmi2,@function
+mcl_fp_montNF6Lbmi2: # @mcl_fp_montNF6Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rdi, -120(%rsp) # 8-byte Spill
+ movq (%rsi), %rax
+ movq %rax, -64(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rdi
+ movq %rdi, -80(%rsp) # 8-byte Spill
+ movq (%rdx), %rbp
+ movq %rdi, %rdx
+ mulxq %rbp, %rdi, %rbx
+ movq %rax, %rdx
+ mulxq %rbp, %r9, %r14
+ movq 16(%rsi), %rdx
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ addq %rdi, %r14
+ mulxq %rbp, %rdi, %r8
+ adcq %rbx, %rdi
+ movq 24(%rsi), %rdx
+ movq %rdx, -96(%rsp) # 8-byte Spill
+ mulxq %rbp, %rbx, %r10
+ adcq %r8, %rbx
+ movq 32(%rsi), %rdx
+ movq %rdx, -104(%rsp) # 8-byte Spill
+ mulxq %rbp, %r8, %r11
+ adcq %r10, %r8
+ movq 40(%rsi), %rdx
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ mulxq %rbp, %rsi, %r15
+ adcq %r11, %rsi
+ adcq $0, %r15
+ movq -8(%rcx), %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq %r9, %rdx
+ imulq %rax, %rdx
+ movq (%rcx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ mulxq %rax, %rbp, %rax
+ movq %rax, -128(%rsp) # 8-byte Spill
+ addq %r9, %rbp
+ movq 8(%rcx), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ mulxq %rax, %r12, %r9
+ adcq %r14, %r12
+ movq 16(%rcx), %rax
+ movq %rax, -32(%rsp) # 8-byte Spill
+ mulxq %rax, %r14, %rax
+ adcq %rdi, %r14
+ movq 24(%rcx), %rdi
+ movq %rdi, -40(%rsp) # 8-byte Spill
+ mulxq %rdi, %r13, %rdi
+ adcq %rbx, %r13
+ movq 32(%rcx), %rbp
+ movq %rbp, -48(%rsp) # 8-byte Spill
+ mulxq %rbp, %r11, %rbx
+ adcq %r8, %r11
+ movq 40(%rcx), %rcx
+ movq %rcx, -56(%rsp) # 8-byte Spill
+ mulxq %rcx, %r10, %rcx
+ adcq %rsi, %r10
+ adcq $0, %r15
+ addq -128(%rsp), %r12 # 8-byte Folded Reload
+ adcq %r9, %r14
+ adcq %rax, %r13
+ adcq %rdi, %r11
+ adcq %rbx, %r10
+ adcq %rcx, %r15
+ movq -72(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rdx
+ mulxq -80(%rsp), %rcx, %rsi # 8-byte Folded Reload
+ mulxq -64(%rsp), %rbx, %rax # 8-byte Folded Reload
+ addq %rcx, %rax
+ mulxq -88(%rsp), %rcx, %rdi # 8-byte Folded Reload
+ adcq %rsi, %rcx
+ mulxq -96(%rsp), %rsi, %r8 # 8-byte Folded Reload
+ adcq %rdi, %rsi
+ mulxq -104(%rsp), %rdi, %rbp # 8-byte Folded Reload
+ movq %rbp, -128(%rsp) # 8-byte Spill
+ adcq %r8, %rdi
+ mulxq -112(%rsp), %r8, %r9 # 8-byte Folded Reload
+ adcq -128(%rsp), %r8 # 8-byte Folded Reload
+ adcq $0, %r9
+ addq %r12, %rbx
+ adcq %r14, %rax
+ adcq %r13, %rcx
+ adcq %r11, %rsi
+ adcq %r10, %rdi
+ adcq %r15, %r8
+ adcq $0, %r9
+ movq %rbx, %rdx
+ imulq -16(%rsp), %rdx # 8-byte Folded Reload
+ mulxq -8(%rsp), %rbp, %r13 # 8-byte Folded Reload
+ addq %rbx, %rbp
+ mulxq -24(%rsp), %r11, %rbx # 8-byte Folded Reload
+ adcq %rax, %r11
+ mulxq -32(%rsp), %r14, %rax # 8-byte Folded Reload
+ adcq %rcx, %r14
+ mulxq -40(%rsp), %r10, %rcx # 8-byte Folded Reload
+ adcq %rsi, %r10
+ mulxq -48(%rsp), %r15, %rsi # 8-byte Folded Reload
+ adcq %rdi, %r15
+ mulxq -56(%rsp), %r12, %rdx # 8-byte Folded Reload
+ adcq %r8, %r12
+ adcq $0, %r9
+ addq %r13, %r11
+ adcq %rbx, %r14
+ adcq %rax, %r10
+ adcq %rcx, %r15
+ adcq %rsi, %r12
+ adcq %rdx, %r9
+ movq -72(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rdx
+ mulxq -80(%rsp), %rcx, %rax # 8-byte Folded Reload
+ mulxq -64(%rsp), %r13, %rdi # 8-byte Folded Reload
+ addq %rcx, %rdi
+ mulxq -88(%rsp), %rbx, %rcx # 8-byte Folded Reload
+ adcq %rax, %rbx
+ mulxq -96(%rsp), %rsi, %rbp # 8-byte Folded Reload
+ adcq %rcx, %rsi
+ mulxq -104(%rsp), %rax, %rcx # 8-byte Folded Reload
+ movq %rcx, -128(%rsp) # 8-byte Spill
+ adcq %rbp, %rax
+ mulxq -112(%rsp), %r8, %rcx # 8-byte Folded Reload
+ adcq -128(%rsp), %r8 # 8-byte Folded Reload
+ adcq $0, %rcx
+ addq %r11, %r13
+ adcq %r14, %rdi
+ adcq %r10, %rbx
+ adcq %r15, %rsi
+ adcq %r12, %rax
+ adcq %r9, %r8
+ adcq $0, %rcx
+ movq %r13, %rdx
+ imulq -16(%rsp), %rdx # 8-byte Folded Reload
+ mulxq -8(%rsp), %rbp, %r12 # 8-byte Folded Reload
+ addq %r13, %rbp
+ mulxq -24(%rsp), %r11, %rbp # 8-byte Folded Reload
+ adcq %rdi, %r11
+ mulxq -32(%rsp), %r9, %rdi # 8-byte Folded Reload
+ adcq %rbx, %r9
+ mulxq -40(%rsp), %r10, %rbx # 8-byte Folded Reload
+ adcq %rsi, %r10
+ mulxq -48(%rsp), %r14, %rsi # 8-byte Folded Reload
+ adcq %rax, %r14
+ mulxq -56(%rsp), %r15, %rax # 8-byte Folded Reload
+ adcq %r8, %r15
+ adcq $0, %rcx
+ addq %r12, %r11
+ adcq %rbp, %r9
+ adcq %rdi, %r10
+ adcq %rbx, %r14
+ adcq %rsi, %r15
+ adcq %rax, %rcx
+ movq -72(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rdx
+ mulxq -80(%rsp), %rsi, %rax # 8-byte Folded Reload
+ mulxq -64(%rsp), %r13, %rbx # 8-byte Folded Reload
+ addq %rsi, %rbx
+ mulxq -88(%rsp), %rdi, %rbp # 8-byte Folded Reload
+ adcq %rax, %rdi
+ mulxq -96(%rsp), %rsi, %r8 # 8-byte Folded Reload
+ adcq %rbp, %rsi
+ mulxq -104(%rsp), %rax, %rbp # 8-byte Folded Reload
+ adcq %r8, %rax
+ mulxq -112(%rsp), %r8, %r12 # 8-byte Folded Reload
+ adcq %rbp, %r8
+ adcq $0, %r12
+ addq %r11, %r13
+ adcq %r9, %rbx
+ adcq %r10, %rdi
+ adcq %r14, %rsi
+ adcq %r15, %rax
+ adcq %rcx, %r8
+ adcq $0, %r12
+ movq %r13, %rdx
+ imulq -16(%rsp), %rdx # 8-byte Folded Reload
+ mulxq -8(%rsp), %rbp, %rcx # 8-byte Folded Reload
+ addq %r13, %rbp
+ mulxq -24(%rsp), %r11, %rbp # 8-byte Folded Reload
+ adcq %rbx, %r11
+ mulxq -32(%rsp), %r9, %rbx # 8-byte Folded Reload
+ adcq %rdi, %r9
+ mulxq -40(%rsp), %r10, %rdi # 8-byte Folded Reload
+ adcq %rsi, %r10
+ mulxq -48(%rsp), %r14, %rsi # 8-byte Folded Reload
+ adcq %rax, %r14
+ mulxq -56(%rsp), %r15, %rax # 8-byte Folded Reload
+ adcq %r8, %r15
+ adcq $0, %r12
+ addq %rcx, %r11
+ adcq %rbp, %r9
+ adcq %rbx, %r10
+ adcq %rdi, %r14
+ adcq %rsi, %r15
+ adcq %rax, %r12
+ movq -72(%rsp), %rax # 8-byte Reload
+ movq 32(%rax), %rdx
+ mulxq -80(%rsp), %rsi, %rcx # 8-byte Folded Reload
+ mulxq -64(%rsp), %r13, %rax # 8-byte Folded Reload
+ addq %rsi, %rax
+ mulxq -88(%rsp), %rbx, %rsi # 8-byte Folded Reload
+ adcq %rcx, %rbx
+ mulxq -96(%rsp), %rdi, %rcx # 8-byte Folded Reload
+ adcq %rsi, %rdi
+ mulxq -104(%rsp), %rsi, %rbp # 8-byte Folded Reload
+ adcq %rcx, %rsi
+ mulxq -112(%rsp), %r8, %rcx # 8-byte Folded Reload
+ adcq %rbp, %r8
+ adcq $0, %rcx
+ addq %r11, %r13
+ adcq %r9, %rax
+ adcq %r10, %rbx
+ adcq %r14, %rdi
+ adcq %r15, %rsi
+ adcq %r12, %r8
+ adcq $0, %rcx
+ movq %r13, %rdx
+ imulq -16(%rsp), %rdx # 8-byte Folded Reload
+ mulxq -8(%rsp), %rbp, %r9 # 8-byte Folded Reload
+ addq %r13, %rbp
+ mulxq -24(%rsp), %r13, %rbp # 8-byte Folded Reload
+ adcq %rax, %r13
+ mulxq -32(%rsp), %r11, %rax # 8-byte Folded Reload
+ adcq %rbx, %r11
+ mulxq -40(%rsp), %r10, %rbx # 8-byte Folded Reload
+ adcq %rdi, %r10
+ mulxq -48(%rsp), %r14, %rdi # 8-byte Folded Reload
+ adcq %rsi, %r14
+ mulxq -56(%rsp), %rsi, %rdx # 8-byte Folded Reload
+ adcq %r8, %rsi
+ adcq $0, %rcx
+ addq %r9, %r13
+ adcq %rbp, %r11
+ adcq %rax, %r10
+ adcq %rbx, %r14
+ adcq %rdi, %rsi
+ adcq %rdx, %rcx
+ movq -72(%rsp), %rax # 8-byte Reload
+ movq 40(%rax), %rdx
+ mulxq -80(%rsp), %rdi, %rax # 8-byte Folded Reload
+ mulxq -64(%rsp), %r8, %rbx # 8-byte Folded Reload
+ addq %rdi, %rbx
+ mulxq -88(%rsp), %rdi, %rbp # 8-byte Folded Reload
+ adcq %rax, %rdi
+ mulxq -96(%rsp), %r15, %rax # 8-byte Folded Reload
+ adcq %rbp, %r15
+ mulxq -104(%rsp), %r12, %rbp # 8-byte Folded Reload
+ adcq %rax, %r12
+ mulxq -112(%rsp), %r9, %rax # 8-byte Folded Reload
+ adcq %rbp, %r9
+ adcq $0, %rax
+ addq %r13, %r8
+ adcq %r11, %rbx
+ adcq %r10, %rdi
+ adcq %r14, %r15
+ adcq %rsi, %r12
+ adcq %rcx, %r9
+ adcq $0, %rax
+ movq -16(%rsp), %rdx # 8-byte Reload
+ imulq %r8, %rdx
+ mulxq -8(%rsp), %rcx, %rsi # 8-byte Folded Reload
+ movq %rsi, -16(%rsp) # 8-byte Spill
+ addq %r8, %rcx
+ movq -24(%rsp), %r11 # 8-byte Reload
+ mulxq %r11, %r8, %rcx
+ movq %rcx, -64(%rsp) # 8-byte Spill
+ adcq %rbx, %r8
+ movq -32(%rsp), %r10 # 8-byte Reload
+ mulxq %r10, %rsi, %rcx
+ movq %rcx, -72(%rsp) # 8-byte Spill
+ adcq %rdi, %rsi
+ movq -40(%rsp), %r13 # 8-byte Reload
+ mulxq %r13, %rdi, %rcx
+ movq %rcx, -80(%rsp) # 8-byte Spill
+ adcq %r15, %rdi
+ movq -48(%rsp), %rcx # 8-byte Reload
+ mulxq %rcx, %r15, %rbx
+ adcq %r12, %r15
+ movq -56(%rsp), %r14 # 8-byte Reload
+ mulxq %r14, %r12, %rbp
+ adcq %r9, %r12
+ adcq $0, %rax
+ addq -16(%rsp), %r8 # 8-byte Folded Reload
+ adcq -64(%rsp), %rsi # 8-byte Folded Reload
+ adcq -72(%rsp), %rdi # 8-byte Folded Reload
+ adcq -80(%rsp), %r15 # 8-byte Folded Reload
+ adcq %rbx, %r12
+ adcq %rbp, %rax
+ movq %r8, %rbp
+ subq -8(%rsp), %rbp # 8-byte Folded Reload
+ movq %rsi, %rbx
+ sbbq %r11, %rbx
+ movq %rdi, %r11
+ sbbq %r10, %r11
+ movq %r15, %r10
+ sbbq %r13, %r10
+ movq %r12, %r9
+ sbbq %rcx, %r9
+ movq %rax, %rcx
+ sbbq %r14, %rcx
+ movq %rcx, %rdx
+ sarq $63, %rdx
+ cmovsq %r8, %rbp
+ movq -120(%rsp), %rdx # 8-byte Reload
+ movq %rbp, (%rdx)
+ cmovsq %rsi, %rbx
+ movq %rbx, 8(%rdx)
+ cmovsq %rdi, %r11
+ movq %r11, 16(%rdx)
+ cmovsq %r15, %r10
+ movq %r10, 24(%rdx)
+ cmovsq %r12, %r9
+ movq %r9, 32(%rdx)
+ cmovsq %rax, %rcx
+ movq %rcx, 40(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end84:
+ .size mcl_fp_montNF6Lbmi2, .Lfunc_end84-mcl_fp_montNF6Lbmi2
+
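+# mcl_fp_montRed6Lbmi2: Montgomery reduction of a 12-limb input to 6 limbs,
+# ending with a conditional subtraction of the modulus.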
+ .globl mcl_fp_montRed6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed6Lbmi2,@function
+mcl_fp_montRed6Lbmi2: # @mcl_fp_montRed6Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ pushq %rax
+ movq %rdx, %rcx
+ movq %rdi, -104(%rsp) # 8-byte Spill
+ movq -8(%rcx), %rax
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq (%rcx), %rdi
+ movq %rdi, -8(%rsp) # 8-byte Spill
+ movq (%rsi), %r14
+ movq %r14, %rdx
+ imulq %rax, %rdx
+ movq 40(%rcx), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ mulxq %rax, %rbx, %r12
+ movq 32(%rcx), %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ mulxq %rax, %r10, %r11
+ movq 24(%rcx), %rax
+ movq %rax, -32(%rsp) # 8-byte Spill
+ movq 16(%rcx), %rbp
+ movq %rbp, -40(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, (%rsp) # 8-byte Spill
+ mulxq %rax, %r15, %r8
+ mulxq %rbp, %r13, %rbp
+ mulxq %rcx, %rax, %r9
+ mulxq %rdi, %rdx, %rcx
+ addq %rax, %rcx
+ adcq %r13, %r9
+ adcq %r15, %rbp
+ adcq %r10, %r8
+ adcq %rbx, %r11
+ adcq $0, %r12
+ addq %r14, %rdx
+ adcq 8(%rsi), %rcx
+ adcq 16(%rsi), %r9
+ adcq 24(%rsi), %rbp
+ adcq 32(%rsi), %r8
+ adcq 40(%rsi), %r11
+ movq %r11, -88(%rsp) # 8-byte Spill
+ adcq 48(%rsi), %r12
+ movq %r12, -80(%rsp) # 8-byte Spill
+ movq 88(%rsi), %r10
+ movq 80(%rsi), %rdx
+ movq 72(%rsi), %rdi
+ movq 64(%rsi), %rax
+ movq 56(%rsi), %rsi
+ adcq $0, %rsi
+ movq %rsi, -112(%rsp) # 8-byte Spill
+ adcq $0, %rax
+ movq %rax, -96(%rsp) # 8-byte Spill
+ adcq $0, %rdi
+ movq %rdi, -72(%rsp) # 8-byte Spill
+ adcq $0, %rdx
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ adcq $0, %r10
+ movq %r10, -48(%rsp) # 8-byte Spill
+ sbbq %r12, %r12
+ andl $1, %r12d
+ movq %rcx, %rdx
+ imulq -56(%rsp), %rdx # 8-byte Folded Reload
+ mulxq -24(%rsp), %rax, %r13 # 8-byte Folded Reload
+ movq %rax, -120(%rsp) # 8-byte Spill
+ mulxq -16(%rsp), %rax, %r15 # 8-byte Folded Reload
+ movq %rax, -128(%rsp) # 8-byte Spill
+ mulxq -32(%rsp), %r11, %r14 # 8-byte Folded Reload
+ mulxq -40(%rsp), %rbx, %r10 # 8-byte Folded Reload
+ mulxq (%rsp), %rsi, %rdi # 8-byte Folded Reload
+ mulxq -8(%rsp), %rdx, %rax # 8-byte Folded Reload
+ addq %rsi, %rax
+ adcq %rbx, %rdi
+ adcq %r11, %r10
+ adcq -128(%rsp), %r14 # 8-byte Folded Reload
+ adcq -120(%rsp), %r15 # 8-byte Folded Reload
+ adcq $0, %r13
+ addq %rcx, %rdx
+ adcq %r9, %rax
+ adcq %rbp, %rdi
+ adcq %r8, %r10
+ adcq -88(%rsp), %r14 # 8-byte Folded Reload
+ adcq -80(%rsp), %r15 # 8-byte Folded Reload
+ adcq -112(%rsp), %r13 # 8-byte Folded Reload
+ adcq $0, -96(%rsp) # 8-byte Folded Spill
+ adcq $0, -72(%rsp) # 8-byte Folded Spill
+ adcq $0, -64(%rsp) # 8-byte Folded Spill
+ adcq $0, -48(%rsp) # 8-byte Folded Spill
+ adcq $0, %r12
+ movq %rax, %rdx
+ movq -56(%rsp), %r11 # 8-byte Reload
+ imulq %r11, %rdx
+ mulxq -24(%rsp), %rsi, %rcx # 8-byte Folded Reload
+ movq %rsi, -112(%rsp) # 8-byte Spill
+ movq %rcx, -80(%rsp) # 8-byte Spill
+ mulxq -16(%rsp), %rsi, %rcx # 8-byte Folded Reload
+ movq %rsi, -120(%rsp) # 8-byte Spill
+ movq %rcx, -88(%rsp) # 8-byte Spill
+ mulxq -32(%rsp), %rcx, %rbx # 8-byte Folded Reload
+ movq %rcx, -128(%rsp) # 8-byte Spill
+ mulxq -40(%rsp), %rcx, %r9 # 8-byte Folded Reload
+ mulxq (%rsp), %rsi, %rbp # 8-byte Folded Reload
+ mulxq -8(%rsp), %rdx, %r8 # 8-byte Folded Reload
+ addq %rsi, %r8
+ adcq %rcx, %rbp
+ adcq -128(%rsp), %r9 # 8-byte Folded Reload
+ adcq -120(%rsp), %rbx # 8-byte Folded Reload
+ movq -88(%rsp), %rsi # 8-byte Reload
+ adcq -112(%rsp), %rsi # 8-byte Folded Reload
+ movq -80(%rsp), %rcx # 8-byte Reload
+ adcq $0, %rcx
+ addq %rax, %rdx
+ adcq %rdi, %r8
+ adcq %r10, %rbp
+ adcq %r14, %r9
+ adcq %r15, %rbx
+ adcq %r13, %rsi
+ movq %rsi, -88(%rsp) # 8-byte Spill
+ adcq -96(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -80(%rsp) # 8-byte Spill
+ adcq $0, -72(%rsp) # 8-byte Folded Spill
+ movq -64(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ adcq $0, -48(%rsp) # 8-byte Folded Spill
+ adcq $0, %r12
+ movq %r8, %rdx
+ imulq %r11, %rdx
+ mulxq -24(%rsp), %rsi, %rcx # 8-byte Folded Reload
+ movq %rsi, -64(%rsp) # 8-byte Spill
+ movq %rcx, -96(%rsp) # 8-byte Spill
+ mulxq -16(%rsp), %rcx, %r11 # 8-byte Folded Reload
+ movq %rcx, -112(%rsp) # 8-byte Spill
+ mulxq -32(%rsp), %r10, %r14 # 8-byte Folded Reload
+ mulxq -40(%rsp), %r13, %r15 # 8-byte Folded Reload
+ mulxq (%rsp), %rsi, %rdi # 8-byte Folded Reload
+ mulxq -8(%rsp), %rdx, %rcx # 8-byte Folded Reload
+ addq %rsi, %rcx
+ adcq %r13, %rdi
+ adcq %r10, %r15
+ adcq -112(%rsp), %r14 # 8-byte Folded Reload
+ adcq -64(%rsp), %r11 # 8-byte Folded Reload
+ movq -96(%rsp), %rsi # 8-byte Reload
+ adcq $0, %rsi
+ addq %r8, %rdx
+ adcq %rbp, %rcx
+ adcq %r9, %rdi
+ adcq %rbx, %r15
+ adcq -88(%rsp), %r14 # 8-byte Folded Reload
+ adcq -80(%rsp), %r11 # 8-byte Folded Reload
+ adcq -72(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, -96(%rsp) # 8-byte Spill
+ adcq $0, %rax
+ movq %rax, -64(%rsp) # 8-byte Spill
+ movq -48(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ adcq $0, %r12
+ movq %rcx, %rdx
+ imulq -56(%rsp), %rdx # 8-byte Folded Reload
+ mulxq -24(%rsp), %rbp, %rsi # 8-byte Folded Reload
+ movq %rbp, -48(%rsp) # 8-byte Spill
+ movq %rsi, -72(%rsp) # 8-byte Spill
+ mulxq -16(%rsp), %rbp, %rsi # 8-byte Folded Reload
+ movq %rbp, -88(%rsp) # 8-byte Spill
+ movq %rsi, -80(%rsp) # 8-byte Spill
+ mulxq -32(%rsp), %rsi, %r13 # 8-byte Folded Reload
+ movq %rsi, -112(%rsp) # 8-byte Spill
+ movq -40(%rsp), %r9 # 8-byte Reload
+ mulxq %r9, %r10, %rbp
+ mulxq (%rsp), %rsi, %r8 # 8-byte Folded Reload
+ mulxq -8(%rsp), %rdx, %rbx # 8-byte Folded Reload
+ addq %rsi, %rbx
+ adcq %r10, %r8
+ adcq -112(%rsp), %rbp # 8-byte Folded Reload
+ adcq -88(%rsp), %r13 # 8-byte Folded Reload
+ movq -80(%rsp), %r10 # 8-byte Reload
+ adcq -48(%rsp), %r10 # 8-byte Folded Reload
+ movq -72(%rsp), %rsi # 8-byte Reload
+ adcq $0, %rsi
+ addq %rcx, %rdx
+ adcq %rdi, %rbx
+ adcq %r15, %r8
+ adcq %r14, %rbp
+ adcq %r11, %r13
+ adcq -96(%rsp), %r10 # 8-byte Folded Reload
+ movq %r10, -80(%rsp) # 8-byte Spill
+ adcq -64(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, -72(%rsp) # 8-byte Spill
+ adcq $0, %rax
+ movq %rax, -48(%rsp) # 8-byte Spill
+ adcq $0, %r12
+ movq -56(%rsp), %rdx # 8-byte Reload
+ imulq %rbx, %rdx
+ mulxq -24(%rsp), %rax, %r10 # 8-byte Folded Reload
+ movq %rax, -56(%rsp) # 8-byte Spill
+ mulxq %r9, %rsi, %r14
+ mulxq -8(%rsp), %r11, %rdi # 8-byte Folded Reload
+ mulxq (%rsp), %rax, %r9 # 8-byte Folded Reload
+ addq %rdi, %rax
+ adcq %rsi, %r9
+ movq -32(%rsp), %r15 # 8-byte Reload
+ mulxq %r15, %rsi, %rdi
+ adcq %r14, %rsi
+ mulxq -16(%rsp), %rdx, %r14 # 8-byte Folded Reload
+ adcq %rdi, %rdx
+ adcq -56(%rsp), %r14 # 8-byte Folded Reload
+ adcq $0, %r10
+ addq %rbx, %r11
+ adcq %r8, %rax
+ adcq %rbp, %r9
+ adcq %r13, %rsi
+ adcq -80(%rsp), %rdx # 8-byte Folded Reload
+ adcq -72(%rsp), %r14 # 8-byte Folded Reload
+ adcq -48(%rsp), %r10 # 8-byte Folded Reload
+ adcq $0, %r12
+ movq %rax, %rcx
+ subq -8(%rsp), %rcx # 8-byte Folded Reload
+ movq %r9, %rdi
+ sbbq (%rsp), %rdi # 8-byte Folded Reload
+ movq %rsi, %rbp
+ sbbq -40(%rsp), %rbp # 8-byte Folded Reload
+ movq %rdx, %rbx
+ sbbq %r15, %rbx
+ movq %r14, %r8
+ sbbq -16(%rsp), %r8 # 8-byte Folded Reload
+ movq %r10, %r15
+ sbbq -24(%rsp), %r15 # 8-byte Folded Reload
+ sbbq $0, %r12
+ andl $1, %r12d
+ cmovneq %r10, %r15
+ testb %r12b, %r12b
+ cmovneq %rax, %rcx
+ movq -104(%rsp), %rax # 8-byte Reload
+ movq %rcx, (%rax)
+ cmovneq %r9, %rdi
+ movq %rdi, 8(%rax)
+ cmovneq %rsi, %rbp
+ movq %rbp, 16(%rax)
+ cmovneq %rdx, %rbx
+ movq %rbx, 24(%rax)
+ cmovneq %r14, %r8
+ movq %r8, 32(%rax)
+ movq %r15, 40(%rax)
+ addq $8, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end85:
+ .size mcl_fp_montRed6Lbmi2, .Lfunc_end85-mcl_fp_montRed6Lbmi2
+
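+# mcl_fp_addPre6Lbmi2(z, x, y): 6-limb (384-bit) add without modular reduction;
+# the carry-out is returned in %rax ("6L" = 6 limbs, "bmi2" = mulx/BMI2 path).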
+ .globl mcl_fp_addPre6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre6Lbmi2,@function
+mcl_fp_addPre6Lbmi2: # @mcl_fp_addPre6Lbmi2
+# BB#0:
+ pushq %r14
+ pushq %rbx
+ movq 40(%rdx), %r8
+ movq 40(%rsi), %r11
+ movq 32(%rdx), %r9
+ movq 24(%rdx), %r10
+ movq 24(%rsi), %rax
+ movq 32(%rsi), %r14
+ movq 16(%rdx), %rbx
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rbx
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rbx, 16(%rdi)
+ adcq %r10, %rax
+ movq %rax, 24(%rdi)
+ adcq %r9, %r14
+ movq %r14, 32(%rdi)
+ adcq %r8, %r11
+ movq %r11, 40(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r14
+ retq
+.Lfunc_end86:
+ .size mcl_fp_addPre6Lbmi2, .Lfunc_end86-mcl_fp_addPre6Lbmi2
+
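+# mcl_fp_subPre6Lbmi2(z, x, y): 6-limb subtract without modular reduction;
+# the borrow is returned in %rax.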
+ .globl mcl_fp_subPre6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre6Lbmi2,@function
+mcl_fp_subPre6Lbmi2: # @mcl_fp_subPre6Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq 40(%rdx), %r8
+ movq 40(%rsi), %r9
+ movq 32(%rsi), %r10
+ movq 24(%rsi), %r11
+ movq 16(%rsi), %rcx
+ movq (%rsi), %rbx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %rsi
+ movq 24(%rdx), %r14
+ movq 32(%rdx), %r15
+ sbbq 16(%rdx), %rcx
+ movq %rbx, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %rcx, 16(%rdi)
+ sbbq %r14, %r11
+ movq %r11, 24(%rdi)
+ sbbq %r15, %r10
+ movq %r10, 32(%rdi)
+ sbbq %r8, %r9
+ movq %r9, 40(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end87:
+ .size mcl_fp_subPre6Lbmi2, .Lfunc_end87-mcl_fp_subPre6Lbmi2
+
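+# mcl_fp_shr1_6Lbmi2(z, x): logical right shift of a 6-limb value by one bit.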
+ .globl mcl_fp_shr1_6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_6Lbmi2,@function
+mcl_fp_shr1_6Lbmi2: # @mcl_fp_shr1_6Lbmi2
+# BB#0:
+ movq 40(%rsi), %r8
+ movq 32(%rsi), %r9
+ movq 24(%rsi), %rdx
+ movq 16(%rsi), %rax
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rcx
+ movq %rcx, (%rdi)
+ shrdq $1, %rax, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rdx, %rax
+ movq %rax, 16(%rdi)
+ shrdq $1, %r9, %rdx
+ movq %rdx, 24(%rdi)
+ shrdq $1, %r8, %r9
+ movq %r9, 32(%rdi)
+ shrq %r8
+ movq %r8, 40(%rdi)
+ retq
+.Lfunc_end88:
+ .size mcl_fp_shr1_6Lbmi2, .Lfunc_end88-mcl_fp_shr1_6Lbmi2
+
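+# mcl_fp_add6Lbmi2(z, x, y, p): modular add; x + y is stored, then the modulus
+# in %rcx is subtracted and the reduced value is kept unless that borrows.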
+ .globl mcl_fp_add6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add6Lbmi2,@function
+mcl_fp_add6Lbmi2: # @mcl_fp_add6Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq 40(%rdx), %r14
+ movq 40(%rsi), %r8
+ movq 32(%rdx), %r15
+ movq 24(%rdx), %rbx
+ movq 24(%rsi), %r10
+ movq 32(%rsi), %r9
+ movq 16(%rdx), %r11
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r11
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r11, 16(%rdi)
+ adcq %rbx, %r10
+ movq %r10, 24(%rdi)
+ adcq %r15, %r9
+ movq %r9, 32(%rdi)
+ adcq %r14, %r8
+ movq %r8, 40(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r11
+ sbbq 24(%rcx), %r10
+ sbbq 32(%rcx), %r9
+ sbbq 40(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB89_2
+# BB#1: # %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r11, 16(%rdi)
+ movq %r10, 24(%rdi)
+ movq %r9, 32(%rdi)
+ movq %r8, 40(%rdi)
+.LBB89_2: # %carry
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end89:
+ .size mcl_fp_add6Lbmi2, .Lfunc_end89-mcl_fp_add6Lbmi2
+
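+# mcl_fp_addNF6Lbmi2(z, x, y, p): branch-free modular add; the reduced value
+# is selected with cmovs on the sign of (x + y) - p instead of a carry test.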
+ .globl mcl_fp_addNF6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF6Lbmi2,@function
+mcl_fp_addNF6Lbmi2: # @mcl_fp_addNF6Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 40(%rdx), %r8
+ movq 32(%rdx), %r9
+ movq 24(%rdx), %r10
+ movq 16(%rdx), %r11
+ movq (%rdx), %r15
+ movq 8(%rdx), %r14
+ addq (%rsi), %r15
+ adcq 8(%rsi), %r14
+ adcq 16(%rsi), %r11
+ adcq 24(%rsi), %r10
+ adcq 32(%rsi), %r9
+ adcq 40(%rsi), %r8
+ movq %r15, %rsi
+ subq (%rcx), %rsi
+ movq %r14, %rbx
+ sbbq 8(%rcx), %rbx
+ movq %r11, %rdx
+ sbbq 16(%rcx), %rdx
+ movq %r10, %r13
+ sbbq 24(%rcx), %r13
+ movq %r9, %r12
+ sbbq 32(%rcx), %r12
+ movq %r8, %rax
+ sbbq 40(%rcx), %rax
+ movq %rax, %rcx
+ sarq $63, %rcx
+ cmovsq %r15, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r14, %rbx
+ movq %rbx, 8(%rdi)
+ cmovsq %r11, %rdx
+ movq %rdx, 16(%rdi)
+ cmovsq %r10, %r13
+ movq %r13, 24(%rdi)
+ cmovsq %r9, %r12
+ movq %r12, 32(%rdi)
+ cmovsq %r8, %rax
+ movq %rax, 40(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end90:
+ .size mcl_fp_addNF6Lbmi2, .Lfunc_end90-mcl_fp_addNF6Lbmi2
+
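+# mcl_fp_sub6Lbmi2(z, x, y, p): modular subtract; the modulus is added back
+# when x - y borrows.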
+ .globl mcl_fp_sub6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub6Lbmi2,@function
+mcl_fp_sub6Lbmi2: # @mcl_fp_sub6Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 40(%rdx), %r14
+ movq 40(%rsi), %r8
+ movq 32(%rsi), %r9
+ movq 24(%rsi), %r10
+ movq 16(%rsi), %r11
+ movq (%rsi), %rax
+ movq 8(%rsi), %rsi
+ xorl %ebx, %ebx
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %rsi
+ movq 24(%rdx), %r15
+ movq 32(%rdx), %r12
+ sbbq 16(%rdx), %r11
+ movq %rax, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %r11, 16(%rdi)
+ sbbq %r15, %r10
+ movq %r10, 24(%rdi)
+ sbbq %r12, %r9
+ movq %r9, 32(%rdi)
+ sbbq %r14, %r8
+ movq %r8, 40(%rdi)
+ sbbq $0, %rbx
+ testb $1, %bl
+ je .LBB91_2
+# BB#1: # %carry
+ movq 40(%rcx), %r14
+ movq 32(%rcx), %r15
+ movq 24(%rcx), %r12
+ movq 8(%rcx), %rbx
+ movq 16(%rcx), %rdx
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %rsi, %rbx
+ movq %rbx, 8(%rdi)
+ adcq %r11, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r10, %r12
+ movq %r12, 24(%rdi)
+ adcq %r9, %r15
+ movq %r15, 32(%rdi)
+ adcq %r8, %r14
+ movq %r14, 40(%rdi)
+.LBB91_2: # %nocarry
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end91:
+ .size mcl_fp_sub6Lbmi2, .Lfunc_end91-mcl_fp_sub6Lbmi2
+
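+# mcl_fp_subNF6Lbmi2(z, x, y, p): branch-free modular subtract; the modulus,
+# masked by the sign of x - y, is added unconditionally.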
+ .globl mcl_fp_subNF6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF6Lbmi2,@function
+mcl_fp_subNF6Lbmi2: # @mcl_fp_subNF6Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 40(%rsi), %r15
+ movq 32(%rsi), %r8
+ movq 24(%rsi), %r9
+ movq 16(%rsi), %r10
+ movq (%rsi), %r11
+ movq 8(%rsi), %r14
+ subq (%rdx), %r11
+ sbbq 8(%rdx), %r14
+ sbbq 16(%rdx), %r10
+ sbbq 24(%rdx), %r9
+ sbbq 32(%rdx), %r8
+ sbbq 40(%rdx), %r15
+ movq %r15, %rdx
+ sarq $63, %rdx
+ movq %rdx, %rbx
+ addq %rbx, %rbx
+ movq %rdx, %rsi
+ adcq %rsi, %rsi
+ andq 8(%rcx), %rsi
+ movq %r15, %rax
+ shrq $63, %rax
+ orq %rbx, %rax
+ andq (%rcx), %rax
+ movq 40(%rcx), %r12
+ andq %rdx, %r12
+ movq 32(%rcx), %r13
+ andq %rdx, %r13
+ movq 24(%rcx), %rbx
+ andq %rdx, %rbx
+ andq 16(%rcx), %rdx
+ addq %r11, %rax
+ movq %rax, (%rdi)
+ adcq %r14, %rsi
+ movq %rsi, 8(%rdi)
+ adcq %r10, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r9, %rbx
+ movq %rbx, 24(%rdi)
+ adcq %r8, %r13
+ movq %r13, 32(%rdi)
+ adcq %r15, %r12
+ movq %r12, 40(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end92:
+ .size mcl_fp_subNF6Lbmi2, .Lfunc_end92-mcl_fp_subNF6Lbmi2
+
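+# mcl_fpDbl_add6Lbmi2(z, x, y, p): 12-limb (double-width) add; the low 6 limbs
+# are stored as-is and the high 6 limbs are conditionally reduced by p.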
+ .globl mcl_fpDbl_add6Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add6Lbmi2,@function
+mcl_fpDbl_add6Lbmi2: # @mcl_fpDbl_add6Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 88(%rdx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq 80(%rdx), %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq 72(%rdx), %r14
+ movq 64(%rdx), %r15
+ movq 24(%rsi), %rbp
+ movq 32(%rsi), %r13
+ movq 16(%rdx), %r12
+ movq (%rdx), %rbx
+ movq 8(%rdx), %rax
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %rax
+ adcq 16(%rsi), %r12
+ adcq 24(%rdx), %rbp
+ adcq 32(%rdx), %r13
+ movq 56(%rdx), %r11
+ movq 48(%rdx), %r9
+ movq 40(%rdx), %rdx
+ movq %rbx, (%rdi)
+ movq 88(%rsi), %r8
+ movq %rax, 8(%rdi)
+ movq 80(%rsi), %r10
+ movq %r12, 16(%rdi)
+ movq 72(%rsi), %r12
+ movq %rbp, 24(%rdi)
+ movq 40(%rsi), %rax
+ adcq %rdx, %rax
+ movq 64(%rsi), %rdx
+ movq %r13, 32(%rdi)
+ movq 56(%rsi), %r13
+ movq 48(%rsi), %rbp
+ adcq %r9, %rbp
+ movq %rax, 40(%rdi)
+ adcq %r11, %r13
+ adcq %r15, %rdx
+ adcq %r14, %r12
+ adcq -16(%rsp), %r10 # 8-byte Folded Reload
+ adcq -8(%rsp), %r8 # 8-byte Folded Reload
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rbp, %rsi
+ subq (%rcx), %rsi
+ movq %r13, %rbx
+ sbbq 8(%rcx), %rbx
+ movq %rdx, %r9
+ sbbq 16(%rcx), %r9
+ movq %r12, %r11
+ sbbq 24(%rcx), %r11
+ movq %r10, %r14
+ sbbq 32(%rcx), %r14
+ movq %r8, %r15
+ sbbq 40(%rcx), %r15
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %rbp, %rsi
+ movq %rsi, 48(%rdi)
+ testb %al, %al
+ cmovneq %r13, %rbx
+ movq %rbx, 56(%rdi)
+ cmovneq %rdx, %r9
+ movq %r9, 64(%rdi)
+ cmovneq %r12, %r11
+ movq %r11, 72(%rdi)
+ cmovneq %r10, %r14
+ movq %r14, 80(%rdi)
+ cmovneq %r8, %r15
+ movq %r15, 88(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end93:
+ .size mcl_fpDbl_add6Lbmi2, .Lfunc_end93-mcl_fpDbl_add6Lbmi2
+
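+# mcl_fpDbl_sub6Lbmi2(z, x, y, p): 12-limb subtract; on borrow the modulus is
+# added to the high 6 limbs.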
+ .globl mcl_fpDbl_sub6Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub6Lbmi2,@function
+mcl_fpDbl_sub6Lbmi2: # @mcl_fpDbl_sub6Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 88(%rdx), %r9
+ movq 80(%rdx), %r10
+ movq 72(%rdx), %r14
+ movq 16(%rsi), %r8
+ movq (%rsi), %r15
+ movq 8(%rsi), %r11
+ xorl %eax, %eax
+ subq (%rdx), %r15
+ sbbq 8(%rdx), %r11
+ sbbq 16(%rdx), %r8
+ movq 24(%rsi), %rbx
+ sbbq 24(%rdx), %rbx
+ movq 32(%rsi), %r12
+ sbbq 32(%rdx), %r12
+ movq 64(%rdx), %r13
+ movq %r15, (%rdi)
+ movq 56(%rdx), %rbp
+ movq %r11, 8(%rdi)
+ movq 48(%rdx), %r15
+ movq 40(%rdx), %rdx
+ movq %r8, 16(%rdi)
+ movq 88(%rsi), %r8
+ movq %rbx, 24(%rdi)
+ movq 40(%rsi), %rbx
+ sbbq %rdx, %rbx
+ movq 80(%rsi), %r11
+ movq %r12, 32(%rdi)
+ movq 48(%rsi), %rdx
+ sbbq %r15, %rdx
+ movq 72(%rsi), %r15
+ movq %rbx, 40(%rdi)
+ movq 64(%rsi), %r12
+ movq 56(%rsi), %rsi
+ sbbq %rbp, %rsi
+ sbbq %r13, %r12
+ sbbq %r14, %r15
+ sbbq %r10, %r11
+ sbbq %r9, %r8
+ movl $0, %ebp
+ sbbq $0, %rbp
+ andl $1, %ebp
+ movq (%rcx), %r14
+ cmoveq %rax, %r14
+ testb %bpl, %bpl
+ movq 16(%rcx), %r9
+ cmoveq %rax, %r9
+ movq 8(%rcx), %rbp
+ cmoveq %rax, %rbp
+ movq 40(%rcx), %r10
+ cmoveq %rax, %r10
+ movq 32(%rcx), %rbx
+ cmoveq %rax, %rbx
+ cmovneq 24(%rcx), %rax
+ addq %rdx, %r14
+ movq %r14, 48(%rdi)
+ adcq %rsi, %rbp
+ movq %rbp, 56(%rdi)
+ adcq %r12, %r9
+ movq %r9, 64(%rdi)
+ adcq %r15, %rax
+ movq %rax, 72(%rdi)
+ adcq %r11, %rbx
+ movq %rbx, 80(%rdi)
+ adcq %r8, %r10
+ movq %r10, 88(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end94:
+ .size mcl_fpDbl_sub6Lbmi2, .Lfunc_end94-mcl_fpDbl_sub6Lbmi2
+
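+# mcl_fp_mulUnitPre7Lbmi2(z, x, y): multiply a 7-limb value by the single
+# 64-bit word in %rdx (the implicit mulx operand), giving an 8-limb result.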
+ .globl mcl_fp_mulUnitPre7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre7Lbmi2,@function
+mcl_fp_mulUnitPre7Lbmi2: # @mcl_fp_mulUnitPre7Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ mulxq 48(%rsi), %r8, %r11
+ mulxq 40(%rsi), %r9, %r13
+ mulxq 32(%rsi), %r10, %rcx
+ mulxq 8(%rsi), %r12, %r14
+ mulxq (%rsi), %r15, %rbx
+ addq %r12, %rbx
+ mulxq 24(%rsi), %r12, %rax
+ mulxq 16(%rsi), %rdx, %rsi
+ movq %r15, (%rdi)
+ movq %rbx, 8(%rdi)
+ adcq %r14, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r12, %rsi
+ movq %rsi, 24(%rdi)
+ adcq %r10, %rax
+ movq %rax, 32(%rdi)
+ adcq %r9, %rcx
+ movq %rcx, 40(%rdi)
+ adcq %r8, %r13
+ movq %r13, 48(%rdi)
+ adcq $0, %r11
+ movq %r11, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end95:
+ .size mcl_fp_mulUnitPre7Lbmi2, .Lfunc_end95-mcl_fp_mulUnitPre7Lbmi2
+
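+# mcl_fpDbl_mulPre7Lbmi2(z, x, y): full 7x7-limb schoolbook product with no
+# reduction; one mulx pass per word of y, producing a 14-limb result.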
+ .globl mcl_fpDbl_mulPre7Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre7Lbmi2,@function
+mcl_fpDbl_mulPre7Lbmi2: # @mcl_fpDbl_mulPre7Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %r14
+ movq %r14, -8(%rsp) # 8-byte Spill
+ movq %rsi, %r8
+ movq %rdi, %r13
+ movq %r13, -16(%rsp) # 8-byte Spill
+ movq (%r8), %rcx
+ movq %rcx, -56(%rsp) # 8-byte Spill
+ movq 8(%r8), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq (%r14), %rsi
+ movq %rax, %rdx
+ mulxq %rsi, %rbp, %rax
+ movq %rcx, %rdx
+ mulxq %rsi, %rdx, %rcx
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq 24(%r8), %rdi
+ movq %rdi, -32(%rsp) # 8-byte Spill
+ movq 16(%r8), %rdx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ addq %rbp, %rcx
+ mulxq %rsi, %rbx, %rbp
+ adcq %rax, %rbx
+ movq %rdi, %rdx
+ mulxq %rsi, %r12, %rax
+ adcq %rbp, %r12
+ movq 32(%r8), %rdx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ mulxq %rsi, %r9, %rbp
+ adcq %rax, %r9
+ movq 40(%r8), %rdi
+ movq %rdi, %rdx
+ mulxq %rsi, %r10, %rax
+ adcq %rbp, %r10
+ movq 48(%r8), %r15
+ movq %r15, %rdx
+ mulxq %rsi, %rsi, %r11
+ adcq %rax, %rsi
+ movq -64(%rsp), %rax # 8-byte Reload
+ movq %rax, (%r13)
+ adcq $0, %r11
+ movq 8(%r14), %r13
+ movq -56(%rsp), %rdx # 8-byte Reload
+ mulxq %r13, %r14, %rax
+ movq %rax, -56(%rsp) # 8-byte Spill
+ addq %rcx, %r14
+ movq -24(%rsp), %rdx # 8-byte Reload
+ mulxq %r13, %rcx, %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ adcq %rbx, %rcx
+ movq -40(%rsp), %rdx # 8-byte Reload
+ mulxq %r13, %rbx, %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ adcq %r12, %rbx
+ movq -32(%rsp), %rdx # 8-byte Reload
+ mulxq %r13, %rbp, %rax
+ movq %rax, -32(%rsp) # 8-byte Spill
+ adcq %r9, %rbp
+ movq -48(%rsp), %rdx # 8-byte Reload
+ mulxq %r13, %rax, %r9
+ adcq %r10, %rax
+ movq %rdi, %rdx
+ mulxq %r13, %r10, %rdi
+ adcq %rsi, %r10
+ movq %r15, %rdx
+ mulxq %r13, %r13, %rdx
+ adcq %r11, %r13
+ sbbq %r12, %r12
+ andl $1, %r12d
+ addq -56(%rsp), %rcx # 8-byte Folded Reload
+ adcq -24(%rsp), %rbx # 8-byte Folded Reload
+ adcq -40(%rsp), %rbp # 8-byte Folded Reload
+ adcq -32(%rsp), %rax # 8-byte Folded Reload
+ adcq %r9, %r10
+ movq -16(%rsp), %rsi # 8-byte Reload
+ movq %r14, 8(%rsi)
+ adcq %rdi, %r13
+ adcq %rdx, %r12
+ movq (%r8), %rsi
+ movq %rsi, -32(%rsp) # 8-byte Spill
+ movq 8(%r8), %r11
+ movq %r11, -24(%rsp) # 8-byte Spill
+ movq -8(%rsp), %rdx # 8-byte Reload
+ movq 16(%rdx), %rdi
+ movq %rsi, %rdx
+ mulxq %rdi, %r9, %rdx
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ addq %rcx, %r9
+ movq %r11, %rdx
+ mulxq %rdi, %r14, %rcx
+ movq %rcx, -80(%rsp) # 8-byte Spill
+ adcq %rbx, %r14
+ movq 16(%r8), %rdx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ mulxq %rdi, %rsi, %rcx
+ movq %rcx, -88(%rsp) # 8-byte Spill
+ adcq %rbp, %rsi
+ movq 24(%r8), %rdx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ mulxq %rdi, %rbp, %rcx
+ movq %rcx, -96(%rsp) # 8-byte Spill
+ adcq %rax, %rbp
+ movq 32(%r8), %rdx
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ mulxq %rdi, %r11, %rax
+ movq %rax, -104(%rsp) # 8-byte Spill
+ adcq %r10, %r11
+ movq 40(%r8), %rdx
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ mulxq %rdi, %r15, %rax
+ adcq %r13, %r15
+ movq 48(%r8), %r13
+ movq %r13, %rdx
+ mulxq %rdi, %rcx, %rdx
+ adcq %r12, %rcx
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ addq -72(%rsp), %r14 # 8-byte Folded Reload
+ adcq -80(%rsp), %rsi # 8-byte Folded Reload
+ adcq -88(%rsp), %rbp # 8-byte Folded Reload
+ adcq -96(%rsp), %r11 # 8-byte Folded Reload
+ adcq -104(%rsp), %r15 # 8-byte Folded Reload
+ adcq %rax, %rcx
+ adcq %rdx, %rbx
+ movq -16(%rsp), %rax # 8-byte Reload
+ movq %r9, 16(%rax)
+ movq -8(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rdi
+ movq -32(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %r9, %rax
+ movq %rax, -32(%rsp) # 8-byte Spill
+ addq %r14, %r9
+ movq -24(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %rax, %rdx
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ adcq %rsi, %rax
+ movq -40(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %r14, %rdx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ adcq %rbp, %r14
+ movq -48(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %r10, %rdx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ adcq %r11, %r10
+ movq -56(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %rbp, %rsi
+ adcq %r15, %rbp
+ movq -64(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %r11, %r15
+ adcq %rcx, %r11
+ movq %r13, %rdx
+ mulxq %rdi, %r13, %rcx
+ adcq %rbx, %r13
+ sbbq %r12, %r12
+ andl $1, %r12d
+ addq -32(%rsp), %rax # 8-byte Folded Reload
+ adcq -24(%rsp), %r14 # 8-byte Folded Reload
+ adcq -40(%rsp), %r10 # 8-byte Folded Reload
+ adcq -48(%rsp), %rbp # 8-byte Folded Reload
+ adcq %rsi, %r11
+ movq -16(%rsp), %rdi # 8-byte Reload
+ movq %r9, 24(%rdi)
+ adcq %r15, %r13
+ adcq %rcx, %r12
+ movq (%r8), %rdx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq 8(%r8), %rbx
+ movq %rbx, -24(%rsp) # 8-byte Spill
+ movq -8(%rsp), %rcx # 8-byte Reload
+ movq 32(%rcx), %rcx
+ mulxq %rcx, %rsi, %rdx
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ addq %rax, %rsi
+ movq %rbx, %rdx
+ mulxq %rcx, %r9, %rax
+ movq %rax, -88(%rsp) # 8-byte Spill
+ adcq %r14, %r9
+ movq 16(%r8), %rdx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ mulxq %rcx, %rax, %rdx
+ movq %rdx, -96(%rsp) # 8-byte Spill
+ adcq %r10, %rax
+ movq 24(%r8), %rdx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ mulxq %rcx, %r15, %rdx
+ movq %rdx, -104(%rsp) # 8-byte Spill
+ adcq %rbp, %r15
+ movq 32(%r8), %rdx
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ mulxq %rcx, %r10, %rbp
+ adcq %r11, %r10
+ movq 40(%r8), %rdx
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ mulxq %rcx, %r11, %rbx
+ adcq %r13, %r11
+ movq 48(%r8), %rdx
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ mulxq %rcx, %r14, %rcx
+ adcq %r12, %r14
+ sbbq %r12, %r12
+ andl $1, %r12d
+ addq -80(%rsp), %r9 # 8-byte Folded Reload
+ adcq -88(%rsp), %rax # 8-byte Folded Reload
+ adcq -96(%rsp), %r15 # 8-byte Folded Reload
+ adcq -104(%rsp), %r10 # 8-byte Folded Reload
+ adcq %rbp, %r11
+ adcq %rbx, %r14
+ adcq %rcx, %r12
+ movq %rsi, 32(%rdi)
+ movq -8(%rsp), %rsi # 8-byte Reload
+ movq 40(%rsi), %rdi
+ movq -32(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %r13, %rcx
+ movq %rcx, -32(%rsp) # 8-byte Spill
+ addq %r9, %r13
+ movq -24(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %rcx, %rdx
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ adcq %rax, %rcx
+ movq -40(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %rax, %rdx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ adcq %r15, %rax
+ movq -48(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %rbx, %rdx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ adcq %r10, %rbx
+ movq -56(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %rbp, %r15
+ adcq %r11, %rbp
+ movq -64(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %r9, %r11
+ adcq %r14, %r9
+ movq -72(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %r10, %rdx
+ adcq %r12, %r10
+ sbbq %rdi, %rdi
+ andl $1, %edi
+ addq -32(%rsp), %rcx # 8-byte Folded Reload
+ adcq -24(%rsp), %rax # 8-byte Folded Reload
+ adcq -40(%rsp), %rbx # 8-byte Folded Reload
+ adcq -48(%rsp), %rbp # 8-byte Folded Reload
+ adcq %r15, %r9
+ movq -16(%rsp), %r14 # 8-byte Reload
+ movq %r13, 40(%r14)
+ adcq %r11, %r10
+ adcq %rdx, %rdi
+ movq 48(%rsi), %rdx
+ mulxq (%r8), %r11, %rsi
+ movq %rsi, -8(%rsp) # 8-byte Spill
+ addq %rcx, %r11
+ mulxq 8(%r8), %rsi, %r15
+ adcq %rax, %rsi
+ mulxq 16(%r8), %rcx, %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ adcq %rbx, %rcx
+ mulxq 24(%r8), %rbx, %r12
+ adcq %rbp, %rbx
+ mulxq 32(%r8), %rbp, %r13
+ adcq %r9, %rbp
+ mulxq 40(%r8), %rax, %r9
+ adcq %r10, %rax
+ mulxq 48(%r8), %rdx, %r8
+ adcq %rdi, %rdx
+ sbbq %r10, %r10
+ andl $1, %r10d
+ addq -8(%rsp), %rsi # 8-byte Folded Reload
+ adcq %r15, %rcx
+ movq %r11, 48(%r14)
+ movq %rsi, 56(%r14)
+ movq %rcx, 64(%r14)
+ adcq -24(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, 72(%r14)
+ adcq %r12, %rbp
+ movq %rbp, 80(%r14)
+ adcq %r13, %rax
+ movq %rax, 88(%r14)
+ adcq %r9, %rdx
+ movq %rdx, 96(%r14)
+ adcq %r8, %r10
+ movq %r10, 104(%r14)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end96:
+ .size mcl_fpDbl_mulPre7Lbmi2, .Lfunc_end96-mcl_fpDbl_mulPre7Lbmi2
+
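+# mcl_fpDbl_sqrPre7Lbmi2(z, x): 7-limb squaring without reduction, using the
+# same column structure as mulPre7 with x as both operands.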
+ .globl mcl_fpDbl_sqrPre7Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre7Lbmi2,@function
+mcl_fpDbl_sqrPre7Lbmi2: # @mcl_fpDbl_sqrPre7Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, -8(%rsp) # 8-byte Spill
+ movq 16(%rsi), %rdx
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rax
+ mulxq %rcx, %r8, %r10
+ movq 24(%rsi), %rbx
+ movq %rbx, -32(%rsp) # 8-byte Spill
+ movq %rax, %rdx
+ mulxq %rcx, %r12, %rbp
+ movq %rbp, -16(%rsp) # 8-byte Spill
+ movq %rcx, %rdx
+ mulxq %rcx, %rdx, %rdi
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ addq %r12, %rdi
+ adcq %rbp, %r8
+ movq %rbx, %rdx
+ mulxq %rcx, %rbp, %r9
+ adcq %r10, %rbp
+ movq 32(%rsi), %rdx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ mulxq %rcx, %r11, %r14
+ adcq %r9, %r11
+ movq 40(%rsi), %rdx
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ mulxq %rcx, %r10, %r15
+ adcq %r14, %r10
+ movq 48(%rsi), %r14
+ movq %r14, %rdx
+ mulxq %rcx, %rcx, %r13
+ adcq %r15, %rcx
+ movq -8(%rsp), %rdx # 8-byte Reload
+ movq -48(%rsp), %rbx # 8-byte Reload
+ movq %rbx, (%rdx)
+ adcq $0, %r13
+ addq %r12, %rdi
+ movq %rax, %rdx
+ mulxq %rax, %r12, %rdx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ adcq %r8, %r12
+ movq -24(%rsp), %rdx # 8-byte Reload
+ mulxq %rax, %r8, %rdx
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ adcq %rbp, %r8
+ movq -32(%rsp), %rdx # 8-byte Reload
+ mulxq %rax, %r9, %rbp
+ adcq %r11, %r9
+ movq -40(%rsp), %rdx # 8-byte Reload
+ mulxq %rax, %r15, %rdx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ adcq %r10, %r15
+ movq -56(%rsp), %rdx # 8-byte Reload
+ mulxq %rax, %r11, %rbx
+ adcq %rcx, %r11
+ movq %r14, %rdx
+ mulxq %rax, %r14, %rax
+ adcq %r13, %r14
+ sbbq %r13, %r13
+ andl $1, %r13d
+ addq -16(%rsp), %r12 # 8-byte Folded Reload
+ adcq -48(%rsp), %r8 # 8-byte Folded Reload
+ adcq -24(%rsp), %r9 # 8-byte Folded Reload
+ adcq %rbp, %r15
+ movq -8(%rsp), %rcx # 8-byte Reload
+ movq %rdi, 8(%rcx)
+ adcq -32(%rsp), %r11 # 8-byte Folded Reload
+ adcq %rbx, %r14
+ adcq %rax, %r13
+ movq (%rsi), %rdx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rcx
+ movq %rcx, -24(%rsp) # 8-byte Spill
+ movq 16(%rsi), %rbx
+ mulxq %rbx, %rax, %rdx
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ addq %r12, %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq %rcx, %rdx
+ mulxq %rbx, %r10, %rax
+ movq %rax, -72(%rsp) # 8-byte Spill
+ adcq %r8, %r10
+ movq %rbx, %rdx
+ mulxq %rbx, %r12, %rax
+ movq %rax, -80(%rsp) # 8-byte Spill
+ adcq %r9, %r12
+ movq 24(%rsi), %rax
+ movq %rax, %rdx
+ mulxq %rbx, %r8, %rdi
+ movq %rdi, -56(%rsp) # 8-byte Spill
+ adcq %r8, %r15
+ movq 32(%rsi), %rdx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ mulxq %rbx, %rcx, %rdx
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ adcq %r11, %rcx
+ movq 40(%rsi), %rdx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ mulxq %rbx, %rbp, %r11
+ adcq %r14, %rbp
+ movq 48(%rsi), %r14
+ movq %r14, %rdx
+ mulxq %rbx, %r9, %rdx
+ adcq %r13, %r9
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ addq -64(%rsp), %r10 # 8-byte Folded Reload
+ adcq -72(%rsp), %r12 # 8-byte Folded Reload
+ adcq -80(%rsp), %r15 # 8-byte Folded Reload
+ adcq %rdi, %rcx
+ adcq -88(%rsp), %rbp # 8-byte Folded Reload
+ adcq %r11, %r9
+ adcq %rdx, %rbx
+ movq -32(%rsp), %rdx # 8-byte Reload
+ mulxq %rax, %rdi, %rdx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ addq %r10, %rdi
+ movq -24(%rsp), %rdx # 8-byte Reload
+ mulxq %rax, %r11, %rdx
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ adcq %r12, %r11
+ adcq %r8, %r15
+ movq %rax, %rdx
+ mulxq %rax, %r8, %rdx
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ adcq %rcx, %r8
+ movq -40(%rsp), %rdx # 8-byte Reload
+ mulxq %rax, %r13, %rcx
+ movq %rcx, -40(%rsp) # 8-byte Spill
+ adcq %rbp, %r13
+ movq -48(%rsp), %rdx # 8-byte Reload
+ mulxq %rax, %r12, %rbp
+ adcq %r9, %r12
+ movq %r14, %rdx
+ mulxq %rax, %rcx, %rax
+ adcq %rbx, %rcx
+ sbbq %r10, %r10
+ andl $1, %r10d
+ addq -32(%rsp), %r11 # 8-byte Folded Reload
+ adcq -24(%rsp), %r15 # 8-byte Folded Reload
+ adcq -56(%rsp), %r8 # 8-byte Folded Reload
+ adcq -64(%rsp), %r13 # 8-byte Folded Reload
+ movq -8(%rsp), %rdx # 8-byte Reload
+ movq -16(%rsp), %rbx # 8-byte Reload
+ movq %rbx, 16(%rdx)
+ movq %rdi, 24(%rdx)
+ adcq -40(%rsp), %r12 # 8-byte Folded Reload
+ adcq %rbp, %rcx
+ adcq %rax, %r10
+ movq (%rsi), %rdx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rdi
+ movq %rdi, -24(%rsp) # 8-byte Spill
+ movq 32(%rsi), %rbx
+ mulxq %rbx, %rax, %rdx
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ addq %r11, %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq %rdi, %rdx
+ mulxq %rbx, %r9, %rax
+ movq %rax, -88(%rsp) # 8-byte Spill
+ adcq %r15, %r9
+ movq 16(%rsi), %rdx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ mulxq %rbx, %r15, %rax
+ movq %rax, -96(%rsp) # 8-byte Spill
+ adcq %r8, %r15
+ movq 24(%rsi), %rdx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ mulxq %rbx, %r8, %rbp
+ adcq %r13, %r8
+ movq %rbx, %rdx
+ mulxq %rbx, %r13, %r14
+ adcq %r12, %r13
+ movq 40(%rsi), %rax
+ movq %rax, %rdx
+ mulxq %rbx, %rdx, %rdi
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rdi, -56(%rsp) # 8-byte Spill
+ adcq %rdx, %rcx
+ movq 48(%rsi), %rdx
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ mulxq %rbx, %r11, %rdx
+ adcq %r10, %r11
+ sbbq %r12, %r12
+ andl $1, %r12d
+ addq -80(%rsp), %r9 # 8-byte Folded Reload
+ adcq -88(%rsp), %r15 # 8-byte Folded Reload
+ adcq -96(%rsp), %r8 # 8-byte Folded Reload
+ adcq %rbp, %r13
+ adcq %r14, %rcx
+ adcq %rdi, %r11
+ adcq %rdx, %r12
+ movq -32(%rsp), %rdx # 8-byte Reload
+ mulxq %rax, %r14, %rdi
+ addq %r9, %r14
+ movq -24(%rsp), %rdx # 8-byte Reload
+ mulxq %rax, %rbx, %rdx
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ adcq %r15, %rbx
+ movq -40(%rsp), %rdx # 8-byte Reload
+ mulxq %rax, %rbp, %rdx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ adcq %r8, %rbp
+ movq -48(%rsp), %rdx # 8-byte Reload
+ mulxq %rax, %r10, %r15
+ adcq %r13, %r10
+ adcq -72(%rsp), %rcx # 8-byte Folded Reload
+ movq %rax, %rdx
+ mulxq %rax, %r9, %r13
+ adcq %r11, %r9
+ movq -64(%rsp), %rdx # 8-byte Reload
+ mulxq %rax, %rax, %r11
+ adcq %r12, %rax
+ sbbq %r8, %r8
+ andl $1, %r8d
+ addq %rdi, %rbx
+ adcq -24(%rsp), %rbp # 8-byte Folded Reload
+ adcq -32(%rsp), %r10 # 8-byte Folded Reload
+ adcq %r15, %rcx
+ movq -8(%rsp), %rdi # 8-byte Reload
+ movq -16(%rsp), %rdx # 8-byte Reload
+ movq %rdx, 32(%rdi)
+ movq %r14, 40(%rdi)
+ adcq -56(%rsp), %r9 # 8-byte Folded Reload
+ adcq %r13, %rax
+ adcq %r11, %r8
+ movq 48(%rsi), %rdx
+ mulxq (%rsi), %r12, %r11
+ addq %rbx, %r12
+ mulxq 8(%rsi), %rbx, %r14
+ adcq %rbp, %rbx
+ mulxq 16(%rsi), %rbp, %r15
+ adcq %r10, %rbp
+ mulxq 24(%rsi), %rdi, %r10
+ adcq %rcx, %rdi
+ mulxq 32(%rsi), %rcx, %r13
+ adcq %r9, %rcx
+ mulxq 40(%rsi), %rsi, %r9
+ adcq %rax, %rsi
+ mulxq %rdx, %rdx, %rax
+ adcq %r8, %rdx
+ sbbq %r8, %r8
+ andl $1, %r8d
+ addq %r11, %rbx
+ adcq %r14, %rbp
+ movq -8(%rsp), %r11 # 8-byte Reload
+ movq %r12, 48(%r11)
+ movq %rbx, 56(%r11)
+ movq %rbp, 64(%r11)
+ adcq %r15, %rdi
+ movq %rdi, 72(%r11)
+ adcq %r10, %rcx
+ movq %rcx, 80(%r11)
+ adcq %r13, %rsi
+ movq %rsi, 88(%r11)
+ adcq %r9, %rdx
+ movq %rdx, 96(%r11)
+ adcq %rax, %r8
+ movq %r8, 104(%r11)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end97:
+ .size mcl_fpDbl_sqrPre7Lbmi2, .Lfunc_end97-mcl_fpDbl_sqrPre7Lbmi2
+
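+# mcl_fp_mont7Lbmi2(z, x, y, p): 7-limb Montgomery multiplication; each word
+# of y is multiplied in and reduced with the constant at -8(p), with a final
+# conditional subtraction of the modulus.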
+ .globl mcl_fp_mont7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont7Lbmi2,@function
+mcl_fp_mont7Lbmi2: # @mcl_fp_mont7Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $56, %rsp
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq %rdi, -120(%rsp) # 8-byte Spill
+ movq 48(%rsi), %rdi
+ movq %rdi, -24(%rsp) # 8-byte Spill
+ movq (%rdx), %rax
+ movq %rdi, %rdx
+ mulxq %rax, %rdx, %r8
+ movq %rdx, 48(%rsp) # 8-byte Spill
+ movq 40(%rsi), %rdx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ mulxq %rax, %rdx, %r9
+ movq %rdx, 40(%rsp) # 8-byte Spill
+ movq 32(%rsi), %rdx
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq 24(%rsi), %r11
+ movq %r11, -64(%rsp) # 8-byte Spill
+ movq 16(%rsi), %r10
+ movq %r10, -56(%rsp) # 8-byte Spill
+ movq (%rsi), %r15
+ movq %r15, -40(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rsi
+ movq %rsi, -48(%rsp) # 8-byte Spill
+ mulxq %rax, %r13, %rdi
+ movq %r11, %rdx
+ mulxq %rax, %r14, %rbp
+ movq %r10, %rdx
+ mulxq %rax, %r12, %rbx
+ movq %rsi, %rdx
+ mulxq %rax, %r10, %rsi
+ movq %r15, %rdx
+ mulxq %rax, %r15, %r11
+ addq %r10, %r11
+ adcq %r12, %rsi
+ movq %rsi, -112(%rsp) # 8-byte Spill
+ adcq %r14, %rbx
+ movq %rbx, -104(%rsp) # 8-byte Spill
+ adcq %r13, %rbp
+ movq %rbp, -96(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, -88(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, -80(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, %r13
+ movq -8(%rcx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq %r15, %rdx
+ imulq %rax, %rdx
+ movq (%rcx), %rdi
+ movq %rdi, 24(%rsp) # 8-byte Spill
+ movq 32(%rcx), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ mulxq %rax, %rbx, %r9
+ movq 16(%rcx), %rsi
+ movq %rsi, 40(%rsp) # 8-byte Spill
+ mulxq %rsi, %r14, %rbp
+ movq 8(%rcx), %rax
+ movq %rax, 32(%rsp) # 8-byte Spill
+ mulxq %rax, %rsi, %rax
+ mulxq %rdi, %r8, %r12
+ addq %rsi, %r12
+ adcq %r14, %rax
+ movq %rax, %rsi
+ movq 24(%rcx), %rax
+ movq %rax, 16(%rsp) # 8-byte Spill
+ mulxq %rax, %r10, %r14
+ adcq %rbp, %r10
+ adcq %rbx, %r14
+ movq 40(%rcx), %rax
+ movq %rax, 8(%rsp) # 8-byte Spill
+ mulxq %rax, %rbp, %rdi
+ adcq %r9, %rbp
+ movq 48(%rcx), %rax
+ movq %rax, (%rsp) # 8-byte Spill
+ mulxq %rax, %rax, %rbx
+ adcq %rdi, %rax
+ adcq $0, %rbx
+ addq %r15, %r8
+ adcq %r11, %r12
+ adcq -112(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, -112(%rsp) # 8-byte Spill
+ adcq -104(%rsp), %r10 # 8-byte Folded Reload
+ adcq -96(%rsp), %r14 # 8-byte Folded Reload
+ adcq -88(%rsp), %rbp # 8-byte Folded Reload
+ adcq -80(%rsp), %rax # 8-byte Folded Reload
+ adcq %r13, %rbx
+ sbbq %r11, %r11
+ andl $1, %r11d
+ movq -16(%rsp), %rcx # 8-byte Reload
+ movq 8(%rcx), %rdx
+ mulxq -24(%rsp), %rdi, %rcx # 8-byte Folded Reload
+ movq %rdi, -96(%rsp) # 8-byte Spill
+ movq %rcx, -80(%rsp) # 8-byte Spill
+ mulxq -32(%rsp), %rsi, %rcx # 8-byte Folded Reload
+ movq %rsi, -128(%rsp) # 8-byte Spill
+ movq %rcx, -88(%rsp) # 8-byte Spill
+ mulxq -48(%rsp), %r9, %r8 # 8-byte Folded Reload
+ mulxq -40(%rsp), %rsi, %rcx # 8-byte Folded Reload
+ movq %rsi, -104(%rsp) # 8-byte Spill
+ addq %r9, %rcx
+ movq %rcx, %rdi
+ mulxq -56(%rsp), %rcx, %r9 # 8-byte Folded Reload
+ adcq %r8, %rcx
+ movq %rcx, %rsi
+ mulxq -64(%rsp), %r13, %rcx # 8-byte Folded Reload
+ adcq %r9, %r13
+ mulxq -72(%rsp), %r8, %r15 # 8-byte Folded Reload
+ adcq %rcx, %r8
+ adcq -128(%rsp), %r15 # 8-byte Folded Reload
+ movq -88(%rsp), %rdx # 8-byte Reload
+ adcq -96(%rsp), %rdx # 8-byte Folded Reload
+ movq -80(%rsp), %rcx # 8-byte Reload
+ adcq $0, %rcx
+ movq -104(%rsp), %r9 # 8-byte Reload
+ addq %r12, %r9
+ movq %r9, -104(%rsp) # 8-byte Spill
+ adcq -112(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, %r12
+ adcq %r10, %rsi
+ movq %rsi, -128(%rsp) # 8-byte Spill
+ adcq %r14, %r13
+ adcq %rbp, %r8
+ adcq %rax, %r15
+ adcq %rbx, %rdx
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ adcq %r11, %rcx
+ movq %rcx, -80(%rsp) # 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %r9, %rdx
+ imulq -8(%rsp), %rdx # 8-byte Folded Reload
+ mulxq (%rsp), %r10, %rax # 8-byte Folded Reload
+ movq %rax, -96(%rsp) # 8-byte Spill
+ mulxq 16(%rsp), %rcx, %rax # 8-byte Folded Reload
+ mulxq 32(%rsp), %rdi, %rbx # 8-byte Folded Reload
+ mulxq 24(%rsp), %r14, %r9 # 8-byte Folded Reload
+ addq %rdi, %r9
+ mulxq 40(%rsp), %rbp, %r11 # 8-byte Folded Reload
+ adcq %rbx, %rbp
+ adcq %rcx, %r11
+ mulxq 48(%rsp), %rbx, %rsi # 8-byte Folded Reload
+ adcq %rax, %rbx
+ mulxq 8(%rsp), %rax, %rcx # 8-byte Folded Reload
+ adcq %rsi, %rax
+ adcq %r10, %rcx
+ movq -96(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq -104(%rsp), %r14 # 8-byte Folded Reload
+ adcq %r12, %r9
+ adcq -128(%rsp), %rbp # 8-byte Folded Reload
+ adcq %r13, %r11
+ adcq %r8, %rbx
+ adcq %r15, %rax
+ adcq -88(%rsp), %rcx # 8-byte Folded Reload
+ adcq -80(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -96(%rsp) # 8-byte Spill
+ adcq $0, -112(%rsp) # 8-byte Folded Spill
+ movq -16(%rsp), %rdx # 8-byte Reload
+ movq 16(%rdx), %rdx
+ mulxq -24(%rsp), %rdi, %rsi # 8-byte Folded Reload
+ movq %rdi, -104(%rsp) # 8-byte Spill
+ movq %rsi, -80(%rsp) # 8-byte Spill
+ mulxq -32(%rsp), %rdi, %rsi # 8-byte Folded Reload
+ movq %rdi, -128(%rsp) # 8-byte Spill
+ movq %rsi, -88(%rsp) # 8-byte Spill
+ mulxq -56(%rsp), %rdi, %r10 # 8-byte Folded Reload
+ mulxq -48(%rsp), %rsi, %r13 # 8-byte Folded Reload
+ mulxq -40(%rsp), %r8, %r15 # 8-byte Folded Reload
+ addq %rsi, %r15
+ adcq %rdi, %r13
+ mulxq -64(%rsp), %r12, %rsi # 8-byte Folded Reload
+ adcq %r10, %r12
+ mulxq -72(%rsp), %r10, %r14 # 8-byte Folded Reload
+ adcq %rsi, %r10
+ adcq -128(%rsp), %r14 # 8-byte Folded Reload
+ movq -88(%rsp), %rsi # 8-byte Reload
+ adcq -104(%rsp), %rsi # 8-byte Folded Reload
+ movq -80(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq %r9, %r8
+ movq %r8, -104(%rsp) # 8-byte Spill
+ adcq %rbp, %r15
+ adcq %r11, %r13
+ adcq %rbx, %r12
+ adcq %rax, %r10
+ adcq %rcx, %r14
+ adcq -96(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, -88(%rsp) # 8-byte Spill
+ adcq -112(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, %rbx
+ movq %r8, %rdx
+ imulq -8(%rsp), %rdx # 8-byte Folded Reload
+ mulxq (%rsp), %rcx, %rax # 8-byte Folded Reload
+ movq %rcx, -112(%rsp) # 8-byte Spill
+ movq %rax, -96(%rsp) # 8-byte Spill
+ mulxq 16(%rsp), %rcx, %rax # 8-byte Folded Reload
+ mulxq 32(%rsp), %rbp, %rsi # 8-byte Folded Reload
+ mulxq 24(%rsp), %r11, %r8 # 8-byte Folded Reload
+ addq %rbp, %r8
+ mulxq 40(%rsp), %rbp, %r9 # 8-byte Folded Reload
+ adcq %rsi, %rbp
+ adcq %rcx, %r9
+ mulxq 48(%rsp), %rsi, %rdi # 8-byte Folded Reload
+ adcq %rax, %rsi
+ mulxq 8(%rsp), %rax, %rcx # 8-byte Folded Reload
+ adcq %rdi, %rax
+ adcq -112(%rsp), %rcx # 8-byte Folded Reload
+ movq -96(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq -104(%rsp), %r11 # 8-byte Folded Reload
+ adcq %r15, %r8
+ adcq %r13, %rbp
+ adcq %r12, %r9
+ adcq %r10, %rsi
+ adcq %r14, %rax
+ adcq -88(%rsp), %rcx # 8-byte Folded Reload
+ adcq -80(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -96(%rsp) # 8-byte Spill
+ adcq $0, %rbx
+ movq %rbx, -88(%rsp) # 8-byte Spill
+ movq -16(%rsp), %rdx # 8-byte Reload
+ movq 24(%rdx), %rdx
+ mulxq -24(%rsp), %rbx, %rdi # 8-byte Folded Reload
+ movq %rbx, -112(%rsp) # 8-byte Spill
+ movq %rdi, -80(%rsp) # 8-byte Spill
+ mulxq -32(%rsp), %rdi, %r13 # 8-byte Folded Reload
+ movq %rdi, -128(%rsp) # 8-byte Spill
+ mulxq -56(%rsp), %r10, %r11 # 8-byte Folded Reload
+ mulxq -48(%rsp), %rdi, %r15 # 8-byte Folded Reload
+ mulxq -40(%rsp), %rbx, %r12 # 8-byte Folded Reload
+ movq %rbx, -104(%rsp) # 8-byte Spill
+ addq %rdi, %r12
+ adcq %r10, %r15
+ mulxq -64(%rsp), %rbx, %rdi # 8-byte Folded Reload
+ adcq %r11, %rbx
+ mulxq -72(%rsp), %r10, %r14 # 8-byte Folded Reload
+ adcq %rdi, %r10
+ adcq -128(%rsp), %r14 # 8-byte Folded Reload
+ adcq -112(%rsp), %r13 # 8-byte Folded Reload
+ movq -80(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ movq -104(%rsp), %rdi # 8-byte Reload
+ addq %r8, %rdi
+ movq %rdi, -104(%rsp) # 8-byte Spill
+ adcq %rbp, %r12
+ adcq %r9, %r15
+ adcq %rsi, %rbx
+ adcq %rax, %r10
+ adcq %rcx, %r14
+ adcq -96(%rsp), %r13 # 8-byte Folded Reload
+ adcq -88(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %rdi, %rdx
+ imulq -8(%rsp), %rdx # 8-byte Folded Reload
+ mulxq (%rsp), %rcx, %rax # 8-byte Folded Reload
+ movq %rcx, -112(%rsp) # 8-byte Spill
+ movq %rax, -88(%rsp) # 8-byte Spill
+ mulxq 16(%rsp), %rcx, %rax # 8-byte Folded Reload
+ mulxq 32(%rsp), %rbp, %rsi # 8-byte Folded Reload
+ mulxq 24(%rsp), %r11, %r8 # 8-byte Folded Reload
+ addq %rbp, %r8
+ mulxq 40(%rsp), %rbp, %r9 # 8-byte Folded Reload
+ adcq %rsi, %rbp
+ adcq %rcx, %r9
+ mulxq 48(%rsp), %rsi, %rdi # 8-byte Folded Reload
+ adcq %rax, %rsi
+ mulxq 8(%rsp), %rax, %rcx # 8-byte Folded Reload
+ adcq %rdi, %rax
+ adcq -112(%rsp), %rcx # 8-byte Folded Reload
+ movq -88(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq -104(%rsp), %r11 # 8-byte Folded Reload
+ adcq %r12, %r8
+ adcq %r15, %rbp
+ adcq %rbx, %r9
+ adcq %r10, %rsi
+ adcq %r14, %rax
+ adcq %r13, %rcx
+ adcq -80(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ adcq $0, -96(%rsp) # 8-byte Folded Spill
+ movq -16(%rsp), %rdx # 8-byte Reload
+ movq 32(%rdx), %rdx
+ mulxq -24(%rsp), %rbx, %rdi # 8-byte Folded Reload
+ movq %rbx, -104(%rsp) # 8-byte Spill
+ movq %rdi, -80(%rsp) # 8-byte Spill
+ mulxq -32(%rsp), %rdi, %r11 # 8-byte Folded Reload
+ movq %rdi, -112(%rsp) # 8-byte Spill
+ mulxq -56(%rsp), %r10, %r13 # 8-byte Folded Reload
+ mulxq -48(%rsp), %rdi, %r15 # 8-byte Folded Reload
+ mulxq -40(%rsp), %rbx, %r12 # 8-byte Folded Reload
+ addq %rdi, %r12
+ adcq %r10, %r15
+ mulxq -64(%rsp), %r10, %rdi # 8-byte Folded Reload
+ adcq %r13, %r10
+ mulxq -72(%rsp), %r13, %r14 # 8-byte Folded Reload
+ adcq %rdi, %r13
+ adcq -112(%rsp), %r14 # 8-byte Folded Reload
+ adcq -104(%rsp), %r11 # 8-byte Folded Reload
+ movq -80(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq %r8, %rbx
+ movq %rbx, -112(%rsp) # 8-byte Spill
+ adcq %rbp, %r12
+ adcq %r9, %r15
+ adcq %rsi, %r10
+ adcq %rax, %r13
+ adcq %rcx, %r14
+ adcq -88(%rsp), %r11 # 8-byte Folded Reload
+ movq %r11, -128(%rsp) # 8-byte Spill
+ adcq -96(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rbx, %rdx
+ imulq -8(%rsp), %rdx # 8-byte Folded Reload
+ mulxq (%rsp), %rcx, %rax # 8-byte Folded Reload
+ movq %rcx, -88(%rsp) # 8-byte Spill
+ movq %rax, -96(%rsp) # 8-byte Spill
+ mulxq 16(%rsp), %rcx, %rax # 8-byte Folded Reload
+ mulxq 32(%rsp), %rbp, %rsi # 8-byte Folded Reload
+ mulxq 24(%rsp), %r9, %r11 # 8-byte Folded Reload
+ addq %rbp, %r11
+ mulxq 40(%rsp), %rbp, %r8 # 8-byte Folded Reload
+ adcq %rsi, %rbp
+ adcq %rcx, %r8
+ mulxq 48(%rsp), %rsi, %rdi # 8-byte Folded Reload
+ adcq %rax, %rsi
+ mulxq 8(%rsp), %rax, %rcx # 8-byte Folded Reload
+ adcq %rdi, %rax
+ adcq -88(%rsp), %rcx # 8-byte Folded Reload
+ movq -96(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq -112(%rsp), %r9 # 8-byte Folded Reload
+ adcq %r12, %r11
+ adcq %r15, %rbp
+ adcq %r10, %r8
+ adcq %r13, %rsi
+ adcq %r14, %rax
+ adcq -128(%rsp), %rcx # 8-byte Folded Reload
+ adcq -80(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -96(%rsp) # 8-byte Spill
+ adcq $0, -104(%rsp) # 8-byte Folded Spill
+ movq -16(%rsp), %rdx # 8-byte Reload
+ movq 40(%rdx), %rdx
+ mulxq -24(%rsp), %rbx, %rdi # 8-byte Folded Reload
+ movq %rbx, -112(%rsp) # 8-byte Spill
+ movq %rdi, -80(%rsp) # 8-byte Spill
+ mulxq -32(%rsp), %rbx, %rdi # 8-byte Folded Reload
+ movq %rbx, -128(%rsp) # 8-byte Spill
+ movq %rdi, -88(%rsp) # 8-byte Spill
+ mulxq -56(%rsp), %rbx, %r10 # 8-byte Folded Reload
+ mulxq -48(%rsp), %rdi, %r13 # 8-byte Folded Reload
+ mulxq -40(%rsp), %r9, %r12 # 8-byte Folded Reload
+ addq %rdi, %r12
+ adcq %rbx, %r13
+ mulxq -64(%rsp), %r15, %rdi # 8-byte Folded Reload
+ adcq %r10, %r15
+ mulxq -72(%rsp), %r10, %r14 # 8-byte Folded Reload
+ adcq %rdi, %r10
+ adcq -128(%rsp), %r14 # 8-byte Folded Reload
+ movq -88(%rsp), %rdi # 8-byte Reload
+ adcq -112(%rsp), %rdi # 8-byte Folded Reload
+ movq -80(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq %r11, %r9
+ movq %r9, -112(%rsp) # 8-byte Spill
+ adcq %rbp, %r12
+ adcq %r8, %r13
+ adcq %rsi, %r15
+ adcq %rax, %r10
+ adcq %rcx, %r14
+ adcq -96(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, -88(%rsp) # 8-byte Spill
+ adcq -104(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %r9, %rdx
+ imulq -8(%rsp), %rdx # 8-byte Folded Reload
+ mulxq (%rsp), %rcx, %rax # 8-byte Folded Reload
+ movq %rcx, -128(%rsp) # 8-byte Spill
+ movq %rax, -96(%rsp) # 8-byte Spill
+ mulxq 16(%rsp), %rcx, %rax # 8-byte Folded Reload
+ mulxq 32(%rsp), %rdi, %rsi # 8-byte Folded Reload
+ mulxq 24(%rsp), %r11, %rbx # 8-byte Folded Reload
+ addq %rdi, %rbx
+ mulxq 40(%rsp), %r8, %r9 # 8-byte Folded Reload
+ adcq %rsi, %r8
+ adcq %rcx, %r9
+ mulxq 48(%rsp), %rdi, %rbp # 8-byte Folded Reload
+ adcq %rax, %rdi
+ mulxq 8(%rsp), %rcx, %rsi # 8-byte Folded Reload
+ adcq %rbp, %rcx
+ adcq -128(%rsp), %rsi # 8-byte Folded Reload
+ movq -96(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ addq -112(%rsp), %r11 # 8-byte Folded Reload
+ adcq %r12, %rbx
+ adcq %r13, %r8
+ adcq %r15, %r9
+ adcq %r10, %rdi
+ adcq %r14, %rcx
+ adcq -88(%rsp), %rsi # 8-byte Folded Reload
+ adcq -80(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq -104(%rsp), %r12 # 8-byte Reload
+ adcq $0, %r12
+ movq -16(%rsp), %rax # 8-byte Reload
+ movq 48(%rax), %rdx
+ mulxq -24(%rsp), %rbp, %rax # 8-byte Folded Reload
+ movq %rbp, -80(%rsp) # 8-byte Spill
+ movq %rax, -16(%rsp) # 8-byte Spill
+ mulxq -32(%rsp), %rbp, %rax # 8-byte Folded Reload
+ movq %rbp, -88(%rsp) # 8-byte Spill
+ movq %rax, -24(%rsp) # 8-byte Spill
+ mulxq -72(%rsp), %rbp, %rax # 8-byte Folded Reload
+ movq %rbp, -72(%rsp) # 8-byte Spill
+ movq %rax, -32(%rsp) # 8-byte Spill
+ mulxq -64(%rsp), %r13, %rbp # 8-byte Folded Reload
+ mulxq -56(%rsp), %r14, %r15 # 8-byte Folded Reload
+ mulxq -48(%rsp), %rax, %r11 # 8-byte Folded Reload
+ mulxq -40(%rsp), %rdx, %r10 # 8-byte Folded Reload
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ addq %rax, %r10
+ adcq %r14, %r11
+ adcq %r13, %r15
+ adcq -72(%rsp), %rbp # 8-byte Folded Reload
+ movq -32(%rsp), %r14 # 8-byte Reload
+ adcq -88(%rsp), %r14 # 8-byte Folded Reload
+ movq -24(%rsp), %rdx # 8-byte Reload
+ adcq -80(%rsp), %rdx # 8-byte Folded Reload
+ movq -16(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ movq -40(%rsp), %r13 # 8-byte Reload
+ addq %rbx, %r13
+ movq %r13, -40(%rsp) # 8-byte Spill
+ adcq %r8, %r10
+ adcq %r9, %r11
+ adcq %rdi, %r15
+ adcq %rcx, %rbp
+ movq %rbp, -48(%rsp) # 8-byte Spill
+ adcq %rsi, %r14
+ movq %r14, -32(%rsp) # 8-byte Spill
+ adcq -96(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ adcq %r12, %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ sbbq %rdi, %rdi
+ movq -8(%rsp), %rdx # 8-byte Reload
+ imulq %r13, %rdx
+ mulxq 16(%rsp), %rbp, %rsi # 8-byte Folded Reload
+ mulxq 32(%rsp), %rcx, %rbx # 8-byte Folded Reload
+ mulxq 24(%rsp), %r13, %rax # 8-byte Folded Reload
+ addq %rcx, %rax
+ mulxq 40(%rsp), %rcx, %r9 # 8-byte Folded Reload
+ adcq %rbx, %rcx
+ adcq %rbp, %r9
+ mulxq 48(%rsp), %rbp, %rbx # 8-byte Folded Reload
+ adcq %rsi, %rbp
+ mulxq 8(%rsp), %rsi, %r14 # 8-byte Folded Reload
+ adcq %rbx, %rsi
+ mulxq (%rsp), %rdx, %rbx # 8-byte Folded Reload
+ adcq %r14, %rdx
+ adcq $0, %rbx
+ andl $1, %edi
+ addq -40(%rsp), %r13 # 8-byte Folded Reload
+ adcq %r10, %rax
+ adcq %r11, %rcx
+ adcq %r15, %r9
+ adcq -48(%rsp), %rbp # 8-byte Folded Reload
+ adcq -32(%rsp), %rsi # 8-byte Folded Reload
+ adcq -24(%rsp), %rdx # 8-byte Folded Reload
+ adcq -16(%rsp), %rbx # 8-byte Folded Reload
+ adcq $0, %rdi
+ movq %rax, %r8
+ subq 24(%rsp), %r8 # 8-byte Folded Reload
+ movq %rcx, %r10
+ sbbq 32(%rsp), %r10 # 8-byte Folded Reload
+ movq %r9, %r11
+ sbbq 40(%rsp), %r11 # 8-byte Folded Reload
+ movq %rbp, %r14
+ sbbq 16(%rsp), %r14 # 8-byte Folded Reload
+ movq %rsi, %r15
+ sbbq 48(%rsp), %r15 # 8-byte Folded Reload
+ movq %rdx, %r12
+ sbbq 8(%rsp), %r12 # 8-byte Folded Reload
+ movq %rbx, %r13
+ sbbq (%rsp), %r13 # 8-byte Folded Reload
+ sbbq $0, %rdi
+ andl $1, %edi
+ cmovneq %rbx, %r13
+ testb %dil, %dil
+ cmovneq %rax, %r8
+ movq -120(%rsp), %rax # 8-byte Reload
+ movq %r8, (%rax)
+ cmovneq %rcx, %r10
+ movq %r10, 8(%rax)
+ cmovneq %r9, %r11
+ movq %r11, 16(%rax)
+ cmovneq %rbp, %r14
+ movq %r14, 24(%rax)
+ cmovneq %rsi, %r15
+ movq %r15, 32(%rax)
+ cmovneq %rdx, %r12
+ movq %r12, 40(%rax)
+ movq %r13, 48(%rax)
+ addq $56, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end98:
+ .size mcl_fp_mont7Lbmi2, .Lfunc_end98-mcl_fp_mont7Lbmi2
+
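+# mcl_fp_montNF7Lbmi2(z, x, y, p): Montgomery multiplication variant that
+# selects the reduced result with sign-based cmovs instead of a carry check.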
+ .globl mcl_fp_montNF7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF7Lbmi2,@function
+mcl_fp_montNF7Lbmi2: # @mcl_fp_montNF7Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $40, %rsp
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rdi, -104(%rsp) # 8-byte Spill
+ movq (%rsi), %rax
+ movq %rax, -32(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rdi
+ movq %rdi, -48(%rsp) # 8-byte Spill
+ movq (%rdx), %rbp
+ movq %rdi, %rdx
+ mulxq %rbp, %rdi, %rbx
+ movq %rax, %rdx
+ mulxq %rbp, %r8, %r14
+ movq 16(%rsi), %rdx
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ addq %rdi, %r14
+ mulxq %rbp, %r15, %rax
+ adcq %rbx, %r15
+ movq 24(%rsi), %rdx
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ mulxq %rbp, %rbx, %rdi
+ adcq %rax, %rbx
+ movq 32(%rsi), %rdx
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ mulxq %rbp, %r11, %rax
+ adcq %rdi, %r11
+ movq 40(%rsi), %rdx
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ mulxq %rbp, %r9, %rdi
+ adcq %rax, %r9
+ movq 48(%rsi), %rdx
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ mulxq %rbp, %r10, %rbp
+ adcq %rdi, %r10
+ adcq $0, %rbp
+ movq -8(%rcx), %rax
+ movq %rax, 8(%rsp) # 8-byte Spill
+ movq %r8, %rdx
+ imulq %rax, %rdx
+ movq (%rcx), %rax
+ movq %rax, 24(%rsp) # 8-byte Spill
+ mulxq %rax, %rax, %rsi
+ movq %rsi, -96(%rsp) # 8-byte Spill
+ addq %r8, %rax
+ movq 8(%rcx), %rax
+ movq %rax, 16(%rsp) # 8-byte Spill
+ mulxq %rax, %r8, %rsi
+ movq %rsi, -112(%rsp) # 8-byte Spill
+ adcq %r14, %r8
+ movq 16(%rcx), %rax
+ movq %rax, (%rsp) # 8-byte Spill
+ mulxq %rax, %rsi, %r13
+ adcq %r15, %rsi
+ movq 24(%rcx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ mulxq %rax, %r12, %rax
+ adcq %rbx, %r12
+ movq 32(%rcx), %rdi
+ movq %rdi, -16(%rsp) # 8-byte Spill
+ mulxq %rdi, %r15, %rbx
+ adcq %r11, %r15
+ movq 40(%rcx), %rdi
+ movq %rdi, -24(%rsp) # 8-byte Spill
+ mulxq %rdi, %r14, %rdi
+ adcq %r9, %r14
+ movq 48(%rcx), %rcx
+ movq %rcx, 32(%rsp) # 8-byte Spill
+ mulxq %rcx, %r11, %rcx
+ adcq %r10, %r11
+ adcq $0, %rbp
+ addq -96(%rsp), %r8 # 8-byte Folded Reload
+ movq %r8, -96(%rsp) # 8-byte Spill
+ adcq -112(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, -112(%rsp) # 8-byte Spill
+ adcq %r13, %r12
+ adcq %rax, %r15
+ adcq %rbx, %r14
+ adcq %rdi, %r11
+ adcq %rcx, %rbp
+ movq -40(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rdx
+ mulxq -48(%rsp), %rcx, %rsi # 8-byte Folded Reload
+ mulxq -32(%rsp), %r13, %rax # 8-byte Folded Reload
+ addq %rcx, %rax
+ mulxq -56(%rsp), %rcx, %rdi # 8-byte Folded Reload
+ adcq %rsi, %rcx
+ mulxq -64(%rsp), %rsi, %r8 # 8-byte Folded Reload
+ adcq %rdi, %rsi
+ mulxq -72(%rsp), %rdi, %r9 # 8-byte Folded Reload
+ adcq %r8, %rdi
+ mulxq -80(%rsp), %r8, %rbx # 8-byte Folded Reload
+ adcq %r9, %r8
+ mulxq -88(%rsp), %r9, %r10 # 8-byte Folded Reload
+ adcq %rbx, %r9
+ adcq $0, %r10
+ addq -96(%rsp), %r13 # 8-byte Folded Reload
+ adcq -112(%rsp), %rax # 8-byte Folded Reload
+ adcq %r12, %rcx
+ adcq %r15, %rsi
+ adcq %r14, %rdi
+ adcq %r11, %r8
+ adcq %rbp, %r9
+ adcq $0, %r10
+ movq %r13, %rdx
+ imulq 8(%rsp), %rdx # 8-byte Folded Reload
+ mulxq 24(%rsp), %rbp, %rbx # 8-byte Folded Reload
+ movq %rbx, -96(%rsp) # 8-byte Spill
+ addq %r13, %rbp
+ mulxq 16(%rsp), %rbp, %r14 # 8-byte Folded Reload
+ adcq %rax, %rbp
+ mulxq (%rsp), %rax, %r11 # 8-byte Folded Reload
+ adcq %rcx, %rax
+ mulxq -8(%rsp), %r12, %rcx # 8-byte Folded Reload
+ adcq %rsi, %r12
+ mulxq -16(%rsp), %r15, %rbx # 8-byte Folded Reload
+ adcq %rdi, %r15
+ mulxq -24(%rsp), %r13, %rdi # 8-byte Folded Reload
+ adcq %r8, %r13
+ mulxq 32(%rsp), %rsi, %rdx # 8-byte Folded Reload
+ adcq %r9, %rsi
+ adcq $0, %r10
+ addq -96(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, -96(%rsp) # 8-byte Spill
+ adcq %r14, %rax
+ movq %rax, -112(%rsp) # 8-byte Spill
+ adcq %r11, %r12
+ adcq %rcx, %r15
+ adcq %rbx, %r13
+ adcq %rdi, %rsi
+ adcq %rdx, %r10
+ movq -40(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rdx
+ mulxq -48(%rsp), %rcx, %rax # 8-byte Folded Reload
+ mulxq -32(%rsp), %r14, %rdi # 8-byte Folded Reload
+ addq %rcx, %rdi
+ mulxq -56(%rsp), %rbp, %rcx # 8-byte Folded Reload
+ adcq %rax, %rbp
+ mulxq -64(%rsp), %rbx, %r8 # 8-byte Folded Reload
+ adcq %rcx, %rbx
+ mulxq -72(%rsp), %rax, %r9 # 8-byte Folded Reload
+ adcq %r8, %rax
+ mulxq -80(%rsp), %r8, %rcx # 8-byte Folded Reload
+ movq %rcx, -120(%rsp) # 8-byte Spill
+ adcq %r9, %r8
+ mulxq -88(%rsp), %r9, %r11 # 8-byte Folded Reload
+ adcq -120(%rsp), %r9 # 8-byte Folded Reload
+ adcq $0, %r11
+ addq -96(%rsp), %r14 # 8-byte Folded Reload
+ adcq -112(%rsp), %rdi # 8-byte Folded Reload
+ adcq %r12, %rbp
+ adcq %r15, %rbx
+ adcq %r13, %rax
+ adcq %rsi, %r8
+ adcq %r10, %r9
+ adcq $0, %r11
+ movq %r14, %rdx
+ imulq 8(%rsp), %rdx # 8-byte Folded Reload
+ mulxq 24(%rsp), %rsi, %rcx # 8-byte Folded Reload
+ movq %rcx, -96(%rsp) # 8-byte Spill
+ addq %r14, %rsi
+ mulxq 16(%rsp), %rsi, %r13 # 8-byte Folded Reload
+ adcq %rdi, %rsi
+ mulxq (%rsp), %rdi, %r15 # 8-byte Folded Reload
+ adcq %rbp, %rdi
+ mulxq -8(%rsp), %rcx, %rbp # 8-byte Folded Reload
+ adcq %rbx, %rcx
+ mulxq -16(%rsp), %r14, %rbx # 8-byte Folded Reload
+ adcq %rax, %r14
+ mulxq -24(%rsp), %r12, %rax # 8-byte Folded Reload
+ adcq %r8, %r12
+ mulxq 32(%rsp), %r10, %rdx # 8-byte Folded Reload
+ adcq %r9, %r10
+ adcq $0, %r11
+ addq -96(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, -96(%rsp) # 8-byte Spill
+ adcq %r13, %rdi
+ movq %rdi, -112(%rsp) # 8-byte Spill
+ adcq %r15, %rcx
+ adcq %rbp, %r14
+ adcq %rbx, %r12
+ adcq %rax, %r10
+ adcq %rdx, %r11
+ movq -40(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rdx
+ mulxq -48(%rsp), %rsi, %rax # 8-byte Folded Reload
+ mulxq -32(%rsp), %r15, %rbp # 8-byte Folded Reload
+ addq %rsi, %rbp
+ mulxq -56(%rsp), %rbx, %rdi # 8-byte Folded Reload
+ adcq %rax, %rbx
+ mulxq -64(%rsp), %rsi, %rax # 8-byte Folded Reload
+ adcq %rdi, %rsi
+ mulxq -72(%rsp), %rdi, %r9 # 8-byte Folded Reload
+ adcq %rax, %rdi
+ mulxq -80(%rsp), %r8, %rax # 8-byte Folded Reload
+ adcq %r9, %r8
+ mulxq -88(%rsp), %r9, %r13 # 8-byte Folded Reload
+ adcq %rax, %r9
+ adcq $0, %r13
+ addq -96(%rsp), %r15 # 8-byte Folded Reload
+ adcq -112(%rsp), %rbp # 8-byte Folded Reload
+ adcq %rcx, %rbx
+ adcq %r14, %rsi
+ adcq %r12, %rdi
+ adcq %r10, %r8
+ adcq %r11, %r9
+ adcq $0, %r13
+ movq %r15, %rdx
+ imulq 8(%rsp), %rdx # 8-byte Folded Reload
+ mulxq 24(%rsp), %rcx, %rax # 8-byte Folded Reload
+ movq %rax, -96(%rsp) # 8-byte Spill
+ addq %r15, %rcx
+ mulxq 16(%rsp), %rcx, %r11 # 8-byte Folded Reload
+ adcq %rbp, %rcx
+ mulxq (%rsp), %rbp, %r10 # 8-byte Folded Reload
+ adcq %rbx, %rbp
+ mulxq -8(%rsp), %rax, %rbx # 8-byte Folded Reload
+ adcq %rsi, %rax
+ mulxq -16(%rsp), %r14, %rsi # 8-byte Folded Reload
+ adcq %rdi, %r14
+ mulxq -24(%rsp), %r15, %rdi # 8-byte Folded Reload
+ adcq %r8, %r15
+ mulxq 32(%rsp), %r12, %rdx # 8-byte Folded Reload
+ adcq %r9, %r12
+ adcq $0, %r13
+ addq -96(%rsp), %rcx # 8-byte Folded Reload
+ adcq %r11, %rbp
+ movq %rbp, -96(%rsp) # 8-byte Spill
+ adcq %r10, %rax
+ movq %rax, -112(%rsp) # 8-byte Spill
+ adcq %rbx, %r14
+ adcq %rsi, %r15
+ adcq %rdi, %r12
+ adcq %rdx, %r13
+ movq -40(%rsp), %rax # 8-byte Reload
+ movq 32(%rax), %rdx
+ mulxq -48(%rsp), %rsi, %rdi # 8-byte Folded Reload
+ mulxq -32(%rsp), %r11, %r8 # 8-byte Folded Reload
+ addq %rsi, %r8
+ mulxq -56(%rsp), %rbx, %rsi # 8-byte Folded Reload
+ adcq %rdi, %rbx
+ mulxq -64(%rsp), %rbp, %rdi # 8-byte Folded Reload
+ adcq %rsi, %rbp
+ mulxq -72(%rsp), %rsi, %r9 # 8-byte Folded Reload
+ adcq %rdi, %rsi
+ mulxq -80(%rsp), %rdi, %rax # 8-byte Folded Reload
+ adcq %r9, %rdi
+ mulxq -88(%rsp), %r9, %r10 # 8-byte Folded Reload
+ adcq %rax, %r9
+ adcq $0, %r10
+ addq %rcx, %r11
+ adcq -96(%rsp), %r8 # 8-byte Folded Reload
+ adcq -112(%rsp), %rbx # 8-byte Folded Reload
+ adcq %r14, %rbp
+ adcq %r15, %rsi
+ adcq %r12, %rdi
+ adcq %r13, %r9
+ adcq $0, %r10
+ movq %r11, %rdx
+ imulq 8(%rsp), %rdx # 8-byte Folded Reload
+ mulxq 24(%rsp), %rcx, %rax # 8-byte Folded Reload
+ movq %rax, -96(%rsp) # 8-byte Spill
+ addq %r11, %rcx
+ mulxq 16(%rsp), %rcx, %r13 # 8-byte Folded Reload
+ adcq %r8, %rcx
+ mulxq (%rsp), %rax, %r8 # 8-byte Folded Reload
+ adcq %rbx, %rax
+ mulxq -8(%rsp), %rbx, %r11 # 8-byte Folded Reload
+ adcq %rbp, %rbx
+ mulxq -16(%rsp), %r14, %rbp # 8-byte Folded Reload
+ adcq %rsi, %r14
+ mulxq -24(%rsp), %r15, %rsi # 8-byte Folded Reload
+ adcq %rdi, %r15
+ mulxq 32(%rsp), %r12, %rdx # 8-byte Folded Reload
+ adcq %r9, %r12
+ adcq $0, %r10
+ addq -96(%rsp), %rcx # 8-byte Folded Reload
+ adcq %r13, %rax
+ movq %rax, -96(%rsp) # 8-byte Spill
+ adcq %r8, %rbx
+ movq %rbx, -112(%rsp) # 8-byte Spill
+ adcq %r11, %r14
+ adcq %rbp, %r15
+ adcq %rsi, %r12
+ adcq %rdx, %r10
+ movq -40(%rsp), %rax # 8-byte Reload
+ movq 40(%rax), %rdx
+ mulxq -48(%rsp), %rsi, %rax # 8-byte Folded Reload
+ mulxq -32(%rsp), %r11, %rbp # 8-byte Folded Reload
+ addq %rsi, %rbp
+ mulxq -56(%rsp), %rbx, %rdi # 8-byte Folded Reload
+ adcq %rax, %rbx
+ mulxq -64(%rsp), %rsi, %rax # 8-byte Folded Reload
+ adcq %rdi, %rsi
+ mulxq -72(%rsp), %rdi, %r9 # 8-byte Folded Reload
+ adcq %rax, %rdi
+ mulxq -80(%rsp), %r8, %rax # 8-byte Folded Reload
+ adcq %r9, %r8
+ mulxq -88(%rsp), %r9, %r13 # 8-byte Folded Reload
+ adcq %rax, %r9
+ adcq $0, %r13
+ addq %rcx, %r11
+ adcq -96(%rsp), %rbp # 8-byte Folded Reload
+ adcq -112(%rsp), %rbx # 8-byte Folded Reload
+ adcq %r14, %rsi
+ adcq %r15, %rdi
+ adcq %r12, %r8
+ adcq %r10, %r9
+ adcq $0, %r13
+ movq %r11, %rdx
+ imulq 8(%rsp), %rdx # 8-byte Folded Reload
+ mulxq 24(%rsp), %rcx, %rax # 8-byte Folded Reload
+ movq %rax, -112(%rsp) # 8-byte Spill
+ addq %r11, %rcx
+ mulxq 16(%rsp), %rcx, %rax # 8-byte Folded Reload
+ movq %rax, -120(%rsp) # 8-byte Spill
+ adcq %rbp, %rcx
+ mulxq (%rsp), %rax, %rbp # 8-byte Folded Reload
+ movq %rbp, -128(%rsp) # 8-byte Spill
+ adcq %rbx, %rax
+ movq %rax, -96(%rsp) # 8-byte Spill
+ mulxq -8(%rsp), %r14, %rbp # 8-byte Folded Reload
+ adcq %rsi, %r14
+ mulxq -16(%rsp), %r11, %r12 # 8-byte Folded Reload
+ adcq %rdi, %r11
+ mulxq -24(%rsp), %r10, %rbx # 8-byte Folded Reload
+ adcq %r8, %r10
+ mulxq 32(%rsp), %rdi, %rax # 8-byte Folded Reload
+ adcq %r9, %rdi
+ adcq $0, %r13
+ addq -112(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -112(%rsp) # 8-byte Spill
+ movq -96(%rsp), %rcx # 8-byte Reload
+ adcq -120(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -96(%rsp) # 8-byte Spill
+ adcq -128(%rsp), %r14 # 8-byte Folded Reload
+ adcq %rbp, %r11
+ adcq %r12, %r10
+ adcq %rbx, %rdi
+ adcq %rax, %r13
+ movq -40(%rsp), %rax # 8-byte Reload
+ movq 48(%rax), %rdx
+ mulxq -48(%rsp), %rbp, %r9 # 8-byte Folded Reload
+ mulxq -32(%rsp), %r8, %rax # 8-byte Folded Reload
+ addq %rbp, %rax
+ mulxq -56(%rsp), %rbx, %rcx # 8-byte Folded Reload
+ adcq %r9, %rbx
+ mulxq -64(%rsp), %rbp, %r9 # 8-byte Folded Reload
+ adcq %rcx, %rbp
+ mulxq -72(%rsp), %rcx, %r12 # 8-byte Folded Reload
+ adcq %r9, %rcx
+ mulxq -80(%rsp), %r15, %rsi # 8-byte Folded Reload
+ movq %rsi, -32(%rsp) # 8-byte Spill
+ adcq %r12, %r15
+ mulxq -88(%rsp), %r12, %r9 # 8-byte Folded Reload
+ adcq -32(%rsp), %r12 # 8-byte Folded Reload
+ adcq $0, %r9
+ addq -112(%rsp), %r8 # 8-byte Folded Reload
+ adcq -96(%rsp), %rax # 8-byte Folded Reload
+ adcq %r14, %rbx
+ adcq %r11, %rbp
+ adcq %r10, %rcx
+ adcq %rdi, %r15
+ adcq %r13, %r12
+ adcq $0, %r9
+ movq 8(%rsp), %rdx # 8-byte Reload
+ imulq %r8, %rdx
+ mulxq 24(%rsp), %rdi, %rsi # 8-byte Folded Reload
+ movq %rsi, 8(%rsp) # 8-byte Spill
+ addq %r8, %rdi
+ mulxq 16(%rsp), %r8, %rsi # 8-byte Folded Reload
+ movq %rsi, -32(%rsp) # 8-byte Spill
+ adcq %rax, %r8
+ movq (%rsp), %r11 # 8-byte Reload
+ mulxq %r11, %rsi, %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ adcq %rbx, %rsi
+ movq -8(%rsp), %r14 # 8-byte Reload
+ mulxq %r14, %rdi, %rax
+ movq %rax, -48(%rsp) # 8-byte Spill
+ adcq %rbp, %rdi
+ movq -16(%rsp), %rbp # 8-byte Reload
+ mulxq %rbp, %rax, %rbx
+ movq %rbx, -56(%rsp) # 8-byte Spill
+ adcq %rcx, %rax
+ movq -24(%rsp), %rbx # 8-byte Reload
+ mulxq %rbx, %rcx, %r13
+ adcq %r15, %rcx
+ mulxq 32(%rsp), %rdx, %r15 # 8-byte Folded Reload
+ adcq %r12, %rdx
+ adcq $0, %r9
+ addq 8(%rsp), %r8 # 8-byte Folded Reload
+ adcq -32(%rsp), %rsi # 8-byte Folded Reload
+ adcq -40(%rsp), %rdi # 8-byte Folded Reload
+ adcq -48(%rsp), %rax # 8-byte Folded Reload
+ adcq -56(%rsp), %rcx # 8-byte Folded Reload
+ adcq %r13, %rdx
+ adcq %r15, %r9
+ movq %r8, %r13
+ subq 24(%rsp), %r13 # 8-byte Folded Reload
+ movq %rsi, %r12
+ sbbq 16(%rsp), %r12 # 8-byte Folded Reload
+ movq %rdi, %r10
+ sbbq %r11, %r10
+ movq %rax, %r11
+ sbbq %r14, %r11
+ movq %rcx, %r14
+ sbbq %rbp, %r14
+ movq %rdx, %r15
+ sbbq %rbx, %r15
+ movq %r9, %rbp
+ sbbq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, %rbx
+ sarq $63, %rbx
+ cmovsq %r8, %r13
+ movq -104(%rsp), %rbx # 8-byte Reload
+ movq %r13, (%rbx)
+ cmovsq %rsi, %r12
+ movq %r12, 8(%rbx)
+ cmovsq %rdi, %r10
+ movq %r10, 16(%rbx)
+ cmovsq %rax, %r11
+ movq %r11, 24(%rbx)
+ cmovsq %rcx, %r14
+ movq %r14, 32(%rbx)
+ cmovsq %rdx, %r15
+ movq %r15, 40(%rbx)
+ cmovsq %r9, %rbp
+ movq %rbp, 48(%rbx)
+ addq $40, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end99:
+ .size mcl_fp_montNF7Lbmi2, .Lfunc_end99-mcl_fp_montNF7Lbmi2
+
+ .globl mcl_fp_montRed7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed7Lbmi2,@function
+mcl_fp_montRed7Lbmi2: # @mcl_fp_montRed7Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $64, %rsp
+ movq %rdx, %rcx
+ movq %rdi, -88(%rsp) # 8-byte Spill
+ movq -8(%rcx), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq (%rcx), %rbx
+ movq %rbx, 32(%rsp) # 8-byte Spill
+ movq (%rsi), %rdx
+ movq %rdx, 48(%rsp) # 8-byte Spill
+ imulq %rax, %rdx
+ movq 48(%rcx), %rax
+ movq %rax, 16(%rsp) # 8-byte Spill
+ mulxq %rax, %rdi, %rax
+ movq %rdi, 40(%rsp) # 8-byte Spill
+ movq %rax, -48(%rsp) # 8-byte Spill
+ movq 40(%rcx), %r8
+ movq %r8, (%rsp) # 8-byte Spill
+ movq 32(%rcx), %r9
+ movq %r9, 24(%rsp) # 8-byte Spill
+ movq 24(%rcx), %rbp
+ movq %rbp, 8(%rsp) # 8-byte Spill
+ movq 16(%rcx), %rdi
+ movq %rdi, 56(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ mulxq %r8, %r10, %r11
+ mulxq %r9, %r14, %r9
+ mulxq %rbp, %r8, %r13
+ mulxq %rdi, %rcx, %r12
+ mulxq %rax, %rbp, %r15
+ mulxq %rbx, %rdx, %rdi
+ addq %rbp, %rdi
+ adcq %rcx, %r15
+ adcq %r8, %r12
+ adcq %r14, %r13
+ adcq %r10, %r9
+ adcq 40(%rsp), %r11 # 8-byte Folded Reload
+ movq -48(%rsp), %rcx # 8-byte Reload
+ adcq $0, %rcx
+ addq 48(%rsp), %rdx # 8-byte Folded Reload
+ adcq 8(%rsi), %rdi
+ adcq 16(%rsi), %r15
+ adcq 24(%rsi), %r12
+ adcq 32(%rsi), %r13
+ adcq 40(%rsi), %r9
+ movq %r9, -96(%rsp) # 8-byte Spill
+ adcq 48(%rsi), %r11
+ movq %r11, -72(%rsp) # 8-byte Spill
+ adcq 56(%rsi), %rcx
+ movq %rcx, -48(%rsp) # 8-byte Spill
+ movq 104(%rsi), %r8
+ movq 96(%rsi), %rdx
+ movq 88(%rsi), %rbp
+ movq 80(%rsi), %rbx
+ movq 72(%rsi), %rcx
+ movq 64(%rsi), %rsi
+ adcq $0, %rsi
+ movq %rsi, -104(%rsp) # 8-byte Spill
+ adcq $0, %rcx
+ movq %rcx, -80(%rsp) # 8-byte Spill
+ adcq $0, %rbx
+ movq %rbx, -40(%rsp) # 8-byte Spill
+ adcq $0, %rbp
+ movq %rbp, -32(%rsp) # 8-byte Spill
+ adcq $0, %rdx
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 48(%rsp) # 8-byte Spill
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ movq %rcx, 40(%rsp) # 8-byte Spill
+ movq %rdi, %rdx
+ movq -24(%rsp), %r9 # 8-byte Reload
+ imulq %r9, %rdx
+ mulxq 16(%rsp), %rsi, %rcx # 8-byte Folded Reload
+ movq %rsi, -112(%rsp) # 8-byte Spill
+ movq %rcx, -56(%rsp) # 8-byte Spill
+ mulxq (%rsp), %rsi, %rcx # 8-byte Folded Reload
+ movq %rsi, -120(%rsp) # 8-byte Spill
+ movq %rcx, -64(%rsp) # 8-byte Spill
+ movq 24(%rsp), %rbx # 8-byte Reload
+ mulxq %rbx, %rcx, %rbp
+ movq %rcx, -128(%rsp) # 8-byte Spill
+ mulxq 8(%rsp), %r10, %r14 # 8-byte Folded Reload
+ mulxq 56(%rsp), %rsi, %r11 # 8-byte Folded Reload
+ mulxq %rax, %rcx, %r8
+ mulxq 32(%rsp), %rdx, %rax # 8-byte Folded Reload
+ addq %rcx, %rax
+ adcq %rsi, %r8
+ adcq %r10, %r11
+ adcq -128(%rsp), %r14 # 8-byte Folded Reload
+ adcq -120(%rsp), %rbp # 8-byte Folded Reload
+ movq -64(%rsp), %rsi # 8-byte Reload
+ adcq -112(%rsp), %rsi # 8-byte Folded Reload
+ movq -56(%rsp), %rcx # 8-byte Reload
+ adcq $0, %rcx
+ addq %rdi, %rdx
+ adcq %r15, %rax
+ adcq %r12, %r8
+ adcq %r13, %r11
+ adcq -96(%rsp), %r14 # 8-byte Folded Reload
+ adcq -72(%rsp), %rbp # 8-byte Folded Reload
+ adcq -48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, -64(%rsp) # 8-byte Spill
+ adcq -104(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -56(%rsp) # 8-byte Spill
+ adcq $0, -80(%rsp) # 8-byte Folded Spill
+ adcq $0, -40(%rsp) # 8-byte Folded Spill
+ adcq $0, -32(%rsp) # 8-byte Folded Spill
+ adcq $0, -8(%rsp) # 8-byte Folded Spill
+ adcq $0, 48(%rsp) # 8-byte Folded Spill
+ adcq $0, 40(%rsp) # 8-byte Folded Spill
+ movq %rax, %rdx
+ imulq %r9, %rdx
+ mulxq 16(%rsp), %rsi, %rcx # 8-byte Folded Reload
+ movq %rsi, -96(%rsp) # 8-byte Spill
+ movq %rcx, -48(%rsp) # 8-byte Spill
+ movq (%rsp), %r15 # 8-byte Reload
+ mulxq %r15, %rsi, %rcx
+ movq %rsi, -104(%rsp) # 8-byte Spill
+ movq %rcx, -72(%rsp) # 8-byte Spill
+ mulxq %rbx, %rcx, %r13
+ movq %rcx, -112(%rsp) # 8-byte Spill
+ mulxq 8(%rsp), %rbx, %r12 # 8-byte Folded Reload
+ mulxq 56(%rsp), %rdi, %r9 # 8-byte Folded Reload
+ mulxq -16(%rsp), %rsi, %r10 # 8-byte Folded Reload
+ mulxq 32(%rsp), %rdx, %rcx # 8-byte Folded Reload
+ addq %rsi, %rcx
+ adcq %rdi, %r10
+ adcq %rbx, %r9
+ adcq -112(%rsp), %r12 # 8-byte Folded Reload
+ adcq -104(%rsp), %r13 # 8-byte Folded Reload
+ movq -72(%rsp), %rdi # 8-byte Reload
+ adcq -96(%rsp), %rdi # 8-byte Folded Reload
+ movq -48(%rsp), %rsi # 8-byte Reload
+ adcq $0, %rsi
+ addq %rax, %rdx
+ adcq %r8, %rcx
+ adcq %r11, %r10
+ adcq %r14, %r9
+ adcq %rbp, %r12
+ adcq -64(%rsp), %r13 # 8-byte Folded Reload
+ adcq -56(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, -72(%rsp) # 8-byte Spill
+ adcq -80(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, -48(%rsp) # 8-byte Spill
+ adcq $0, -40(%rsp) # 8-byte Folded Spill
+ adcq $0, -32(%rsp) # 8-byte Folded Spill
+ adcq $0, -8(%rsp) # 8-byte Folded Spill
+ adcq $0, 48(%rsp) # 8-byte Folded Spill
+ adcq $0, 40(%rsp) # 8-byte Folded Spill
+ movq %rcx, %rdx
+ imulq -24(%rsp), %rdx # 8-byte Folded Reload
+ mulxq 16(%rsp), %rsi, %rax # 8-byte Folded Reload
+ movq %rsi, -80(%rsp) # 8-byte Spill
+ movq %rax, -56(%rsp) # 8-byte Spill
+ mulxq %r15, %rsi, %rax
+ movq %rsi, -96(%rsp) # 8-byte Spill
+ movq %rax, -64(%rsp) # 8-byte Spill
+ mulxq 24(%rsp), %r8, %r15 # 8-byte Folded Reload
+ mulxq 8(%rsp), %r14, %rbp # 8-byte Folded Reload
+ mulxq 56(%rsp), %rdi, %rbx # 8-byte Folded Reload
+ mulxq -16(%rsp), %rsi, %r11 # 8-byte Folded Reload
+ mulxq 32(%rsp), %rdx, %rax # 8-byte Folded Reload
+ addq %rsi, %rax
+ adcq %rdi, %r11
+ adcq %r14, %rbx
+ adcq %r8, %rbp
+ adcq -96(%rsp), %r15 # 8-byte Folded Reload
+ movq -64(%rsp), %rdi # 8-byte Reload
+ adcq -80(%rsp), %rdi # 8-byte Folded Reload
+ movq -56(%rsp), %rsi # 8-byte Reload
+ adcq $0, %rsi
+ addq %rcx, %rdx
+ adcq %r10, %rax
+ adcq %r9, %r11
+ adcq %r12, %rbx
+ adcq %r13, %rbp
+ adcq -72(%rsp), %r15 # 8-byte Folded Reload
+ adcq -48(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, -64(%rsp) # 8-byte Spill
+ adcq -40(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, -56(%rsp) # 8-byte Spill
+ adcq $0, -32(%rsp) # 8-byte Folded Spill
+ adcq $0, -8(%rsp) # 8-byte Folded Spill
+ adcq $0, 48(%rsp) # 8-byte Folded Spill
+ adcq $0, 40(%rsp) # 8-byte Folded Spill
+ movq %rax, %rdx
+ imulq -24(%rsp), %rdx # 8-byte Folded Reload
+ movq 16(%rsp), %r10 # 8-byte Reload
+ mulxq %r10, %rsi, %rcx
+ movq %rsi, -72(%rsp) # 8-byte Spill
+ movq %rcx, -40(%rsp) # 8-byte Spill
+ mulxq (%rsp), %rsi, %rcx # 8-byte Folded Reload
+ movq %rsi, -80(%rsp) # 8-byte Spill
+ movq %rcx, -48(%rsp) # 8-byte Spill
+ mulxq 24(%rsp), %rsi, %rcx # 8-byte Folded Reload
+ movq %rsi, -96(%rsp) # 8-byte Spill
+ mulxq 8(%rsp), %r12, %r13 # 8-byte Folded Reload
+ mulxq 56(%rsp), %r8, %r14 # 8-byte Folded Reload
+ mulxq -16(%rsp), %rsi, %r9 # 8-byte Folded Reload
+ mulxq 32(%rsp), %rdx, %rdi # 8-byte Folded Reload
+ addq %rsi, %rdi
+ adcq %r8, %r9
+ adcq %r12, %r14
+ adcq -96(%rsp), %r13 # 8-byte Folded Reload
+ adcq -80(%rsp), %rcx # 8-byte Folded Reload
+ movq -48(%rsp), %r8 # 8-byte Reload
+ adcq -72(%rsp), %r8 # 8-byte Folded Reload
+ movq -40(%rsp), %rsi # 8-byte Reload
+ adcq $0, %rsi
+ addq %rax, %rdx
+ adcq %r11, %rdi
+ adcq %rbx, %r9
+ adcq %rbp, %r14
+ adcq %r15, %r13
+ adcq -64(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -64(%rsp) # 8-byte Spill
+ adcq -56(%rsp), %r8 # 8-byte Folded Reload
+ movq %r8, -48(%rsp) # 8-byte Spill
+ adcq -32(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, -40(%rsp) # 8-byte Spill
+ adcq $0, -8(%rsp) # 8-byte Folded Spill
+ adcq $0, 48(%rsp) # 8-byte Folded Spill
+ adcq $0, 40(%rsp) # 8-byte Folded Spill
+ movq %rdi, %rdx
+ imulq -24(%rsp), %rdx # 8-byte Folded Reload
+ mulxq %r10, %rcx, %rax
+ movq %rcx, -72(%rsp) # 8-byte Spill
+ movq %rax, -32(%rsp) # 8-byte Spill
+ mulxq (%rsp), %rcx, %rax # 8-byte Folded Reload
+ movq %rcx, -80(%rsp) # 8-byte Spill
+ movq %rax, -56(%rsp) # 8-byte Spill
+ mulxq 24(%rsp), %rax, %rcx # 8-byte Folded Reload
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq 8(%rsp), %r12 # 8-byte Reload
+ mulxq %r12, %rax, %r15
+ movq %rax, -104(%rsp) # 8-byte Spill
+ mulxq 56(%rsp), %rsi, %r11 # 8-byte Folded Reload
+ movq -16(%rsp), %r10 # 8-byte Reload
+ mulxq %r10, %rax, %rbp
+ movq 32(%rsp), %rbx # 8-byte Reload
+ mulxq %rbx, %rdx, %r8
+ addq %rax, %r8
+ adcq %rsi, %rbp
+ adcq -104(%rsp), %r11 # 8-byte Folded Reload
+ adcq -96(%rsp), %r15 # 8-byte Folded Reload
+ adcq -80(%rsp), %rcx # 8-byte Folded Reload
+ movq -56(%rsp), %rsi # 8-byte Reload
+ adcq -72(%rsp), %rsi # 8-byte Folded Reload
+ movq -32(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ addq %rdi, %rdx
+ adcq %r9, %r8
+ adcq %r14, %rbp
+ adcq %r13, %r11
+ adcq -64(%rsp), %r15 # 8-byte Folded Reload
+ adcq -48(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -48(%rsp) # 8-byte Spill
+ adcq -40(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, -56(%rsp) # 8-byte Spill
+ adcq -8(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -32(%rsp) # 8-byte Spill
+ adcq $0, 48(%rsp) # 8-byte Folded Spill
+ adcq $0, 40(%rsp) # 8-byte Folded Spill
+ movq -24(%rsp), %rdx # 8-byte Reload
+ imulq %r8, %rdx
+ mulxq %r12, %rax, %r13
+ mulxq %r10, %rcx, %rdi
+ mulxq %rbx, %r12, %r14
+ addq %rcx, %r14
+ mulxq 56(%rsp), %rcx, %r10 # 8-byte Folded Reload
+ adcq %rdi, %rcx
+ adcq %rax, %r10
+ mulxq 24(%rsp), %rax, %r9 # 8-byte Folded Reload
+ adcq %r13, %rax
+ mulxq (%rsp), %rdi, %r13 # 8-byte Folded Reload
+ adcq %r9, %rdi
+ mulxq 16(%rsp), %rdx, %rsi # 8-byte Folded Reload
+ adcq %r13, %rdx
+ adcq $0, %rsi
+ addq %r8, %r12
+ adcq %rbp, %r14
+ adcq %r11, %rcx
+ adcq %r15, %r10
+ adcq -48(%rsp), %rax # 8-byte Folded Reload
+ adcq -56(%rsp), %rdi # 8-byte Folded Reload
+ adcq -32(%rsp), %rdx # 8-byte Folded Reload
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq 40(%rsp), %rbx # 8-byte Reload
+ adcq $0, %rbx
+ movq %r14, %rbp
+ subq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rcx, %r13
+ sbbq -16(%rsp), %r13 # 8-byte Folded Reload
+ movq %r10, %r8
+ sbbq 56(%rsp), %r8 # 8-byte Folded Reload
+ movq %rax, %r9
+ sbbq 8(%rsp), %r9 # 8-byte Folded Reload
+ movq %rdi, %r11
+ sbbq 24(%rsp), %r11 # 8-byte Folded Reload
+ movq %rdx, %r15
+ sbbq (%rsp), %r15 # 8-byte Folded Reload
+ movq %rsi, %r12
+ sbbq 16(%rsp), %r12 # 8-byte Folded Reload
+ sbbq $0, %rbx
+ andl $1, %ebx
+ cmovneq %rsi, %r12
+ testb %bl, %bl
+ cmovneq %r14, %rbp
+ movq -88(%rsp), %rsi # 8-byte Reload
+ movq %rbp, (%rsi)
+ cmovneq %rcx, %r13
+ movq %r13, 8(%rsi)
+ cmovneq %r10, %r8
+ movq %r8, 16(%rsi)
+ cmovneq %rax, %r9
+ movq %r9, 24(%rsi)
+ cmovneq %rdi, %r11
+ movq %r11, 32(%rsi)
+ cmovneq %rdx, %r15
+ movq %r15, 40(%rsi)
+ movq %r12, 48(%rsi)
+ addq $64, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end100:
+ .size mcl_fp_montRed7Lbmi2, .Lfunc_end100-mcl_fp_montRed7Lbmi2
+
+ .globl mcl_fp_addPre7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre7Lbmi2,@function
+mcl_fp_addPre7Lbmi2: # @mcl_fp_addPre7Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 48(%rdx), %r8
+ movq 48(%rsi), %r14
+ movq 40(%rdx), %r9
+ movq 40(%rsi), %r15
+ movq 32(%rdx), %r10
+ movq 24(%rdx), %r11
+ movq 16(%rdx), %r12
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ movq 24(%rsi), %rax
+ movq 32(%rsi), %rbx
+ adcq 16(%rsi), %r12
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r12, 16(%rdi)
+ adcq %r11, %rax
+ movq %rax, 24(%rdi)
+ adcq %r10, %rbx
+ movq %rbx, 32(%rdi)
+ adcq %r9, %r15
+ movq %r15, 40(%rdi)
+ adcq %r8, %r14
+ movq %r14, 48(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end101:
+ .size mcl_fp_addPre7Lbmi2, .Lfunc_end101-mcl_fp_addPre7Lbmi2
+
+ .globl mcl_fp_subPre7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre7Lbmi2,@function
+mcl_fp_subPre7Lbmi2: # @mcl_fp_subPre7Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 48(%rdx), %r8
+ movq 48(%rsi), %r10
+ movq 40(%rdx), %r9
+ movq 40(%rsi), %r15
+ movq 24(%rdx), %r11
+ movq 32(%rdx), %r14
+ movq (%rsi), %rbx
+ movq 8(%rsi), %r12
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %r12
+ movq 16(%rsi), %rcx
+ sbbq 16(%rdx), %rcx
+ movq 32(%rsi), %rdx
+ movq 24(%rsi), %rsi
+ movq %rbx, (%rdi)
+ movq %r12, 8(%rdi)
+ movq %rcx, 16(%rdi)
+ sbbq %r11, %rsi
+ movq %rsi, 24(%rdi)
+ sbbq %r14, %rdx
+ movq %rdx, 32(%rdi)
+ sbbq %r9, %r15
+ movq %r15, 40(%rdi)
+ sbbq %r8, %r10
+ movq %r10, 48(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end102:
+ .size mcl_fp_subPre7Lbmi2, .Lfunc_end102-mcl_fp_subPre7Lbmi2
+
+ .globl mcl_fp_shr1_7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_7Lbmi2,@function
+mcl_fp_shr1_7Lbmi2: # @mcl_fp_shr1_7Lbmi2
+# BB#0:
+ movq 48(%rsi), %r8
+ movq 40(%rsi), %r9
+ movq 32(%rsi), %r10
+ movq 24(%rsi), %rax
+ movq 16(%rsi), %rcx
+ movq (%rsi), %rdx
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rdx
+ movq %rdx, (%rdi)
+ shrdq $1, %rcx, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rax, %rcx
+ movq %rcx, 16(%rdi)
+ shrdq $1, %r10, %rax
+ movq %rax, 24(%rdi)
+ shrdq $1, %r9, %r10
+ movq %r10, 32(%rdi)
+ shrdq $1, %r8, %r9
+ movq %r9, 40(%rdi)
+ shrq %r8
+ movq %r8, 48(%rdi)
+ retq
+.Lfunc_end103:
+ .size mcl_fp_shr1_7Lbmi2, .Lfunc_end103-mcl_fp_shr1_7Lbmi2
+
+ .globl mcl_fp_add7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add7Lbmi2,@function
+mcl_fp_add7Lbmi2: # @mcl_fp_add7Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 48(%rdx), %r14
+ movq 48(%rsi), %r8
+ movq 40(%rdx), %r15
+ movq 40(%rsi), %r9
+ movq 32(%rdx), %r12
+ movq 24(%rdx), %r13
+ movq 16(%rdx), %r10
+ movq (%rdx), %r11
+ movq 8(%rdx), %rdx
+ addq (%rsi), %r11
+ adcq 8(%rsi), %rdx
+ movq 24(%rsi), %rax
+ movq 32(%rsi), %rbx
+ adcq 16(%rsi), %r10
+ movq %r11, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r10, 16(%rdi)
+ adcq %r13, %rax
+ movq %rax, 24(%rdi)
+ adcq %r12, %rbx
+ movq %rbx, 32(%rdi)
+ adcq %r15, %r9
+ movq %r9, 40(%rdi)
+ adcq %r14, %r8
+ movq %r8, 48(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %r11
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r10
+ sbbq 24(%rcx), %rax
+ sbbq 32(%rcx), %rbx
+ sbbq 40(%rcx), %r9
+ sbbq 48(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB104_2
+# BB#1: # %nocarry
+ movq %r11, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r10, 16(%rdi)
+ movq %rax, 24(%rdi)
+ movq %rbx, 32(%rdi)
+ movq %r9, 40(%rdi)
+ movq %r8, 48(%rdi)
+.LBB104_2: # %carry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end104:
+ .size mcl_fp_add7Lbmi2, .Lfunc_end104-mcl_fp_add7Lbmi2
+
+ .globl mcl_fp_addNF7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF7Lbmi2,@function
+mcl_fp_addNF7Lbmi2: # @mcl_fp_addNF7Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 48(%rdx), %r9
+ movq 40(%rdx), %rbp
+ movq 32(%rdx), %r10
+ movq 24(%rdx), %r11
+ movq 16(%rdx), %r14
+ movq (%rdx), %r12
+ movq 8(%rdx), %r15
+ addq (%rsi), %r12
+ adcq 8(%rsi), %r15
+ adcq 16(%rsi), %r14
+ adcq 24(%rsi), %r11
+ adcq 32(%rsi), %r10
+ adcq 40(%rsi), %rbp
+ movq %rbp, -8(%rsp) # 8-byte Spill
+ adcq 48(%rsi), %r9
+ movq %r12, %rsi
+ subq (%rcx), %rsi
+ movq %r15, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r14, %rax
+ sbbq 16(%rcx), %rax
+ movq %r11, %rbx
+ sbbq 24(%rcx), %rbx
+ movq %r10, %r13
+ sbbq 32(%rcx), %r13
+ sbbq 40(%rcx), %rbp
+ movq %r9, %r8
+ sbbq 48(%rcx), %r8
+ movq %r8, %rcx
+ sarq $63, %rcx
+ cmovsq %r12, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r15, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r14, %rax
+ movq %rax, 16(%rdi)
+ cmovsq %r11, %rbx
+ movq %rbx, 24(%rdi)
+ cmovsq %r10, %r13
+ movq %r13, 32(%rdi)
+ cmovsq -8(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 40(%rdi)
+ cmovsq %r9, %r8
+ movq %r8, 48(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end105:
+ .size mcl_fp_addNF7Lbmi2, .Lfunc_end105-mcl_fp_addNF7Lbmi2
+
+ .globl mcl_fp_sub7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub7Lbmi2,@function
+mcl_fp_sub7Lbmi2: # @mcl_fp_sub7Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 48(%rdx), %r14
+ movq 48(%rsi), %r8
+ movq 40(%rdx), %r15
+ movq 40(%rsi), %r9
+ movq 32(%rdx), %r12
+ movq (%rsi), %rax
+ movq 8(%rsi), %r11
+ xorl %ebx, %ebx
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r11
+ movq 16(%rsi), %r13
+ sbbq 16(%rdx), %r13
+ movq 32(%rsi), %r10
+ movq 24(%rsi), %rsi
+ sbbq 24(%rdx), %rsi
+ movq %rax, (%rdi)
+ movq %r11, 8(%rdi)
+ movq %r13, 16(%rdi)
+ movq %rsi, 24(%rdi)
+ sbbq %r12, %r10
+ movq %r10, 32(%rdi)
+ sbbq %r15, %r9
+ movq %r9, 40(%rdi)
+ sbbq %r14, %r8
+ movq %r8, 48(%rdi)
+ sbbq $0, %rbx
+ testb $1, %bl
+ je .LBB106_2
+# BB#1: # %carry
+ movq 48(%rcx), %r14
+ movq 40(%rcx), %r15
+ movq 32(%rcx), %r12
+ movq 24(%rcx), %rbx
+ movq 8(%rcx), %rdx
+ movq 16(%rcx), %rbp
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %r11, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r13, %rbp
+ movq %rbp, 16(%rdi)
+ adcq %rsi, %rbx
+ movq %rbx, 24(%rdi)
+ adcq %r10, %r12
+ movq %r12, 32(%rdi)
+ adcq %r9, %r15
+ movq %r15, 40(%rdi)
+ adcq %r8, %r14
+ movq %r14, 48(%rdi)
+.LBB106_2: # %nocarry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end106:
+ .size mcl_fp_sub7Lbmi2, .Lfunc_end106-mcl_fp_sub7Lbmi2
+
+ .globl mcl_fp_subNF7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF7Lbmi2,@function
+mcl_fp_subNF7Lbmi2: # @mcl_fp_subNF7Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq 48(%rsi), %r12
+ movq 40(%rsi), %rax
+ movq 32(%rsi), %r9
+ movq 24(%rsi), %r10
+ movq 16(%rsi), %r11
+ movq (%rsi), %r14
+ movq 8(%rsi), %r15
+ subq (%rdx), %r14
+ sbbq 8(%rdx), %r15
+ sbbq 16(%rdx), %r11
+ sbbq 24(%rdx), %r10
+ sbbq 32(%rdx), %r9
+ sbbq 40(%rdx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ sbbq 48(%rdx), %r12
+ movq %r12, %rax
+ sarq $63, %rax
+ movq %rax, %rsi
+ shldq $1, %r12, %rsi
+ andq (%r8), %rsi
+ movq 48(%r8), %r13
+ andq %rax, %r13
+ movq 40(%r8), %rbx
+ andq %rax, %rbx
+ movq 32(%r8), %rdx
+ andq %rax, %rdx
+ movq 24(%r8), %rbp
+ andq %rax, %rbp
+ movq 16(%r8), %rcx
+ andq %rax, %rcx
+ andq 8(%r8), %rax
+ addq %r14, %rsi
+ adcq %r15, %rax
+ movq %rsi, (%rdi)
+ movq %rax, 8(%rdi)
+ adcq %r11, %rcx
+ movq %rcx, 16(%rdi)
+ adcq %r10, %rbp
+ movq %rbp, 24(%rdi)
+ adcq %r9, %rdx
+ movq %rdx, 32(%rdi)
+ adcq -8(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, 40(%rdi)
+ adcq %r12, %r13
+ movq %r13, 48(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end107:
+ .size mcl_fp_subNF7Lbmi2, .Lfunc_end107-mcl_fp_subNF7Lbmi2
+
+ .globl mcl_fpDbl_add7Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add7Lbmi2,@function
+mcl_fpDbl_add7Lbmi2: # @mcl_fpDbl_add7Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq 104(%rdx), %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq 96(%rdx), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq 88(%rdx), %r11
+ movq 80(%rdx), %r14
+ movq 24(%rsi), %r15
+ movq 32(%rsi), %r12
+ movq 16(%rdx), %r9
+ movq (%rdx), %rax
+ movq 8(%rdx), %rbx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rbx
+ adcq 16(%rsi), %r9
+ adcq 24(%rdx), %r15
+ adcq 32(%rdx), %r12
+ movq 72(%rdx), %r13
+ movq 64(%rdx), %rbp
+ movq %rax, (%rdi)
+ movq 56(%rdx), %r10
+ movq %rbx, 8(%rdi)
+ movq 48(%rdx), %rcx
+ movq 40(%rdx), %rdx
+ movq %r9, 16(%rdi)
+ movq 104(%rsi), %r9
+ movq %r15, 24(%rdi)
+ movq 40(%rsi), %rbx
+ adcq %rdx, %rbx
+ movq 96(%rsi), %r15
+ movq %r12, 32(%rdi)
+ movq 48(%rsi), %rdx
+ adcq %rcx, %rdx
+ movq 88(%rsi), %rax
+ movq %rbx, 40(%rdi)
+ movq 56(%rsi), %rcx
+ adcq %r10, %rcx
+ movq 80(%rsi), %r12
+ movq %rdx, 48(%rdi)
+ movq 72(%rsi), %rdx
+ movq 64(%rsi), %rsi
+ adcq %rbp, %rsi
+ adcq %r13, %rdx
+ adcq %r14, %r12
+ adcq %r11, %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ adcq -24(%rsp), %r15 # 8-byte Folded Reload
+ movq %r15, -24(%rsp) # 8-byte Spill
+ adcq -16(%rsp), %r9 # 8-byte Folded Reload
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ movq %rcx, %rbx
+ subq (%r8), %rbx
+ movq %rsi, %r10
+ sbbq 8(%r8), %r10
+ movq %rdx, %r11
+ sbbq 16(%r8), %r11
+ movq %r12, %r14
+ sbbq 24(%r8), %r14
+ movq -8(%rsp), %r13 # 8-byte Reload
+ sbbq 32(%r8), %r13
+ sbbq 40(%r8), %r15
+ movq %r9, %rax
+ sbbq 48(%r8), %rax
+ sbbq $0, %rbp
+ andl $1, %ebp
+ cmovneq %rcx, %rbx
+ movq %rbx, 56(%rdi)
+ testb %bpl, %bpl
+ cmovneq %rsi, %r10
+ movq %r10, 64(%rdi)
+ cmovneq %rdx, %r11
+ movq %r11, 72(%rdi)
+ cmovneq %r12, %r14
+ movq %r14, 80(%rdi)
+ cmovneq -8(%rsp), %r13 # 8-byte Folded Reload
+ movq %r13, 88(%rdi)
+ cmovneq -24(%rsp), %r15 # 8-byte Folded Reload
+ movq %r15, 96(%rdi)
+ cmovneq %r9, %rax
+ movq %rax, 104(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end108:
+ .size mcl_fpDbl_add7Lbmi2, .Lfunc_end108-mcl_fpDbl_add7Lbmi2
+
+ .globl mcl_fpDbl_sub7Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub7Lbmi2,@function
+mcl_fpDbl_sub7Lbmi2: # @mcl_fpDbl_sub7Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq 104(%rdx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq 96(%rdx), %r10
+ movq 88(%rdx), %r14
+ movq 16(%rsi), %rax
+ movq (%rsi), %r15
+ movq 8(%rsi), %r11
+ xorl %ecx, %ecx
+ subq (%rdx), %r15
+ sbbq 8(%rdx), %r11
+ sbbq 16(%rdx), %rax
+ movq 24(%rsi), %rbx
+ sbbq 24(%rdx), %rbx
+ movq 32(%rsi), %r12
+ sbbq 32(%rdx), %r12
+ movq 80(%rdx), %r13
+ movq 72(%rdx), %rbp
+ movq %r15, (%rdi)
+ movq 64(%rdx), %r9
+ movq %r11, 8(%rdi)
+ movq 56(%rdx), %r15
+ movq %rax, 16(%rdi)
+ movq 48(%rdx), %r11
+ movq 40(%rdx), %rdx
+ movq %rbx, 24(%rdi)
+ movq 40(%rsi), %rbx
+ sbbq %rdx, %rbx
+ movq 104(%rsi), %rax
+ movq %r12, 32(%rdi)
+ movq 48(%rsi), %r12
+ sbbq %r11, %r12
+ movq 96(%rsi), %r11
+ movq %rbx, 40(%rdi)
+ movq 56(%rsi), %rdx
+ sbbq %r15, %rdx
+ movq 88(%rsi), %r15
+ movq %r12, 48(%rdi)
+ movq 64(%rsi), %rbx
+ sbbq %r9, %rbx
+ movq 80(%rsi), %r12
+ movq 72(%rsi), %r9
+ sbbq %rbp, %r9
+ sbbq %r13, %r12
+ sbbq %r14, %r15
+ sbbq %r10, %r11
+ sbbq -8(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movl $0, %ebp
+ sbbq $0, %rbp
+ andl $1, %ebp
+ movq (%r8), %r10
+ cmoveq %rcx, %r10
+ testb %bpl, %bpl
+ movq 16(%r8), %rbp
+ cmoveq %rcx, %rbp
+ movq 8(%r8), %rsi
+ cmoveq %rcx, %rsi
+ movq 48(%r8), %r14
+ cmoveq %rcx, %r14
+ movq 40(%r8), %r13
+ cmoveq %rcx, %r13
+ movq 32(%r8), %rax
+ cmoveq %rcx, %rax
+ cmovneq 24(%r8), %rcx
+ addq %rdx, %r10
+ adcq %rbx, %rsi
+ movq %r10, 56(%rdi)
+ movq %rsi, 64(%rdi)
+ adcq %r9, %rbp
+ movq %rbp, 72(%rdi)
+ adcq %r12, %rcx
+ movq %rcx, 80(%rdi)
+ adcq %r15, %rax
+ movq %rax, 88(%rdi)
+ adcq %r11, %r13
+ movq %r13, 96(%rdi)
+ adcq -8(%rsp), %r14 # 8-byte Folded Reload
+ movq %r14, 104(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end109:
+ .size mcl_fpDbl_sub7Lbmi2, .Lfunc_end109-mcl_fpDbl_sub7Lbmi2
+
+ .align 16, 0x90
+ .type .LmulPv512x64,@function
+.LmulPv512x64: # @mulPv512x64
+# BB#0:
+ mulxq (%rsi), %rcx, %rax
+ movq %rcx, (%rdi)
+ mulxq 8(%rsi), %rcx, %r8
+ addq %rax, %rcx
+ movq %rcx, 8(%rdi)
+ mulxq 16(%rsi), %rcx, %r9
+ adcq %r8, %rcx
+ movq %rcx, 16(%rdi)
+ mulxq 24(%rsi), %rax, %rcx
+ adcq %r9, %rax
+ movq %rax, 24(%rdi)
+ mulxq 32(%rsi), %rax, %r8
+ adcq %rcx, %rax
+ movq %rax, 32(%rdi)
+ mulxq 40(%rsi), %rcx, %r9
+ adcq %r8, %rcx
+ movq %rcx, 40(%rdi)
+ mulxq 48(%rsi), %rax, %rcx
+ adcq %r9, %rax
+ movq %rax, 48(%rdi)
+ mulxq 56(%rsi), %rax, %rdx
+ adcq %rcx, %rax
+ movq %rax, 56(%rdi)
+ adcq $0, %rdx
+ movq %rdx, 64(%rdi)
+ movq %rdi, %rax
+ retq
+.Lfunc_end110:
+ .size .LmulPv512x64, .Lfunc_end110-.LmulPv512x64
+
+ .globl mcl_fp_mulUnitPre8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre8Lbmi2,@function
+mcl_fp_mulUnitPre8Lbmi2: # @mcl_fp_mulUnitPre8Lbmi2
+# BB#0:
+ pushq %rbx
+ subq $80, %rsp
+ movq %rdi, %rbx
+ leaq 8(%rsp), %rdi
+ callq .LmulPv512x64
+ movq 72(%rsp), %r8
+ movq 64(%rsp), %r9
+ movq 56(%rsp), %r10
+ movq 48(%rsp), %r11
+ movq 40(%rsp), %rdi
+ movq 32(%rsp), %rax
+ movq 24(%rsp), %rcx
+ movq 8(%rsp), %rdx
+ movq 16(%rsp), %rsi
+ movq %rdx, (%rbx)
+ movq %rsi, 8(%rbx)
+ movq %rcx, 16(%rbx)
+ movq %rax, 24(%rbx)
+ movq %rdi, 32(%rbx)
+ movq %r11, 40(%rbx)
+ movq %r10, 48(%rbx)
+ movq %r9, 56(%rbx)
+ movq %r8, 64(%rbx)
+ addq $80, %rsp
+ popq %rbx
+ retq
+.Lfunc_end111:
+ .size mcl_fp_mulUnitPre8Lbmi2, .Lfunc_end111-mcl_fp_mulUnitPre8Lbmi2
+
+ .globl mcl_fpDbl_mulPre8Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre8Lbmi2,@function
+mcl_fpDbl_mulPre8Lbmi2: # @mcl_fpDbl_mulPre8Lbmi2
+# BB#0:
+ pushq %rbp
+ movq %rsp, %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $200, %rsp
+ movq %rdx, %rbx
+ movq %rsi, %r15
+ movq %rdi, %r14
+ callq mcl_fpDbl_mulPre4Lbmi2@PLT
+ leaq 64(%r14), %rdi
+ leaq 32(%r15), %rsi
+ leaq 32(%rbx), %rdx
+ callq mcl_fpDbl_mulPre4Lbmi2@PLT
+ movq 56(%rbx), %r10
+ movq 48(%rbx), %rcx
+ movq (%rbx), %rdx
+ movq 8(%rbx), %rsi
+ addq 32(%rbx), %rdx
+ adcq 40(%rbx), %rsi
+ adcq 16(%rbx), %rcx
+ adcq 24(%rbx), %r10
+ pushfq
+ popq %r8
+ xorl %r9d, %r9d
+ movq 56(%r15), %rdi
+ movq 48(%r15), %r13
+ movq (%r15), %r12
+ movq 8(%r15), %rbx
+ addq 32(%r15), %r12
+ adcq 40(%r15), %rbx
+ adcq 16(%r15), %r13
+ adcq 24(%r15), %rdi
+ movl $0, %eax
+ cmovbq %r10, %rax
+ movq %rax, -176(%rbp) # 8-byte Spill
+ movl $0, %eax
+ cmovbq %rcx, %rax
+ movq %rax, -184(%rbp) # 8-byte Spill
+ movl $0, %eax
+ cmovbq %rsi, %rax
+ movq %rax, -192(%rbp) # 8-byte Spill
+ movl $0, %eax
+ cmovbq %rdx, %rax
+ movq %rax, -200(%rbp) # 8-byte Spill
+ sbbq %r15, %r15
+ movq %r12, -136(%rbp)
+ movq %rbx, -128(%rbp)
+ movq %r13, -120(%rbp)
+ movq %rdi, -112(%rbp)
+ movq %rdx, -168(%rbp)
+ movq %rsi, -160(%rbp)
+ movq %rcx, -152(%rbp)
+ movq %r10, -144(%rbp)
+ pushq %r8
+ popfq
+ cmovaeq %r9, %rdi
+ movq %rdi, -216(%rbp) # 8-byte Spill
+ cmovaeq %r9, %r13
+ cmovaeq %r9, %rbx
+ cmovaeq %r9, %r12
+ sbbq %rax, %rax
+ movq %rax, -208(%rbp) # 8-byte Spill
+ leaq -104(%rbp), %rdi
+ leaq -136(%rbp), %rsi
+ leaq -168(%rbp), %rdx
+ callq mcl_fpDbl_mulPre4Lbmi2@PLT
+ addq -200(%rbp), %r12 # 8-byte Folded Reload
+ adcq -192(%rbp), %rbx # 8-byte Folded Reload
+ adcq -184(%rbp), %r13 # 8-byte Folded Reload
+ movq -216(%rbp), %r10 # 8-byte Reload
+ adcq -176(%rbp), %r10 # 8-byte Folded Reload
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq -208(%rbp), %rdx # 8-byte Reload
+ andl %edx, %r15d
+ andl $1, %r15d
+ addq -72(%rbp), %r12
+ adcq -64(%rbp), %rbx
+ adcq -56(%rbp), %r13
+ adcq -48(%rbp), %r10
+ adcq %rax, %r15
+ movq -80(%rbp), %rax
+ movq -88(%rbp), %rcx
+ movq -104(%rbp), %rsi
+ movq -96(%rbp), %rdx
+ subq (%r14), %rsi
+ sbbq 8(%r14), %rdx
+ sbbq 16(%r14), %rcx
+ sbbq 24(%r14), %rax
+ movq 32(%r14), %rdi
+ movq %rdi, -184(%rbp) # 8-byte Spill
+ movq 40(%r14), %r8
+ movq %r8, -176(%rbp) # 8-byte Spill
+ sbbq %rdi, %r12
+ sbbq %r8, %rbx
+ movq 48(%r14), %rdi
+ movq %rdi, -192(%rbp) # 8-byte Spill
+ sbbq %rdi, %r13
+ movq 56(%r14), %rdi
+ movq %rdi, -200(%rbp) # 8-byte Spill
+ sbbq %rdi, %r10
+ sbbq $0, %r15
+ movq 64(%r14), %r11
+ subq %r11, %rsi
+ movq 72(%r14), %rdi
+ movq %rdi, -208(%rbp) # 8-byte Spill
+ sbbq %rdi, %rdx
+ movq 80(%r14), %rdi
+ movq %rdi, -216(%rbp) # 8-byte Spill
+ sbbq %rdi, %rcx
+ movq 88(%r14), %rdi
+ movq %rdi, -224(%rbp) # 8-byte Spill
+ sbbq %rdi, %rax
+ movq 96(%r14), %rdi
+ movq %rdi, -232(%rbp) # 8-byte Spill
+ sbbq %rdi, %r12
+ movq 104(%r14), %rdi
+ sbbq %rdi, %rbx
+ movq 112(%r14), %r8
+ sbbq %r8, %r13
+ movq 120(%r14), %r9
+ sbbq %r9, %r10
+ sbbq $0, %r15
+ addq -184(%rbp), %rsi # 8-byte Folded Reload
+ adcq -176(%rbp), %rdx # 8-byte Folded Reload
+ movq %rsi, 32(%r14)
+ adcq -192(%rbp), %rcx # 8-byte Folded Reload
+ movq %rdx, 40(%r14)
+ adcq -200(%rbp), %rax # 8-byte Folded Reload
+ movq %rcx, 48(%r14)
+ adcq %r11, %r12
+ movq %rax, 56(%r14)
+ movq %r12, 64(%r14)
+ adcq -208(%rbp), %rbx # 8-byte Folded Reload
+ movq %rbx, 72(%r14)
+ adcq -216(%rbp), %r13 # 8-byte Folded Reload
+ movq %r13, 80(%r14)
+ adcq -224(%rbp), %r10 # 8-byte Folded Reload
+ movq %r10, 88(%r14)
+ adcq -232(%rbp), %r15 # 8-byte Folded Reload
+ movq %r15, 96(%r14)
+ adcq $0, %rdi
+ movq %rdi, 104(%r14)
+ adcq $0, %r8
+ movq %r8, 112(%r14)
+ adcq $0, %r9
+ movq %r9, 120(%r14)
+ addq $200, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end112:
+ .size mcl_fpDbl_mulPre8Lbmi2, .Lfunc_end112-mcl_fpDbl_mulPre8Lbmi2
+
+ .globl mcl_fpDbl_sqrPre8Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre8Lbmi2,@function
+mcl_fpDbl_sqrPre8Lbmi2: # @mcl_fpDbl_sqrPre8Lbmi2
+# BB#0:
+ pushq %rbp
+ movq %rsp, %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $200, %rsp
+ movq %rsi, %r14
+ movq %rdi, %rbx
+ movq %r14, %rdx
+ callq mcl_fpDbl_mulPre4Lbmi2@PLT
+ leaq 64(%rbx), %rdi
+ leaq 32(%r14), %rsi
+ movq %rsi, %rdx
+ callq mcl_fpDbl_mulPre4Lbmi2@PLT
+ movq (%r14), %r12
+ movq 8(%r14), %r15
+ addq 32(%r14), %r12
+ adcq 40(%r14), %r15
+ pushfq
+ popq %rax
+ movq %r12, -136(%rbp)
+ movq %r12, -168(%rbp)
+ addq %r12, %r12
+ movq %r15, -128(%rbp)
+ movq %r15, -160(%rbp)
+ adcq %r15, %r15
+ pushfq
+ popq %rcx
+ movq 56(%r14), %r13
+ movq 48(%r14), %rdx
+ pushq %rax
+ popfq
+ adcq 16(%r14), %rdx
+ adcq 24(%r14), %r13
+ pushfq
+ popq %r8
+ pushfq
+ popq %rsi
+ pushfq
+ popq %rdi
+ sbbq %rax, %rax
+ movq %rax, -184(%rbp) # 8-byte Spill
+ xorl %eax, %eax
+ pushq %rdi
+ popfq
+ cmovaeq %rax, %r15
+ movq %r15, -176(%rbp) # 8-byte Spill
+ cmovaeq %rax, %r12
+ movq %rdx, -120(%rbp)
+ movq %rdx, -152(%rbp)
+ movq %rdx, %r15
+ pushq %rcx
+ popfq
+ adcq %r15, %r15
+ movq %r13, %r14
+ movq %r13, -112(%rbp)
+ movq %r13, -144(%rbp)
+ adcq %r13, %r13
+ pushq %rsi
+ popfq
+ cmovaeq %rax, %r13
+ cmovaeq %rax, %r15
+ shrq $63, %r14
+ pushq %r8
+ popfq
+ cmovaeq %rax, %r14
+ leaq -104(%rbp), %rdi
+ leaq -136(%rbp), %rsi
+ leaq -168(%rbp), %rdx
+ callq mcl_fpDbl_mulPre4Lbmi2@PLT
+ movq -184(%rbp), %rax # 8-byte Reload
+ andl $1, %eax
+ addq -72(%rbp), %r12
+ movq -176(%rbp), %r8 # 8-byte Reload
+ adcq -64(%rbp), %r8
+ adcq -56(%rbp), %r15
+ adcq -48(%rbp), %r13
+ adcq %r14, %rax
+ movq %rax, %rdi
+ movq -80(%rbp), %rax
+ movq -88(%rbp), %rcx
+ movq -104(%rbp), %rsi
+ movq -96(%rbp), %rdx
+ subq (%rbx), %rsi
+ sbbq 8(%rbx), %rdx
+ sbbq 16(%rbx), %rcx
+ sbbq 24(%rbx), %rax
+ movq 32(%rbx), %r10
+ movq %r10, -184(%rbp) # 8-byte Spill
+ movq 40(%rbx), %r9
+ movq %r9, -176(%rbp) # 8-byte Spill
+ sbbq %r10, %r12
+ sbbq %r9, %r8
+ movq %r8, %r10
+ movq 48(%rbx), %r8
+ movq %r8, -192(%rbp) # 8-byte Spill
+ sbbq %r8, %r15
+ movq 56(%rbx), %r8
+ movq %r8, -200(%rbp) # 8-byte Spill
+ sbbq %r8, %r13
+ sbbq $0, %rdi
+ movq 64(%rbx), %r11
+ subq %r11, %rsi
+ movq 72(%rbx), %r8
+ movq %r8, -208(%rbp) # 8-byte Spill
+ sbbq %r8, %rdx
+ movq 80(%rbx), %r8
+ movq %r8, -216(%rbp) # 8-byte Spill
+ sbbq %r8, %rcx
+ movq 88(%rbx), %r8
+ movq %r8, -224(%rbp) # 8-byte Spill
+ sbbq %r8, %rax
+ movq 96(%rbx), %r8
+ movq %r8, -232(%rbp) # 8-byte Spill
+ sbbq %r8, %r12
+ movq 104(%rbx), %r14
+ sbbq %r14, %r10
+ movq 112(%rbx), %r8
+ sbbq %r8, %r15
+ movq 120(%rbx), %r9
+ sbbq %r9, %r13
+ sbbq $0, %rdi
+ addq -184(%rbp), %rsi # 8-byte Folded Reload
+ adcq -176(%rbp), %rdx # 8-byte Folded Reload
+ movq %rsi, 32(%rbx)
+ adcq -192(%rbp), %rcx # 8-byte Folded Reload
+ movq %rdx, 40(%rbx)
+ adcq -200(%rbp), %rax # 8-byte Folded Reload
+ movq %rcx, 48(%rbx)
+ adcq %r11, %r12
+ movq %rax, 56(%rbx)
+ movq %r12, 64(%rbx)
+ adcq -208(%rbp), %r10 # 8-byte Folded Reload
+ movq %r10, 72(%rbx)
+ adcq -216(%rbp), %r15 # 8-byte Folded Reload
+ movq %r15, 80(%rbx)
+ adcq -224(%rbp), %r13 # 8-byte Folded Reload
+ movq %r13, 88(%rbx)
+ adcq -232(%rbp), %rdi # 8-byte Folded Reload
+ movq %rdi, 96(%rbx)
+ adcq $0, %r14
+ movq %r14, 104(%rbx)
+ adcq $0, %r8
+ movq %r8, 112(%rbx)
+ adcq $0, %r9
+ movq %r9, 120(%rbx)
+ addq $200, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end113:
+ .size mcl_fpDbl_sqrPre8Lbmi2, .Lfunc_end113-mcl_fpDbl_sqrPre8Lbmi2
+
+ .globl mcl_fp_mont8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont8Lbmi2,@function
+mcl_fp_mont8Lbmi2: # @mcl_fp_mont8Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $1256, %rsp # imm = 0x4E8
+ movq %rcx, %r13
+ movq %r13, 40(%rsp) # 8-byte Spill
+ movq %rdx, 16(%rsp) # 8-byte Spill
+ movq %rsi, 24(%rsp) # 8-byte Spill
+ movq %rdi, (%rsp) # 8-byte Spill
+ movq -8(%r13), %rbx
+ movq %rbx, 32(%rsp) # 8-byte Spill
+ movq (%rdx), %rdx
+ leaq 1184(%rsp), %rdi
+ callq .LmulPv512x64
+ movq 1184(%rsp), %r15
+ movq 1192(%rsp), %r14
+ movq %r15, %rdx
+ imulq %rbx, %rdx
+ movq 1248(%rsp), %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ movq 1240(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq 1232(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ movq 1224(%rsp), %rax
+ movq %rax, 72(%rsp) # 8-byte Spill
+ movq 1216(%rsp), %r12
+ movq 1208(%rsp), %rbx
+ movq 1200(%rsp), %rbp
+ leaq 1112(%rsp), %rdi
+ movq %r13, %rsi
+ callq .LmulPv512x64
+ addq 1112(%rsp), %r15
+ adcq 1120(%rsp), %r14
+ adcq 1128(%rsp), %rbp
+ movq %rbp, 8(%rsp) # 8-byte Spill
+ adcq 1136(%rsp), %rbx
+ movq %rbx, 48(%rsp) # 8-byte Spill
+ adcq 1144(%rsp), %r12
+ movq %r12, 64(%rsp) # 8-byte Spill
+ movq 72(%rsp), %r13 # 8-byte Reload
+ adcq 1152(%rsp), %r13
+ movq 88(%rsp), %rbx # 8-byte Reload
+ adcq 1160(%rsp), %rbx
+ movq 80(%rsp), %rbp # 8-byte Reload
+ adcq 1168(%rsp), %rbp
+ movq 96(%rsp), %rax # 8-byte Reload
+ adcq 1176(%rsp), %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ sbbq %r15, %r15
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rdx
+ leaq 1040(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ andl $1, %r15d
+ addq 1040(%rsp), %r14
+ movq 8(%rsp), %rax # 8-byte Reload
+ adcq 1048(%rsp), %rax
+ movq %rax, 8(%rsp) # 8-byte Spill
+ movq 48(%rsp), %rax # 8-byte Reload
+ adcq 1056(%rsp), %rax
+ movq %rax, %r12
+ movq 64(%rsp), %rax # 8-byte Reload
+ adcq 1064(%rsp), %rax
+ movq %rax, 64(%rsp) # 8-byte Spill
+ adcq 1072(%rsp), %r13
+ movq %r13, 72(%rsp) # 8-byte Spill
+ adcq 1080(%rsp), %rbx
+ movq %rbx, 88(%rsp) # 8-byte Spill
+ adcq 1088(%rsp), %rbp
+ movq 96(%rsp), %rax # 8-byte Reload
+ adcq 1096(%rsp), %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ adcq 1104(%rsp), %r15
+ movq %r15, 56(%rsp) # 8-byte Spill
+ sbbq %r15, %r15
+ movq %r14, %rdx
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 968(%rsp), %rdi
+ movq 40(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ andl $1, %r15d
+ addq 968(%rsp), %r14
+ movq 8(%rsp), %r13 # 8-byte Reload
+ adcq 976(%rsp), %r13
+ adcq 984(%rsp), %r12
+ movq %r12, 48(%rsp) # 8-byte Spill
+ movq 64(%rsp), %r14 # 8-byte Reload
+ adcq 992(%rsp), %r14
+ movq 72(%rsp), %rbx # 8-byte Reload
+ adcq 1000(%rsp), %rbx
+ movq 88(%rsp), %rax # 8-byte Reload
+ adcq 1008(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ adcq 1016(%rsp), %rbp
+ movq %rbp, %r12
+ movq 96(%rsp), %rbp # 8-byte Reload
+ adcq 1024(%rsp), %rbp
+ movq 56(%rsp), %rax # 8-byte Reload
+ adcq 1032(%rsp), %rax
+ movq %rax, 56(%rsp) # 8-byte Spill
+ adcq $0, %r15
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rdx
+ leaq 896(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq %r13, %rcx
+ addq 896(%rsp), %rcx
+ movq 48(%rsp), %r13 # 8-byte Reload
+ adcq 904(%rsp), %r13
+ adcq 912(%rsp), %r14
+ adcq 920(%rsp), %rbx
+ movq %rbx, 72(%rsp) # 8-byte Spill
+ movq 88(%rsp), %rax # 8-byte Reload
+ adcq 928(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ adcq 936(%rsp), %r12
+ movq %r12, 80(%rsp) # 8-byte Spill
+ adcq 944(%rsp), %rbp
+ movq %rbp, 96(%rsp) # 8-byte Spill
+ movq 56(%rsp), %r12 # 8-byte Reload
+ adcq 952(%rsp), %r12
+ adcq 960(%rsp), %r15
+ sbbq %rbx, %rbx
+ movq %rcx, %rdx
+ movq %rcx, %rbp
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 824(%rsp), %rdi
+ movq 40(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ andl $1, %ebx
+ addq 824(%rsp), %rbp
+ adcq 832(%rsp), %r13
+ movq %r13, 48(%rsp) # 8-byte Spill
+ adcq 840(%rsp), %r14
+ movq %r14, 64(%rsp) # 8-byte Spill
+ movq 72(%rsp), %r13 # 8-byte Reload
+ adcq 848(%rsp), %r13
+ movq 88(%rsp), %rbp # 8-byte Reload
+ adcq 856(%rsp), %rbp
+ movq 80(%rsp), %r14 # 8-byte Reload
+ adcq 864(%rsp), %r14
+ movq 96(%rsp), %rax # 8-byte Reload
+ adcq 872(%rsp), %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ adcq 880(%rsp), %r12
+ adcq 888(%rsp), %r15
+ adcq $0, %rbx
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rdx
+ leaq 752(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 48(%rsp), %rax # 8-byte Reload
+ addq 752(%rsp), %rax
+ movq 64(%rsp), %rcx # 8-byte Reload
+ adcq 760(%rsp), %rcx
+ movq %rcx, 64(%rsp) # 8-byte Spill
+ adcq 768(%rsp), %r13
+ movq %r13, 72(%rsp) # 8-byte Spill
+ adcq 776(%rsp), %rbp
+ movq %rbp, 88(%rsp) # 8-byte Spill
+ adcq 784(%rsp), %r14
+ movq %r14, 80(%rsp) # 8-byte Spill
+ movq 96(%rsp), %rbp # 8-byte Reload
+ adcq 792(%rsp), %rbp
+ adcq 800(%rsp), %r12
+ adcq 808(%rsp), %r15
+ adcq 816(%rsp), %rbx
+ movq %rbx, 48(%rsp) # 8-byte Spill
+ sbbq %r13, %r13
+ movq %rax, %rdx
+ movq %rax, %rbx
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 680(%rsp), %rdi
+ movq 40(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq %r13, %rax
+ andl $1, %eax
+ addq 680(%rsp), %rbx
+ movq 64(%rsp), %r14 # 8-byte Reload
+ adcq 688(%rsp), %r14
+ movq 72(%rsp), %rcx # 8-byte Reload
+ adcq 696(%rsp), %rcx
+ movq %rcx, 72(%rsp) # 8-byte Spill
+ movq 88(%rsp), %r13 # 8-byte Reload
+ adcq 704(%rsp), %r13
+ movq 80(%rsp), %rbx # 8-byte Reload
+ adcq 712(%rsp), %rbx
+ adcq 720(%rsp), %rbp
+ movq %rbp, 96(%rsp) # 8-byte Spill
+ movq %r12, %rbp
+ adcq 728(%rsp), %rbp
+ adcq 736(%rsp), %r15
+ movq 48(%rsp), %r12 # 8-byte Reload
+ adcq 744(%rsp), %r12
+ adcq $0, %rax
+ movq %rax, 64(%rsp) # 8-byte Spill
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 32(%rax), %rdx
+ leaq 608(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq %r14, %rax
+ addq 608(%rsp), %rax
+ movq 72(%rsp), %r14 # 8-byte Reload
+ adcq 616(%rsp), %r14
+ adcq 624(%rsp), %r13
+ movq %r13, 88(%rsp) # 8-byte Spill
+ adcq 632(%rsp), %rbx
+ movq %rbx, %r13
+ movq 96(%rsp), %rcx # 8-byte Reload
+ adcq 640(%rsp), %rcx
+ movq %rcx, 96(%rsp) # 8-byte Spill
+ adcq 648(%rsp), %rbp
+ movq %rbp, 56(%rsp) # 8-byte Spill
+ adcq 656(%rsp), %r15
+ adcq 664(%rsp), %r12
+ movq %r12, 48(%rsp) # 8-byte Spill
+ movq 64(%rsp), %rcx # 8-byte Reload
+ adcq 672(%rsp), %rcx
+ movq %rcx, 64(%rsp) # 8-byte Spill
+ sbbq %rbp, %rbp
+ movq %rax, %rdx
+ movq %rax, %rbx
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 536(%rsp), %rdi
+ movq 40(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq %rbp, %rax
+ andl $1, %eax
+ addq 536(%rsp), %rbx
+ adcq 544(%rsp), %r14
+ movq %r14, 72(%rsp) # 8-byte Spill
+ movq 88(%rsp), %rbx # 8-byte Reload
+ adcq 552(%rsp), %rbx
+ adcq 560(%rsp), %r13
+ movq 96(%rsp), %rbp # 8-byte Reload
+ adcq 568(%rsp), %rbp
+ movq 56(%rsp), %r12 # 8-byte Reload
+ adcq 576(%rsp), %r12
+ adcq 584(%rsp), %r15
+ movq 48(%rsp), %rcx # 8-byte Reload
+ adcq 592(%rsp), %rcx
+ movq %rcx, 48(%rsp) # 8-byte Spill
+ movq 64(%rsp), %r14 # 8-byte Reload
+ adcq 600(%rsp), %r14
+ adcq $0, %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 40(%rax), %rdx
+ leaq 464(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 72(%rsp), %rax # 8-byte Reload
+ addq 464(%rsp), %rax
+ adcq 472(%rsp), %rbx
+ adcq 480(%rsp), %r13
+ movq %r13, 80(%rsp) # 8-byte Spill
+ adcq 488(%rsp), %rbp
+ movq %rbp, 96(%rsp) # 8-byte Spill
+ adcq 496(%rsp), %r12
+ adcq 504(%rsp), %r15
+ movq %r15, 72(%rsp) # 8-byte Spill
+ movq 48(%rsp), %r15 # 8-byte Reload
+ adcq 512(%rsp), %r15
+ adcq 520(%rsp), %r14
+ movq %r14, 64(%rsp) # 8-byte Spill
+ movq 88(%rsp), %r14 # 8-byte Reload
+ adcq 528(%rsp), %r14
+ sbbq %r13, %r13
+ movq %rax, %rdx
+ movq %rax, %rbp
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 392(%rsp), %rdi
+ movq 40(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq %r13, %rax
+ andl $1, %eax
+ addq 392(%rsp), %rbp
+ adcq 400(%rsp), %rbx
+ movq %rbx, 88(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rbp # 8-byte Reload
+ adcq 408(%rsp), %rbp
+ movq 96(%rsp), %rbx # 8-byte Reload
+ adcq 416(%rsp), %rbx
+ adcq 424(%rsp), %r12
+ movq 72(%rsp), %r13 # 8-byte Reload
+ adcq 432(%rsp), %r13
+ adcq 440(%rsp), %r15
+ movq %r15, 48(%rsp) # 8-byte Spill
+ movq 64(%rsp), %r15 # 8-byte Reload
+ adcq 448(%rsp), %r15
+ adcq 456(%rsp), %r14
+ adcq $0, %rax
+ movq %rax, 72(%rsp) # 8-byte Spill
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 48(%rax), %rdx
+ leaq 320(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 88(%rsp), %rax # 8-byte Reload
+ addq 320(%rsp), %rax
+ adcq 328(%rsp), %rbp
+ movq %rbp, 80(%rsp) # 8-byte Spill
+ adcq 336(%rsp), %rbx
+ movq %rbx, 96(%rsp) # 8-byte Spill
+ movq %r12, %rbp
+ adcq 344(%rsp), %rbp
+ adcq 352(%rsp), %r13
+ movq 48(%rsp), %r12 # 8-byte Reload
+ adcq 360(%rsp), %r12
+ adcq 368(%rsp), %r15
+ movq %r15, 64(%rsp) # 8-byte Spill
+ adcq 376(%rsp), %r14
+ movq %r14, 88(%rsp) # 8-byte Spill
+ movq 72(%rsp), %rcx # 8-byte Reload
+ adcq 384(%rsp), %rcx
+ movq %rcx, 72(%rsp) # 8-byte Spill
+ sbbq %r15, %r15
+ movq %rax, %rdx
+ movq %rax, %rbx
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 248(%rsp), %rdi
+ movq 40(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ andl $1, %r15d
+ addq 248(%rsp), %rbx
+ movq 80(%rsp), %rax # 8-byte Reload
+ adcq 256(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq 96(%rsp), %r14 # 8-byte Reload
+ adcq 264(%rsp), %r14
+ adcq 272(%rsp), %rbp
+ movq %rbp, 56(%rsp) # 8-byte Spill
+ movq %r13, %rbx
+ adcq 280(%rsp), %rbx
+ movq %r12, %rbp
+ adcq 288(%rsp), %rbp
+ movq 64(%rsp), %r13 # 8-byte Reload
+ adcq 296(%rsp), %r13
+ movq 88(%rsp), %rax # 8-byte Reload
+ adcq 304(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ movq 72(%rsp), %r12 # 8-byte Reload
+ adcq 312(%rsp), %r12
+ adcq $0, %r15
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 56(%rax), %rdx
+ leaq 176(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 80(%rsp), %rax # 8-byte Reload
+ addq 176(%rsp), %rax
+ adcq 184(%rsp), %r14
+ movq %r14, 96(%rsp) # 8-byte Spill
+ movq 56(%rsp), %rcx # 8-byte Reload
+ adcq 192(%rsp), %rcx
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ adcq 200(%rsp), %rbx
+ movq %rbx, 72(%rsp) # 8-byte Spill
+ adcq 208(%rsp), %rbp
+ adcq 216(%rsp), %r13
+ movq %r13, 64(%rsp) # 8-byte Spill
+ movq 88(%rsp), %r14 # 8-byte Reload
+ adcq 224(%rsp), %r14
+ adcq 232(%rsp), %r12
+ adcq 240(%rsp), %r15
+ sbbq %rbx, %rbx
+ movq 32(%rsp), %rdx # 8-byte Reload
+ imulq %rax, %rdx
+ movq %rax, %r13
+ leaq 104(%rsp), %rdi
+ movq 40(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ andl $1, %ebx
+ addq 104(%rsp), %r13
+ movq 96(%rsp), %rcx # 8-byte Reload
+ adcq 112(%rsp), %rcx
+ movq 56(%rsp), %rdx # 8-byte Reload
+ adcq 120(%rsp), %rdx
+ movq 72(%rsp), %rsi # 8-byte Reload
+ adcq 128(%rsp), %rsi
+ movq %rbp, %rdi
+ adcq 136(%rsp), %rdi
+ movq %rdi, 48(%rsp) # 8-byte Spill
+ movq 64(%rsp), %r8 # 8-byte Reload
+ adcq 144(%rsp), %r8
+ movq %r8, 64(%rsp) # 8-byte Spill
+ movq %r14, %r9
+ adcq 152(%rsp), %r9
+ movq %r9, 88(%rsp) # 8-byte Spill
+ adcq 160(%rsp), %r12
+ adcq 168(%rsp), %r15
+ adcq $0, %rbx
+ movq %rcx, %rax
+ movq %rcx, %r11
+ movq 40(%rsp), %rbp # 8-byte Reload
+ subq (%rbp), %rax
+ movq %rdx, %rcx
+ movq %rdx, %r14
+ sbbq 8(%rbp), %rcx
+ movq %rsi, %rdx
+ movq %rsi, %r13
+ sbbq 16(%rbp), %rdx
+ movq %rdi, %rsi
+ sbbq 24(%rbp), %rsi
+ movq %r8, %rdi
+ sbbq 32(%rbp), %rdi
+ movq %r9, %r10
+ sbbq 40(%rbp), %r10
+ movq %r12, %r8
+ sbbq 48(%rbp), %r8
+ movq %r15, %r9
+ sbbq 56(%rbp), %r9
+ sbbq $0, %rbx
+ andl $1, %ebx
+ cmovneq %r15, %r9
+ testb %bl, %bl
+ cmovneq %r11, %rax
+ movq (%rsp), %rbx # 8-byte Reload
+ movq %rax, (%rbx)
+ cmovneq %r14, %rcx
+ movq %rcx, 8(%rbx)
+ cmovneq %r13, %rdx
+ movq %rdx, 16(%rbx)
+ cmovneq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 24(%rbx)
+ cmovneq 64(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rbx)
+ cmovneq 88(%rsp), %r10 # 8-byte Folded Reload
+ movq %r10, 40(%rbx)
+ cmovneq %r12, %r8
+ movq %r8, 48(%rbx)
+ movq %r9, 56(%rbx)
+ addq $1256, %rsp # imm = 0x4E8
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end114:
+ .size mcl_fp_mont8Lbmi2, .Lfunc_end114-mcl_fp_mont8Lbmi2
+
+ .globl mcl_fp_montNF8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF8Lbmi2,@function
+mcl_fp_montNF8Lbmi2: # @mcl_fp_montNF8Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $1240, %rsp # imm = 0x4D8
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ movq %rdx, 16(%rsp) # 8-byte Spill
+ movq %rsi, 24(%rsp) # 8-byte Spill
+ movq %rdi, (%rsp) # 8-byte Spill
+ movq -8(%rcx), %rbx
+ movq %rbx, 32(%rsp) # 8-byte Spill
+ movq (%rdx), %rdx
+ leaq 1168(%rsp), %rdi
+ callq .LmulPv512x64
+ movq 1168(%rsp), %r15
+ movq 1176(%rsp), %r12
+ movq %r15, %rdx
+ imulq %rbx, %rdx
+ movq 1232(%rsp), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ movq 1224(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq 1216(%rsp), %r13
+ movq 1208(%rsp), %rax
+ movq %rax, 72(%rsp) # 8-byte Spill
+ movq 1200(%rsp), %r14
+ movq 1192(%rsp), %rbp
+ movq 1184(%rsp), %rbx
+ leaq 1096(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 1096(%rsp), %r15
+ adcq 1104(%rsp), %r12
+ movq %r12, 64(%rsp) # 8-byte Spill
+ adcq 1112(%rsp), %rbx
+ adcq 1120(%rsp), %rbp
+ adcq 1128(%rsp), %r14
+ movq %r14, %r12
+ movq 72(%rsp), %r14 # 8-byte Reload
+ adcq 1136(%rsp), %r14
+ adcq 1144(%rsp), %r13
+ movq 80(%rsp), %rax # 8-byte Reload
+ adcq 1152(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq 48(%rsp), %rax # 8-byte Reload
+ adcq 1160(%rsp), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rdx
+ leaq 1024(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 1088(%rsp), %r15
+ movq 64(%rsp), %rax # 8-byte Reload
+ addq 1024(%rsp), %rax
+ adcq 1032(%rsp), %rbx
+ movq %rbx, 8(%rsp) # 8-byte Spill
+ movq %rbp, %rbx
+ adcq 1040(%rsp), %rbx
+ adcq 1048(%rsp), %r12
+ adcq 1056(%rsp), %r14
+ movq %r14, 72(%rsp) # 8-byte Spill
+ movq %r13, %rbp
+ adcq 1064(%rsp), %rbp
+ movq 80(%rsp), %rcx # 8-byte Reload
+ adcq 1072(%rsp), %rcx
+ movq %rcx, 80(%rsp) # 8-byte Spill
+ movq 48(%rsp), %r14 # 8-byte Reload
+ adcq 1080(%rsp), %r14
+ adcq $0, %r15
+ movq %rax, %rdx
+ movq %rax, %r13
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 952(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 952(%rsp), %r13
+ movq 8(%rsp), %rax # 8-byte Reload
+ adcq 960(%rsp), %rax
+ movq %rax, 8(%rsp) # 8-byte Spill
+ adcq 968(%rsp), %rbx
+ movq %rbx, 64(%rsp) # 8-byte Spill
+ movq %r12, %rbx
+ adcq 976(%rsp), %rbx
+ movq 72(%rsp), %r12 # 8-byte Reload
+ adcq 984(%rsp), %r12
+ adcq 992(%rsp), %rbp
+ movq %rbp, 40(%rsp) # 8-byte Spill
+ movq 80(%rsp), %r13 # 8-byte Reload
+ adcq 1000(%rsp), %r13
+ movq %r14, %rbp
+ adcq 1008(%rsp), %rbp
+ adcq 1016(%rsp), %r15
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rdx
+ leaq 880(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 944(%rsp), %r14
+ movq 8(%rsp), %rax # 8-byte Reload
+ addq 880(%rsp), %rax
+ movq 64(%rsp), %rcx # 8-byte Reload
+ adcq 888(%rsp), %rcx
+ movq %rcx, 64(%rsp) # 8-byte Spill
+ adcq 896(%rsp), %rbx
+ adcq 904(%rsp), %r12
+ movq %r12, 72(%rsp) # 8-byte Spill
+ movq 40(%rsp), %rcx # 8-byte Reload
+ adcq 912(%rsp), %rcx
+ movq %rcx, 40(%rsp) # 8-byte Spill
+ adcq 920(%rsp), %r13
+ movq %r13, 80(%rsp) # 8-byte Spill
+ adcq 928(%rsp), %rbp
+ movq %rbp, 48(%rsp) # 8-byte Spill
+ adcq 936(%rsp), %r15
+ adcq $0, %r14
+ movq %rax, %rdx
+ movq %rax, %rbp
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 808(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 808(%rsp), %rbp
+ movq 64(%rsp), %r13 # 8-byte Reload
+ adcq 816(%rsp), %r13
+ movq %rbx, %r12
+ adcq 824(%rsp), %r12
+ movq 72(%rsp), %rbx # 8-byte Reload
+ adcq 832(%rsp), %rbx
+ movq 40(%rsp), %rbp # 8-byte Reload
+ adcq 840(%rsp), %rbp
+ movq 80(%rsp), %rax # 8-byte Reload
+ adcq 848(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq 48(%rsp), %rax # 8-byte Reload
+ adcq 856(%rsp), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ adcq 864(%rsp), %r15
+ adcq 872(%rsp), %r14
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rdx
+ leaq 736(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 800(%rsp), %rax
+ movq %r13, %rcx
+ addq 736(%rsp), %rcx
+ adcq 744(%rsp), %r12
+ movq %r12, 40(%rsp) # 8-byte Spill
+ adcq 752(%rsp), %rbx
+ movq %rbx, 72(%rsp) # 8-byte Spill
+ adcq 760(%rsp), %rbp
+ movq %rbp, %r13
+ movq 80(%rsp), %rbp # 8-byte Reload
+ adcq 768(%rsp), %rbp
+ movq 48(%rsp), %rbx # 8-byte Reload
+ adcq 776(%rsp), %rbx
+ adcq 784(%rsp), %r15
+ adcq 792(%rsp), %r14
+ adcq $0, %rax
+ movq %rax, 64(%rsp) # 8-byte Spill
+ movq %rcx, %rdx
+ movq %rcx, %r12
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 664(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 664(%rsp), %r12
+ movq 40(%rsp), %rax # 8-byte Reload
+ adcq 672(%rsp), %rax
+ movq %rax, 40(%rsp) # 8-byte Spill
+ movq 72(%rsp), %rax # 8-byte Reload
+ adcq 680(%rsp), %rax
+ movq %rax, 72(%rsp) # 8-byte Spill
+ adcq 688(%rsp), %r13
+ adcq 696(%rsp), %rbp
+ movq %rbp, 80(%rsp) # 8-byte Spill
+ adcq 704(%rsp), %rbx
+ adcq 712(%rsp), %r15
+ adcq 720(%rsp), %r14
+ movq 64(%rsp), %r12 # 8-byte Reload
+ adcq 728(%rsp), %r12
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 32(%rax), %rdx
+ leaq 592(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 656(%rsp), %rcx
+ movq 40(%rsp), %rax # 8-byte Reload
+ addq 592(%rsp), %rax
+ movq 72(%rsp), %rbp # 8-byte Reload
+ adcq 600(%rsp), %rbp
+ adcq 608(%rsp), %r13
+ movq %r13, 40(%rsp) # 8-byte Spill
+ movq 80(%rsp), %r13 # 8-byte Reload
+ adcq 616(%rsp), %r13
+ adcq 624(%rsp), %rbx
+ adcq 632(%rsp), %r15
+ adcq 640(%rsp), %r14
+ adcq 648(%rsp), %r12
+ movq %r12, 64(%rsp) # 8-byte Spill
+ adcq $0, %rcx
+ movq %rcx, 80(%rsp) # 8-byte Spill
+ movq %rax, %rdx
+ movq %rax, %r12
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 520(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 520(%rsp), %r12
+ adcq 528(%rsp), %rbp
+ movq %rbp, 72(%rsp) # 8-byte Spill
+ movq 40(%rsp), %r12 # 8-byte Reload
+ adcq 536(%rsp), %r12
+ movq %r13, %rbp
+ adcq 544(%rsp), %rbp
+ adcq 552(%rsp), %rbx
+ adcq 560(%rsp), %r15
+ adcq 568(%rsp), %r14
+ movq 64(%rsp), %r13 # 8-byte Reload
+ adcq 576(%rsp), %r13
+ movq 80(%rsp), %rax # 8-byte Reload
+ adcq 584(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 40(%rax), %rdx
+ leaq 448(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 512(%rsp), %rcx
+ movq 72(%rsp), %rax # 8-byte Reload
+ addq 448(%rsp), %rax
+ adcq 456(%rsp), %r12
+ movq %r12, 40(%rsp) # 8-byte Spill
+ adcq 464(%rsp), %rbp
+ adcq 472(%rsp), %rbx
+ adcq 480(%rsp), %r15
+ adcq 488(%rsp), %r14
+ adcq 496(%rsp), %r13
+ movq %r13, 64(%rsp) # 8-byte Spill
+ movq 80(%rsp), %r13 # 8-byte Reload
+ adcq 504(%rsp), %r13
+ adcq $0, %rcx
+ movq %rcx, 72(%rsp) # 8-byte Spill
+ movq %rax, %rdx
+ movq %rax, %r12
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 376(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 376(%rsp), %r12
+ movq 40(%rsp), %rax # 8-byte Reload
+ adcq 384(%rsp), %rax
+ movq %rax, 40(%rsp) # 8-byte Spill
+ adcq 392(%rsp), %rbp
+ adcq 400(%rsp), %rbx
+ adcq 408(%rsp), %r15
+ adcq 416(%rsp), %r14
+ movq 64(%rsp), %r12 # 8-byte Reload
+ adcq 424(%rsp), %r12
+ adcq 432(%rsp), %r13
+ movq 72(%rsp), %rax # 8-byte Reload
+ adcq 440(%rsp), %rax
+ movq %rax, 72(%rsp) # 8-byte Spill
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 48(%rax), %rdx
+ leaq 304(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 368(%rsp), %rcx
+ movq 40(%rsp), %rax # 8-byte Reload
+ addq 304(%rsp), %rax
+ adcq 312(%rsp), %rbp
+ movq %rbp, 80(%rsp) # 8-byte Spill
+ adcq 320(%rsp), %rbx
+ adcq 328(%rsp), %r15
+ adcq 336(%rsp), %r14
+ adcq 344(%rsp), %r12
+ movq %r12, 64(%rsp) # 8-byte Spill
+ adcq 352(%rsp), %r13
+ movq 72(%rsp), %rbp # 8-byte Reload
+ adcq 360(%rsp), %rbp
+ adcq $0, %rcx
+ movq %rcx, 48(%rsp) # 8-byte Spill
+ movq %rax, %rdx
+ movq %rax, %r12
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 232(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 232(%rsp), %r12
+ movq 80(%rsp), %rax # 8-byte Reload
+ adcq 240(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ adcq 248(%rsp), %rbx
+ adcq 256(%rsp), %r15
+ adcq 264(%rsp), %r14
+ movq 64(%rsp), %r12 # 8-byte Reload
+ adcq 272(%rsp), %r12
+ adcq 280(%rsp), %r13
+ adcq 288(%rsp), %rbp
+ movq %rbp, 72(%rsp) # 8-byte Spill
+ movq 48(%rsp), %rbp # 8-byte Reload
+ adcq 296(%rsp), %rbp
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 56(%rax), %rdx
+ leaq 160(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 224(%rsp), %rcx
+ movq 80(%rsp), %rax # 8-byte Reload
+ addq 160(%rsp), %rax
+ adcq 168(%rsp), %rbx
+ movq %rbx, 48(%rsp) # 8-byte Spill
+ adcq 176(%rsp), %r15
+ adcq 184(%rsp), %r14
+ adcq 192(%rsp), %r12
+ movq %r12, 64(%rsp) # 8-byte Spill
+ adcq 200(%rsp), %r13
+ movq 72(%rsp), %rbx # 8-byte Reload
+ adcq 208(%rsp), %rbx
+ adcq 216(%rsp), %rbp
+ movq %rbp, %r12
+ adcq $0, %rcx
+ movq %rcx, 80(%rsp) # 8-byte Spill
+ movq 32(%rsp), %rdx # 8-byte Reload
+ imulq %rax, %rdx
+ movq %rax, %rbp
+ leaq 88(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 88(%rsp), %rbp
+ movq 48(%rsp), %r11 # 8-byte Reload
+ adcq 96(%rsp), %r11
+ adcq 104(%rsp), %r15
+ adcq 112(%rsp), %r14
+ movq 64(%rsp), %rsi # 8-byte Reload
+ adcq 120(%rsp), %rsi
+ movq %rsi, 64(%rsp) # 8-byte Spill
+ adcq 128(%rsp), %r13
+ adcq 136(%rsp), %rbx
+ movq %rbx, 72(%rsp) # 8-byte Spill
+ adcq 144(%rsp), %r12
+ movq 80(%rsp), %r8 # 8-byte Reload
+ adcq 152(%rsp), %r8
+ movq %r11, %rax
+ movq 56(%rsp), %rbp # 8-byte Reload
+ subq (%rbp), %rax
+ movq %r15, %rcx
+ sbbq 8(%rbp), %rcx
+ movq %r14, %rdx
+ sbbq 16(%rbp), %rdx
+ sbbq 24(%rbp), %rsi
+ movq %r13, %rdi
+ sbbq 32(%rbp), %rdi
+ movq %rbx, %r9
+ sbbq 40(%rbp), %r9
+ movq %r12, %r10
+ sbbq 48(%rbp), %r10
+ movq %rbp, %rbx
+ movq %r8, %rbp
+ sbbq 56(%rbx), %rbp
+ testq %rbp, %rbp
+ cmovsq %r11, %rax
+ movq (%rsp), %rbx # 8-byte Reload
+ movq %rax, (%rbx)
+ cmovsq %r15, %rcx
+ movq %rcx, 8(%rbx)
+ cmovsq %r14, %rdx
+ movq %rdx, 16(%rbx)
+ cmovsq 64(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 24(%rbx)
+ cmovsq %r13, %rdi
+ movq %rdi, 32(%rbx)
+ cmovsq 72(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, 40(%rbx)
+ cmovsq %r12, %r10
+ movq %r10, 48(%rbx)
+ cmovsq %r8, %rbp
+ movq %rbp, 56(%rbx)
+ addq $1240, %rsp # imm = 0x4D8
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end115:
+ .size mcl_fp_montNF8Lbmi2, .Lfunc_end115-mcl_fp_montNF8Lbmi2
+
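+# Per the mcl naming convention, mcl_fp_montRed8Lbmi2 below performs Montgomery
+# reduction of a 16-limb (1024-bit) value at %rsi modulo the 8-limb prime at
+# %rdx, writing the 8-limb result to %rdi. The word stored at p[-1] (loaded via
+# -8(%rax)) supplies the per-round Montgomery constant; eight mulx-based rounds
+# go through .LmulPv512x64, followed by a final conditional subtraction of p.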
+ .globl mcl_fp_montRed8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed8Lbmi2,@function
+mcl_fp_montRed8Lbmi2: # @mcl_fp_montRed8Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $776, %rsp # imm = 0x308
+ movq %rdx, %rax
+ movq %rax, 112(%rsp) # 8-byte Spill
+ movq %rdi, 72(%rsp) # 8-byte Spill
+ movq -8(%rax), %rcx
+ movq %rcx, 128(%rsp) # 8-byte Spill
+ movq (%rsi), %r15
+ movq 8(%rsi), %rdx
+ movq %rdx, 184(%rsp) # 8-byte Spill
+ movq %r15, %rdx
+ imulq %rcx, %rdx
+ movq 120(%rsi), %rcx
+ movq %rcx, 96(%rsp) # 8-byte Spill
+ movq 112(%rsi), %rcx
+ movq %rcx, 136(%rsp) # 8-byte Spill
+ movq 104(%rsi), %rcx
+ movq %rcx, 120(%rsp) # 8-byte Spill
+ movq 96(%rsi), %rcx
+ movq %rcx, 168(%rsp) # 8-byte Spill
+ movq 88(%rsi), %rcx
+ movq %rcx, 176(%rsp) # 8-byte Spill
+ movq 80(%rsi), %rcx
+ movq %rcx, 160(%rsp) # 8-byte Spill
+ movq 72(%rsi), %rcx
+ movq %rcx, 192(%rsp) # 8-byte Spill
+ movq 64(%rsi), %r13
+ movq 56(%rsi), %rcx
+ movq %rcx, 144(%rsp) # 8-byte Spill
+ movq 48(%rsi), %r14
+ movq 40(%rsi), %rcx
+ movq %rcx, 152(%rsp) # 8-byte Spill
+ movq 32(%rsi), %r12
+ movq 24(%rsi), %rbx
+ movq 16(%rsi), %rbp
+ movq %rax, %rcx
+ movq (%rcx), %rax
+ movq %rax, 16(%rsp) # 8-byte Spill
+ movq 56(%rcx), %rax
+ movq %rax, 64(%rsp) # 8-byte Spill
+ movq 48(%rcx), %rax
+ movq %rax, 56(%rsp) # 8-byte Spill
+ movq 40(%rcx), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ movq 32(%rcx), %rax
+ movq %rax, 40(%rsp) # 8-byte Spill
+ movq 24(%rcx), %rax
+ movq %rax, 32(%rsp) # 8-byte Spill
+ movq 16(%rcx), %rax
+ movq %rax, 24(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rax
+ movq %rax, 8(%rsp) # 8-byte Spill
+ movq %rcx, %rsi
+ leaq 704(%rsp), %rdi
+ callq .LmulPv512x64
+ addq 704(%rsp), %r15
+ movq 184(%rsp), %rcx # 8-byte Reload
+ adcq 712(%rsp), %rcx
+ adcq 720(%rsp), %rbp
+ movq %rbp, 80(%rsp) # 8-byte Spill
+ adcq 728(%rsp), %rbx
+ movq %rbx, 88(%rsp) # 8-byte Spill
+ adcq 736(%rsp), %r12
+ movq %r12, 104(%rsp) # 8-byte Spill
+ movq 152(%rsp), %rax # 8-byte Reload
+ adcq 744(%rsp), %rax
+ movq %rax, 152(%rsp) # 8-byte Spill
+ adcq 752(%rsp), %r14
+ movq %r14, %r12
+ movq 144(%rsp), %rax # 8-byte Reload
+ adcq 760(%rsp), %rax
+ movq %rax, 144(%rsp) # 8-byte Spill
+ adcq 768(%rsp), %r13
+ movq %r13, 184(%rsp) # 8-byte Spill
+ adcq $0, 192(%rsp) # 8-byte Folded Spill
+ movq 160(%rsp), %r15 # 8-byte Reload
+ adcq $0, %r15
+ adcq $0, 176(%rsp) # 8-byte Folded Spill
+ adcq $0, 168(%rsp) # 8-byte Folded Spill
+ adcq $0, 120(%rsp) # 8-byte Folded Spill
+ movq 136(%rsp), %r13 # 8-byte Reload
+ adcq $0, %r13
+ movq 96(%rsp), %r14 # 8-byte Reload
+ adcq $0, %r14
+ sbbq %rbx, %rbx
+ movq %rcx, %rbp
+ movq %rbp, %rdx
+ imulq 128(%rsp), %rdx # 8-byte Folded Reload
+ leaq 632(%rsp), %rdi
+ movq 112(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ andl $1, %ebx
+ movq %rbx, %rax
+ addq 632(%rsp), %rbp
+ movq 80(%rsp), %rsi # 8-byte Reload
+ adcq 640(%rsp), %rsi
+ movq 88(%rsp), %rcx # 8-byte Reload
+ adcq 648(%rsp), %rcx
+ movq %rcx, 88(%rsp) # 8-byte Spill
+ movq 104(%rsp), %rcx # 8-byte Reload
+ adcq 656(%rsp), %rcx
+ movq %rcx, 104(%rsp) # 8-byte Spill
+ movq 152(%rsp), %rcx # 8-byte Reload
+ adcq 664(%rsp), %rcx
+ movq %rcx, 152(%rsp) # 8-byte Spill
+ adcq 672(%rsp), %r12
+ movq 144(%rsp), %rcx # 8-byte Reload
+ adcq 680(%rsp), %rcx
+ movq %rcx, 144(%rsp) # 8-byte Spill
+ movq 184(%rsp), %rcx # 8-byte Reload
+ adcq 688(%rsp), %rcx
+ movq %rcx, 184(%rsp) # 8-byte Spill
+ movq 192(%rsp), %rcx # 8-byte Reload
+ adcq 696(%rsp), %rcx
+ movq %rcx, 192(%rsp) # 8-byte Spill
+ adcq $0, %r15
+ movq %r15, 160(%rsp) # 8-byte Spill
+ movq 176(%rsp), %rbx # 8-byte Reload
+ adcq $0, %rbx
+ movq 168(%rsp), %r15 # 8-byte Reload
+ adcq $0, %r15
+ adcq $0, 120(%rsp) # 8-byte Folded Spill
+ adcq $0, %r13
+ movq %r13, 136(%rsp) # 8-byte Spill
+ adcq $0, %r14
+ movq %r14, 96(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ adcq $0, %rbp
+ movq %rsi, %rdx
+ movq %rsi, %r14
+ imulq 128(%rsp), %rdx # 8-byte Folded Reload
+ leaq 560(%rsp), %rdi
+ movq 112(%rsp), %r13 # 8-byte Reload
+ movq %r13, %rsi
+ callq .LmulPv512x64
+ addq 560(%rsp), %r14
+ movq 88(%rsp), %rcx # 8-byte Reload
+ adcq 568(%rsp), %rcx
+ movq 104(%rsp), %rax # 8-byte Reload
+ adcq 576(%rsp), %rax
+ movq %rax, 104(%rsp) # 8-byte Spill
+ movq 152(%rsp), %rax # 8-byte Reload
+ adcq 584(%rsp), %rax
+ movq %rax, 152(%rsp) # 8-byte Spill
+ adcq 592(%rsp), %r12
+ movq %r12, 88(%rsp) # 8-byte Spill
+ movq 144(%rsp), %r14 # 8-byte Reload
+ adcq 600(%rsp), %r14
+ movq 184(%rsp), %rax # 8-byte Reload
+ adcq 608(%rsp), %rax
+ movq %rax, 184(%rsp) # 8-byte Spill
+ movq 192(%rsp), %rax # 8-byte Reload
+ adcq 616(%rsp), %rax
+ movq %rax, 192(%rsp) # 8-byte Spill
+ movq 160(%rsp), %rax # 8-byte Reload
+ adcq 624(%rsp), %rax
+ movq %rax, 160(%rsp) # 8-byte Spill
+ adcq $0, %rbx
+ movq %rbx, 176(%rsp) # 8-byte Spill
+ adcq $0, %r15
+ movq %r15, 168(%rsp) # 8-byte Spill
+ movq 120(%rsp), %rbx # 8-byte Reload
+ adcq $0, %rbx
+ movq 136(%rsp), %r15 # 8-byte Reload
+ adcq $0, %r15
+ adcq $0, 96(%rsp) # 8-byte Folded Spill
+ adcq $0, %rbp
+ movq %rbp, 80(%rsp) # 8-byte Spill
+ movq %rcx, %rbp
+ movq %rbp, %rdx
+ movq 128(%rsp), %r12 # 8-byte Reload
+ imulq %r12, %rdx
+ leaq 488(%rsp), %rdi
+ movq %r13, %rsi
+ callq .LmulPv512x64
+ addq 488(%rsp), %rbp
+ movq 104(%rsp), %rax # 8-byte Reload
+ adcq 496(%rsp), %rax
+ movq 152(%rsp), %rbp # 8-byte Reload
+ adcq 504(%rsp), %rbp
+ movq 88(%rsp), %rcx # 8-byte Reload
+ adcq 512(%rsp), %rcx
+ movq %rcx, 88(%rsp) # 8-byte Spill
+ adcq 520(%rsp), %r14
+ movq 184(%rsp), %rcx # 8-byte Reload
+ adcq 528(%rsp), %rcx
+ movq %rcx, 184(%rsp) # 8-byte Spill
+ movq 192(%rsp), %rcx # 8-byte Reload
+ adcq 536(%rsp), %rcx
+ movq %rcx, 192(%rsp) # 8-byte Spill
+ movq 160(%rsp), %r13 # 8-byte Reload
+ adcq 544(%rsp), %r13
+ movq 176(%rsp), %rcx # 8-byte Reload
+ adcq 552(%rsp), %rcx
+ movq %rcx, 176(%rsp) # 8-byte Spill
+ adcq $0, 168(%rsp) # 8-byte Folded Spill
+ adcq $0, %rbx
+ movq %rbx, 120(%rsp) # 8-byte Spill
+ movq %r15, %rbx
+ adcq $0, %rbx
+ adcq $0, 96(%rsp) # 8-byte Folded Spill
+ adcq $0, 80(%rsp) # 8-byte Folded Spill
+ movq %rax, %rdx
+ movq %rax, %r15
+ imulq %r12, %rdx
+ leaq 416(%rsp), %rdi
+ movq 112(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 416(%rsp), %r15
+ adcq 424(%rsp), %rbp
+ movq %rbp, %rax
+ movq 88(%rsp), %rcx # 8-byte Reload
+ adcq 432(%rsp), %rcx
+ movq %rcx, 88(%rsp) # 8-byte Spill
+ movq %r14, %r12
+ adcq 440(%rsp), %r12
+ movq 184(%rsp), %r14 # 8-byte Reload
+ adcq 448(%rsp), %r14
+ movq 192(%rsp), %rbp # 8-byte Reload
+ adcq 456(%rsp), %rbp
+ adcq 464(%rsp), %r13
+ movq 176(%rsp), %rcx # 8-byte Reload
+ adcq 472(%rsp), %rcx
+ movq %rcx, 176(%rsp) # 8-byte Spill
+ movq 168(%rsp), %rcx # 8-byte Reload
+ adcq 480(%rsp), %rcx
+ movq %rcx, 168(%rsp) # 8-byte Spill
+ adcq $0, 120(%rsp) # 8-byte Folded Spill
+ adcq $0, %rbx
+ movq %rbx, 136(%rsp) # 8-byte Spill
+ movq 96(%rsp), %r15 # 8-byte Reload
+ adcq $0, %r15
+ adcq $0, 80(%rsp) # 8-byte Folded Spill
+ movq %rax, %rbx
+ movq %rbx, %rdx
+ imulq 128(%rsp), %rdx # 8-byte Folded Reload
+ leaq 344(%rsp), %rdi
+ movq 112(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 344(%rsp), %rbx
+ movq 88(%rsp), %rax # 8-byte Reload
+ adcq 352(%rsp), %rax
+ adcq 360(%rsp), %r12
+ movq %r12, 144(%rsp) # 8-byte Spill
+ adcq 368(%rsp), %r14
+ movq %r14, 184(%rsp) # 8-byte Spill
+ adcq 376(%rsp), %rbp
+ movq %rbp, 192(%rsp) # 8-byte Spill
+ adcq 384(%rsp), %r13
+ movq %r13, 160(%rsp) # 8-byte Spill
+ movq 176(%rsp), %r13 # 8-byte Reload
+ adcq 392(%rsp), %r13
+ movq 168(%rsp), %r12 # 8-byte Reload
+ adcq 400(%rsp), %r12
+ movq 120(%rsp), %r14 # 8-byte Reload
+ adcq 408(%rsp), %r14
+ movq 136(%rsp), %rbp # 8-byte Reload
+ adcq $0, %rbp
+ movq %r15, %rbx
+ adcq $0, %rbx
+ adcq $0, 80(%rsp) # 8-byte Folded Spill
+ movq %rax, %rdx
+ movq %rax, %r15
+ imulq 128(%rsp), %rdx # 8-byte Folded Reload
+ leaq 272(%rsp), %rdi
+ movq 112(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 272(%rsp), %r15
+ movq 144(%rsp), %rcx # 8-byte Reload
+ adcq 280(%rsp), %rcx
+ movq 184(%rsp), %rax # 8-byte Reload
+ adcq 288(%rsp), %rax
+ movq %rax, 184(%rsp) # 8-byte Spill
+ movq 192(%rsp), %rax # 8-byte Reload
+ adcq 296(%rsp), %rax
+ movq %rax, 192(%rsp) # 8-byte Spill
+ movq 160(%rsp), %rax # 8-byte Reload
+ adcq 304(%rsp), %rax
+ movq %rax, 160(%rsp) # 8-byte Spill
+ adcq 312(%rsp), %r13
+ movq %r13, 176(%rsp) # 8-byte Spill
+ adcq 320(%rsp), %r12
+ movq %r12, 168(%rsp) # 8-byte Spill
+ adcq 328(%rsp), %r14
+ movq %r14, %r13
+ adcq 336(%rsp), %rbp
+ movq %rbp, %r12
+ adcq $0, %rbx
+ movq %rbx, %r14
+ movq 80(%rsp), %r15 # 8-byte Reload
+ adcq $0, %r15
+ movq 128(%rsp), %rdx # 8-byte Reload
+ movq %rcx, %rbx
+ imulq %rbx, %rdx
+ leaq 200(%rsp), %rdi
+ movq 112(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 200(%rsp), %rbx
+ movq 184(%rsp), %rax # 8-byte Reload
+ adcq 208(%rsp), %rax
+ movq %rax, 184(%rsp) # 8-byte Spill
+ movq 192(%rsp), %r8 # 8-byte Reload
+ adcq 216(%rsp), %r8
+ movq %r8, 192(%rsp) # 8-byte Spill
+ movq 160(%rsp), %rdx # 8-byte Reload
+ adcq 224(%rsp), %rdx
+ movq 176(%rsp), %rsi # 8-byte Reload
+ adcq 232(%rsp), %rsi
+ movq 168(%rsp), %rdi # 8-byte Reload
+ adcq 240(%rsp), %rdi
+ movq %r13, %rbp
+ adcq 248(%rsp), %rbp
+ movq %r12, %rbx
+ adcq 256(%rsp), %rbx
+ movq %rbx, 136(%rsp) # 8-byte Spill
+ movq %r14, %r9
+ adcq 264(%rsp), %r9
+ adcq $0, %r15
+ movq %r15, %r10
+ subq 16(%rsp), %rax # 8-byte Folded Reload
+ movq %r8, %rcx
+ sbbq 8(%rsp), %rcx # 8-byte Folded Reload
+ movq %rdx, %r13
+ sbbq 24(%rsp), %r13 # 8-byte Folded Reload
+ movq %rsi, %r12
+ sbbq 32(%rsp), %r12 # 8-byte Folded Reload
+ movq %rdi, %r14
+ sbbq 40(%rsp), %r14 # 8-byte Folded Reload
+ movq %rbp, %r11
+ sbbq 48(%rsp), %r11 # 8-byte Folded Reload
+ movq %rbx, %r8
+ sbbq 56(%rsp), %r8 # 8-byte Folded Reload
+ movq %r9, %r15
+ sbbq 64(%rsp), %r9 # 8-byte Folded Reload
+ sbbq $0, %r10
+ andl $1, %r10d
+ cmovneq %r15, %r9
+ testb %r10b, %r10b
+ cmovneq 184(%rsp), %rax # 8-byte Folded Reload
+ movq 72(%rsp), %rbx # 8-byte Reload
+ movq %rax, (%rbx)
+ cmovneq 192(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 8(%rbx)
+ cmovneq %rdx, %r13
+ movq %r13, 16(%rbx)
+ cmovneq %rsi, %r12
+ movq %r12, 24(%rbx)
+ cmovneq %rdi, %r14
+ movq %r14, 32(%rbx)
+ cmovneq %rbp, %r11
+ movq %r11, 40(%rbx)
+ cmovneq 136(%rsp), %r8 # 8-byte Folded Reload
+ movq %r8, 48(%rbx)
+ movq %r9, 56(%rbx)
+ addq $776, %rsp # imm = 0x308
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end116:
+ .size mcl_fp_montRed8Lbmi2, .Lfunc_end116-mcl_fp_montRed8Lbmi2
+
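+# mcl_fp_addPre8Lbmi2: plain 8-limb addition z(%rdi) = x(%rsi) + y(%rdx) with
+# no modular reduction; the final carry (0 or 1) is returned in %rax.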
+ .globl mcl_fp_addPre8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre8Lbmi2,@function
+mcl_fp_addPre8Lbmi2: # @mcl_fp_addPre8Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r8
+ movq 56(%rsi), %r15
+ movq 48(%rdx), %r9
+ movq 48(%rsi), %r12
+ movq 40(%rdx), %r10
+ movq 32(%rdx), %r11
+ movq 24(%rdx), %r14
+ movq 16(%rdx), %rbx
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rbx
+ movq 40(%rsi), %r13
+ movq 24(%rsi), %rax
+ movq 32(%rsi), %rsi
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rbx, 16(%rdi)
+ adcq %r14, %rax
+ movq %rax, 24(%rdi)
+ adcq %r11, %rsi
+ movq %rsi, 32(%rdi)
+ adcq %r10, %r13
+ movq %r13, 40(%rdi)
+ adcq %r9, %r12
+ movq %r12, 48(%rdi)
+ adcq %r8, %r15
+ movq %r15, 56(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end117:
+ .size mcl_fp_addPre8Lbmi2, .Lfunc_end117-mcl_fp_addPre8Lbmi2
+
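+# mcl_fp_subPre8Lbmi2: plain 8-limb subtraction z(%rdi) = x(%rsi) - y(%rdx)
+# with no modular reduction; the final borrow (0 or 1) is returned in %rax.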
+ .globl mcl_fp_subPre8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre8Lbmi2,@function
+mcl_fp_subPre8Lbmi2: # @mcl_fp_subPre8Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r8
+ movq 56(%rsi), %r15
+ movq 48(%rdx), %r9
+ movq 40(%rdx), %r10
+ movq 24(%rdx), %r11
+ movq 32(%rdx), %r14
+ movq (%rsi), %rbx
+ movq 8(%rsi), %r12
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %r12
+ movq 16(%rsi), %rcx
+ sbbq 16(%rdx), %rcx
+ movq 48(%rsi), %r13
+ movq 40(%rsi), %rdx
+ movq 32(%rsi), %rbp
+ movq 24(%rsi), %rsi
+ movq %rbx, (%rdi)
+ movq %r12, 8(%rdi)
+ movq %rcx, 16(%rdi)
+ sbbq %r11, %rsi
+ movq %rsi, 24(%rdi)
+ sbbq %r14, %rbp
+ movq %rbp, 32(%rdi)
+ sbbq %r10, %rdx
+ movq %rdx, 40(%rdi)
+ sbbq %r9, %r13
+ movq %r13, 48(%rdi)
+ sbbq %r8, %r15
+ movq %r15, 56(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end118:
+ .size mcl_fp_subPre8Lbmi2, .Lfunc_end118-mcl_fp_subPre8Lbmi2
+
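+# mcl_fp_shr1_8Lbmi2: one-bit right shift of the 8-limb value at %rsi into
+# %rdi, implemented as a shrdq chain from the low limb upward.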
+ .globl mcl_fp_shr1_8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_8Lbmi2,@function
+mcl_fp_shr1_8Lbmi2: # @mcl_fp_shr1_8Lbmi2
+# BB#0:
+ movq 56(%rsi), %r8
+ movq 48(%rsi), %r9
+ movq 40(%rsi), %r10
+ movq 32(%rsi), %r11
+ movq 24(%rsi), %rcx
+ movq 16(%rsi), %rdx
+ movq (%rsi), %rax
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rax
+ movq %rax, (%rdi)
+ shrdq $1, %rdx, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rcx, %rdx
+ movq %rdx, 16(%rdi)
+ shrdq $1, %r11, %rcx
+ movq %rcx, 24(%rdi)
+ shrdq $1, %r10, %r11
+ movq %r11, 32(%rdi)
+ shrdq $1, %r9, %r10
+ movq %r10, 40(%rdi)
+ shrdq $1, %r8, %r9
+ movq %r9, 48(%rdi)
+ shrq %r8
+ movq %r8, 56(%rdi)
+ retq
+.Lfunc_end119:
+ .size mcl_fp_shr1_8Lbmi2, .Lfunc_end119-mcl_fp_shr1_8Lbmi2
+
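+# mcl_fp_add8Lbmi2: modular addition z = x + y mod p with z=%rdi, x=%rsi,
+# y=%rdx, p=%rcx. The raw sum is stored first; p is then subtracted and the
+# reduced value overwrites the result only when no borrow occurs (sum >= p).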
+ .globl mcl_fp_add8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add8Lbmi2,@function
+mcl_fp_add8Lbmi2: # @mcl_fp_add8Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r15
+ movq 56(%rsi), %r8
+ movq 48(%rdx), %r12
+ movq 48(%rsi), %r9
+ movq 40(%rsi), %r13
+ movq 24(%rsi), %r11
+ movq 32(%rsi), %r10
+ movq (%rdx), %r14
+ movq 8(%rdx), %rbx
+ addq (%rsi), %r14
+ adcq 8(%rsi), %rbx
+ movq 16(%rdx), %rax
+ adcq 16(%rsi), %rax
+ adcq 24(%rdx), %r11
+ movq 40(%rdx), %rsi
+ adcq 32(%rdx), %r10
+ movq %r14, (%rdi)
+ movq %rbx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ movq %r11, 24(%rdi)
+ movq %r10, 32(%rdi)
+ adcq %r13, %rsi
+ movq %rsi, 40(%rdi)
+ adcq %r12, %r9
+ movq %r9, 48(%rdi)
+ adcq %r15, %r8
+ movq %r8, 56(%rdi)
+ sbbq %rdx, %rdx
+ andl $1, %edx
+ subq (%rcx), %r14
+ sbbq 8(%rcx), %rbx
+ sbbq 16(%rcx), %rax
+ sbbq 24(%rcx), %r11
+ sbbq 32(%rcx), %r10
+ sbbq 40(%rcx), %rsi
+ sbbq 48(%rcx), %r9
+ sbbq 56(%rcx), %r8
+ sbbq $0, %rdx
+ testb $1, %dl
+ jne .LBB120_2
+# BB#1: # %nocarry
+ movq %r14, (%rdi)
+ movq %rbx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ movq %r11, 24(%rdi)
+ movq %r10, 32(%rdi)
+ movq %rsi, 40(%rdi)
+ movq %r9, 48(%rdi)
+ movq %r8, 56(%rdi)
+.LBB120_2: # %carry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end120:
+ .size mcl_fp_add8Lbmi2, .Lfunc_end120-mcl_fp_add8Lbmi2
+
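+# mcl_fp_addNF8Lbmi2: branch-free variant of the modular addition above.
+# Both x + y and x + y - p are computed, and cmovs on the sign of the
+# top-limb subtraction selects which value is written to %rdi.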
+ .globl mcl_fp_addNF8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF8Lbmi2,@function
+mcl_fp_addNF8Lbmi2: # @mcl_fp_addNF8Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r8
+ movq 48(%rdx), %rbp
+ movq 40(%rdx), %rbx
+ movq 32(%rdx), %rax
+ movq 24(%rdx), %r11
+ movq 16(%rdx), %r15
+ movq (%rdx), %r13
+ movq 8(%rdx), %r12
+ addq (%rsi), %r13
+ adcq 8(%rsi), %r12
+ adcq 16(%rsi), %r15
+ adcq 24(%rsi), %r11
+ adcq 32(%rsi), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq %rax, %r10
+ adcq 40(%rsi), %rbx
+ movq %rbx, -16(%rsp) # 8-byte Spill
+ movq %rbx, %r9
+ adcq 48(%rsi), %rbp
+ movq %rbp, -8(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ adcq 56(%rsi), %r8
+ movq %r13, %rsi
+ subq (%rcx), %rsi
+ movq %r12, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r15, %rbx
+ sbbq 16(%rcx), %rbx
+ movq %r11, %r14
+ sbbq 24(%rcx), %r14
+ movq %r10, %rbp
+ sbbq 32(%rcx), %rbp
+ movq %r9, %r10
+ sbbq 40(%rcx), %r10
+ movq %rax, %r9
+ sbbq 48(%rcx), %r9
+ movq %r8, %rax
+ sbbq 56(%rcx), %rax
+ testq %rax, %rax
+ cmovsq %r13, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r12, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r15, %rbx
+ movq %rbx, 16(%rdi)
+ cmovsq %r11, %r14
+ movq %r14, 24(%rdi)
+ cmovsq -24(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 32(%rdi)
+ cmovsq -16(%rsp), %r10 # 8-byte Folded Reload
+ movq %r10, 40(%rdi)
+ cmovsq -8(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, 48(%rdi)
+ cmovsq %r8, %rax
+ movq %rax, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end121:
+ .size mcl_fp_addNF8Lbmi2, .Lfunc_end121-mcl_fp_addNF8Lbmi2
+
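+# mcl_fp_sub8Lbmi2: modular subtraction z = x - y mod p (z=%rdi, x=%rsi,
+# y=%rdx, p=%rcx). The raw difference is stored; if the subtraction borrowed,
+# p is added back in the %carry branch.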
+ .globl mcl_fp_sub8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub8Lbmi2,@function
+mcl_fp_sub8Lbmi2: # @mcl_fp_sub8Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r12
+ movq 56(%rsi), %r8
+ movq 48(%rdx), %r13
+ movq (%rsi), %rax
+ movq 8(%rsi), %r10
+ xorl %ebx, %ebx
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r10
+ movq 16(%rsi), %r11
+ sbbq 16(%rdx), %r11
+ movq 24(%rsi), %r15
+ sbbq 24(%rdx), %r15
+ movq 32(%rsi), %r14
+ sbbq 32(%rdx), %r14
+ movq 48(%rsi), %r9
+ movq 40(%rsi), %rsi
+ sbbq 40(%rdx), %rsi
+ movq %rax, (%rdi)
+ movq %r10, 8(%rdi)
+ movq %r11, 16(%rdi)
+ movq %r15, 24(%rdi)
+ movq %r14, 32(%rdi)
+ movq %rsi, 40(%rdi)
+ sbbq %r13, %r9
+ movq %r9, 48(%rdi)
+ sbbq %r12, %r8
+ movq %r8, 56(%rdi)
+ sbbq $0, %rbx
+ testb $1, %bl
+ je .LBB122_2
+# BB#1: # %carry
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ movq 8(%rcx), %rax
+ adcq %r10, %rax
+ movq %rax, 8(%rdi)
+ movq 16(%rcx), %rax
+ adcq %r11, %rax
+ movq %rax, 16(%rdi)
+ movq 24(%rcx), %rax
+ adcq %r15, %rax
+ movq %rax, 24(%rdi)
+ movq 32(%rcx), %rax
+ adcq %r14, %rax
+ movq %rax, 32(%rdi)
+ movq 40(%rcx), %rax
+ adcq %rsi, %rax
+ movq %rax, 40(%rdi)
+ movq 48(%rcx), %rax
+ adcq %r9, %rax
+ movq %rax, 48(%rdi)
+ movq 56(%rcx), %rax
+ adcq %r8, %rax
+ movq %rax, 56(%rdi)
+.LBB122_2: # %nocarry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end122:
+ .size mcl_fp_sub8Lbmi2, .Lfunc_end122-mcl_fp_sub8Lbmi2
+
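+# mcl_fp_subNF8Lbmi2: branch-free modular subtraction. The borrow sign of the
+# top limb is broadcast with sarq $63 into a mask, p is ANDed with that mask,
+# and the (possibly zero) masked p is added back to the difference.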
+ .globl mcl_fp_subNF8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF8Lbmi2,@function
+mcl_fp_subNF8Lbmi2: # @mcl_fp_subNF8Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq %rdi, %r9
+ movq 56(%rsi), %r14
+ movq 48(%rsi), %rax
+ movq 40(%rsi), %rcx
+ movq 32(%rsi), %rdi
+ movq 24(%rsi), %r11
+ movq 16(%rsi), %r15
+ movq (%rsi), %r13
+ movq 8(%rsi), %r12
+ subq (%rdx), %r13
+ sbbq 8(%rdx), %r12
+ sbbq 16(%rdx), %r15
+ sbbq 24(%rdx), %r11
+ sbbq 32(%rdx), %rdi
+ movq %rdi, -24(%rsp) # 8-byte Spill
+ sbbq 40(%rdx), %rcx
+ movq %rcx, -16(%rsp) # 8-byte Spill
+ sbbq 48(%rdx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ sbbq 56(%rdx), %r14
+ movq %r14, %rsi
+ sarq $63, %rsi
+ movq 56(%r8), %r10
+ andq %rsi, %r10
+ movq 48(%r8), %rbx
+ andq %rsi, %rbx
+ movq 40(%r8), %rdi
+ andq %rsi, %rdi
+ movq 32(%r8), %rbp
+ andq %rsi, %rbp
+ movq 24(%r8), %rdx
+ andq %rsi, %rdx
+ movq 16(%r8), %rcx
+ andq %rsi, %rcx
+ movq 8(%r8), %rax
+ andq %rsi, %rax
+ andq (%r8), %rsi
+ addq %r13, %rsi
+ adcq %r12, %rax
+ movq %rsi, (%r9)
+ adcq %r15, %rcx
+ movq %rax, 8(%r9)
+ movq %rcx, 16(%r9)
+ adcq %r11, %rdx
+ movq %rdx, 24(%r9)
+ adcq -24(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 32(%r9)
+ adcq -16(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 40(%r9)
+ adcq -8(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, 48(%r9)
+ adcq %r14, %r10
+ movq %r10, 56(%r9)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end123:
+ .size mcl_fp_subNF8Lbmi2, .Lfunc_end123-mcl_fp_subNF8Lbmi2
+
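+# mcl_fpDbl_add8Lbmi2: addition of two 16-limb (double-width) values. The low
+# eight limbs are stored to %rdi as-is; the high half is conditionally reduced
+# by p (%rcx) before being stored to limbs 8..15.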
+ .globl mcl_fpDbl_add8Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add8Lbmi2,@function
+mcl_fpDbl_add8Lbmi2: # @mcl_fpDbl_add8Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq 120(%rdx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq 112(%rdx), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq 104(%rdx), %rax
+ movq %rax, -32(%rsp) # 8-byte Spill
+ movq 96(%rdx), %r14
+ movq 24(%rsi), %r15
+ movq 32(%rsi), %r11
+ movq 16(%rdx), %r12
+ movq (%rdx), %rbx
+ movq 8(%rdx), %rax
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %rax
+ adcq 16(%rsi), %r12
+ adcq 24(%rdx), %r15
+ adcq 32(%rdx), %r11
+ movq 88(%rdx), %rbp
+ movq 80(%rdx), %r13
+ movq %rbx, (%rdi)
+ movq 72(%rdx), %r10
+ movq %rax, 8(%rdi)
+ movq 64(%rdx), %r9
+ movq %r12, 16(%rdi)
+ movq 40(%rdx), %r12
+ movq %r15, 24(%rdi)
+ movq 40(%rsi), %rbx
+ adcq %r12, %rbx
+ movq 56(%rdx), %r15
+ movq 48(%rdx), %r12
+ movq %r11, 32(%rdi)
+ movq 48(%rsi), %rdx
+ adcq %r12, %rdx
+ movq 120(%rsi), %r12
+ movq %rbx, 40(%rdi)
+ movq 56(%rsi), %rax
+ adcq %r15, %rax
+ movq 112(%rsi), %rcx
+ movq %rdx, 48(%rdi)
+ movq 64(%rsi), %rbx
+ adcq %r9, %rbx
+ movq 104(%rsi), %rdx
+ movq %rax, 56(%rdi)
+ movq 72(%rsi), %r9
+ adcq %r10, %r9
+ movq 80(%rsi), %r11
+ adcq %r13, %r11
+ movq 96(%rsi), %rax
+ movq 88(%rsi), %r15
+ adcq %rbp, %r15
+ adcq %r14, %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq %rdx, %rax
+ adcq -32(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -32(%rsp) # 8-byte Spill
+ adcq -24(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -24(%rsp) # 8-byte Spill
+ adcq -8(%rsp), %r12 # 8-byte Folded Reload
+ movq %r12, -8(%rsp) # 8-byte Spill
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ movq %rbx, %rsi
+ subq (%r8), %rsi
+ movq %r9, %rdx
+ sbbq 8(%r8), %rdx
+ movq %r11, %r10
+ sbbq 16(%r8), %r10
+ movq %r15, %r14
+ sbbq 24(%r8), %r14
+ movq -16(%rsp), %r13 # 8-byte Reload
+ sbbq 32(%r8), %r13
+ movq %rax, %r12
+ sbbq 40(%r8), %r12
+ movq %rcx, %rax
+ sbbq 48(%r8), %rax
+ movq -8(%rsp), %rcx # 8-byte Reload
+ sbbq 56(%r8), %rcx
+ sbbq $0, %rbp
+ andl $1, %ebp
+ cmovneq %rbx, %rsi
+ movq %rsi, 64(%rdi)
+ testb %bpl, %bpl
+ cmovneq %r9, %rdx
+ movq %rdx, 72(%rdi)
+ cmovneq %r11, %r10
+ movq %r10, 80(%rdi)
+ cmovneq %r15, %r14
+ movq %r14, 88(%rdi)
+ cmovneq -16(%rsp), %r13 # 8-byte Folded Reload
+ movq %r13, 96(%rdi)
+ cmovneq -32(%rsp), %r12 # 8-byte Folded Reload
+ movq %r12, 104(%rdi)
+ cmovneq -24(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 112(%rdi)
+ cmovneq -8(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 120(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end124:
+ .size mcl_fpDbl_add8Lbmi2, .Lfunc_end124-mcl_fpDbl_add8Lbmi2
+
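+# mcl_fpDbl_sub8Lbmi2: subtraction of two 16-limb values. The low half is
+# stored directly; the final borrow selects, via cmove against zero, whether
+# p (%rcx at entry) is added back to the high half before it is stored.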
+ .globl mcl_fpDbl_sub8Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub8Lbmi2,@function
+mcl_fpDbl_sub8Lbmi2: # @mcl_fpDbl_sub8Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r15
+ movq 120(%rdx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq 112(%rdx), %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq 104(%rdx), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq 16(%rsi), %r9
+ movq (%rsi), %r12
+ movq 8(%rsi), %r14
+ xorl %r8d, %r8d
+ subq (%rdx), %r12
+ sbbq 8(%rdx), %r14
+ sbbq 16(%rdx), %r9
+ movq 24(%rsi), %rbx
+ sbbq 24(%rdx), %rbx
+ movq 32(%rsi), %r13
+ sbbq 32(%rdx), %r13
+ movq 96(%rdx), %rbp
+ movq 88(%rdx), %r11
+ movq %r12, (%rdi)
+ movq 80(%rdx), %r12
+ movq %r14, 8(%rdi)
+ movq 72(%rdx), %r10
+ movq %r9, 16(%rdi)
+ movq 40(%rdx), %r9
+ movq %rbx, 24(%rdi)
+ movq 40(%rsi), %rbx
+ sbbq %r9, %rbx
+ movq 48(%rdx), %r9
+ movq %r13, 32(%rdi)
+ movq 48(%rsi), %r14
+ sbbq %r9, %r14
+ movq 64(%rdx), %r13
+ movq 56(%rdx), %r9
+ movq %rbx, 40(%rdi)
+ movq 56(%rsi), %rdx
+ sbbq %r9, %rdx
+ movq 120(%rsi), %rcx
+ movq %r14, 48(%rdi)
+ movq 64(%rsi), %rbx
+ sbbq %r13, %rbx
+ movq 112(%rsi), %rax
+ movq %rdx, 56(%rdi)
+ movq 72(%rsi), %r9
+ sbbq %r10, %r9
+ movq 80(%rsi), %r13
+ sbbq %r12, %r13
+ movq 88(%rsi), %r12
+ sbbq %r11, %r12
+ movq 104(%rsi), %rdx
+ movq 96(%rsi), %r14
+ sbbq %rbp, %r14
+ sbbq -24(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ sbbq -16(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -16(%rsp) # 8-byte Spill
+ sbbq -8(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -8(%rsp) # 8-byte Spill
+ movl $0, %ebp
+ sbbq $0, %rbp
+ andl $1, %ebp
+ movq (%r15), %r11
+ cmoveq %r8, %r11
+ testb %bpl, %bpl
+ movq 16(%r15), %rbp
+ cmoveq %r8, %rbp
+ movq 8(%r15), %rsi
+ cmoveq %r8, %rsi
+ movq 56(%r15), %r10
+ cmoveq %r8, %r10
+ movq 48(%r15), %rdx
+ cmoveq %r8, %rdx
+ movq 40(%r15), %rcx
+ cmoveq %r8, %rcx
+ movq 32(%r15), %rax
+ cmoveq %r8, %rax
+ cmovneq 24(%r15), %r8
+ addq %rbx, %r11
+ adcq %r9, %rsi
+ movq %r11, 64(%rdi)
+ adcq %r13, %rbp
+ movq %rsi, 72(%rdi)
+ movq %rbp, 80(%rdi)
+ adcq %r12, %r8
+ movq %r8, 88(%rdi)
+ adcq %r14, %rax
+ movq %rax, 96(%rdi)
+ adcq -24(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 104(%rdi)
+ adcq -16(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 112(%rdi)
+ adcq -8(%rsp), %r10 # 8-byte Folded Reload
+ movq %r10, 120(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end125:
+ .size mcl_fpDbl_sub8Lbmi2, .Lfunc_end125-mcl_fpDbl_sub8Lbmi2
+
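+# .LmulPv576x64: local helper that multiplies the 9-limb (576-bit) operand at
+# %rsi by the 64-bit scalar in %rdx using mulx, writing the 10-limb product to
+# %rdi and returning %rdi in %rax. The 9-limb routines below are built on it.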
+ .align 16, 0x90
+ .type .LmulPv576x64,@function
+.LmulPv576x64: # @mulPv576x64
+# BB#0:
+ mulxq (%rsi), %rcx, %rax
+ movq %rcx, (%rdi)
+ mulxq 8(%rsi), %rcx, %r8
+ addq %rax, %rcx
+ movq %rcx, 8(%rdi)
+ mulxq 16(%rsi), %rcx, %r9
+ adcq %r8, %rcx
+ movq %rcx, 16(%rdi)
+ mulxq 24(%rsi), %rax, %rcx
+ adcq %r9, %rax
+ movq %rax, 24(%rdi)
+ mulxq 32(%rsi), %rax, %r8
+ adcq %rcx, %rax
+ movq %rax, 32(%rdi)
+ mulxq 40(%rsi), %rcx, %r9
+ adcq %r8, %rcx
+ movq %rcx, 40(%rdi)
+ mulxq 48(%rsi), %rax, %rcx
+ adcq %r9, %rax
+ movq %rax, 48(%rdi)
+ mulxq 56(%rsi), %rax, %r8
+ adcq %rcx, %rax
+ movq %rax, 56(%rdi)
+ mulxq 64(%rsi), %rax, %rcx
+ adcq %r8, %rax
+ movq %rax, 64(%rdi)
+ adcq $0, %rcx
+ movq %rcx, 72(%rdi)
+ movq %rdi, %rax
+ retq
+.Lfunc_end126:
+ .size .LmulPv576x64, .Lfunc_end126-.LmulPv576x64
+
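+# mcl_fp_mulUnitPre9Lbmi2: multiplies a 9-limb value (%rsi) by the 64-bit unit
+# in %rdx via .LmulPv576x64 into a stack buffer, then copies the 10-limb
+# result out to %rdi.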
+ .globl mcl_fp_mulUnitPre9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre9Lbmi2,@function
+mcl_fp_mulUnitPre9Lbmi2: # @mcl_fp_mulUnitPre9Lbmi2
+# BB#0:
+ pushq %r14
+ pushq %rbx
+ subq $88, %rsp
+ movq %rdi, %rbx
+ leaq 8(%rsp), %rdi
+ callq .LmulPv576x64
+ movq 80(%rsp), %r8
+ movq 72(%rsp), %r9
+ movq 64(%rsp), %r10
+ movq 56(%rsp), %r11
+ movq 48(%rsp), %r14
+ movq 40(%rsp), %rax
+ movq 32(%rsp), %rcx
+ movq 24(%rsp), %rdx
+ movq 8(%rsp), %rsi
+ movq 16(%rsp), %rdi
+ movq %rsi, (%rbx)
+ movq %rdi, 8(%rbx)
+ movq %rdx, 16(%rbx)
+ movq %rcx, 24(%rbx)
+ movq %rax, 32(%rbx)
+ movq %r14, 40(%rbx)
+ movq %r11, 48(%rbx)
+ movq %r10, 56(%rbx)
+ movq %r9, 64(%rbx)
+ movq %r8, 72(%rbx)
+ addq $88, %rsp
+ popq %rbx
+ popq %r14
+ retq
+.Lfunc_end127:
+ .size mcl_fp_mulUnitPre9Lbmi2, .Lfunc_end127-mcl_fp_mulUnitPre9Lbmi2
+
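+# mcl_fpDbl_mulPre9Lbmi2: schoolbook product of two 9-limb operands (%rsi and
+# %rdx) into an 18-limb result at %rdi: one .LmulPv576x64 call per limb of the
+# second operand, with each partial product accumulated into the running sum.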
+ .globl mcl_fpDbl_mulPre9Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre9Lbmi2,@function
+mcl_fpDbl_mulPre9Lbmi2: # @mcl_fpDbl_mulPre9Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $808, %rsp # imm = 0x328
+ movq %rdx, %rax
+ movq %rax, 64(%rsp) # 8-byte Spill
+ movq %rsi, 72(%rsp) # 8-byte Spill
+ movq %rdi, %r12
+ movq %r12, 80(%rsp) # 8-byte Spill
+ movq (%rax), %rdx
+ movq %rax, %rbx
+ leaq 728(%rsp), %rdi
+ movq %rsi, %rbp
+ callq .LmulPv576x64
+ movq 800(%rsp), %r13
+ movq 792(%rsp), %rax
+ movq %rax, 56(%rsp) # 8-byte Spill
+ movq 784(%rsp), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ movq 776(%rsp), %rax
+ movq %rax, 40(%rsp) # 8-byte Spill
+ movq 768(%rsp), %rax
+ movq %rax, 32(%rsp) # 8-byte Spill
+ movq 760(%rsp), %rax
+ movq %rax, 24(%rsp) # 8-byte Spill
+ movq 752(%rsp), %rax
+ movq %rax, 16(%rsp) # 8-byte Spill
+ movq 744(%rsp), %rax
+ movq %rax, 8(%rsp) # 8-byte Spill
+ movq 728(%rsp), %rax
+ movq 736(%rsp), %r14
+ movq %rax, (%r12)
+ movq 8(%rbx), %rdx
+ leaq 648(%rsp), %rdi
+ movq %rbp, %rsi
+ callq .LmulPv576x64
+ movq 720(%rsp), %r8
+ movq 712(%rsp), %rcx
+ movq 704(%rsp), %rdx
+ movq 696(%rsp), %rsi
+ movq 688(%rsp), %rdi
+ movq 680(%rsp), %rbp
+ addq 648(%rsp), %r14
+ movq 672(%rsp), %rax
+ movq 656(%rsp), %rbx
+ movq 664(%rsp), %r15
+ movq %r14, 8(%r12)
+ adcq 8(%rsp), %rbx # 8-byte Folded Reload
+ adcq 16(%rsp), %r15 # 8-byte Folded Reload
+ adcq 24(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, %r14
+ adcq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 16(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 24(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 32(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 40(%rsp) # 8-byte Spill
+ adcq %r13, %rcx
+ movq %rcx, 48(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 56(%rsp) # 8-byte Spill
+ movq 64(%rsp), %r13 # 8-byte Reload
+ movq 16(%r13), %rdx
+ leaq 568(%rsp), %rdi
+ movq 72(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 640(%rsp), %r8
+ movq 632(%rsp), %r9
+ movq 624(%rsp), %r10
+ movq 616(%rsp), %rdi
+ movq 608(%rsp), %rbp
+ movq 600(%rsp), %rcx
+ addq 568(%rsp), %rbx
+ movq 592(%rsp), %rdx
+ movq 576(%rsp), %r12
+ movq 584(%rsp), %rsi
+ movq 80(%rsp), %rax # 8-byte Reload
+ movq %rbx, 16(%rax)
+ adcq %r15, %r12
+ adcq %r14, %rsi
+ movq %rsi, (%rsp) # 8-byte Spill
+ adcq 16(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 8(%rsp) # 8-byte Spill
+ adcq 24(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %r10 # 8-byte Folded Reload
+ movq %r10, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, 48(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 56(%rsp) # 8-byte Spill
+ movq 24(%r13), %rdx
+ leaq 488(%rsp), %rdi
+ movq 72(%rsp), %r15 # 8-byte Reload
+ movq %r15, %rsi
+ callq .LmulPv576x64
+ movq 560(%rsp), %r8
+ movq 552(%rsp), %rcx
+ movq 544(%rsp), %rdx
+ movq 536(%rsp), %rsi
+ movq 528(%rsp), %rdi
+ movq 520(%rsp), %rbp
+ addq 488(%rsp), %r12
+ movq 512(%rsp), %rax
+ movq 496(%rsp), %rbx
+ movq 504(%rsp), %r13
+ movq 80(%rsp), %r14 # 8-byte Reload
+ movq %r12, 24(%r14)
+ adcq (%rsp), %rbx # 8-byte Folded Reload
+ adcq 8(%rsp), %r13 # 8-byte Folded Reload
+ adcq 16(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 8(%rsp) # 8-byte Spill
+ adcq 24(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 48(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 56(%rsp) # 8-byte Spill
+ movq 64(%rsp), %r12 # 8-byte Reload
+ movq 32(%r12), %rdx
+ leaq 408(%rsp), %rdi
+ movq %r15, %rsi
+ callq .LmulPv576x64
+ movq 480(%rsp), %r8
+ movq 472(%rsp), %r9
+ movq 464(%rsp), %rdx
+ movq 456(%rsp), %rsi
+ movq 448(%rsp), %rdi
+ movq 440(%rsp), %rbp
+ addq 408(%rsp), %rbx
+ movq 432(%rsp), %rax
+ movq 416(%rsp), %r15
+ movq 424(%rsp), %rcx
+ movq %rbx, 32(%r14)
+ adcq %r13, %r15
+ adcq 8(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, (%rsp) # 8-byte Spill
+ adcq 16(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 8(%rsp) # 8-byte Spill
+ adcq 24(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, 48(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 56(%rsp) # 8-byte Spill
+ movq %r12, %r14
+ movq 40(%r14), %rdx
+ leaq 328(%rsp), %rdi
+ movq 72(%rsp), %r13 # 8-byte Reload
+ movq %r13, %rsi
+ callq .LmulPv576x64
+ movq 400(%rsp), %r8
+ movq 392(%rsp), %r9
+ movq 384(%rsp), %rsi
+ movq 376(%rsp), %rdi
+ movq 368(%rsp), %rbx
+ movq 360(%rsp), %rbp
+ addq 328(%rsp), %r15
+ movq 352(%rsp), %rcx
+ movq 336(%rsp), %r12
+ movq 344(%rsp), %rdx
+ movq 80(%rsp), %rax # 8-byte Reload
+ movq %r15, 40(%rax)
+ adcq (%rsp), %r12 # 8-byte Folded Reload
+ adcq 8(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, (%rsp) # 8-byte Spill
+ adcq 16(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 8(%rsp) # 8-byte Spill
+ adcq 24(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, 48(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 56(%rsp) # 8-byte Spill
+ movq 48(%r14), %rdx
+ leaq 248(%rsp), %rdi
+ movq %r13, %rsi
+ movq %r13, %r15
+ callq .LmulPv576x64
+ movq 320(%rsp), %r8
+ movq 312(%rsp), %r9
+ movq 304(%rsp), %rsi
+ movq 296(%rsp), %rdi
+ movq 288(%rsp), %rbx
+ movq 280(%rsp), %rbp
+ addq 248(%rsp), %r12
+ movq 272(%rsp), %rcx
+ movq 256(%rsp), %r13
+ movq 264(%rsp), %rdx
+ movq 80(%rsp), %rax # 8-byte Reload
+ movq %r12, 48(%rax)
+ adcq (%rsp), %r13 # 8-byte Folded Reload
+ adcq 8(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, (%rsp) # 8-byte Spill
+ adcq 16(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 8(%rsp) # 8-byte Spill
+ adcq 24(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, 48(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 56(%rsp) # 8-byte Spill
+ movq 56(%r14), %rdx
+ leaq 168(%rsp), %rdi
+ movq %r15, %rsi
+ callq .LmulPv576x64
+ movq 240(%rsp), %rcx
+ movq 232(%rsp), %rdx
+ movq 224(%rsp), %rsi
+ movq 216(%rsp), %rdi
+ movq 208(%rsp), %rbx
+ addq 168(%rsp), %r13
+ movq 200(%rsp), %r12
+ movq 192(%rsp), %rbp
+ movq 176(%rsp), %r14
+ movq 184(%rsp), %r15
+ movq 80(%rsp), %rax # 8-byte Reload
+ movq %r13, 56(%rax)
+ adcq (%rsp), %r14 # 8-byte Folded Reload
+ adcq 8(%rsp), %r15 # 8-byte Folded Reload
+ adcq 16(%rsp), %rbp # 8-byte Folded Reload
+ adcq 24(%rsp), %r12 # 8-byte Folded Reload
+ adcq 32(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, %r13
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 48(%rsp) # 8-byte Spill
+ adcq $0, %rcx
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ movq 64(%rsp), %rax # 8-byte Reload
+ movq 64(%rax), %rdx
+ leaq 88(%rsp), %rdi
+ movq 72(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 88(%rsp), %r14
+ adcq 96(%rsp), %r15
+ movq 160(%rsp), %r8
+ adcq 104(%rsp), %rbp
+ movq 152(%rsp), %r9
+ movq 144(%rsp), %rdx
+ movq 136(%rsp), %rsi
+ movq 128(%rsp), %rdi
+ movq 120(%rsp), %rbx
+ movq 112(%rsp), %rax
+ movq 80(%rsp), %rcx # 8-byte Reload
+ movq %r14, 64(%rcx)
+ movq %r15, 72(%rcx)
+ adcq %r12, %rax
+ movq %rbp, 80(%rcx)
+ movq %rax, 88(%rcx)
+ adcq %r13, %rbx
+ movq %rbx, 96(%rcx)
+ adcq 32(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 104(%rcx)
+ adcq 40(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 112(%rcx)
+ adcq 48(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 120(%rcx)
+ adcq 56(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, 128(%rcx)
+ adcq $0, %r8
+ movq %r8, 136(%rcx)
+ addq $808, %rsp # imm = 0x328
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end128:
+ .size mcl_fpDbl_mulPre9Lbmi2, .Lfunc_end128-mcl_fpDbl_mulPre9Lbmi2
+
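+# mcl_fpDbl_sqrPre9Lbmi2: 18-limb square of the 9-limb operand at %rsi, using
+# the same per-limb .LmulPv576x64 accumulation as mulPre9 with both
+# multiplicands equal (no dedicated squaring shortcut is taken here).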
+ .globl mcl_fpDbl_sqrPre9Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre9Lbmi2,@function
+mcl_fpDbl_sqrPre9Lbmi2: # @mcl_fpDbl_sqrPre9Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $808, %rsp # imm = 0x328
+ movq %rsi, %r15
+ movq %r15, 80(%rsp) # 8-byte Spill
+ movq %rdi, %r14
+ movq %r14, 72(%rsp) # 8-byte Spill
+ movq (%r15), %rdx
+ leaq 728(%rsp), %rdi
+ callq .LmulPv576x64
+ movq 800(%rsp), %rax
+ movq %rax, 64(%rsp) # 8-byte Spill
+ movq 792(%rsp), %rax
+ movq %rax, 56(%rsp) # 8-byte Spill
+ movq 784(%rsp), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ movq 776(%rsp), %rax
+ movq %rax, 40(%rsp) # 8-byte Spill
+ movq 768(%rsp), %rax
+ movq %rax, 32(%rsp) # 8-byte Spill
+ movq 760(%rsp), %rax
+ movq %rax, 24(%rsp) # 8-byte Spill
+ movq 752(%rsp), %rax
+ movq %rax, 16(%rsp) # 8-byte Spill
+ movq 744(%rsp), %rax
+ movq %rax, 8(%rsp) # 8-byte Spill
+ movq 728(%rsp), %rax
+ movq 736(%rsp), %r12
+ movq %rax, (%r14)
+ movq 8(%r15), %rdx
+ leaq 648(%rsp), %rdi
+ movq %r15, %rsi
+ callq .LmulPv576x64
+ movq 720(%rsp), %r8
+ movq 712(%rsp), %rcx
+ movq 704(%rsp), %rdx
+ movq 696(%rsp), %rsi
+ movq 688(%rsp), %rdi
+ movq 680(%rsp), %rbp
+ addq 648(%rsp), %r12
+ movq 672(%rsp), %rax
+ movq 656(%rsp), %rbx
+ movq 664(%rsp), %r13
+ movq %r12, 8(%r14)
+ adcq 8(%rsp), %rbx # 8-byte Folded Reload
+ adcq 16(%rsp), %r13 # 8-byte Folded Reload
+ adcq 24(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 48(%rsp) # 8-byte Spill
+ adcq 64(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 64(%rsp) # 8-byte Spill
+ movq 16(%r15), %rdx
+ leaq 568(%rsp), %rdi
+ movq %r15, %rsi
+ callq .LmulPv576x64
+ movq 640(%rsp), %r8
+ movq 632(%rsp), %rcx
+ movq 624(%rsp), %rdx
+ movq 616(%rsp), %rsi
+ movq 608(%rsp), %rdi
+ movq 600(%rsp), %rbp
+ addq 568(%rsp), %rbx
+ movq 592(%rsp), %rax
+ movq 576(%rsp), %r14
+ movq 584(%rsp), %r12
+ movq 72(%rsp), %r15 # 8-byte Reload
+ movq %rbx, 16(%r15)
+ adcq %r13, %r14
+ adcq 16(%rsp), %r12 # 8-byte Folded Reload
+ adcq 24(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 48(%rsp) # 8-byte Spill
+ adcq 64(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 64(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rsi # 8-byte Reload
+ movq 24(%rsi), %rdx
+ leaq 488(%rsp), %rdi
+ callq .LmulPv576x64
+ movq 560(%rsp), %r8
+ movq 552(%rsp), %rcx
+ movq 544(%rsp), %rdx
+ movq 536(%rsp), %rsi
+ movq 528(%rsp), %rdi
+ movq 520(%rsp), %rbp
+ addq 488(%rsp), %r14
+ movq 512(%rsp), %rax
+ movq 496(%rsp), %rbx
+ movq 504(%rsp), %r13
+ movq %r14, 24(%r15)
+ adcq %r12, %rbx
+ adcq 16(%rsp), %r13 # 8-byte Folded Reload
+ adcq 24(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 48(%rsp) # 8-byte Spill
+ adcq 64(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 64(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rsi # 8-byte Reload
+ movq 32(%rsi), %rdx
+ leaq 408(%rsp), %rdi
+ callq .LmulPv576x64
+ movq 480(%rsp), %r8
+ movq 472(%rsp), %rcx
+ movq 464(%rsp), %rdx
+ movq 456(%rsp), %rsi
+ movq 448(%rsp), %rdi
+ movq 440(%rsp), %rbp
+ addq 408(%rsp), %rbx
+ movq 432(%rsp), %rax
+ movq 416(%rsp), %r14
+ movq 424(%rsp), %r12
+ movq %rbx, 32(%r15)
+ adcq %r13, %r14
+ adcq 16(%rsp), %r12 # 8-byte Folded Reload
+ adcq 24(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 48(%rsp) # 8-byte Spill
+ adcq 64(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 64(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rsi # 8-byte Reload
+ movq 40(%rsi), %rdx
+ leaq 328(%rsp), %rdi
+ callq .LmulPv576x64
+ movq 400(%rsp), %r8
+ movq 392(%rsp), %rcx
+ movq 384(%rsp), %rdx
+ movq 376(%rsp), %rsi
+ movq 368(%rsp), %rdi
+ movq 360(%rsp), %rbp
+ addq 328(%rsp), %r14
+ movq 352(%rsp), %rax
+ movq 336(%rsp), %rbx
+ movq 344(%rsp), %r13
+ movq %r14, 40(%r15)
+ adcq %r12, %rbx
+ adcq 16(%rsp), %r13 # 8-byte Folded Reload
+ adcq 24(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 48(%rsp) # 8-byte Spill
+ adcq 64(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 64(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rsi # 8-byte Reload
+ movq 48(%rsi), %rdx
+ leaq 248(%rsp), %rdi
+ callq .LmulPv576x64
+ movq 320(%rsp), %r8
+ movq 312(%rsp), %rcx
+ movq 304(%rsp), %rdx
+ movq 296(%rsp), %rsi
+ movq 288(%rsp), %rdi
+ movq 280(%rsp), %rbp
+ addq 248(%rsp), %rbx
+ movq 272(%rsp), %rax
+ movq 256(%rsp), %r12
+ movq 264(%rsp), %r14
+ movq %rbx, 48(%r15)
+ adcq %r13, %r12
+ adcq 16(%rsp), %r14 # 8-byte Folded Reload
+ adcq 24(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 48(%rsp) # 8-byte Spill
+ adcq 64(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 64(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rsi # 8-byte Reload
+ movq 56(%rsi), %rdx
+ leaq 168(%rsp), %rdi
+ callq .LmulPv576x64
+ movq 240(%rsp), %r8
+ movq 232(%rsp), %rdx
+ movq 224(%rsp), %rsi
+ movq 216(%rsp), %rdi
+ movq 208(%rsp), %rbx
+ movq 200(%rsp), %rcx
+ addq 168(%rsp), %r12
+ movq 192(%rsp), %r15
+ movq 176(%rsp), %r13
+ movq 184(%rsp), %rbp
+ movq 72(%rsp), %rax # 8-byte Reload
+ movq %r12, 56(%rax)
+ adcq %r14, %r13
+ adcq 16(%rsp), %rbp # 8-byte Folded Reload
+ adcq 24(%rsp), %r15 # 8-byte Folded Reload
+ adcq 32(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, %r12
+ adcq 40(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, %r14
+ adcq 48(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 48(%rsp) # 8-byte Spill
+ adcq 64(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 56(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 64(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rsi # 8-byte Reload
+ movq 64(%rsi), %rdx
+ leaq 88(%rsp), %rdi
+ callq .LmulPv576x64
+ addq 88(%rsp), %r13
+ adcq 96(%rsp), %rbp
+ movq 160(%rsp), %r8
+ adcq 104(%rsp), %r15
+ movq 152(%rsp), %r9
+ movq 144(%rsp), %rdx
+ movq 136(%rsp), %rsi
+ movq 128(%rsp), %rdi
+ movq 120(%rsp), %rbx
+ movq 112(%rsp), %rax
+ movq 72(%rsp), %rcx # 8-byte Reload
+ movq %r13, 64(%rcx)
+ movq %rbp, 72(%rcx)
+ adcq %r12, %rax
+ movq %r15, 80(%rcx)
+ movq %rax, 88(%rcx)
+ adcq %r14, %rbx
+ movq %rbx, 96(%rcx)
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 104(%rcx)
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 112(%rcx)
+ adcq 56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 120(%rcx)
+ adcq 64(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, 128(%rcx)
+ adcq $0, %r8
+ movq %r8, 136(%rcx)
+ addq $808, %rsp # imm = 0x328
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end129:
+ .size mcl_fpDbl_sqrPre9Lbmi2, .Lfunc_end129-mcl_fpDbl_sqrPre9Lbmi2
+
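+# Per the mcl naming convention, mcl_fp_mont9Lbmi2 is the 9-limb Montgomery
+# multiplication z = x*y*R^-1 mod p (z=%rdi, x=%rsi, y=%rdx, p=%rcx). Each limb
+# of y is multiplied in via .LmulPv576x64, then the running sum is reduced with
+# a second call using the constant at p[-1]; a final conditional subtraction of
+# p completes the result.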
+ .globl mcl_fp_mont9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont9Lbmi2,@function
+mcl_fp_mont9Lbmi2: # @mcl_fp_mont9Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $1560, %rsp # imm = 0x618
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ movq %rdx, 32(%rsp) # 8-byte Spill
+ movq %rsi, 24(%rsp) # 8-byte Spill
+ movq %rdi, (%rsp) # 8-byte Spill
+ movq -8(%rcx), %rbx
+ movq %rbx, 16(%rsp) # 8-byte Spill
+ movq (%rdx), %rdx
+ leaq 1480(%rsp), %rdi
+ callq .LmulPv576x64
+ movq 1480(%rsp), %r14
+ movq 1488(%rsp), %r15
+ movq %r14, %rdx
+ imulq %rbx, %rdx
+ movq 1552(%rsp), %rax
+ movq %rax, 112(%rsp) # 8-byte Spill
+ movq 1544(%rsp), %rax
+ movq %rax, 104(%rsp) # 8-byte Spill
+ movq 1536(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ movq 1528(%rsp), %r12
+ movq 1520(%rsp), %r13
+ movq 1512(%rsp), %rbx
+ movq 1504(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq 1496(%rsp), %rbp
+ leaq 1400(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 1400(%rsp), %r14
+ adcq 1408(%rsp), %r15
+ adcq 1416(%rsp), %rbp
+ movq %rbp, 96(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rax # 8-byte Reload
+ adcq 1424(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ adcq 1432(%rsp), %rbx
+ movq %rbx, 40(%rsp) # 8-byte Spill
+ adcq 1440(%rsp), %r13
+ movq %r13, 64(%rsp) # 8-byte Spill
+ adcq 1448(%rsp), %r12
+ movq %r12, 48(%rsp) # 8-byte Spill
+ movq 88(%rsp), %rbx # 8-byte Reload
+ adcq 1456(%rsp), %rbx
+ movq 104(%rsp), %r14 # 8-byte Reload
+ adcq 1464(%rsp), %r14
+ movq 112(%rsp), %r13 # 8-byte Reload
+ adcq 1472(%rsp), %r13
+ sbbq %rbp, %rbp
+ movq 32(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rdx
+ leaq 1320(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ andl $1, %ebp
+ addq 1320(%rsp), %r15
+ movq 96(%rsp), %rax # 8-byte Reload
+ adcq 1328(%rsp), %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rax # 8-byte Reload
+ adcq 1336(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq 40(%rsp), %r12 # 8-byte Reload
+ adcq 1344(%rsp), %r12
+ movq 64(%rsp), %rax # 8-byte Reload
+ adcq 1352(%rsp), %rax
+ movq %rax, 64(%rsp) # 8-byte Spill
+ movq 48(%rsp), %rax # 8-byte Reload
+ adcq 1360(%rsp), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ adcq 1368(%rsp), %rbx
+ adcq 1376(%rsp), %r14
+ movq %r14, 104(%rsp) # 8-byte Spill
+ adcq 1384(%rsp), %r13
+ movq %r13, 112(%rsp) # 8-byte Spill
+ adcq 1392(%rsp), %rbp
+ sbbq %r14, %r14
+ movq %r15, %rdx
+ imulq 16(%rsp), %rdx # 8-byte Folded Reload
+ leaq 1240(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq %r14, %rax
+ andl $1, %eax
+ addq 1240(%rsp), %r15
+ movq 96(%rsp), %rcx # 8-byte Reload
+ adcq 1248(%rsp), %rcx
+ movq %rcx, 96(%rsp) # 8-byte Spill
+ movq 80(%rsp), %r14 # 8-byte Reload
+ adcq 1256(%rsp), %r14
+ adcq 1264(%rsp), %r12
+ movq %r12, 40(%rsp) # 8-byte Spill
+ movq 64(%rsp), %r12 # 8-byte Reload
+ adcq 1272(%rsp), %r12
+ movq 48(%rsp), %r13 # 8-byte Reload
+ adcq 1280(%rsp), %r13
+ adcq 1288(%rsp), %rbx
+ movq %rbx, 88(%rsp) # 8-byte Spill
+ movq 104(%rsp), %r15 # 8-byte Reload
+ adcq 1296(%rsp), %r15
+ movq 112(%rsp), %rbx # 8-byte Reload
+ adcq 1304(%rsp), %rbx
+ adcq 1312(%rsp), %rbp
+ adcq $0, %rax
+ movq %rax, 72(%rsp) # 8-byte Spill
+ movq 32(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rdx
+ leaq 1160(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 96(%rsp), %rax # 8-byte Reload
+ addq 1160(%rsp), %rax
+ adcq 1168(%rsp), %r14
+ movq %r14, 80(%rsp) # 8-byte Spill
+ movq 40(%rsp), %r14 # 8-byte Reload
+ adcq 1176(%rsp), %r14
+ adcq 1184(%rsp), %r12
+ movq %r12, 64(%rsp) # 8-byte Spill
+ movq %r13, %r12
+ adcq 1192(%rsp), %r12
+ movq 88(%rsp), %rcx # 8-byte Reload
+ adcq 1200(%rsp), %rcx
+ movq %rcx, 88(%rsp) # 8-byte Spill
+ adcq 1208(%rsp), %r15
+ movq %r15, %r13
+ adcq 1216(%rsp), %rbx
+ movq %rbx, 112(%rsp) # 8-byte Spill
+ adcq 1224(%rsp), %rbp
+ movq 72(%rsp), %rcx # 8-byte Reload
+ adcq 1232(%rsp), %rcx
+ movq %rcx, 72(%rsp) # 8-byte Spill
+ sbbq %r15, %r15
+ movq %rax, %rdx
+ movq %rax, %rbx
+ imulq 16(%rsp), %rdx # 8-byte Folded Reload
+ leaq 1080(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq %r15, %rax
+ andl $1, %eax
+ addq 1080(%rsp), %rbx
+ movq 80(%rsp), %rcx # 8-byte Reload
+ adcq 1088(%rsp), %rcx
+ movq %rcx, 80(%rsp) # 8-byte Spill
+ movq %r14, %r15
+ adcq 1096(%rsp), %r15
+ movq 64(%rsp), %r14 # 8-byte Reload
+ adcq 1104(%rsp), %r14
+ movq %r12, %rbx
+ adcq 1112(%rsp), %rbx
+ movq 88(%rsp), %rcx # 8-byte Reload
+ adcq 1120(%rsp), %rcx
+ movq %rcx, 88(%rsp) # 8-byte Spill
+ adcq 1128(%rsp), %r13
+ movq %r13, 104(%rsp) # 8-byte Spill
+ movq 112(%rsp), %r13 # 8-byte Reload
+ adcq 1136(%rsp), %r13
+ adcq 1144(%rsp), %rbp
+ movq 72(%rsp), %r12 # 8-byte Reload
+ adcq 1152(%rsp), %r12
+ adcq $0, %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ movq 32(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rdx
+ leaq 1000(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 80(%rsp), %rax # 8-byte Reload
+ addq 1000(%rsp), %rax
+ adcq 1008(%rsp), %r15
+ movq %r15, 40(%rsp) # 8-byte Spill
+ adcq 1016(%rsp), %r14
+ movq %r14, %r15
+ adcq 1024(%rsp), %rbx
+ movq %rbx, 48(%rsp) # 8-byte Spill
+ movq 88(%rsp), %r14 # 8-byte Reload
+ adcq 1032(%rsp), %r14
+ movq 104(%rsp), %rcx # 8-byte Reload
+ adcq 1040(%rsp), %rcx
+ movq %rcx, 104(%rsp) # 8-byte Spill
+ adcq 1048(%rsp), %r13
+ movq %r13, 112(%rsp) # 8-byte Spill
+ adcq 1056(%rsp), %rbp
+ adcq 1064(%rsp), %r12
+ movq 96(%rsp), %rcx # 8-byte Reload
+ adcq 1072(%rsp), %rcx
+ movq %rcx, 96(%rsp) # 8-byte Spill
+ sbbq %rbx, %rbx
+ movq %rax, %rdx
+ movq %rax, %r13
+ imulq 16(%rsp), %rdx # 8-byte Folded Reload
+ leaq 920(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ andl $1, %ebx
+ movq %rbx, %rax
+ addq 920(%rsp), %r13
+ movq 40(%rsp), %rcx # 8-byte Reload
+ adcq 928(%rsp), %rcx
+ movq %rcx, 40(%rsp) # 8-byte Spill
+ adcq 936(%rsp), %r15
+ movq %r15, 64(%rsp) # 8-byte Spill
+ movq 48(%rsp), %r15 # 8-byte Reload
+ adcq 944(%rsp), %r15
+ movq %r14, %r13
+ adcq 952(%rsp), %r13
+ movq 104(%rsp), %r14 # 8-byte Reload
+ adcq 960(%rsp), %r14
+ movq 112(%rsp), %rbx # 8-byte Reload
+ adcq 968(%rsp), %rbx
+ adcq 976(%rsp), %rbp
+ adcq 984(%rsp), %r12
+ movq 96(%rsp), %rcx # 8-byte Reload
+ adcq 992(%rsp), %rcx
+ movq %rcx, 96(%rsp) # 8-byte Spill
+ adcq $0, %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq 32(%rsp), %rax # 8-byte Reload
+ movq 32(%rax), %rdx
+ leaq 840(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 40(%rsp), %rax # 8-byte Reload
+ addq 840(%rsp), %rax
+ movq 64(%rsp), %rcx # 8-byte Reload
+ adcq 848(%rsp), %rcx
+ movq %rcx, 64(%rsp) # 8-byte Spill
+ adcq 856(%rsp), %r15
+ adcq 864(%rsp), %r13
+ movq %r13, 88(%rsp) # 8-byte Spill
+ adcq 872(%rsp), %r14
+ movq %r14, 104(%rsp) # 8-byte Spill
+ adcq 880(%rsp), %rbx
+ movq %rbx, 112(%rsp) # 8-byte Spill
+ adcq 888(%rsp), %rbp
+ adcq 896(%rsp), %r12
+ movq 96(%rsp), %r13 # 8-byte Reload
+ adcq 904(%rsp), %r13
+ movq 80(%rsp), %rcx # 8-byte Reload
+ adcq 912(%rsp), %rcx
+ movq %rcx, 80(%rsp) # 8-byte Spill
+ sbbq %rbx, %rbx
+ movq %rax, %rdx
+ movq %rax, %r14
+ imulq 16(%rsp), %rdx # 8-byte Folded Reload
+ leaq 760(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ andl $1, %ebx
+ movq %rbx, %rax
+ addq 760(%rsp), %r14
+ movq 64(%rsp), %rcx # 8-byte Reload
+ adcq 768(%rsp), %rcx
+ movq %rcx, 64(%rsp) # 8-byte Spill
+ adcq 776(%rsp), %r15
+ movq 88(%rsp), %r14 # 8-byte Reload
+ adcq 784(%rsp), %r14
+ movq 104(%rsp), %rcx # 8-byte Reload
+ adcq 792(%rsp), %rcx
+ movq %rcx, 104(%rsp) # 8-byte Spill
+ movq 112(%rsp), %rcx # 8-byte Reload
+ adcq 800(%rsp), %rcx
+ movq %rcx, 112(%rsp) # 8-byte Spill
+ adcq 808(%rsp), %rbp
+ movq %r12, %rbx
+ adcq 816(%rsp), %rbx
+ movq %r13, %r12
+ adcq 824(%rsp), %r12
+ movq 80(%rsp), %r13 # 8-byte Reload
+ adcq 832(%rsp), %r13
+ adcq $0, %rax
+ movq %rax, 40(%rsp) # 8-byte Spill
+ movq 32(%rsp), %rax # 8-byte Reload
+ movq 40(%rax), %rdx
+ leaq 680(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 64(%rsp), %rax # 8-byte Reload
+ addq 680(%rsp), %rax
+ adcq 688(%rsp), %r15
+ movq %r15, 48(%rsp) # 8-byte Spill
+ adcq 696(%rsp), %r14
+ movq %r14, 88(%rsp) # 8-byte Spill
+ movq 104(%rsp), %rcx # 8-byte Reload
+ adcq 704(%rsp), %rcx
+ movq %rcx, 104(%rsp) # 8-byte Spill
+ movq 112(%rsp), %r15 # 8-byte Reload
+ adcq 712(%rsp), %r15
+ adcq 720(%rsp), %rbp
+ adcq 728(%rsp), %rbx
+ movq %rbx, 72(%rsp) # 8-byte Spill
+ adcq 736(%rsp), %r12
+ movq %r12, 96(%rsp) # 8-byte Spill
+ adcq 744(%rsp), %r13
+ movq %r13, 80(%rsp) # 8-byte Spill
+ movq 40(%rsp), %r13 # 8-byte Reload
+ adcq 752(%rsp), %r13
+ sbbq %r14, %r14
+ movq %rax, %rdx
+ movq %rax, %rbx
+ imulq 16(%rsp), %rdx # 8-byte Folded Reload
+ leaq 600(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ andl $1, %r14d
+ addq 600(%rsp), %rbx
+ movq 48(%rsp), %rax # 8-byte Reload
+ adcq 608(%rsp), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ movq 88(%rsp), %rax # 8-byte Reload
+ adcq 616(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ movq 104(%rsp), %rbx # 8-byte Reload
+ adcq 624(%rsp), %rbx
+ adcq 632(%rsp), %r15
+ movq %r15, 112(%rsp) # 8-byte Spill
+ adcq 640(%rsp), %rbp
+ movq 72(%rsp), %r12 # 8-byte Reload
+ adcq 648(%rsp), %r12
+ movq 96(%rsp), %rax # 8-byte Reload
+ adcq 656(%rsp), %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ movq 80(%rsp), %r15 # 8-byte Reload
+ adcq 664(%rsp), %r15
+ adcq 672(%rsp), %r13
+ adcq $0, %r14
+ movq %r14, 64(%rsp) # 8-byte Spill
+ movq 32(%rsp), %rax # 8-byte Reload
+ movq 48(%rax), %rdx
+ leaq 520(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 48(%rsp), %rax # 8-byte Reload
+ addq 520(%rsp), %rax
+ movq 88(%rsp), %r14 # 8-byte Reload
+ adcq 528(%rsp), %r14
+ adcq 536(%rsp), %rbx
+ movq %rbx, 104(%rsp) # 8-byte Spill
+ movq 112(%rsp), %rcx # 8-byte Reload
+ adcq 544(%rsp), %rcx
+ movq %rcx, 112(%rsp) # 8-byte Spill
+ adcq 552(%rsp), %rbp
+ adcq 560(%rsp), %r12
+ movq %r12, 72(%rsp) # 8-byte Spill
+ movq 96(%rsp), %r12 # 8-byte Reload
+ adcq 568(%rsp), %r12
+ adcq 576(%rsp), %r15
+ movq %r15, 80(%rsp) # 8-byte Spill
+ adcq 584(%rsp), %r13
+ movq %r13, 40(%rsp) # 8-byte Spill
+ movq 64(%rsp), %r15 # 8-byte Reload
+ adcq 592(%rsp), %r15
+ sbbq %rbx, %rbx
+ movq %rax, %rdx
+ movq %rax, %r13
+ imulq 16(%rsp), %rdx # 8-byte Folded Reload
+ leaq 440(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ andl $1, %ebx
+ movq %rbx, %rax
+ addq 440(%rsp), %r13
+ adcq 448(%rsp), %r14
+ movq %r14, 88(%rsp) # 8-byte Spill
+ movq 104(%rsp), %r14 # 8-byte Reload
+ adcq 456(%rsp), %r14
+ movq 112(%rsp), %rbx # 8-byte Reload
+ adcq 464(%rsp), %rbx
+ adcq 472(%rsp), %rbp
+ movq %rbp, 8(%rsp) # 8-byte Spill
+ movq 72(%rsp), %rcx # 8-byte Reload
+ adcq 480(%rsp), %rcx
+ movq %rcx, 72(%rsp) # 8-byte Spill
+ adcq 488(%rsp), %r12
+ movq %r12, 96(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rbp # 8-byte Reload
+ adcq 496(%rsp), %rbp
+ movq 40(%rsp), %r12 # 8-byte Reload
+ adcq 504(%rsp), %r12
+ adcq 512(%rsp), %r15
+ movq %r15, %r13
+ adcq $0, %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ movq 32(%rsp), %rax # 8-byte Reload
+ movq 56(%rax), %rdx
+ leaq 360(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 88(%rsp), %rax # 8-byte Reload
+ addq 360(%rsp), %rax
+ adcq 368(%rsp), %r14
+ adcq 376(%rsp), %rbx
+ movq %rbx, 112(%rsp) # 8-byte Spill
+ movq 8(%rsp), %rcx # 8-byte Reload
+ adcq 384(%rsp), %rcx
+ movq %rcx, 8(%rsp) # 8-byte Spill
+ movq 72(%rsp), %rbx # 8-byte Reload
+ adcq 392(%rsp), %rbx
+ movq 96(%rsp), %r15 # 8-byte Reload
+ adcq 400(%rsp), %r15
+ adcq 408(%rsp), %rbp
+ movq %rbp, 80(%rsp) # 8-byte Spill
+ adcq 416(%rsp), %r12
+ movq %r12, %rbp
+ adcq 424(%rsp), %r13
+ movq %r13, 64(%rsp) # 8-byte Spill
+ movq 48(%rsp), %rcx # 8-byte Reload
+ adcq 432(%rsp), %rcx
+ movq %rcx, 48(%rsp) # 8-byte Spill
+ sbbq %r13, %r13
+ movq %rax, %rdx
+ movq %rax, %r12
+ imulq 16(%rsp), %rdx # 8-byte Folded Reload
+ leaq 280(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ andl $1, %r13d
+ addq 280(%rsp), %r12
+ adcq 288(%rsp), %r14
+ movq %r14, 104(%rsp) # 8-byte Spill
+ movq 112(%rsp), %rax # 8-byte Reload
+ adcq 296(%rsp), %rax
+ movq %rax, 112(%rsp) # 8-byte Spill
+ movq 8(%rsp), %r14 # 8-byte Reload
+ adcq 304(%rsp), %r14
+ adcq 312(%rsp), %rbx
+ movq %rbx, 72(%rsp) # 8-byte Spill
+ adcq 320(%rsp), %r15
+ movq %r15, 96(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rbx # 8-byte Reload
+ adcq 328(%rsp), %rbx
+ adcq 336(%rsp), %rbp
+ movq %rbp, 40(%rsp) # 8-byte Spill
+ movq 64(%rsp), %r12 # 8-byte Reload
+ adcq 344(%rsp), %r12
+ movq 48(%rsp), %rbp # 8-byte Reload
+ adcq 352(%rsp), %rbp
+ adcq $0, %r13
+ movq 32(%rsp), %rax # 8-byte Reload
+ movq 64(%rax), %rdx
+ leaq 200(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 104(%rsp), %rax # 8-byte Reload
+ addq 200(%rsp), %rax
+ movq 112(%rsp), %r15 # 8-byte Reload
+ adcq 208(%rsp), %r15
+ adcq 216(%rsp), %r14
+ movq %r14, 8(%rsp) # 8-byte Spill
+ movq 72(%rsp), %r14 # 8-byte Reload
+ adcq 224(%rsp), %r14
+ movq 96(%rsp), %rcx # 8-byte Reload
+ adcq 232(%rsp), %rcx
+ movq %rcx, 96(%rsp) # 8-byte Spill
+ adcq 240(%rsp), %rbx
+ movq %rbx, 80(%rsp) # 8-byte Spill
+ movq 40(%rsp), %rcx # 8-byte Reload
+ adcq 248(%rsp), %rcx
+ movq %rcx, 40(%rsp) # 8-byte Spill
+ adcq 256(%rsp), %r12
+ movq %r12, 64(%rsp) # 8-byte Spill
+ adcq 264(%rsp), %rbp
+ movq %rbp, 48(%rsp) # 8-byte Spill
+ adcq 272(%rsp), %r13
+ sbbq %rbx, %rbx
+ movq 16(%rsp), %rdx # 8-byte Reload
+ imulq %rax, %rdx
+ movq %rax, %r12
+ leaq 120(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ andl $1, %ebx
+ addq 120(%rsp), %r12
+ adcq 128(%rsp), %r15
+ movq 8(%rsp), %rbp # 8-byte Reload
+ adcq 136(%rsp), %rbp
+ movq %r14, %rcx
+ adcq 144(%rsp), %rcx
+ movq %rcx, 72(%rsp) # 8-byte Spill
+ movq 96(%rsp), %r8 # 8-byte Reload
+ adcq 152(%rsp), %r8
+ movq %r8, 96(%rsp) # 8-byte Spill
+ movq 80(%rsp), %r9 # 8-byte Reload
+ adcq 160(%rsp), %r9
+ movq %r9, 80(%rsp) # 8-byte Spill
+ movq 40(%rsp), %r10 # 8-byte Reload
+ adcq 168(%rsp), %r10
+ movq %r10, 40(%rsp) # 8-byte Spill
+ movq 64(%rsp), %rdi # 8-byte Reload
+ adcq 176(%rsp), %rdi
+ movq %rdi, 64(%rsp) # 8-byte Spill
+ movq 48(%rsp), %r14 # 8-byte Reload
+ adcq 184(%rsp), %r14
+ adcq 192(%rsp), %r13
+ adcq $0, %rbx
+ movq %r15, %rsi
+ movq %r15, %r12
+ movq 56(%rsp), %rdx # 8-byte Reload
+ subq (%rdx), %rsi
+ movq %rbp, %rax
+ movq %rbp, %r15
+ sbbq 8(%rdx), %rax
+ movq %rcx, %rbp
+ sbbq 16(%rdx), %rbp
+ movq %r8, %rcx
+ sbbq 24(%rdx), %rcx
+ movq %r9, %r8
+ sbbq 32(%rdx), %r8
+ movq %r10, %r11
+ sbbq 40(%rdx), %r11
+ movq %rdi, %r10
+ sbbq 48(%rdx), %r10
+ movq %r14, %rdi
+ sbbq 56(%rdx), %rdi
+ movq %r13, %r9
+ sbbq 64(%rdx), %r9
+ sbbq $0, %rbx
+ andl $1, %ebx
+ cmovneq %r13, %r9
+ testb %bl, %bl
+ cmovneq %r12, %rsi
+ movq (%rsp), %rbx # 8-byte Reload
+ movq %rsi, (%rbx)
+ cmovneq %r15, %rax
+ movq %rax, 8(%rbx)
+ cmovneq 72(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 16(%rbx)
+ cmovneq 96(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 24(%rbx)
+ cmovneq 80(%rsp), %r8 # 8-byte Folded Reload
+ movq %r8, 32(%rbx)
+ cmovneq 40(%rsp), %r11 # 8-byte Folded Reload
+ movq %r11, 40(%rbx)
+ cmovneq 64(%rsp), %r10 # 8-byte Folded Reload
+ movq %r10, 48(%rbx)
+ cmovneq %r14, %rdi
+ movq %rdi, 56(%rbx)
+ movq %r9, 64(%rbx)
+ addq $1560, %rsp # imm = 0x618
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end130:
+ .size mcl_fp_mont9Lbmi2, .Lfunc_end130-mcl_fp_mont9Lbmi2
+
+ .globl mcl_fp_montNF9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF9Lbmi2,@function
+mcl_fp_montNF9Lbmi2: # @mcl_fp_montNF9Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $1560, %rsp # imm = 0x618
+ movq %rcx, 64(%rsp) # 8-byte Spill
+ movq %rdx, 16(%rsp) # 8-byte Spill
+ movq %rsi, 24(%rsp) # 8-byte Spill
+ movq %rdi, (%rsp) # 8-byte Spill
+ movq -8(%rcx), %rbx
+ movq %rbx, 32(%rsp) # 8-byte Spill
+ movq (%rdx), %rdx
+ leaq 1480(%rsp), %rdi
+ callq .LmulPv576x64
+ movq 1480(%rsp), %r12
+ movq 1488(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ movq %r12, %rdx
+ imulq %rbx, %rdx
+ movq 1552(%rsp), %rax
+ movq %rax, 112(%rsp) # 8-byte Spill
+ movq 1544(%rsp), %r13
+ movq 1536(%rsp), %rax
+ movq %rax, 72(%rsp) # 8-byte Spill
+ movq 1528(%rsp), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ movq 1520(%rsp), %r14
+ movq 1512(%rsp), %r15
+ movq 1504(%rsp), %rbx
+ movq 1496(%rsp), %rbp
+ leaq 1400(%rsp), %rdi
+ movq 64(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 1400(%rsp), %r12
+ movq 88(%rsp), %rax # 8-byte Reload
+ adcq 1408(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ adcq 1416(%rsp), %rbp
+ movq %rbp, 8(%rsp) # 8-byte Spill
+ adcq 1424(%rsp), %rbx
+ movq %rbx, 104(%rsp) # 8-byte Spill
+ adcq 1432(%rsp), %r15
+ movq %r15, 56(%rsp) # 8-byte Spill
+ adcq 1440(%rsp), %r14
+ movq %r14, 40(%rsp) # 8-byte Spill
+ movq 48(%rsp), %rbx # 8-byte Reload
+ adcq 1448(%rsp), %rbx
+ movq 72(%rsp), %r12 # 8-byte Reload
+ adcq 1456(%rsp), %r12
+ adcq 1464(%rsp), %r13
+ movq %r13, 96(%rsp) # 8-byte Spill
+ movq 112(%rsp), %rbp # 8-byte Reload
+ adcq 1472(%rsp), %rbp
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rdx
+ leaq 1320(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 1392(%rsp), %rax
+ movq 88(%rsp), %rcx # 8-byte Reload
+ addq 1320(%rsp), %rcx
+ movq 8(%rsp), %r15 # 8-byte Reload
+ adcq 1328(%rsp), %r15
+ movq 104(%rsp), %r14 # 8-byte Reload
+ adcq 1336(%rsp), %r14
+ movq 56(%rsp), %rdx # 8-byte Reload
+ adcq 1344(%rsp), %rdx
+ movq %rdx, 56(%rsp) # 8-byte Spill
+ movq 40(%rsp), %r13 # 8-byte Reload
+ adcq 1352(%rsp), %r13
+ adcq 1360(%rsp), %rbx
+ movq %rbx, 48(%rsp) # 8-byte Spill
+ adcq 1368(%rsp), %r12
+ movq %r12, 72(%rsp) # 8-byte Spill
+ movq 96(%rsp), %rdx # 8-byte Reload
+ adcq 1376(%rsp), %rdx
+ movq %rdx, 96(%rsp) # 8-byte Spill
+ adcq 1384(%rsp), %rbp
+ movq %rbp, 112(%rsp) # 8-byte Spill
+ adcq $0, %rax
+ movq %rax, %rbp
+ movq %rcx, %rdx
+ movq %rcx, %rbx
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 1240(%rsp), %rdi
+ movq 64(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 1240(%rsp), %rbx
+ adcq 1248(%rsp), %r15
+ movq %r15, 8(%rsp) # 8-byte Spill
+ adcq 1256(%rsp), %r14
+ movq %r14, 104(%rsp) # 8-byte Spill
+ movq 56(%rsp), %r12 # 8-byte Reload
+ adcq 1264(%rsp), %r12
+ adcq 1272(%rsp), %r13
+ movq %r13, %r14
+ movq 48(%rsp), %r13 # 8-byte Reload
+ adcq 1280(%rsp), %r13
+ movq 72(%rsp), %rbx # 8-byte Reload
+ adcq 1288(%rsp), %rbx
+ movq 96(%rsp), %r15 # 8-byte Reload
+ adcq 1296(%rsp), %r15
+ movq 112(%rsp), %rax # 8-byte Reload
+ adcq 1304(%rsp), %rax
+ movq %rax, 112(%rsp) # 8-byte Spill
+ adcq 1312(%rsp), %rbp
+ movq %rbp, 80(%rsp) # 8-byte Spill
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rdx
+ leaq 1160(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 1232(%rsp), %rax
+ movq 8(%rsp), %rcx # 8-byte Reload
+ addq 1160(%rsp), %rcx
+ movq 104(%rsp), %rbp # 8-byte Reload
+ adcq 1168(%rsp), %rbp
+ adcq 1176(%rsp), %r12
+ movq %r12, 56(%rsp) # 8-byte Spill
+ adcq 1184(%rsp), %r14
+ adcq 1192(%rsp), %r13
+ movq %r13, %r12
+ adcq 1200(%rsp), %rbx
+ movq %rbx, 72(%rsp) # 8-byte Spill
+ adcq 1208(%rsp), %r15
+ movq %r15, 96(%rsp) # 8-byte Spill
+ movq 112(%rsp), %rbx # 8-byte Reload
+ adcq 1216(%rsp), %rbx
+ movq 80(%rsp), %rdx # 8-byte Reload
+ adcq 1224(%rsp), %rdx
+ movq %rdx, 80(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ adcq $0, %r15
+ movq %rcx, %rdx
+ movq %rcx, %r13
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 1080(%rsp), %rdi
+ movq 64(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 1080(%rsp), %r13
+ adcq 1088(%rsp), %rbp
+ movq %rbp, 104(%rsp) # 8-byte Spill
+ movq 56(%rsp), %r13 # 8-byte Reload
+ adcq 1096(%rsp), %r13
+ adcq 1104(%rsp), %r14
+ adcq 1112(%rsp), %r12
+ movq %r12, 48(%rsp) # 8-byte Spill
+ movq 72(%rsp), %r12 # 8-byte Reload
+ adcq 1120(%rsp), %r12
+ movq 96(%rsp), %rbp # 8-byte Reload
+ adcq 1128(%rsp), %rbp
+ adcq 1136(%rsp), %rbx
+ movq %rbx, 112(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rbx # 8-byte Reload
+ adcq 1144(%rsp), %rbx
+ adcq 1152(%rsp), %r15
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rdx
+ leaq 1000(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 1072(%rsp), %rax
+ movq 104(%rsp), %rcx # 8-byte Reload
+ addq 1000(%rsp), %rcx
+ adcq 1008(%rsp), %r13
+ movq %r13, 56(%rsp) # 8-byte Spill
+ adcq 1016(%rsp), %r14
+ movq %r14, 40(%rsp) # 8-byte Spill
+ movq 48(%rsp), %r14 # 8-byte Reload
+ adcq 1024(%rsp), %r14
+ adcq 1032(%rsp), %r12
+ adcq 1040(%rsp), %rbp
+ movq %rbp, 96(%rsp) # 8-byte Spill
+ movq 112(%rsp), %r13 # 8-byte Reload
+ adcq 1048(%rsp), %r13
+ adcq 1056(%rsp), %rbx
+ movq %rbx, 80(%rsp) # 8-byte Spill
+ adcq 1064(%rsp), %r15
+ movq %r15, 88(%rsp) # 8-byte Spill
+ adcq $0, %rax
+ movq %rax, 104(%rsp) # 8-byte Spill
+ movq %rcx, %rdx
+ movq %rcx, %rbx
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 920(%rsp), %rdi
+ movq 64(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 920(%rsp), %rbx
+ movq 56(%rsp), %rax # 8-byte Reload
+ adcq 928(%rsp), %rax
+ movq %rax, 56(%rsp) # 8-byte Spill
+ movq 40(%rsp), %rbp # 8-byte Reload
+ adcq 936(%rsp), %rbp
+ movq %r14, %rbx
+ adcq 944(%rsp), %rbx
+ adcq 952(%rsp), %r12
+ movq 96(%rsp), %rax # 8-byte Reload
+ adcq 960(%rsp), %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ adcq 968(%rsp), %r13
+ movq %r13, %r15
+ movq 80(%rsp), %r13 # 8-byte Reload
+ adcq 976(%rsp), %r13
+ movq 88(%rsp), %r14 # 8-byte Reload
+ adcq 984(%rsp), %r14
+ movq 104(%rsp), %rax # 8-byte Reload
+ adcq 992(%rsp), %rax
+ movq %rax, 104(%rsp) # 8-byte Spill
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 32(%rax), %rdx
+ leaq 840(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 912(%rsp), %rax
+ movq 56(%rsp), %rcx # 8-byte Reload
+ addq 840(%rsp), %rcx
+ adcq 848(%rsp), %rbp
+ movq %rbp, 40(%rsp) # 8-byte Spill
+ adcq 856(%rsp), %rbx
+ movq %rbx, 48(%rsp) # 8-byte Spill
+ adcq 864(%rsp), %r12
+ movq 96(%rsp), %rbp # 8-byte Reload
+ adcq 872(%rsp), %rbp
+ adcq 880(%rsp), %r15
+ movq %r15, 112(%rsp) # 8-byte Spill
+ adcq 888(%rsp), %r13
+ adcq 896(%rsp), %r14
+ movq %r14, 88(%rsp) # 8-byte Spill
+ movq 104(%rsp), %rdx # 8-byte Reload
+ adcq 904(%rsp), %rdx
+ movq %rdx, 104(%rsp) # 8-byte Spill
+ adcq $0, %rax
+ movq %rax, %r14
+ movq %rcx, %rdx
+ movq %rcx, %rbx
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 760(%rsp), %rdi
+ movq 64(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 760(%rsp), %rbx
+ movq 40(%rsp), %rax # 8-byte Reload
+ adcq 768(%rsp), %rax
+ movq %rax, 40(%rsp) # 8-byte Spill
+ movq 48(%rsp), %r15 # 8-byte Reload
+ adcq 776(%rsp), %r15
+ adcq 784(%rsp), %r12
+ movq %r12, 72(%rsp) # 8-byte Spill
+ movq %rbp, %rbx
+ adcq 792(%rsp), %rbx
+ movq 112(%rsp), %rbp # 8-byte Reload
+ adcq 800(%rsp), %rbp
+ adcq 808(%rsp), %r13
+ movq 88(%rsp), %rax # 8-byte Reload
+ adcq 816(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ movq 104(%rsp), %r12 # 8-byte Reload
+ adcq 824(%rsp), %r12
+ adcq 832(%rsp), %r14
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 40(%rax), %rdx
+ leaq 680(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 752(%rsp), %rcx
+ movq 40(%rsp), %rax # 8-byte Reload
+ addq 680(%rsp), %rax
+ adcq 688(%rsp), %r15
+ movq %r15, 48(%rsp) # 8-byte Spill
+ movq 72(%rsp), %rdx # 8-byte Reload
+ adcq 696(%rsp), %rdx
+ movq %rdx, 72(%rsp) # 8-byte Spill
+ adcq 704(%rsp), %rbx
+ movq %rbx, 96(%rsp) # 8-byte Spill
+ adcq 712(%rsp), %rbp
+ movq %rbp, 112(%rsp) # 8-byte Spill
+ adcq 720(%rsp), %r13
+ movq %r13, %r15
+ movq 88(%rsp), %rbx # 8-byte Reload
+ adcq 728(%rsp), %rbx
+ adcq 736(%rsp), %r12
+ movq %r12, 104(%rsp) # 8-byte Spill
+ adcq 744(%rsp), %r14
+ movq %r14, 40(%rsp) # 8-byte Spill
+ adcq $0, %rcx
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ movq %rax, %rdx
+ movq %rax, %r13
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 600(%rsp), %rdi
+ movq 64(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 600(%rsp), %r13
+ movq 48(%rsp), %r13 # 8-byte Reload
+ adcq 608(%rsp), %r13
+ movq 72(%rsp), %r12 # 8-byte Reload
+ adcq 616(%rsp), %r12
+ movq 96(%rsp), %rbp # 8-byte Reload
+ adcq 624(%rsp), %rbp
+ movq 112(%rsp), %rax # 8-byte Reload
+ adcq 632(%rsp), %rax
+ movq %rax, 112(%rsp) # 8-byte Spill
+ adcq 640(%rsp), %r15
+ movq %r15, 80(%rsp) # 8-byte Spill
+ adcq 648(%rsp), %rbx
+ movq %rbx, 88(%rsp) # 8-byte Spill
+ movq 104(%rsp), %r14 # 8-byte Reload
+ adcq 656(%rsp), %r14
+ movq 40(%rsp), %rbx # 8-byte Reload
+ adcq 664(%rsp), %rbx
+ movq 56(%rsp), %r15 # 8-byte Reload
+ adcq 672(%rsp), %r15
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 48(%rax), %rdx
+ leaq 520(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 592(%rsp), %rcx
+ movq %r13, %rax
+ addq 520(%rsp), %rax
+ adcq 528(%rsp), %r12
+ movq %r12, 72(%rsp) # 8-byte Spill
+ movq %rbp, %r12
+ adcq 536(%rsp), %r12
+ movq 112(%rsp), %rbp # 8-byte Reload
+ adcq 544(%rsp), %rbp
+ movq 80(%rsp), %rdx # 8-byte Reload
+ adcq 552(%rsp), %rdx
+ movq %rdx, 80(%rsp) # 8-byte Spill
+ movq 88(%rsp), %rdx # 8-byte Reload
+ adcq 560(%rsp), %rdx
+ movq %rdx, 88(%rsp) # 8-byte Spill
+ adcq 568(%rsp), %r14
+ movq %r14, 104(%rsp) # 8-byte Spill
+ adcq 576(%rsp), %rbx
+ movq %rbx, 40(%rsp) # 8-byte Spill
+ adcq 584(%rsp), %r15
+ movq %r15, 56(%rsp) # 8-byte Spill
+ adcq $0, %rcx
+ movq %rcx, %r13
+ movq %rax, %rdx
+ movq %rax, %r14
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 440(%rsp), %rdi
+ movq 64(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 440(%rsp), %r14
+ movq 72(%rsp), %rax # 8-byte Reload
+ adcq 448(%rsp), %rax
+ movq %rax, 72(%rsp) # 8-byte Spill
+ adcq 456(%rsp), %r12
+ adcq 464(%rsp), %rbp
+ movq %rbp, 112(%rsp) # 8-byte Spill
+ movq 80(%rsp), %r14 # 8-byte Reload
+ adcq 472(%rsp), %r14
+ movq 88(%rsp), %r15 # 8-byte Reload
+ adcq 480(%rsp), %r15
+ movq 104(%rsp), %rbp # 8-byte Reload
+ adcq 488(%rsp), %rbp
+ movq 40(%rsp), %rbx # 8-byte Reload
+ adcq 496(%rsp), %rbx
+ movq 56(%rsp), %rax # 8-byte Reload
+ adcq 504(%rsp), %rax
+ movq %rax, 56(%rsp) # 8-byte Spill
+ adcq 512(%rsp), %r13
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 56(%rax), %rdx
+ leaq 360(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 432(%rsp), %rcx
+ movq 72(%rsp), %rax # 8-byte Reload
+ addq 360(%rsp), %rax
+ adcq 368(%rsp), %r12
+ movq %r12, 96(%rsp) # 8-byte Spill
+ movq 112(%rsp), %rdx # 8-byte Reload
+ adcq 376(%rsp), %rdx
+ movq %rdx, 112(%rsp) # 8-byte Spill
+ adcq 384(%rsp), %r14
+ movq %r14, 80(%rsp) # 8-byte Spill
+ adcq 392(%rsp), %r15
+ movq %r15, 88(%rsp) # 8-byte Spill
+ adcq 400(%rsp), %rbp
+ movq %rbp, 104(%rsp) # 8-byte Spill
+ adcq 408(%rsp), %rbx
+ movq %rbx, 40(%rsp) # 8-byte Spill
+ movq 56(%rsp), %r14 # 8-byte Reload
+ adcq 416(%rsp), %r14
+ adcq 424(%rsp), %r13
+ movq %r13, %r15
+ adcq $0, %rcx
+ movq %rcx, 72(%rsp) # 8-byte Spill
+ movq %rax, %rdx
+ movq %rax, %r12
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 280(%rsp), %rdi
+ movq 64(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 280(%rsp), %r12
+ movq 96(%rsp), %rax # 8-byte Reload
+ adcq 288(%rsp), %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ movq 112(%rsp), %rbp # 8-byte Reload
+ adcq 296(%rsp), %rbp
+ movq 80(%rsp), %rax # 8-byte Reload
+ adcq 304(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq 88(%rsp), %r13 # 8-byte Reload
+ adcq 312(%rsp), %r13
+ movq 104(%rsp), %r12 # 8-byte Reload
+ adcq 320(%rsp), %r12
+ movq 40(%rsp), %rbx # 8-byte Reload
+ adcq 328(%rsp), %rbx
+ adcq 336(%rsp), %r14
+ movq %r14, 56(%rsp) # 8-byte Spill
+ adcq 344(%rsp), %r15
+ movq %r15, 48(%rsp) # 8-byte Spill
+ movq 72(%rsp), %r14 # 8-byte Reload
+ adcq 352(%rsp), %r14
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 64(%rax), %rdx
+ leaq 200(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 272(%rsp), %rcx
+ movq 96(%rsp), %rax # 8-byte Reload
+ addq 200(%rsp), %rax
+ adcq 208(%rsp), %rbp
+ movq %rbp, 112(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rbp # 8-byte Reload
+ adcq 216(%rsp), %rbp
+ adcq 224(%rsp), %r13
+ movq %r13, 88(%rsp) # 8-byte Spill
+ adcq 232(%rsp), %r12
+ movq %r12, 104(%rsp) # 8-byte Spill
+ adcq 240(%rsp), %rbx
+ movq %rbx, 40(%rsp) # 8-byte Spill
+ movq 56(%rsp), %r15 # 8-byte Reload
+ adcq 248(%rsp), %r15
+ movq 48(%rsp), %r12 # 8-byte Reload
+ adcq 256(%rsp), %r12
+ adcq 264(%rsp), %r14
+ adcq $0, %rcx
+ movq %rcx, 96(%rsp) # 8-byte Spill
+ movq 32(%rsp), %rdx # 8-byte Reload
+ imulq %rax, %rdx
+ movq %rax, %rbx
+ leaq 120(%rsp), %rdi
+ movq 64(%rsp), %r13 # 8-byte Reload
+ movq %r13, %rsi
+ callq .LmulPv576x64
+ addq 120(%rsp), %rbx
+ movq 112(%rsp), %rcx # 8-byte Reload
+ adcq 128(%rsp), %rcx
+ movq %rbp, %rdx
+ adcq 136(%rsp), %rdx
+ movq 88(%rsp), %rsi # 8-byte Reload
+ adcq 144(%rsp), %rsi
+ movq %rsi, 88(%rsp) # 8-byte Spill
+ movq 104(%rsp), %rdi # 8-byte Reload
+ adcq 152(%rsp), %rdi
+ movq %rdi, 104(%rsp) # 8-byte Spill
+ movq 40(%rsp), %rbx # 8-byte Reload
+ adcq 160(%rsp), %rbx
+ movq %rbx, 40(%rsp) # 8-byte Spill
+ movq %r15, %r8
+ adcq 168(%rsp), %r8
+ movq %r8, 56(%rsp) # 8-byte Spill
+ movq %r12, %r15
+ adcq 176(%rsp), %r15
+ adcq 184(%rsp), %r14
+ movq 96(%rsp), %r9 # 8-byte Reload
+ adcq 192(%rsp), %r9
+ movq %rcx, %rax
+ movq %rcx, %r11
+ movq %r13, %rbp
+ subq (%rbp), %rax
+ movq %rdx, %rcx
+ movq %rdx, %r12
+ sbbq 8(%rbp), %rcx
+ movq %rsi, %rdx
+ sbbq 16(%rbp), %rdx
+ movq %rdi, %rsi
+ sbbq 24(%rbp), %rsi
+ movq %rbx, %rdi
+ sbbq 32(%rbp), %rdi
+ movq %r8, %r10
+ sbbq 40(%rbp), %r10
+ movq %r15, %r13
+ sbbq 48(%rbp), %r13
+ movq %r14, %r8
+ sbbq 56(%rbp), %r8
+ movq %rbp, %rbx
+ movq %r9, %rbp
+ sbbq 64(%rbx), %rbp
+ movq %rbp, %rbx
+ sarq $63, %rbx
+ cmovsq %r11, %rax
+ movq (%rsp), %rbx # 8-byte Reload
+ movq %rax, (%rbx)
+ cmovsq %r12, %rcx
+ movq %rcx, 8(%rbx)
+ cmovsq 88(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 16(%rbx)
+ cmovsq 104(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 24(%rbx)
+ cmovsq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rbx)
+ cmovsq 56(%rsp), %r10 # 8-byte Folded Reload
+ movq %r10, 40(%rbx)
+ cmovsq %r15, %r13
+ movq %r13, 48(%rbx)
+ cmovsq %r14, %r8
+ movq %r8, 56(%rbx)
+ cmovsq %r9, %rbp
+ movq %rbp, 64(%rbx)
+ addq $1560, %rsp # imm = 0x618
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end131:
+ .size mcl_fp_montNF9Lbmi2, .Lfunc_end131-mcl_fp_montNF9Lbmi2
+
+ .globl mcl_fp_montRed9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed9Lbmi2,@function
+mcl_fp_montRed9Lbmi2: # @mcl_fp_montRed9Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $936, %rsp # imm = 0x3A8
+ movq %rdx, %rax
+ movq %rax, 128(%rsp) # 8-byte Spill
+ movq %rdi, 80(%rsp) # 8-byte Spill
+ movq -8(%rax), %rcx
+ movq %rcx, 120(%rsp) # 8-byte Spill
+ movq (%rsi), %r14
+ movq 8(%rsi), %rdx
+ movq %rdx, 192(%rsp) # 8-byte Spill
+ movq %r14, %rdx
+ imulq %rcx, %rdx
+ movq 136(%rsi), %rcx
+ movq %rcx, 112(%rsp) # 8-byte Spill
+ movq 128(%rsi), %rcx
+ movq %rcx, 152(%rsp) # 8-byte Spill
+ movq 120(%rsi), %rcx
+ movq %rcx, 104(%rsp) # 8-byte Spill
+ movq 112(%rsi), %rcx
+ movq %rcx, 144(%rsp) # 8-byte Spill
+ movq 104(%rsi), %rcx
+ movq %rcx, 184(%rsp) # 8-byte Spill
+ movq 96(%rsi), %rcx
+ movq %rcx, 208(%rsp) # 8-byte Spill
+ movq 88(%rsi), %rcx
+ movq %rcx, 200(%rsp) # 8-byte Spill
+ movq 80(%rsi), %rcx
+ movq %rcx, 160(%rsp) # 8-byte Spill
+ movq 72(%rsi), %r12
+ movq 64(%rsi), %rcx
+ movq %rcx, 176(%rsp) # 8-byte Spill
+ movq 56(%rsi), %rcx
+ movq %rcx, 168(%rsp) # 8-byte Spill
+ movq 48(%rsi), %rcx
+ movq %rcx, 136(%rsp) # 8-byte Spill
+ movq 40(%rsi), %rbp
+ movq 32(%rsi), %rbx
+ movq 24(%rsi), %r13
+ movq 16(%rsi), %r15
+ movq %rax, %rcx
+ movq (%rcx), %rax
+ movq %rax, 16(%rsp) # 8-byte Spill
+ movq 64(%rcx), %rax
+ movq %rax, 72(%rsp) # 8-byte Spill
+ movq 56(%rcx), %rax
+ movq %rax, 64(%rsp) # 8-byte Spill
+ movq 48(%rcx), %rax
+ movq %rax, 56(%rsp) # 8-byte Spill
+ movq 40(%rcx), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ movq 32(%rcx), %rax
+ movq %rax, 40(%rsp) # 8-byte Spill
+ movq 24(%rcx), %rax
+ movq %rax, 32(%rsp) # 8-byte Spill
+ movq 16(%rcx), %rax
+ movq %rax, 24(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rax
+ movq %rax, 8(%rsp) # 8-byte Spill
+ movq %rcx, %rsi
+ leaq 856(%rsp), %rdi
+ callq .LmulPv576x64
+ addq 856(%rsp), %r14
+ movq 192(%rsp), %rcx # 8-byte Reload
+ adcq 864(%rsp), %rcx
+ adcq 872(%rsp), %r15
+ adcq 880(%rsp), %r13
+ adcq 888(%rsp), %rbx
+ movq %rbx, 88(%rsp) # 8-byte Spill
+ adcq 896(%rsp), %rbp
+ movq %rbp, 96(%rsp) # 8-byte Spill
+ movq 136(%rsp), %rax # 8-byte Reload
+ adcq 904(%rsp), %rax
+ movq %rax, 136(%rsp) # 8-byte Spill
+ movq 168(%rsp), %rax # 8-byte Reload
+ adcq 912(%rsp), %rax
+ movq %rax, 168(%rsp) # 8-byte Spill
+ movq 176(%rsp), %rax # 8-byte Reload
+ adcq 920(%rsp), %rax
+ movq %rax, 176(%rsp) # 8-byte Spill
+ adcq 928(%rsp), %r12
+ movq %r12, 192(%rsp) # 8-byte Spill
+ movq 160(%rsp), %rbp # 8-byte Reload
+ adcq $0, %rbp
+ adcq $0, 200(%rsp) # 8-byte Folded Spill
+ adcq $0, 208(%rsp) # 8-byte Folded Spill
+ adcq $0, 184(%rsp) # 8-byte Folded Spill
+ adcq $0, 144(%rsp) # 8-byte Folded Spill
+ adcq $0, 104(%rsp) # 8-byte Folded Spill
+ adcq $0, 152(%rsp) # 8-byte Folded Spill
+ movq 112(%rsp), %r14 # 8-byte Reload
+ adcq $0, %r14
+ sbbq %r12, %r12
+ movq %rcx, %rdx
+ movq %rcx, %rbx
+ imulq 120(%rsp), %rdx # 8-byte Folded Reload
+ leaq 776(%rsp), %rdi
+ movq 128(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ andl $1, %r12d
+ addq 776(%rsp), %rbx
+ adcq 784(%rsp), %r15
+ adcq 792(%rsp), %r13
+ movq %r13, (%rsp) # 8-byte Spill
+ movq 88(%rsp), %rax # 8-byte Reload
+ adcq 800(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ movq 96(%rsp), %rax # 8-byte Reload
+ adcq 808(%rsp), %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ movq 136(%rsp), %rax # 8-byte Reload
+ adcq 816(%rsp), %rax
+ movq %rax, 136(%rsp) # 8-byte Spill
+ movq 168(%rsp), %rax # 8-byte Reload
+ adcq 824(%rsp), %rax
+ movq %rax, 168(%rsp) # 8-byte Spill
+ movq 176(%rsp), %rax # 8-byte Reload
+ adcq 832(%rsp), %rax
+ movq %rax, 176(%rsp) # 8-byte Spill
+ movq 192(%rsp), %rax # 8-byte Reload
+ adcq 840(%rsp), %rax
+ movq %rax, 192(%rsp) # 8-byte Spill
+ adcq 848(%rsp), %rbp
+ movq %rbp, 160(%rsp) # 8-byte Spill
+ movq 200(%rsp), %r13 # 8-byte Reload
+ adcq $0, %r13
+ adcq $0, 208(%rsp) # 8-byte Folded Spill
+ adcq $0, 184(%rsp) # 8-byte Folded Spill
+ adcq $0, 144(%rsp) # 8-byte Folded Spill
+ adcq $0, 104(%rsp) # 8-byte Folded Spill
+ movq 152(%rsp), %rbx # 8-byte Reload
+ adcq $0, %rbx
+ adcq $0, %r14
+ movq %r14, 112(%rsp) # 8-byte Spill
+ adcq $0, %r12
+ movq %r15, %rdx
+ imulq 120(%rsp), %rdx # 8-byte Folded Reload
+ leaq 696(%rsp), %rdi
+ movq 128(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 696(%rsp), %r15
+ movq (%rsp), %rcx # 8-byte Reload
+ adcq 704(%rsp), %rcx
+ movq 88(%rsp), %rax # 8-byte Reload
+ adcq 712(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ movq 96(%rsp), %rax # 8-byte Reload
+ adcq 720(%rsp), %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ movq 136(%rsp), %rbp # 8-byte Reload
+ adcq 728(%rsp), %rbp
+ movq 168(%rsp), %r14 # 8-byte Reload
+ adcq 736(%rsp), %r14
+ movq 176(%rsp), %r15 # 8-byte Reload
+ adcq 744(%rsp), %r15
+ movq 192(%rsp), %rax # 8-byte Reload
+ adcq 752(%rsp), %rax
+ movq %rax, 192(%rsp) # 8-byte Spill
+ movq 160(%rsp), %rax # 8-byte Reload
+ adcq 760(%rsp), %rax
+ movq %rax, 160(%rsp) # 8-byte Spill
+ adcq 768(%rsp), %r13
+ movq %r13, 200(%rsp) # 8-byte Spill
+ adcq $0, 208(%rsp) # 8-byte Folded Spill
+ movq 184(%rsp), %r13 # 8-byte Reload
+ adcq $0, %r13
+ adcq $0, 144(%rsp) # 8-byte Folded Spill
+ adcq $0, 104(%rsp) # 8-byte Folded Spill
+ adcq $0, %rbx
+ movq %rbx, 152(%rsp) # 8-byte Spill
+ adcq $0, 112(%rsp) # 8-byte Folded Spill
+ adcq $0, %r12
+ movq %rcx, %rbx
+ movq %rbx, %rdx
+ imulq 120(%rsp), %rdx # 8-byte Folded Reload
+ leaq 616(%rsp), %rdi
+ movq 128(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 616(%rsp), %rbx
+ movq 88(%rsp), %rax # 8-byte Reload
+ adcq 624(%rsp), %rax
+ movq 96(%rsp), %rcx # 8-byte Reload
+ adcq 632(%rsp), %rcx
+ movq %rcx, 96(%rsp) # 8-byte Spill
+ adcq 640(%rsp), %rbp
+ movq %rbp, 136(%rsp) # 8-byte Spill
+ adcq 648(%rsp), %r14
+ movq %r14, 168(%rsp) # 8-byte Spill
+ adcq 656(%rsp), %r15
+ movq 192(%rsp), %r14 # 8-byte Reload
+ adcq 664(%rsp), %r14
+ movq 160(%rsp), %rbp # 8-byte Reload
+ adcq 672(%rsp), %rbp
+ movq 200(%rsp), %rcx # 8-byte Reload
+ adcq 680(%rsp), %rcx
+ movq %rcx, 200(%rsp) # 8-byte Spill
+ movq 208(%rsp), %rcx # 8-byte Reload
+ adcq 688(%rsp), %rcx
+ movq %rcx, 208(%rsp) # 8-byte Spill
+ adcq $0, %r13
+ movq %r13, 184(%rsp) # 8-byte Spill
+ adcq $0, 144(%rsp) # 8-byte Folded Spill
+ adcq $0, 104(%rsp) # 8-byte Folded Spill
+ adcq $0, 152(%rsp) # 8-byte Folded Spill
+ adcq $0, 112(%rsp) # 8-byte Folded Spill
+ adcq $0, %r12
+ movq %rax, %rbx
+ movq %rbx, %rdx
+ imulq 120(%rsp), %rdx # 8-byte Folded Reload
+ leaq 536(%rsp), %rdi
+ movq 128(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 536(%rsp), %rbx
+ movq 96(%rsp), %rax # 8-byte Reload
+ adcq 544(%rsp), %rax
+ movq 136(%rsp), %rcx # 8-byte Reload
+ adcq 552(%rsp), %rcx
+ movq %rcx, 136(%rsp) # 8-byte Spill
+ movq 168(%rsp), %rcx # 8-byte Reload
+ adcq 560(%rsp), %rcx
+ movq %rcx, 168(%rsp) # 8-byte Spill
+ adcq 568(%rsp), %r15
+ movq %r15, 176(%rsp) # 8-byte Spill
+ adcq 576(%rsp), %r14
+ movq %r14, 192(%rsp) # 8-byte Spill
+ adcq 584(%rsp), %rbp
+ movq %rbp, 160(%rsp) # 8-byte Spill
+ movq 200(%rsp), %r13 # 8-byte Reload
+ adcq 592(%rsp), %r13
+ movq 208(%rsp), %r15 # 8-byte Reload
+ adcq 600(%rsp), %r15
+ movq 184(%rsp), %rbp # 8-byte Reload
+ adcq 608(%rsp), %rbp
+ movq 144(%rsp), %rbx # 8-byte Reload
+ adcq $0, %rbx
+ adcq $0, 104(%rsp) # 8-byte Folded Spill
+ adcq $0, 152(%rsp) # 8-byte Folded Spill
+ adcq $0, 112(%rsp) # 8-byte Folded Spill
+ adcq $0, %r12
+ movq %rax, %rdx
+ movq %rax, %r14
+ imulq 120(%rsp), %rdx # 8-byte Folded Reload
+ leaq 456(%rsp), %rdi
+ movq 128(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 456(%rsp), %r14
+ movq 136(%rsp), %rax # 8-byte Reload
+ adcq 464(%rsp), %rax
+ movq 168(%rsp), %rcx # 8-byte Reload
+ adcq 472(%rsp), %rcx
+ movq %rcx, 168(%rsp) # 8-byte Spill
+ movq 176(%rsp), %rcx # 8-byte Reload
+ adcq 480(%rsp), %rcx
+ movq %rcx, 176(%rsp) # 8-byte Spill
+ movq 192(%rsp), %rcx # 8-byte Reload
+ adcq 488(%rsp), %rcx
+ movq %rcx, 192(%rsp) # 8-byte Spill
+ movq 160(%rsp), %rcx # 8-byte Reload
+ adcq 496(%rsp), %rcx
+ movq %rcx, 160(%rsp) # 8-byte Spill
+ adcq 504(%rsp), %r13
+ movq %r13, 200(%rsp) # 8-byte Spill
+ adcq 512(%rsp), %r15
+ movq %r15, 208(%rsp) # 8-byte Spill
+ adcq 520(%rsp), %rbp
+ movq %rbp, 184(%rsp) # 8-byte Spill
+ adcq 528(%rsp), %rbx
+ movq %rbx, 144(%rsp) # 8-byte Spill
+ movq 104(%rsp), %r14 # 8-byte Reload
+ adcq $0, %r14
+ movq 152(%rsp), %r13 # 8-byte Reload
+ adcq $0, %r13
+ movq 112(%rsp), %rbx # 8-byte Reload
+ adcq $0, %rbx
+ adcq $0, %r12
+ movq %rax, %rdx
+ movq %rax, %r15
+ imulq 120(%rsp), %rdx # 8-byte Folded Reload
+ leaq 376(%rsp), %rdi
+ movq 128(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 376(%rsp), %r15
+ movq 168(%rsp), %rax # 8-byte Reload
+ adcq 384(%rsp), %rax
+ movq 176(%rsp), %rcx # 8-byte Reload
+ adcq 392(%rsp), %rcx
+ movq %rcx, 176(%rsp) # 8-byte Spill
+ movq 192(%rsp), %rcx # 8-byte Reload
+ adcq 400(%rsp), %rcx
+ movq %rcx, 192(%rsp) # 8-byte Spill
+ movq 160(%rsp), %rbp # 8-byte Reload
+ adcq 408(%rsp), %rbp
+ movq 200(%rsp), %rcx # 8-byte Reload
+ adcq 416(%rsp), %rcx
+ movq %rcx, 200(%rsp) # 8-byte Spill
+ movq 208(%rsp), %rcx # 8-byte Reload
+ adcq 424(%rsp), %rcx
+ movq %rcx, 208(%rsp) # 8-byte Spill
+ movq 184(%rsp), %rcx # 8-byte Reload
+ adcq 432(%rsp), %rcx
+ movq %rcx, 184(%rsp) # 8-byte Spill
+ movq 144(%rsp), %r15 # 8-byte Reload
+ adcq 440(%rsp), %r15
+ adcq 448(%rsp), %r14
+ movq %r14, 104(%rsp) # 8-byte Spill
+ adcq $0, %r13
+ movq %r13, %r14
+ adcq $0, %rbx
+ movq %rbx, 112(%rsp) # 8-byte Spill
+ adcq $0, %r12
+ movq %rax, %rbx
+ movq %rbx, %rdx
+ imulq 120(%rsp), %rdx # 8-byte Folded Reload
+ leaq 296(%rsp), %rdi
+ movq 128(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 296(%rsp), %rbx
+ movq 176(%rsp), %rax # 8-byte Reload
+ adcq 304(%rsp), %rax
+ movq 192(%rsp), %r13 # 8-byte Reload
+ adcq 312(%rsp), %r13
+ adcq 320(%rsp), %rbp
+ movq 200(%rsp), %rcx # 8-byte Reload
+ adcq 328(%rsp), %rcx
+ movq %rcx, 200(%rsp) # 8-byte Spill
+ movq 208(%rsp), %rcx # 8-byte Reload
+ adcq 336(%rsp), %rcx
+ movq %rcx, 208(%rsp) # 8-byte Spill
+ movq 184(%rsp), %rcx # 8-byte Reload
+ adcq 344(%rsp), %rcx
+ movq %rcx, 184(%rsp) # 8-byte Spill
+ adcq 352(%rsp), %r15
+ movq %r15, 144(%rsp) # 8-byte Spill
+ movq 104(%rsp), %r15 # 8-byte Reload
+ adcq 360(%rsp), %r15
+ adcq 368(%rsp), %r14
+ movq %r14, 152(%rsp) # 8-byte Spill
+ movq 112(%rsp), %r14 # 8-byte Reload
+ adcq $0, %r14
+ adcq $0, %r12
+ movq 120(%rsp), %rdx # 8-byte Reload
+ imulq %rax, %rdx
+ movq %rax, %rbx
+ leaq 216(%rsp), %rdi
+ movq 128(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 216(%rsp), %rbx
+ movq %r13, %rsi
+ adcq 224(%rsp), %rsi
+ movq %rsi, 192(%rsp) # 8-byte Spill
+ adcq 232(%rsp), %rbp
+ movq %rbp, 160(%rsp) # 8-byte Spill
+ movq 200(%rsp), %r9 # 8-byte Reload
+ adcq 240(%rsp), %r9
+ movq %r9, 200(%rsp) # 8-byte Spill
+ movq 208(%rsp), %r8 # 8-byte Reload
+ adcq 248(%rsp), %r8
+ movq %r8, 208(%rsp) # 8-byte Spill
+ movq 184(%rsp), %rbx # 8-byte Reload
+ adcq 256(%rsp), %rbx
+ movq 144(%rsp), %rax # 8-byte Reload
+ adcq 264(%rsp), %rax
+ movq %r15, %rcx
+ adcq 272(%rsp), %rcx
+ movq 152(%rsp), %rdx # 8-byte Reload
+ adcq 280(%rsp), %rdx
+ movq %rdx, 152(%rsp) # 8-byte Spill
+ adcq 288(%rsp), %r14
+ movq %r14, %r11
+ adcq $0, %r12
+ subq 16(%rsp), %rsi # 8-byte Folded Reload
+ movq %rbp, %rdi
+ sbbq 8(%rsp), %rdi # 8-byte Folded Reload
+ movq %r9, %rbp
+ sbbq 24(%rsp), %rbp # 8-byte Folded Reload
+ movq %r8, %r13
+ sbbq 32(%rsp), %r13 # 8-byte Folded Reload
+ movq %rbx, %r15
+ sbbq 40(%rsp), %r15 # 8-byte Folded Reload
+ movq %rax, %r14
+ sbbq 48(%rsp), %r14 # 8-byte Folded Reload
+ movq %rcx, %r10
+ sbbq 56(%rsp), %r10 # 8-byte Folded Reload
+ movq %rdx, %r8
+ sbbq 64(%rsp), %r8 # 8-byte Folded Reload
+ movq %r11, %r9
+ sbbq 72(%rsp), %r9 # 8-byte Folded Reload
+ sbbq $0, %r12
+ andl $1, %r12d
+ cmovneq %r11, %r9
+ testb %r12b, %r12b
+ cmovneq 192(%rsp), %rsi # 8-byte Folded Reload
+ movq 80(%rsp), %rdx # 8-byte Reload
+ movq %rsi, (%rdx)
+ cmovneq 160(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 8(%rdx)
+ cmovneq 200(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 16(%rdx)
+ cmovneq 208(%rsp), %r13 # 8-byte Folded Reload
+ movq %r13, 24(%rdx)
+ cmovneq %rbx, %r15
+ movq %r15, 32(%rdx)
+ cmovneq %rax, %r14
+ movq %r14, 40(%rdx)
+ cmovneq %rcx, %r10
+ movq %r10, 48(%rdx)
+ cmovneq 152(%rsp), %r8 # 8-byte Folded Reload
+ movq %r8, 56(%rdx)
+ movq %r9, 64(%rdx)
+ addq $936, %rsp # imm = 0x3A8
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end132:
+ .size mcl_fp_montRed9Lbmi2, .Lfunc_end132-mcl_fp_montRed9Lbmi2
+
+ .globl mcl_fp_addPre9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre9Lbmi2,@function
+mcl_fp_addPre9Lbmi2: # @mcl_fp_addPre9Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 64(%rdx), %r8
+ movq 64(%rsi), %r15
+ movq 56(%rsi), %r9
+ movq 48(%rsi), %r10
+ movq 40(%rsi), %r11
+ movq 24(%rsi), %r12
+ movq 32(%rsi), %r14
+ movq (%rdx), %rbx
+ movq 8(%rdx), %rcx
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %rcx
+ movq 16(%rdx), %rax
+ adcq 16(%rsi), %rax
+ adcq 24(%rdx), %r12
+ movq 56(%rdx), %r13
+ movq 48(%rdx), %rsi
+ movq 40(%rdx), %rbp
+ movq 32(%rdx), %rdx
+ movq %rbx, (%rdi)
+ movq %rcx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ movq %r12, 24(%rdi)
+ adcq %r14, %rdx
+ movq %rdx, 32(%rdi)
+ adcq %r11, %rbp
+ movq %rbp, 40(%rdi)
+ adcq %r10, %rsi
+ movq %rsi, 48(%rdi)
+ adcq %r9, %r13
+ movq %r13, 56(%rdi)
+ adcq %r8, %r15
+ movq %r15, 64(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end133:
+ .size mcl_fp_addPre9Lbmi2, .Lfunc_end133-mcl_fp_addPre9Lbmi2
+
+ .globl mcl_fp_subPre9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre9Lbmi2,@function
+mcl_fp_subPre9Lbmi2: # @mcl_fp_subPre9Lbmi2
+# BB#0:
+ movq 32(%rdx), %r8
+ movq (%rsi), %rcx
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ movq %rcx, (%rdi)
+ movq 8(%rsi), %rcx
+ sbbq 8(%rdx), %rcx
+ movq %rcx, 8(%rdi)
+ movq 16(%rsi), %rcx
+ sbbq 16(%rdx), %rcx
+ movq %rcx, 16(%rdi)
+ movq 24(%rsi), %rcx
+ sbbq 24(%rdx), %rcx
+ movq %rcx, 24(%rdi)
+ movq 32(%rsi), %rcx
+ sbbq %r8, %rcx
+ movq 40(%rdx), %r8
+ movq %rcx, 32(%rdi)
+ movq 40(%rsi), %rcx
+ sbbq %r8, %rcx
+ movq 48(%rdx), %r8
+ movq %rcx, 40(%rdi)
+ movq 48(%rsi), %rcx
+ sbbq %r8, %rcx
+ movq 56(%rdx), %r8
+ movq %rcx, 48(%rdi)
+ movq 56(%rsi), %rcx
+ sbbq %r8, %rcx
+ movq %rcx, 56(%rdi)
+ movq 64(%rdx), %rcx
+ movq 64(%rsi), %rdx
+ sbbq %rcx, %rdx
+ movq %rdx, 64(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end134:
+ .size mcl_fp_subPre9Lbmi2, .Lfunc_end134-mcl_fp_subPre9Lbmi2
+
+ .globl mcl_fp_shr1_9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_9Lbmi2,@function
+mcl_fp_shr1_9Lbmi2: # @mcl_fp_shr1_9Lbmi2
+# BB#0:
+ pushq %rbx
+ movq 64(%rsi), %r8
+ movq 56(%rsi), %r9
+ movq 48(%rsi), %r10
+ movq 40(%rsi), %r11
+ movq 32(%rsi), %rcx
+ movq 24(%rsi), %rdx
+ movq 16(%rsi), %rax
+ movq (%rsi), %rbx
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rbx
+ movq %rbx, (%rdi)
+ shrdq $1, %rax, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rdx, %rax
+ movq %rax, 16(%rdi)
+ shrdq $1, %rcx, %rdx
+ movq %rdx, 24(%rdi)
+ shrdq $1, %r11, %rcx
+ movq %rcx, 32(%rdi)
+ shrdq $1, %r10, %r11
+ movq %r11, 40(%rdi)
+ shrdq $1, %r9, %r10
+ movq %r10, 48(%rdi)
+ shrdq $1, %r8, %r9
+ movq %r9, 56(%rdi)
+ shrq %r8
+ movq %r8, 64(%rdi)
+ popq %rbx
+ retq
+.Lfunc_end135:
+ .size mcl_fp_shr1_9Lbmi2, .Lfunc_end135-mcl_fp_shr1_9Lbmi2
+
+ .globl mcl_fp_add9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add9Lbmi2,@function
+mcl_fp_add9Lbmi2: # @mcl_fp_add9Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 64(%rdx), %r12
+ movq 64(%rsi), %r8
+ movq 56(%rsi), %r13
+ movq 48(%rsi), %r9
+ movq 40(%rsi), %r10
+ movq 24(%rsi), %r14
+ movq 32(%rsi), %r11
+ movq (%rdx), %rbx
+ movq 8(%rdx), %r15
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %r15
+ movq 16(%rdx), %rax
+ adcq 16(%rsi), %rax
+ adcq 24(%rdx), %r14
+ adcq 32(%rdx), %r11
+ adcq 40(%rdx), %r10
+ movq 56(%rdx), %rsi
+ adcq 48(%rdx), %r9
+ movq %rbx, (%rdi)
+ movq %r15, 8(%rdi)
+ movq %rax, 16(%rdi)
+ movq %r14, 24(%rdi)
+ movq %r11, 32(%rdi)
+ movq %r10, 40(%rdi)
+ movq %r9, 48(%rdi)
+ adcq %r13, %rsi
+ movq %rsi, 56(%rdi)
+ adcq %r12, %r8
+ movq %r8, 64(%rdi)
+ sbbq %rdx, %rdx
+ andl $1, %edx
+ subq (%rcx), %rbx
+ sbbq 8(%rcx), %r15
+ sbbq 16(%rcx), %rax
+ sbbq 24(%rcx), %r14
+ sbbq 32(%rcx), %r11
+ sbbq 40(%rcx), %r10
+ sbbq 48(%rcx), %r9
+ sbbq 56(%rcx), %rsi
+ sbbq 64(%rcx), %r8
+ sbbq $0, %rdx
+ testb $1, %dl
+ jne .LBB136_2
+# BB#1: # %nocarry
+ movq %rbx, (%rdi)
+ movq %r15, 8(%rdi)
+ movq %rax, 16(%rdi)
+ movq %r14, 24(%rdi)
+ movq %r11, 32(%rdi)
+ movq %r10, 40(%rdi)
+ movq %r9, 48(%rdi)
+ movq %rsi, 56(%rdi)
+ movq %r8, 64(%rdi)
+.LBB136_2: # %carry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end136:
+ .size mcl_fp_add9Lbmi2, .Lfunc_end136-mcl_fp_add9Lbmi2
+
+ .globl mcl_fp_addNF9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF9Lbmi2,@function
+mcl_fp_addNF9Lbmi2: # @mcl_fp_addNF9Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, %r8
+ movq 64(%rdx), %r10
+ movq 56(%rdx), %r11
+ movq 48(%rdx), %r9
+ movq 40(%rdx), %rax
+ movq 32(%rdx), %rdi
+ movq 24(%rdx), %rbp
+ movq 16(%rdx), %r15
+ movq (%rdx), %rbx
+ movq 8(%rdx), %r13
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %r13
+ adcq 16(%rsi), %r15
+ adcq 24(%rsi), %rbp
+ movq %rbp, -40(%rsp) # 8-byte Spill
+ adcq 32(%rsi), %rdi
+ movq %rdi, -16(%rsp) # 8-byte Spill
+ adcq 40(%rsi), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ adcq 48(%rsi), %r9
+ movq %r9, -32(%rsp) # 8-byte Spill
+ movq %r9, %rdi
+ adcq 56(%rsi), %r11
+ movq %r11, -24(%rsp) # 8-byte Spill
+ movq %r11, %rax
+ adcq 64(%rsi), %r10
+ movq %r10, %r9
+ movq %rbx, %rsi
+ subq (%rcx), %rsi
+ movq %r13, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r15, %r12
+ sbbq 16(%rcx), %r12
+ sbbq 24(%rcx), %rbp
+ movq -16(%rsp), %r14 # 8-byte Reload
+ sbbq 32(%rcx), %r14
+ movq -8(%rsp), %r11 # 8-byte Reload
+ sbbq 40(%rcx), %r11
+ movq %rdi, %r10
+ sbbq 48(%rcx), %r10
+ movq %rax, %rdi
+ sbbq 56(%rcx), %rdi
+ movq %r9, %rax
+ sbbq 64(%rcx), %rax
+ movq %rax, %rcx
+ sarq $63, %rcx
+ cmovsq %rbx, %rsi
+ movq %rsi, (%r8)
+ cmovsq %r13, %rdx
+ movq %rdx, 8(%r8)
+ cmovsq %r15, %r12
+ movq %r12, 16(%r8)
+ cmovsq -40(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 24(%r8)
+ cmovsq -16(%rsp), %r14 # 8-byte Folded Reload
+ movq %r14, 32(%r8)
+ cmovsq -8(%rsp), %r11 # 8-byte Folded Reload
+ movq %r11, 40(%r8)
+ cmovsq -32(%rsp), %r10 # 8-byte Folded Reload
+ movq %r10, 48(%r8)
+ cmovsq -24(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 56(%r8)
+ cmovsq %r9, %rax
+ movq %rax, 64(%r8)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end137:
+ .size mcl_fp_addNF9Lbmi2, .Lfunc_end137-mcl_fp_addNF9Lbmi2
+
+ .globl mcl_fp_sub9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub9Lbmi2,@function
+mcl_fp_sub9Lbmi2: # @mcl_fp_sub9Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 64(%rdx), %r13
+ movq (%rsi), %rax
+ movq 8(%rsi), %r9
+ xorl %ebx, %ebx
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r9
+ movq 16(%rsi), %r10
+ sbbq 16(%rdx), %r10
+ movq 24(%rsi), %r11
+ sbbq 24(%rdx), %r11
+ movq 32(%rsi), %r12
+ sbbq 32(%rdx), %r12
+ movq 40(%rsi), %r14
+ sbbq 40(%rdx), %r14
+ movq 48(%rsi), %r15
+ sbbq 48(%rdx), %r15
+ movq 64(%rsi), %r8
+ movq 56(%rsi), %rsi
+ sbbq 56(%rdx), %rsi
+ movq %rax, (%rdi)
+ movq %r9, 8(%rdi)
+ movq %r10, 16(%rdi)
+ movq %r11, 24(%rdi)
+ movq %r12, 32(%rdi)
+ movq %r14, 40(%rdi)
+ movq %r15, 48(%rdi)
+ movq %rsi, 56(%rdi)
+ sbbq %r13, %r8
+ movq %r8, 64(%rdi)
+ sbbq $0, %rbx
+ testb $1, %bl
+ je .LBB138_2
+# BB#1: # %carry
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ movq 8(%rcx), %rax
+ adcq %r9, %rax
+ movq %rax, 8(%rdi)
+ movq 16(%rcx), %rax
+ adcq %r10, %rax
+ movq %rax, 16(%rdi)
+ movq 24(%rcx), %rax
+ adcq %r11, %rax
+ movq %rax, 24(%rdi)
+ movq 32(%rcx), %rax
+ adcq %r12, %rax
+ movq %rax, 32(%rdi)
+ movq 40(%rcx), %rax
+ adcq %r14, %rax
+ movq %rax, 40(%rdi)
+ movq 48(%rcx), %rax
+ adcq %r15, %rax
+ movq %rax, 48(%rdi)
+ movq 56(%rcx), %rax
+ adcq %rsi, %rax
+ movq %rax, 56(%rdi)
+ movq 64(%rcx), %rax
+ adcq %r8, %rax
+ movq %rax, 64(%rdi)
+.LBB138_2: # %nocarry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end138:
+ .size mcl_fp_sub9Lbmi2, .Lfunc_end138-mcl_fp_sub9Lbmi2
+
+ .globl mcl_fp_subNF9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF9Lbmi2,@function
+mcl_fp_subNF9Lbmi2: # @mcl_fp_subNF9Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq %rdi, %r10
+ movq 64(%rsi), %r14
+ movq 56(%rsi), %rax
+ movq 48(%rsi), %rcx
+ movq 40(%rsi), %rdi
+ movq 32(%rsi), %rbp
+ movq 24(%rsi), %rbx
+ movq 16(%rsi), %r15
+ movq (%rsi), %r13
+ movq 8(%rsi), %r12
+ subq (%rdx), %r13
+ sbbq 8(%rdx), %r12
+ sbbq 16(%rdx), %r15
+ sbbq 24(%rdx), %rbx
+ movq %rbx, -40(%rsp) # 8-byte Spill
+ sbbq 32(%rdx), %rbp
+ movq %rbp, -32(%rsp) # 8-byte Spill
+ sbbq 40(%rdx), %rdi
+ movq %rdi, -24(%rsp) # 8-byte Spill
+ sbbq 48(%rdx), %rcx
+ movq %rcx, -16(%rsp) # 8-byte Spill
+ sbbq 56(%rdx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ sbbq 64(%rdx), %r14
+ movq %r14, %rdx
+ sarq $63, %rdx
+ movq %rdx, %rbp
+ shldq $1, %r14, %rbp
+ movq 24(%r8), %rbx
+ andq %rbp, %rbx
+ movq 8(%r8), %rdi
+ andq %rbp, %rdi
+ andq (%r8), %rbp
+ movq 64(%r8), %r11
+ andq %rdx, %r11
+ rorxq $63, %rdx, %rax
+ andq 56(%r8), %rdx
+ movq 48(%r8), %r9
+ andq %rax, %r9
+ movq 40(%r8), %rsi
+ andq %rax, %rsi
+ movq 32(%r8), %rcx
+ andq %rax, %rcx
+ andq 16(%r8), %rax
+ addq %r13, %rbp
+ adcq %r12, %rdi
+ movq %rbp, (%r10)
+ adcq %r15, %rax
+ movq %rdi, 8(%r10)
+ adcq -40(%rsp), %rbx # 8-byte Folded Reload
+ movq %rax, 16(%r10)
+ movq %rbx, 24(%r10)
+ adcq -32(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 32(%r10)
+ adcq -24(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 40(%r10)
+ adcq -16(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, 48(%r10)
+ adcq -8(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 56(%r10)
+ adcq %r14, %r11
+ movq %r11, 64(%r10)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end139:
+ .size mcl_fp_subNF9Lbmi2, .Lfunc_end139-mcl_fp_subNF9Lbmi2
+
+ .globl mcl_fpDbl_add9Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add9Lbmi2,@function
+mcl_fpDbl_add9Lbmi2: # @mcl_fpDbl_add9Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r15
+ movq 136(%rdx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq 128(%rdx), %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq 120(%rdx), %r10
+ movq 112(%rdx), %r11
+ movq 24(%rsi), %rcx
+ movq 32(%rsi), %r14
+ movq 16(%rdx), %rbp
+ movq (%rdx), %rax
+ movq 8(%rdx), %rbx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rbx
+ adcq 16(%rsi), %rbp
+ adcq 24(%rdx), %rcx
+ adcq 32(%rdx), %r14
+ movq 104(%rdx), %r9
+ movq 96(%rdx), %r13
+ movq %rax, (%rdi)
+ movq 88(%rdx), %r8
+ movq %rbx, 8(%rdi)
+ movq 80(%rdx), %r12
+ movq %rbp, 16(%rdi)
+ movq 40(%rdx), %rax
+ movq %rcx, 24(%rdi)
+ movq 40(%rsi), %rbp
+ adcq %rax, %rbp
+ movq 48(%rdx), %rcx
+ movq %r14, 32(%rdi)
+ movq 48(%rsi), %rax
+ adcq %rcx, %rax
+ movq 56(%rdx), %r14
+ movq %rbp, 40(%rdi)
+ movq 56(%rsi), %rbp
+ adcq %r14, %rbp
+ movq 72(%rdx), %rcx
+ movq 64(%rdx), %rdx
+ movq %rax, 48(%rdi)
+ movq 64(%rsi), %rax
+ adcq %rdx, %rax
+ movq 136(%rsi), %rbx
+ movq %rbp, 56(%rdi)
+ movq 72(%rsi), %rbp
+ adcq %rcx, %rbp
+ movq 128(%rsi), %rcx
+ movq %rax, 64(%rdi)
+ movq 80(%rsi), %rdx
+ adcq %r12, %rdx
+ movq 88(%rsi), %r12
+ adcq %r8, %r12
+ movq 96(%rsi), %r14
+ adcq %r13, %r14
+ movq %r14, -48(%rsp) # 8-byte Spill
+ movq 104(%rsi), %rax
+ adcq %r9, %rax
+ movq %rax, -32(%rsp) # 8-byte Spill
+ movq 120(%rsi), %rax
+ movq 112(%rsi), %rsi
+ adcq %r11, %rsi
+ movq %rsi, -24(%rsp) # 8-byte Spill
+ adcq %r10, %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ adcq -40(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -40(%rsp) # 8-byte Spill
+ adcq -8(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, -8(%rsp) # 8-byte Spill
+ sbbq %r9, %r9
+ andl $1, %r9d
+ movq %rbp, %r10
+ subq (%r15), %r10
+ movq %rdx, %r11
+ sbbq 8(%r15), %r11
+ movq %r12, %rbx
+ sbbq 16(%r15), %rbx
+ sbbq 24(%r15), %r14
+ movq -32(%rsp), %r13 # 8-byte Reload
+ sbbq 32(%r15), %r13
+ movq -24(%rsp), %rsi # 8-byte Reload
+ sbbq 40(%r15), %rsi
+ movq -16(%rsp), %rax # 8-byte Reload
+ sbbq 48(%r15), %rax
+ sbbq 56(%r15), %rcx
+ movq -8(%rsp), %r8 # 8-byte Reload
+ sbbq 64(%r15), %r8
+ sbbq $0, %r9
+ andl $1, %r9d
+ cmovneq %rbp, %r10
+ movq %r10, 72(%rdi)
+ testb %r9b, %r9b
+ cmovneq %rdx, %r11
+ movq %r11, 80(%rdi)
+ cmovneq %r12, %rbx
+ movq %rbx, 88(%rdi)
+ cmovneq -48(%rsp), %r14 # 8-byte Folded Reload
+ movq %r14, 96(%rdi)
+ cmovneq -32(%rsp), %r13 # 8-byte Folded Reload
+ movq %r13, 104(%rdi)
+ cmovneq -24(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 112(%rdi)
+ cmovneq -16(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 120(%rdi)
+ cmovneq -40(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 128(%rdi)
+ cmovneq -8(%rsp), %r8 # 8-byte Folded Reload
+ movq %r8, 136(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end140:
+ .size mcl_fpDbl_add9Lbmi2, .Lfunc_end140-mcl_fpDbl_add9Lbmi2
+
+ .globl mcl_fpDbl_sub9Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub9Lbmi2,@function
+mcl_fpDbl_sub9Lbmi2: # @mcl_fpDbl_sub9Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r14
+ movq 136(%rdx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq 128(%rdx), %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq 120(%rdx), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq 16(%rsi), %r11
+ movq (%rsi), %r12
+ movq 8(%rsi), %r13
+ xorl %r9d, %r9d
+ subq (%rdx), %r12
+ sbbq 8(%rdx), %r13
+ sbbq 16(%rdx), %r11
+ movq 24(%rsi), %rbx
+ sbbq 24(%rdx), %rbx
+ movq 32(%rsi), %rbp
+ sbbq 32(%rdx), %rbp
+ movq 112(%rdx), %r10
+ movq 104(%rdx), %rcx
+ movq %r12, (%rdi)
+ movq 96(%rdx), %rax
+ movq %r13, 8(%rdi)
+ movq 88(%rdx), %r13
+ movq %r11, 16(%rdi)
+ movq 40(%rdx), %r11
+ movq %rbx, 24(%rdi)
+ movq 40(%rsi), %rbx
+ sbbq %r11, %rbx
+ movq 48(%rdx), %r11
+ movq %rbp, 32(%rdi)
+ movq 48(%rsi), %rbp
+ sbbq %r11, %rbp
+ movq 56(%rdx), %r11
+ movq %rbx, 40(%rdi)
+ movq 56(%rsi), %rbx
+ sbbq %r11, %rbx
+ movq 64(%rdx), %r11
+ movq %rbp, 48(%rdi)
+ movq 64(%rsi), %rbp
+ sbbq %r11, %rbp
+ movq 80(%rdx), %r8
+ movq 72(%rdx), %r11
+ movq %rbx, 56(%rdi)
+ movq 72(%rsi), %r15
+ sbbq %r11, %r15
+ movq 136(%rsi), %rdx
+ movq %rbp, 64(%rdi)
+ movq 80(%rsi), %rbp
+ sbbq %r8, %rbp
+ movq 88(%rsi), %r12
+ sbbq %r13, %r12
+ movq 96(%rsi), %r13
+ sbbq %rax, %r13
+ movq 104(%rsi), %rax
+ sbbq %rcx, %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq 112(%rsi), %rax
+ sbbq %r10, %rax
+ movq %rax, -32(%rsp) # 8-byte Spill
+ movq 128(%rsi), %rax
+ movq 120(%rsi), %rcx
+ sbbq -24(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -24(%rsp) # 8-byte Spill
+ sbbq -16(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -16(%rsp) # 8-byte Spill
+ sbbq -8(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movl $0, %r8d
+ sbbq $0, %r8
+ andl $1, %r8d
+ movq (%r14), %r10
+ cmoveq %r9, %r10
+ testb %r8b, %r8b
+ movq 16(%r14), %r8
+ cmoveq %r9, %r8
+ movq 8(%r14), %rdx
+ cmoveq %r9, %rdx
+ movq 64(%r14), %rbx
+ cmoveq %r9, %rbx
+ movq 56(%r14), %r11
+ cmoveq %r9, %r11
+ movq 48(%r14), %rsi
+ cmoveq %r9, %rsi
+ movq 40(%r14), %rcx
+ cmoveq %r9, %rcx
+ movq 32(%r14), %rax
+ cmoveq %r9, %rax
+ cmovneq 24(%r14), %r9
+ addq %r15, %r10
+ adcq %rbp, %rdx
+ movq %r10, 72(%rdi)
+ adcq %r12, %r8
+ movq %rdx, 80(%rdi)
+ adcq %r13, %r9
+ movq %r8, 88(%rdi)
+ movq %r9, 96(%rdi)
+ adcq -40(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 104(%rdi)
+ adcq -32(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 112(%rdi)
+ adcq -24(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 120(%rdi)
+ adcq -16(%rsp), %r11 # 8-byte Folded Reload
+ movq %r11, 128(%rdi)
+ adcq -8(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, 136(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end141:
+ .size mcl_fpDbl_sub9Lbmi2, .Lfunc_end141-mcl_fpDbl_sub9Lbmi2
+
+
+ .section ".note.GNU-stack","",@progbits
diff --git a/vendor/github.com/tangerine-network/mcl/src/asm/x86-64.s b/vendor/github.com/tangerine-network/mcl/src/asm/x86-64.s
new file mode 100644
index 000000000..aa677d2ea
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/src/asm/x86-64.s
@@ -0,0 +1,16652 @@
+ .text
+ .file "<stdin>"
+ .globl makeNIST_P192L
+ .align 16, 0x90
+ .type makeNIST_P192L,@function
+makeNIST_P192L: # @makeNIST_P192L
+# BB#0:
+ movq $-1, %rax
+ movq $-2, %rdx
+ movq $-1, %rcx
+ retq
+.Lfunc_end0:
+ .size makeNIST_P192L, .Lfunc_end0-makeNIST_P192L
+
+ .globl mcl_fpDbl_mod_NIST_P192L
+ .align 16, 0x90
+ .type mcl_fpDbl_mod_NIST_P192L,@function
+mcl_fpDbl_mod_NIST_P192L: # @mcl_fpDbl_mod_NIST_P192L
+# BB#0:
+ pushq %r14
+ pushq %rbx
+ movq 16(%rsi), %r10
+ movq 24(%rsi), %r8
+ movq 40(%rsi), %r9
+ movq 8(%rsi), %rax
+ addq %r9, %rax
+ adcq $0, %r10
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ movq 32(%rsi), %r11
+ movq (%rsi), %r14
+ addq %r8, %r14
+ adcq %r11, %rax
+ adcq %r9, %r10
+ adcq $0, %rcx
+ addq %r9, %r14
+ adcq %r8, %rax
+ adcq %r11, %r10
+ adcq $0, %rcx
+ addq %rcx, %r14
+ adcq %rax, %rcx
+ adcq $0, %r10
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %r14, %rsi
+ addq $1, %rsi
+ movq %rcx, %rdx
+ adcq $1, %rdx
+ movq %r10, %rbx
+ adcq $0, %rbx
+ adcq $-1, %rax
+ andl $1, %eax
+ cmovneq %r14, %rsi
+ movq %rsi, (%rdi)
+ testb %al, %al
+ cmovneq %rcx, %rdx
+ movq %rdx, 8(%rdi)
+ cmovneq %r10, %rbx
+ movq %rbx, 16(%rdi)
+ popq %rbx
+ popq %r14
+ retq
+.Lfunc_end1:
+ .size mcl_fpDbl_mod_NIST_P192L, .Lfunc_end1-mcl_fpDbl_mod_NIST_P192L
+
+ .globl mcl_fp_sqr_NIST_P192L
+ .align 16, 0x90
+ .type mcl_fp_sqr_NIST_P192L,@function
+mcl_fp_sqr_NIST_P192L: # @mcl_fp_sqr_NIST_P192L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, -8(%rsp) # 8-byte Spill
+ movq 16(%rsi), %r11
+ movq (%rsi), %rbx
+ movq 8(%rsi), %rcx
+ movq %r11, %rax
+ mulq %rcx
+ movq %rdx, %rdi
+ movq %rax, %r14
+ movq %rcx, %rax
+ mulq %rcx
+ movq %rdx, %r15
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq %rbx
+ movq %rax, %r13
+ movq %rdx, %rcx
+ addq %rcx, %r12
+ adcq %r14, %r15
+ movq %rdi, %r10
+ adcq $0, %r10
+ movq %r11, %rax
+ mulq %rbx
+ movq %rdx, %r9
+ movq %rax, %rbp
+ movq %rbx, %rax
+ mulq %rbx
+ movq %rax, %r8
+ movq %rdx, %rsi
+ addq %r13, %rsi
+ adcq %rbp, %rcx
+ movq %r9, %rbx
+ adcq $0, %rbx
+ addq %r13, %rsi
+ adcq %r12, %rcx
+ adcq %r15, %rbx
+ adcq $0, %r10
+ movq %r11, %rax
+ mulq %r11
+ addq %r14, %r9
+ adcq %rdi, %rax
+ adcq $0, %rdx
+ addq %rbp, %rcx
+ adcq %rbx, %r9
+ adcq %r10, %rax
+ adcq $0, %rdx
+ addq %rdx, %rsi
+ adcq $0, %rcx
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ addq %r9, %r8
+ adcq %rax, %rsi
+ adcq %rdx, %rcx
+ adcq $0, %rbp
+ addq %rdx, %r8
+ adcq %r9, %rsi
+ adcq %rax, %rcx
+ adcq $0, %rbp
+ addq %rbp, %r8
+ adcq %rsi, %rbp
+ adcq $0, %rcx
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %r8, %rdx
+ addq $1, %rdx
+ movq %rbp, %rsi
+ adcq $1, %rsi
+ movq %rcx, %rdi
+ adcq $0, %rdi
+ adcq $-1, %rax
+ andl $1, %eax
+ cmovneq %r8, %rdx
+ movq -8(%rsp), %rbx # 8-byte Reload
+ movq %rdx, (%rbx)
+ testb %al, %al
+ cmovneq %rbp, %rsi
+ movq %rsi, 8(%rbx)
+ cmovneq %rcx, %rdi
+ movq %rdi, 16(%rbx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end2:
+ .size mcl_fp_sqr_NIST_P192L, .Lfunc_end2-mcl_fp_sqr_NIST_P192L
+
+ .globl mcl_fp_mulNIST_P192L
+ .align 16, 0x90
+ .type mcl_fp_mulNIST_P192L,@function
+mcl_fp_mulNIST_P192L: # @mcl_fp_mulNIST_P192L
+# BB#0:
+ pushq %r14
+ pushq %rbx
+ subq $56, %rsp
+ movq %rdi, %r14
+ leaq 8(%rsp), %rdi
+ callq mcl_fpDbl_mulPre3L@PLT
+ movq 24(%rsp), %r9
+ movq 32(%rsp), %r8
+ movq 48(%rsp), %rdi
+ movq 16(%rsp), %rbx
+ addq %rdi, %rbx
+ adcq $0, %r9
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ movq 40(%rsp), %rsi
+ movq 8(%rsp), %rdx
+ addq %r8, %rdx
+ adcq %rsi, %rbx
+ adcq %rdi, %r9
+ adcq $0, %rcx
+ addq %rdi, %rdx
+ adcq %r8, %rbx
+ adcq %rsi, %r9
+ adcq $0, %rcx
+ addq %rcx, %rdx
+ adcq %rbx, %rcx
+ adcq $0, %r9
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq %rdx, %rdi
+ addq $1, %rdi
+ movq %rcx, %rbx
+ adcq $1, %rbx
+ movq %r9, %rax
+ adcq $0, %rax
+ adcq $-1, %rsi
+ andl $1, %esi
+ cmovneq %rdx, %rdi
+ movq %rdi, (%r14)
+ testb %sil, %sil
+ cmovneq %rcx, %rbx
+ movq %rbx, 8(%r14)
+ cmovneq %r9, %rax
+ movq %rax, 16(%r14)
+ addq $56, %rsp
+ popq %rbx
+ popq %r14
+ retq
+.Lfunc_end3:
+ .size mcl_fp_mulNIST_P192L, .Lfunc_end3-mcl_fp_mulNIST_P192L
+
+ .globl mcl_fpDbl_mod_NIST_P521L
+ .align 16, 0x90
+ .type mcl_fpDbl_mod_NIST_P521L,@function
+mcl_fpDbl_mod_NIST_P521L: # @mcl_fpDbl_mod_NIST_P521L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 120(%rsi), %r9
+ movq 128(%rsi), %r14
+ movq %r14, %r8
+ shldq $55, %r9, %r8
+ movq 112(%rsi), %r10
+ shldq $55, %r10, %r9
+ movq 104(%rsi), %r11
+ shldq $55, %r11, %r10
+ movq 96(%rsi), %r15
+ shldq $55, %r15, %r11
+ movq 88(%rsi), %r12
+ shldq $55, %r12, %r15
+ movq 80(%rsi), %rcx
+ shldq $55, %rcx, %r12
+ movq 64(%rsi), %rbx
+ movq 72(%rsi), %rax
+ shldq $55, %rax, %rcx
+ shrq $9, %r14
+ shldq $55, %rbx, %rax
+ andl $511, %ebx # imm = 0x1FF
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rcx
+ adcq 16(%rsi), %r12
+ adcq 24(%rsi), %r15
+ adcq 32(%rsi), %r11
+ adcq 40(%rsi), %r10
+ adcq 48(%rsi), %r9
+ adcq 56(%rsi), %r8
+ adcq %r14, %rbx
+ movq %rbx, %rsi
+ shrq $9, %rsi
+ andl $1, %esi
+ addq %rax, %rsi
+ adcq $0, %rcx
+ adcq $0, %r12
+ adcq $0, %r15
+ adcq $0, %r11
+ adcq $0, %r10
+ adcq $0, %r9
+ adcq $0, %r8
+ adcq $0, %rbx
+ movq %rsi, %rax
+ andq %r12, %rax
+ andq %r15, %rax
+ andq %r11, %rax
+ andq %r10, %rax
+ andq %r9, %rax
+ andq %r8, %rax
+ movq %rbx, %rdx
+ orq $-512, %rdx # imm = 0xFFFFFFFFFFFFFE00
+ andq %rax, %rdx
+ andq %rcx, %rdx
+ cmpq $-1, %rdx
+ je .LBB4_1
+# BB#3: # %nonzero
+ movq %rsi, (%rdi)
+ movq %rcx, 8(%rdi)
+ movq %r12, 16(%rdi)
+ movq %r15, 24(%rdi)
+ movq %r11, 32(%rdi)
+ movq %r10, 40(%rdi)
+ movq %r9, 48(%rdi)
+ movq %r8, 56(%rdi)
+ andl $511, %ebx # imm = 0x1FF
+ movq %rbx, 64(%rdi)
+ jmp .LBB4_2
+.LBB4_1: # %zero
+ movq $0, 64(%rdi)
+ movq $0, 56(%rdi)
+ movq $0, 48(%rdi)
+ movq $0, 40(%rdi)
+ movq $0, 32(%rdi)
+ movq $0, 24(%rdi)
+ movq $0, 16(%rdi)
+ movq $0, 8(%rdi)
+ movq $0, (%rdi)
+.LBB4_2: # %zero
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end4:
+ .size mcl_fpDbl_mod_NIST_P521L, .Lfunc_end4-mcl_fpDbl_mod_NIST_P521L
+
+ .globl mcl_fp_mulUnitPre1L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre1L,@function
+mcl_fp_mulUnitPre1L: # @mcl_fp_mulUnitPre1L
+# BB#0:
+ movq %rdx, %rax
+ mulq (%rsi)
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ retq
+.Lfunc_end5:
+ .size mcl_fp_mulUnitPre1L, .Lfunc_end5-mcl_fp_mulUnitPre1L
+
+ .globl mcl_fpDbl_mulPre1L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre1L,@function
+mcl_fpDbl_mulPre1L: # @mcl_fpDbl_mulPre1L
+# BB#0:
+ movq (%rdx), %rax
+ mulq (%rsi)
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ retq
+.Lfunc_end6:
+ .size mcl_fpDbl_mulPre1L, .Lfunc_end6-mcl_fpDbl_mulPre1L
+
+ .globl mcl_fpDbl_sqrPre1L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre1L,@function
+mcl_fpDbl_sqrPre1L: # @mcl_fpDbl_sqrPre1L
+# BB#0:
+ movq (%rsi), %rax
+ mulq %rax
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ retq
+.Lfunc_end7:
+ .size mcl_fpDbl_sqrPre1L, .Lfunc_end7-mcl_fpDbl_sqrPre1L
+
+ .globl mcl_fp_mont1L
+ .align 16, 0x90
+ .type mcl_fp_mont1L,@function
+mcl_fp_mont1L: # @mcl_fp_mont1L
+# BB#0:
+ movq (%rsi), %rax
+ mulq (%rdx)
+ movq %rax, %rsi
+ movq %rdx, %r8
+ movq -8(%rcx), %rax
+ imulq %rsi, %rax
+ movq (%rcx), %rcx
+ mulq %rcx
+ addq %rsi, %rax
+ adcq %r8, %rdx
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rdx, %rsi
+ subq %rcx, %rsi
+ sbbq $0, %rax
+ testb $1, %al
+ cmovneq %rdx, %rsi
+ movq %rsi, (%rdi)
+ retq
+.Lfunc_end8:
+ .size mcl_fp_mont1L, .Lfunc_end8-mcl_fp_mont1L
+
+ .globl mcl_fp_montNF1L
+ .align 16, 0x90
+ .type mcl_fp_montNF1L,@function
+mcl_fp_montNF1L: # @mcl_fp_montNF1L
+# BB#0:
+ movq (%rsi), %rax
+ mulq (%rdx)
+ movq %rax, %rsi
+ movq %rdx, %r8
+ movq -8(%rcx), %rax
+ imulq %rsi, %rax
+ movq (%rcx), %rcx
+ mulq %rcx
+ addq %rsi, %rax
+ adcq %r8, %rdx
+ movq %rdx, %rax
+ subq %rcx, %rax
+ cmovsq %rdx, %rax
+ movq %rax, (%rdi)
+ retq
+.Lfunc_end9:
+ .size mcl_fp_montNF1L, .Lfunc_end9-mcl_fp_montNF1L
+
+ .globl mcl_fp_montRed1L
+ .align 16, 0x90
+ .type mcl_fp_montRed1L,@function
+mcl_fp_montRed1L: # @mcl_fp_montRed1L
+# BB#0:
+ movq (%rsi), %rcx
+ movq -8(%rdx), %rax
+ imulq %rcx, %rax
+ movq (%rdx), %r8
+ mulq %r8
+ addq %rcx, %rax
+ adcq 8(%rsi), %rdx
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rdx, %rcx
+ subq %r8, %rcx
+ sbbq $0, %rax
+ testb $1, %al
+ cmovneq %rdx, %rcx
+ movq %rcx, (%rdi)
+ retq
+.Lfunc_end10:
+ .size mcl_fp_montRed1L, .Lfunc_end10-mcl_fp_montRed1L
+
+ .globl mcl_fp_addPre1L
+ .align 16, 0x90
+ .type mcl_fp_addPre1L,@function
+mcl_fp_addPre1L: # @mcl_fp_addPre1L
+# BB#0:
+ movq (%rdx), %rax
+ addq (%rsi), %rax
+ movq %rax, (%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end11:
+ .size mcl_fp_addPre1L, .Lfunc_end11-mcl_fp_addPre1L
+
+ .globl mcl_fp_subPre1L
+ .align 16, 0x90
+ .type mcl_fp_subPre1L,@function
+mcl_fp_subPre1L: # @mcl_fp_subPre1L
+# BB#0:
+ movq (%rsi), %rcx
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ movq %rcx, (%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end12:
+ .size mcl_fp_subPre1L, .Lfunc_end12-mcl_fp_subPre1L
+
+ .globl mcl_fp_shr1_1L
+ .align 16, 0x90
+ .type mcl_fp_shr1_1L,@function
+mcl_fp_shr1_1L: # @mcl_fp_shr1_1L
+# BB#0:
+ movq (%rsi), %rax
+ shrq %rax
+ movq %rax, (%rdi)
+ retq
+.Lfunc_end13:
+ .size mcl_fp_shr1_1L, .Lfunc_end13-mcl_fp_shr1_1L
+
+ .globl mcl_fp_add1L
+ .align 16, 0x90
+ .type mcl_fp_add1L,@function
+mcl_fp_add1L: # @mcl_fp_add1L
+# BB#0:
+ movq (%rdx), %rax
+ addq (%rsi), %rax
+ movq %rax, (%rdi)
+ sbbq %rdx, %rdx
+ andl $1, %edx
+ subq (%rcx), %rax
+ sbbq $0, %rdx
+ testb $1, %dl
+ jne .LBB14_2
+# BB#1: # %nocarry
+ movq %rax, (%rdi)
+.LBB14_2: # %carry
+ retq
+.Lfunc_end14:
+ .size mcl_fp_add1L, .Lfunc_end14-mcl_fp_add1L
+
+ .globl mcl_fp_addNF1L
+ .align 16, 0x90
+ .type mcl_fp_addNF1L,@function
+mcl_fp_addNF1L: # @mcl_fp_addNF1L
+# BB#0:
+ movq (%rdx), %rax
+ addq (%rsi), %rax
+ movq %rax, %rdx
+ subq (%rcx), %rdx
+ cmovsq %rax, %rdx
+ movq %rdx, (%rdi)
+ retq
+.Lfunc_end15:
+ .size mcl_fp_addNF1L, .Lfunc_end15-mcl_fp_addNF1L
+
+ .globl mcl_fp_sub1L
+ .align 16, 0x90
+ .type mcl_fp_sub1L,@function
+mcl_fp_sub1L: # @mcl_fp_sub1L
+# BB#0:
+ movq (%rsi), %rax
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ movq %rax, (%rdi)
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB16_2
+# BB#1: # %nocarry
+ retq
+.LBB16_2: # %carry
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ retq
+.Lfunc_end16:
+ .size mcl_fp_sub1L, .Lfunc_end16-mcl_fp_sub1L
+
+ .globl mcl_fp_subNF1L
+ .align 16, 0x90
+ .type mcl_fp_subNF1L,@function
+mcl_fp_subNF1L: # @mcl_fp_subNF1L
+# BB#0:
+ movq (%rsi), %rax
+ subq (%rdx), %rax
+ movq %rax, %rdx
+ sarq $63, %rdx
+ andq (%rcx), %rdx
+ addq %rax, %rdx
+ movq %rdx, (%rdi)
+ retq
+.Lfunc_end17:
+ .size mcl_fp_subNF1L, .Lfunc_end17-mcl_fp_subNF1L
+
+ .globl mcl_fpDbl_add1L
+ .align 16, 0x90
+ .type mcl_fpDbl_add1L,@function
+mcl_fpDbl_add1L: # @mcl_fpDbl_add1L
+# BB#0:
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ movq %rax, (%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rdx, %rsi
+ subq (%rcx), %rsi
+ sbbq $0, %rax
+ testb $1, %al
+ cmovneq %rdx, %rsi
+ movq %rsi, 8(%rdi)
+ retq
+.Lfunc_end18:
+ .size mcl_fpDbl_add1L, .Lfunc_end18-mcl_fpDbl_add1L
+
+ .globl mcl_fpDbl_sub1L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub1L,@function
+mcl_fpDbl_sub1L: # @mcl_fpDbl_sub1L
+# BB#0:
+ movq (%rsi), %rax
+ movq 8(%rsi), %r8
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r8
+ movq %rax, (%rdi)
+ movl $0, %eax
+ sbbq $0, %rax
+ testb $1, %al
+ cmovneq (%rcx), %rsi
+ addq %r8, %rsi
+ movq %rsi, 8(%rdi)
+ retq
+.Lfunc_end19:
+ .size mcl_fpDbl_sub1L, .Lfunc_end19-mcl_fpDbl_sub1L
+
+ .globl mcl_fp_mulUnitPre2L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre2L,@function
+mcl_fp_mulUnitPre2L: # @mcl_fp_mulUnitPre2L
+# BB#0:
+ movq %rdx, %r8
+ movq %r8, %rax
+ mulq 8(%rsi)
+ movq %rdx, %rcx
+ movq %rax, %r9
+ movq %r8, %rax
+ mulq (%rsi)
+ movq %rax, (%rdi)
+ addq %r9, %rdx
+ movq %rdx, 8(%rdi)
+ adcq $0, %rcx
+ movq %rcx, 16(%rdi)
+ retq
+.Lfunc_end20:
+ .size mcl_fp_mulUnitPre2L, .Lfunc_end20-mcl_fp_mulUnitPre2L
+
+ .globl mcl_fpDbl_mulPre2L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre2L,@function
+mcl_fpDbl_mulPre2L: # @mcl_fpDbl_mulPre2L
+# BB#0:
+ pushq %r14
+ pushq %rbx
+ movq %rdx, %r10
+ movq (%rsi), %r8
+ movq 8(%rsi), %r11
+ movq (%r10), %rcx
+ movq %r8, %rax
+ mulq %rcx
+ movq %rdx, %r9
+ movq %rax, (%rdi)
+ movq %r11, %rax
+ mulq %rcx
+ movq %rdx, %r14
+ movq %rax, %rsi
+ addq %r9, %rsi
+ adcq $0, %r14
+ movq 8(%r10), %rbx
+ movq %r11, %rax
+ mulq %rbx
+ movq %rdx, %r9
+ movq %rax, %rcx
+ movq %r8, %rax
+ mulq %rbx
+ addq %rsi, %rax
+ movq %rax, 8(%rdi)
+ adcq %r14, %rcx
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %rdx, %rcx
+ movq %rcx, 16(%rdi)
+ adcq %r9, %rax
+ movq %rax, 24(%rdi)
+ popq %rbx
+ popq %r14
+ retq
+.Lfunc_end21:
+ .size mcl_fpDbl_mulPre2L, .Lfunc_end21-mcl_fpDbl_mulPre2L
+
+ .globl mcl_fpDbl_sqrPre2L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre2L,@function
+mcl_fpDbl_sqrPre2L: # @mcl_fpDbl_sqrPre2L
+# BB#0:
+ movq (%rsi), %rcx
+ movq 8(%rsi), %r8
+ movq %rcx, %rax
+ mulq %rcx
+ movq %rdx, %rsi
+ movq %rax, (%rdi)
+ movq %r8, %rax
+ mulq %rcx
+ movq %rdx, %r9
+ movq %rax, %r10
+ addq %r10, %rsi
+ movq %r9, %rcx
+ adcq $0, %rcx
+ movq %r8, %rax
+ mulq %r8
+ addq %r10, %rsi
+ movq %rsi, 8(%rdi)
+ adcq %rcx, %rax
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %r9, %rax
+ movq %rax, 16(%rdi)
+ adcq %rdx, %rcx
+ movq %rcx, 24(%rdi)
+ retq
+.Lfunc_end22:
+ .size mcl_fpDbl_sqrPre2L, .Lfunc_end22-mcl_fpDbl_sqrPre2L
+
+ .globl mcl_fp_mont2L
+ .align 16, 0x90
+ .type mcl_fp_mont2L,@function
+mcl_fp_mont2L: # @mcl_fp_mont2L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, -8(%rsp) # 8-byte Spill
+ movq (%rsi), %r8
+ movq 8(%rsi), %r11
+ movq (%rdx), %rsi
+ movq 8(%rdx), %r9
+ movq %r11, %rax
+ mulq %rsi
+ movq %rdx, %r15
+ movq %rax, %r10
+ movq %r8, %rax
+ mulq %rsi
+ movq %rax, %r14
+ movq %rdx, %r13
+ addq %r10, %r13
+ adcq $0, %r15
+ movq -8(%rcx), %r10
+ movq (%rcx), %rbp
+ movq %r14, %rsi
+ imulq %r10, %rsi
+ movq 8(%rcx), %rdi
+ movq %rsi, %rax
+ mulq %rdi
+ movq %rdx, %rcx
+ movq %rax, %r12
+ movq %rsi, %rax
+ mulq %rbp
+ movq %rdx, %rbx
+ addq %r12, %rbx
+ adcq $0, %rcx
+ addq %r14, %rax
+ adcq %r13, %rbx
+ adcq %r15, %rcx
+ sbbq %r15, %r15
+ andl $1, %r15d
+ movq %r9, %rax
+ mulq %r11
+ movq %rdx, %r14
+ movq %rax, %r11
+ movq %r9, %rax
+ mulq %r8
+ movq %rax, %r8
+ movq %rdx, %rsi
+ addq %r11, %rsi
+ adcq $0, %r14
+ addq %rbx, %r8
+ adcq %rcx, %rsi
+ adcq %r15, %r14
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ imulq %r8, %r10
+ movq %r10, %rax
+ mulq %rdi
+ movq %rdx, %rcx
+ movq %rax, %r9
+ movq %r10, %rax
+ mulq %rbp
+ addq %r9, %rdx
+ adcq $0, %rcx
+ addq %r8, %rax
+ adcq %rsi, %rdx
+ adcq %r14, %rcx
+ adcq $0, %rbx
+ movq %rdx, %rax
+ subq %rbp, %rax
+ movq %rcx, %rsi
+ sbbq %rdi, %rsi
+ sbbq $0, %rbx
+ andl $1, %ebx
+ cmovneq %rcx, %rsi
+ testb %bl, %bl
+ cmovneq %rdx, %rax
+ movq -8(%rsp), %rcx # 8-byte Reload
+ movq %rax, (%rcx)
+ movq %rsi, 8(%rcx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end23:
+ .size mcl_fp_mont2L, .Lfunc_end23-mcl_fp_mont2L
+
+ .globl mcl_fp_montNF2L
+ .align 16, 0x90
+ .type mcl_fp_montNF2L,@function
+mcl_fp_montNF2L: # @mcl_fp_montNF2L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, -8(%rsp) # 8-byte Spill
+ movq (%rsi), %r8
+ movq 8(%rsi), %r11
+ movq (%rdx), %rbp
+ movq 8(%rdx), %r9
+ movq %r8, %rax
+ mulq %rbp
+ movq %rax, %rsi
+ movq %rdx, %r14
+ movq -8(%rcx), %r10
+ movq (%rcx), %r15
+ movq %rsi, %rbx
+ imulq %r10, %rbx
+ movq 8(%rcx), %rdi
+ movq %rbx, %rax
+ mulq %rdi
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq %rax, %r13
+ movq %rbx, %rax
+ mulq %r15
+ movq %rdx, %r12
+ movq %rax, %rbx
+ movq %r11, %rax
+ mulq %rbp
+ movq %rdx, %rcx
+ movq %rax, %rbp
+ addq %r14, %rbp
+ adcq $0, %rcx
+ addq %rsi, %rbx
+ adcq %r13, %rbp
+ adcq $0, %rcx
+ addq %r12, %rbp
+ adcq -16(%rsp), %rcx # 8-byte Folded Reload
+ movq %r9, %rax
+ mulq %r11
+ movq %rdx, %rsi
+ movq %rax, %r11
+ movq %r9, %rax
+ mulq %r8
+ movq %rax, %r8
+ movq %rdx, %rbx
+ addq %r11, %rbx
+ adcq $0, %rsi
+ addq %rbp, %r8
+ adcq %rcx, %rbx
+ adcq $0, %rsi
+ imulq %r8, %r10
+ movq %r10, %rax
+ mulq %rdi
+ movq %rdx, %rcx
+ movq %rax, %rbp
+ movq %r10, %rax
+ mulq %r15
+ addq %r8, %rax
+ adcq %rbx, %rbp
+ adcq $0, %rsi
+ addq %rdx, %rbp
+ adcq %rcx, %rsi
+ movq %rbp, %rax
+ subq %r15, %rax
+ movq %rsi, %rcx
+ sbbq %rdi, %rcx
+ cmovsq %rbp, %rax
+ movq -8(%rsp), %rdx # 8-byte Reload
+ movq %rax, (%rdx)
+ cmovsq %rsi, %rcx
+ movq %rcx, 8(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end24:
+ .size mcl_fp_montNF2L, .Lfunc_end24-mcl_fp_montNF2L
+
+ .globl mcl_fp_montRed2L
+ .align 16, 0x90
+ .type mcl_fp_montRed2L,@function
+mcl_fp_montRed2L: # @mcl_fp_montRed2L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq -8(%rdx), %r9
+ movq (%rdx), %r11
+ movq (%rsi), %rbx
+ movq %rbx, %rcx
+ imulq %r9, %rcx
+ movq 8(%rdx), %r14
+ movq %rcx, %rax
+ mulq %r14
+ movq %rdx, %r8
+ movq %rax, %r10
+ movq %rcx, %rax
+ mulq %r11
+ movq %rdx, %rcx
+ addq %r10, %rcx
+ adcq $0, %r8
+ movq 24(%rsi), %r15
+ addq %rbx, %rax
+ adcq 8(%rsi), %rcx
+ adcq 16(%rsi), %r8
+ adcq $0, %r15
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ imulq %rcx, %r9
+ movq %r9, %rax
+ mulq %r14
+ movq %rdx, %rsi
+ movq %rax, %r10
+ movq %r9, %rax
+ mulq %r11
+ addq %r10, %rdx
+ adcq $0, %rsi
+ addq %rcx, %rax
+ adcq %r8, %rdx
+ adcq %r15, %rsi
+ adcq $0, %rbx
+ movq %rdx, %rax
+ subq %r11, %rax
+ movq %rsi, %rcx
+ sbbq %r14, %rcx
+ sbbq $0, %rbx
+ andl $1, %ebx
+ cmovneq %rsi, %rcx
+ testb %bl, %bl
+ cmovneq %rdx, %rax
+ movq %rax, (%rdi)
+ movq %rcx, 8(%rdi)
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end25:
+ .size mcl_fp_montRed2L, .Lfunc_end25-mcl_fp_montRed2L
+
+ .globl mcl_fp_addPre2L
+ .align 16, 0x90
+ .type mcl_fp_addPre2L,@function
+mcl_fp_addPre2L: # @mcl_fp_addPre2L
+# BB#0:
+ movq (%rdx), %rax
+ movq 8(%rdx), %rcx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rcx
+ movq %rax, (%rdi)
+ movq %rcx, 8(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end26:
+ .size mcl_fp_addPre2L, .Lfunc_end26-mcl_fp_addPre2L
+
+ .globl mcl_fp_subPre2L
+ .align 16, 0x90
+ .type mcl_fp_subPre2L,@function
+mcl_fp_subPre2L: # @mcl_fp_subPre2L
+# BB#0:
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ sbbq 8(%rdx), %rsi
+ movq %rcx, (%rdi)
+ movq %rsi, 8(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end27:
+ .size mcl_fp_subPre2L, .Lfunc_end27-mcl_fp_subPre2L
+
+ .globl mcl_fp_shr1_2L
+ .align 16, 0x90
+ .type mcl_fp_shr1_2L,@function
+mcl_fp_shr1_2L: # @mcl_fp_shr1_2L
+# BB#0:
+ movq (%rsi), %rax
+ movq 8(%rsi), %rcx
+ shrdq $1, %rcx, %rax
+ movq %rax, (%rdi)
+ shrq %rcx
+ movq %rcx, 8(%rdi)
+ retq
+.Lfunc_end28:
+ .size mcl_fp_shr1_2L, .Lfunc_end28-mcl_fp_shr1_2L
+
+ .globl mcl_fp_add2L
+ .align 16, 0x90
+ .type mcl_fp_add2L,@function
+mcl_fp_add2L: # @mcl_fp_add2L
+# BB#0:
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB29_2
+# BB#1: # %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+.LBB29_2: # %carry
+ retq
+.Lfunc_end29:
+ .size mcl_fp_add2L, .Lfunc_end29-mcl_fp_add2L
+
+ .globl mcl_fp_addNF2L
+ .align 16, 0x90
+ .type mcl_fp_addNF2L,@function
+mcl_fp_addNF2L: # @mcl_fp_addNF2L
+# BB#0:
+ movq (%rdx), %rax
+ movq 8(%rdx), %r8
+ addq (%rsi), %rax
+ adcq 8(%rsi), %r8
+ movq %rax, %rsi
+ subq (%rcx), %rsi
+ movq %r8, %rdx
+ sbbq 8(%rcx), %rdx
+ testq %rdx, %rdx
+ cmovsq %rax, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r8, %rdx
+ movq %rdx, 8(%rdi)
+ retq
+.Lfunc_end30:
+ .size mcl_fp_addNF2L, .Lfunc_end30-mcl_fp_addNF2L
+
+ .globl mcl_fp_sub2L
+ .align 16, 0x90
+ .type mcl_fp_sub2L,@function
+mcl_fp_sub2L: # @mcl_fp_sub2L
+# BB#0:
+ movq (%rsi), %rax
+ movq 8(%rsi), %r8
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r8
+ movq %rax, (%rdi)
+ movq %r8, 8(%rdi)
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB31_2
+# BB#1: # %nocarry
+ retq
+.LBB31_2: # %carry
+ movq 8(%rcx), %rdx
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %r8, %rdx
+ movq %rdx, 8(%rdi)
+ retq
+.Lfunc_end31:
+ .size mcl_fp_sub2L, .Lfunc_end31-mcl_fp_sub2L
+
+ .globl mcl_fp_subNF2L
+ .align 16, 0x90
+ .type mcl_fp_subNF2L,@function
+mcl_fp_subNF2L: # @mcl_fp_subNF2L
+# BB#0:
+ movq (%rsi), %r8
+ movq 8(%rsi), %rsi
+ subq (%rdx), %r8
+ sbbq 8(%rdx), %rsi
+ movq %rsi, %rdx
+ sarq $63, %rdx
+ movq 8(%rcx), %rax
+ andq %rdx, %rax
+ andq (%rcx), %rdx
+ addq %r8, %rdx
+ movq %rdx, (%rdi)
+ adcq %rsi, %rax
+ movq %rax, 8(%rdi)
+ retq
+.Lfunc_end32:
+ .size mcl_fp_subNF2L, .Lfunc_end32-mcl_fp_subNF2L
+
+ .globl mcl_fpDbl_add2L
+ .align 16, 0x90
+ .type mcl_fpDbl_add2L,@function
+mcl_fpDbl_add2L: # @mcl_fpDbl_add2L
+# BB#0:
+ movq 24(%rdx), %r8
+ movq 24(%rsi), %r9
+ movq 16(%rdx), %r10
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r10
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ adcq %r8, %r9
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %r10, %rdx
+ subq (%rcx), %rdx
+ movq %r9, %rsi
+ sbbq 8(%rcx), %rsi
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %r10, %rdx
+ movq %rdx, 16(%rdi)
+ testb %al, %al
+ cmovneq %r9, %rsi
+ movq %rsi, 24(%rdi)
+ retq
+.Lfunc_end33:
+ .size mcl_fpDbl_add2L, .Lfunc_end33-mcl_fpDbl_add2L
+
+ .globl mcl_fpDbl_sub2L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub2L,@function
+mcl_fpDbl_sub2L: # @mcl_fpDbl_sub2L
+# BB#0:
+ movq 24(%rdx), %r8
+ movq 24(%rsi), %r9
+ movq 16(%rsi), %r10
+ movq (%rsi), %r11
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %r11
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %r10
+ movq %r11, (%rdi)
+ movq %rsi, 8(%rdi)
+ sbbq %r8, %r9
+ movl $0, %edx
+ sbbq $0, %rdx
+ andl $1, %edx
+ movq (%rcx), %rsi
+ cmoveq %rax, %rsi
+ testb %dl, %dl
+ cmovneq 8(%rcx), %rax
+ addq %r10, %rsi
+ movq %rsi, 16(%rdi)
+ adcq %r9, %rax
+ movq %rax, 24(%rdi)
+ retq
+.Lfunc_end34:
+ .size mcl_fpDbl_sub2L, .Lfunc_end34-mcl_fpDbl_sub2L
+
+ .globl mcl_fp_mulUnitPre3L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre3L,@function
+mcl_fp_mulUnitPre3L: # @mcl_fp_mulUnitPre3L
+# BB#0:
+ movq %rdx, %rcx
+ movq %rcx, %rax
+ mulq 16(%rsi)
+ movq %rdx, %r8
+ movq %rax, %r9
+ movq %rcx, %rax
+ mulq 8(%rsi)
+ movq %rdx, %r10
+ movq %rax, %r11
+ movq %rcx, %rax
+ mulq (%rsi)
+ movq %rax, (%rdi)
+ addq %r11, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r9, %r10
+ movq %r10, 16(%rdi)
+ adcq $0, %r8
+ movq %r8, 24(%rdi)
+ retq
+.Lfunc_end35:
+ .size mcl_fp_mulUnitPre3L, .Lfunc_end35-mcl_fp_mulUnitPre3L
+
+ .globl mcl_fpDbl_mulPre3L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre3L,@function
+mcl_fpDbl_mulPre3L: # @mcl_fpDbl_mulPre3L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %r10
+ movq (%rsi), %r8
+ movq 8(%rsi), %r9
+ movq (%r10), %rbx
+ movq %r8, %rax
+ mulq %rbx
+ movq %rdx, %rcx
+ movq 16(%rsi), %r11
+ movq %rax, (%rdi)
+ movq %r11, %rax
+ mulq %rbx
+ movq %rdx, %r14
+ movq %rax, %rsi
+ movq %r9, %rax
+ mulq %rbx
+ movq %rdx, %r15
+ movq %rax, %rbx
+ addq %rcx, %rbx
+ adcq %rsi, %r15
+ adcq $0, %r14
+ movq 8(%r10), %rcx
+ movq %r11, %rax
+ mulq %rcx
+ movq %rdx, %r12
+ movq %rax, %rbp
+ movq %r9, %rax
+ mulq %rcx
+ movq %rdx, %r13
+ movq %rax, %rsi
+ movq %r8, %rax
+ mulq %rcx
+ addq %rbx, %rax
+ movq %rax, 8(%rdi)
+ adcq %r15, %rsi
+ adcq %r14, %rbp
+ sbbq %r14, %r14
+ andl $1, %r14d
+ addq %rdx, %rsi
+ adcq %r13, %rbp
+ adcq %r12, %r14
+ movq 16(%r10), %r15
+ movq %r11, %rax
+ mulq %r15
+ movq %rdx, %r10
+ movq %rax, %rbx
+ movq %r9, %rax
+ mulq %r15
+ movq %rdx, %r9
+ movq %rax, %rcx
+ movq %r8, %rax
+ mulq %r15
+ addq %rsi, %rax
+ movq %rax, 16(%rdi)
+ adcq %rbp, %rcx
+ adcq %r14, %rbx
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %rdx, %rcx
+ movq %rcx, 24(%rdi)
+ adcq %r9, %rbx
+ movq %rbx, 32(%rdi)
+ adcq %r10, %rax
+ movq %rax, 40(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end36:
+ .size mcl_fpDbl_mulPre3L, .Lfunc_end36-mcl_fpDbl_mulPre3L
+
+ .globl mcl_fpDbl_sqrPre3L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre3L,@function
+mcl_fpDbl_sqrPre3L: # @mcl_fpDbl_sqrPre3L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 16(%rsi), %r10
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ movq %rcx, %rax
+ mulq %rcx
+ movq %rdx, %rbx
+ movq %rax, (%rdi)
+ movq %r10, %rax
+ mulq %rcx
+ movq %rdx, %r8
+ movq %rax, %r11
+ movq %rsi, %rax
+ mulq %rcx
+ movq %rdx, %r14
+ movq %rax, %r12
+ addq %r12, %rbx
+ movq %r14, %r13
+ adcq %r11, %r13
+ movq %r8, %rcx
+ adcq $0, %rcx
+ movq %r10, %rax
+ mulq %rsi
+ movq %rdx, %r9
+ movq %rax, %r15
+ movq %rsi, %rax
+ mulq %rsi
+ movq %rax, %rsi
+ addq %r12, %rbx
+ movq %rbx, 8(%rdi)
+ adcq %r13, %rsi
+ adcq %r15, %rcx
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ addq %r14, %rsi
+ adcq %rdx, %rcx
+ adcq %r9, %rbx
+ movq %r10, %rax
+ mulq %r10
+ addq %r11, %rsi
+ movq %rsi, 16(%rdi)
+ adcq %r15, %rcx
+ adcq %rbx, %rax
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ addq %r8, %rcx
+ movq %rcx, 24(%rdi)
+ adcq %r9, %rax
+ movq %rax, 32(%rdi)
+ adcq %rdx, %rsi
+ movq %rsi, 40(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end37:
+ .size mcl_fpDbl_sqrPre3L, .Lfunc_end37-mcl_fpDbl_sqrPre3L
+
+ .globl mcl_fp_mont3L
+ .align 16, 0x90
+ .type mcl_fp_mont3L,@function
+mcl_fp_mont3L: # @mcl_fp_mont3L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %r10
+ movq %r10, -56(%rsp) # 8-byte Spill
+ movq %rdi, -48(%rsp) # 8-byte Spill
+ movq 16(%rsi), %rax
+ movq %rax, -64(%rsp) # 8-byte Spill
+ movq (%r10), %rdi
+ mulq %rdi
+ movq %rax, %rbp
+ movq %rdx, %r8
+ movq (%rsi), %rbx
+ movq %rbx, -32(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ mulq %rdi
+ movq %rdx, %r15
+ movq %rax, %rsi
+ movq %rbx, %rax
+ mulq %rdi
+ movq %rax, %r12
+ movq %rdx, %r11
+ addq %rsi, %r11
+ adcq %rbp, %r15
+ adcq $0, %r8
+ movq -8(%rcx), %r14
+ movq (%rcx), %rdi
+ movq %rdi, -24(%rsp) # 8-byte Spill
+ movq %r12, %rbp
+ imulq %r14, %rbp
+ movq 16(%rcx), %rdx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rbx
+ movq %rbx, -8(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdx
+ movq %rdx, %rcx
+ movq %rax, %r13
+ movq %rbp, %rax
+ mulq %rbx
+ movq %rdx, %rsi
+ movq %rax, %r9
+ movq %rbp, %rax
+ mulq %rdi
+ movq %rdx, %rbp
+ addq %r9, %rbp
+ adcq %r13, %rsi
+ adcq $0, %rcx
+ addq %r12, %rax
+ adcq %r11, %rbp
+ movq 8(%r10), %rbx
+ adcq %r15, %rsi
+ adcq %r8, %rcx
+ sbbq %rdi, %rdi
+ andl $1, %edi
+ movq %rbx, %rax
+ movq -64(%rsp), %r10 # 8-byte Reload
+ mulq %r10
+ movq %rdx, %r15
+ movq %rax, %r9
+ movq %rbx, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %r11
+ movq %rbx, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rax, %r8
+ movq %rdx, %rbx
+ addq %r11, %rbx
+ adcq %r9, %r12
+ adcq $0, %r15
+ addq %rbp, %r8
+ adcq %rsi, %rbx
+ adcq %rcx, %r12
+ adcq %rdi, %r15
+ sbbq %r11, %r11
+ andl $1, %r11d
+ movq %r8, %rcx
+ imulq %r14, %rcx
+ movq %rcx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %r9
+ movq %rcx, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbp
+ addq %rdi, %rbp
+ adcq %r9, %rsi
+ adcq $0, %r13
+ addq %r8, %rax
+ adcq %rbx, %rbp
+ adcq %r12, %rsi
+ adcq %r15, %r13
+ adcq $0, %r11
+ movq -56(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rcx
+ movq %rcx, %rax
+ mulq %r10
+ movq %rdx, %r8
+ movq %rax, %r10
+ movq %rcx, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rax, %r9
+ movq %rdx, %rcx
+ addq %rdi, %rcx
+ adcq %r10, %r15
+ adcq $0, %r8
+ addq %rbp, %r9
+ adcq %rsi, %rcx
+ adcq %r13, %r15
+ adcq %r11, %r8
+ sbbq %rdi, %rdi
+ andl $1, %edi
+ imulq %r9, %r14
+ movq %r14, %rax
+ movq -16(%rsp), %r12 # 8-byte Reload
+ mulq %r12
+ movq %rdx, %rbx
+ movq %rax, %r10
+ movq %r14, %rax
+ movq -8(%rsp), %r13 # 8-byte Reload
+ mulq %r13
+ movq %rdx, %rsi
+ movq %rax, %r11
+ movq %r14, %rax
+ movq -24(%rsp), %rbp # 8-byte Reload
+ mulq %rbp
+ addq %r11, %rdx
+ adcq %r10, %rsi
+ adcq $0, %rbx
+ addq %r9, %rax
+ adcq %rcx, %rdx
+ adcq %r15, %rsi
+ adcq %r8, %rbx
+ adcq $0, %rdi
+ movq %rdx, %rax
+ subq %rbp, %rax
+ movq %rsi, %rcx
+ sbbq %r13, %rcx
+ movq %rbx, %rbp
+ sbbq %r12, %rbp
+ sbbq $0, %rdi
+ andl $1, %edi
+ cmovneq %rbx, %rbp
+ testb %dil, %dil
+ cmovneq %rdx, %rax
+ movq -48(%rsp), %rdx # 8-byte Reload
+ movq %rax, (%rdx)
+ cmovneq %rsi, %rcx
+ movq %rcx, 8(%rdx)
+ movq %rbp, 16(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end38:
+ .size mcl_fp_mont3L, .Lfunc_end38-mcl_fp_mont3L
+
+ .globl mcl_fp_montNF3L
+ .align 16, 0x90
+ .type mcl_fp_montNF3L,@function
+mcl_fp_montNF3L: # @mcl_fp_montNF3L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movq %rdi, -32(%rsp) # 8-byte Spill
+ movq 16(%rsi), %r10
+ movq %r10, -40(%rsp) # 8-byte Spill
+ movq (%rdx), %rbp
+ movq %r10, %rax
+ mulq %rbp
+ movq %rax, %r14
+ movq %rdx, %r15
+ movq (%rsi), %rbx
+ movq %rbx, -64(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ mulq %rbp
+ movq %rdx, %rdi
+ movq %rax, %rsi
+ movq %rbx, %rax
+ mulq %rbp
+ movq %rax, %r13
+ movq %rdx, %rbp
+ addq %rsi, %rbp
+ adcq %r14, %rdi
+ adcq $0, %r15
+ movq -8(%rcx), %r14
+ movq (%rcx), %r11
+ movq %r11, -48(%rsp) # 8-byte Spill
+ movq %r13, %rbx
+ imulq %r14, %rbx
+ movq 16(%rcx), %rdx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, -56(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq %rdx
+ movq %rdx, %r8
+ movq %rax, %r12
+ movq %rbx, %rax
+ mulq %rcx
+ movq %rdx, %r9
+ movq %rax, %rcx
+ movq %rbx, %rax
+ mulq %r11
+ addq %r13, %rax
+ adcq %rbp, %rcx
+ adcq %rdi, %r12
+ adcq $0, %r15
+ addq %rdx, %rcx
+ movq -8(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rbp
+ adcq %r9, %r12
+ adcq %r8, %r15
+ movq %rbp, %rax
+ mulq %r10
+ movq %rdx, %rsi
+ movq %rax, %r8
+ movq %rbp, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r9
+ movq %rbp, %rax
+ movq -64(%rsp), %r10 # 8-byte Reload
+ mulq %r10
+ movq %rax, %r13
+ movq %rdx, %rbp
+ addq %r9, %rbp
+ adcq %r8, %rbx
+ adcq $0, %rsi
+ addq %rcx, %r13
+ adcq %r12, %rbp
+ adcq %r15, %rbx
+ adcq $0, %rsi
+ movq %r13, %rcx
+ imulq %r14, %rcx
+ movq %rcx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r15
+ movq %rcx, %rax
+ movq -56(%rsp), %rdi # 8-byte Reload
+ mulq %rdi
+ movq %rdx, %r9
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq %r11
+ addq %r13, %rax
+ adcq %rbp, %r12
+ adcq %rbx, %r15
+ adcq $0, %rsi
+ addq %rdx, %r12
+ adcq %r9, %r15
+ adcq %r8, %rsi
+ movq -8(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rbx
+ movq %rbx, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r8
+ movq %rbx, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r9
+ movq %rbx, %rax
+ mulq %r10
+ movq %rax, %r10
+ movq %rdx, %rbx
+ addq %r9, %rbx
+ adcq %r8, %rcx
+ adcq $0, %rbp
+ addq %r12, %r10
+ adcq %r15, %rbx
+ adcq %rsi, %rcx
+ adcq $0, %rbp
+ imulq %r10, %r14
+ movq %r14, %rax
+ movq -16(%rsp), %r15 # 8-byte Reload
+ mulq %r15
+ movq %rdx, %r8
+ movq %rax, %rsi
+ movq %r14, %rax
+ movq %rdi, %r11
+ mulq %r11
+ movq %rdx, %r9
+ movq %rax, %rdi
+ movq %r14, %rax
+ movq -48(%rsp), %r14 # 8-byte Reload
+ mulq %r14
+ addq %r10, %rax
+ adcq %rbx, %rdi
+ adcq %rcx, %rsi
+ adcq $0, %rbp
+ addq %rdx, %rdi
+ adcq %r9, %rsi
+ adcq %r8, %rbp
+ movq %rdi, %rax
+ subq %r14, %rax
+ movq %rsi, %rcx
+ sbbq %r11, %rcx
+ movq %rbp, %rbx
+ sbbq %r15, %rbx
+ movq %rbx, %rdx
+ sarq $63, %rdx
+ cmovsq %rdi, %rax
+ movq -32(%rsp), %rdx # 8-byte Reload
+ movq %rax, (%rdx)
+ cmovsq %rsi, %rcx
+ movq %rcx, 8(%rdx)
+ cmovsq %rbp, %rbx
+ movq %rbx, 16(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end39:
+ .size mcl_fp_montNF3L, .Lfunc_end39-mcl_fp_montNF3L
+
+ .globl mcl_fp_montRed3L
+ .align 16, 0x90
+ .type mcl_fp_montRed3L,@function
+mcl_fp_montRed3L: # @mcl_fp_montRed3L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rdi, -8(%rsp) # 8-byte Spill
+ movq -8(%rcx), %r9
+ movq (%rcx), %rdi
+ movq %rdi, -16(%rsp) # 8-byte Spill
+ movq (%rsi), %r15
+ movq %r15, %rbx
+ imulq %r9, %rbx
+ movq 16(%rcx), %rbp
+ movq %rbp, -24(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq %rbp
+ movq %rax, %r11
+ movq %rdx, %r8
+ movq 8(%rcx), %rcx
+ movq %rcx, -32(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq %rcx
+ movq %rcx, %r12
+ movq %rdx, %r10
+ movq %rax, %r14
+ movq %rbx, %rax
+ mulq %rdi
+ movq %rdi, %rbx
+ movq %rdx, %rcx
+ addq %r14, %rcx
+ adcq %r11, %r10
+ adcq $0, %r8
+ movq 40(%rsi), %rdi
+ movq 32(%rsi), %r13
+ addq %r15, %rax
+ adcq 8(%rsi), %rcx
+ adcq 16(%rsi), %r10
+ adcq 24(%rsi), %r8
+ adcq $0, %r13
+ adcq $0, %rdi
+ sbbq %r15, %r15
+ andl $1, %r15d
+ movq %rcx, %rsi
+ imulq %r9, %rsi
+ movq %rsi, %rax
+ mulq %rbp
+ movq %rdx, %r11
+ movq %rax, %rbp
+ movq %rsi, %rax
+ mulq %r12
+ movq %rdx, %r14
+ movq %rax, %r12
+ movq %rsi, %rax
+ mulq %rbx
+ movq %rdx, %rbx
+ addq %r12, %rbx
+ adcq %rbp, %r14
+ adcq $0, %r11
+ addq %rcx, %rax
+ adcq %r10, %rbx
+ adcq %r8, %r14
+ adcq %r13, %r11
+ adcq $0, %rdi
+ adcq $0, %r15
+ imulq %rbx, %r9
+ movq %r9, %rax
+ movq -24(%rsp), %r12 # 8-byte Reload
+ mulq %r12
+ movq %rdx, %rbp
+ movq %rax, %r8
+ movq %r9, %rax
+ movq -32(%rsp), %r13 # 8-byte Reload
+ mulq %r13
+ movq %rdx, %rsi
+ movq %rax, %r10
+ movq %r9, %rax
+ movq -16(%rsp), %rcx # 8-byte Reload
+ mulq %rcx
+ addq %r10, %rdx
+ adcq %r8, %rsi
+ adcq $0, %rbp
+ addq %rbx, %rax
+ adcq %r14, %rdx
+ adcq %r11, %rsi
+ adcq %rdi, %rbp
+ adcq $0, %r15
+ movq %rdx, %rax
+ subq %rcx, %rax
+ movq %rsi, %rdi
+ sbbq %r13, %rdi
+ movq %rbp, %rcx
+ sbbq %r12, %rcx
+ sbbq $0, %r15
+ andl $1, %r15d
+ cmovneq %rbp, %rcx
+ testb %r15b, %r15b
+ cmovneq %rdx, %rax
+ movq -8(%rsp), %rdx # 8-byte Reload
+ movq %rax, (%rdx)
+ cmovneq %rsi, %rdi
+ movq %rdi, 8(%rdx)
+ movq %rcx, 16(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end40:
+ .size mcl_fp_montRed3L, .Lfunc_end40-mcl_fp_montRed3L
+
+ .globl mcl_fp_addPre3L
+ .align 16, 0x90
+ .type mcl_fp_addPre3L,@function
+mcl_fp_addPre3L: # @mcl_fp_addPre3L
+# BB#0:
+ movq 16(%rdx), %rax
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rax
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end41:
+ .size mcl_fp_addPre3L, .Lfunc_end41-mcl_fp_addPre3L
+
+ .globl mcl_fp_subPre3L
+ .align 16, 0x90
+ .type mcl_fp_subPre3L,@function
+mcl_fp_subPre3L: # @mcl_fp_subPre3L
+# BB#0:
+ movq 16(%rsi), %r8
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %r8
+ movq %rcx, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %r8, 16(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end42:
+ .size mcl_fp_subPre3L, .Lfunc_end42-mcl_fp_subPre3L
+
+ .globl mcl_fp_shr1_3L
+ .align 16, 0x90
+ .type mcl_fp_shr1_3L,@function
+mcl_fp_shr1_3L: # @mcl_fp_shr1_3L
+# BB#0:
+ movq 16(%rsi), %rax
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rdx
+ shrdq $1, %rdx, %rcx
+ movq %rcx, (%rdi)
+ shrdq $1, %rax, %rdx
+ movq %rdx, 8(%rdi)
+ shrq %rax
+ movq %rax, 16(%rdi)
+ retq
+.Lfunc_end43:
+ .size mcl_fp_shr1_3L, .Lfunc_end43-mcl_fp_shr1_3L
+
+ .globl mcl_fp_add3L
+ .align 16, 0x90
+ .type mcl_fp_add3L,@function
+mcl_fp_add3L: # @mcl_fp_add3L
+# BB#0:
+ movq 16(%rdx), %r8
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r8
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r8, 16(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB44_2
+# BB#1: # %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r8, 16(%rdi)
+.LBB44_2: # %carry
+ retq
+.Lfunc_end44:
+ .size mcl_fp_add3L, .Lfunc_end44-mcl_fp_add3L
+
+ .globl mcl_fp_addNF3L
+ .align 16, 0x90
+ .type mcl_fp_addNF3L,@function
+mcl_fp_addNF3L: # @mcl_fp_addNF3L
+# BB#0:
+ movq 16(%rdx), %r8
+ movq (%rdx), %r10
+ movq 8(%rdx), %r9
+ addq (%rsi), %r10
+ adcq 8(%rsi), %r9
+ adcq 16(%rsi), %r8
+ movq %r10, %rsi
+ subq (%rcx), %rsi
+ movq %r9, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r8, %rax
+ sbbq 16(%rcx), %rax
+ movq %rax, %rcx
+ sarq $63, %rcx
+ cmovsq %r10, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r9, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r8, %rax
+ movq %rax, 16(%rdi)
+ retq
+.Lfunc_end45:
+ .size mcl_fp_addNF3L, .Lfunc_end45-mcl_fp_addNF3L
+
+ .globl mcl_fp_sub3L
+ .align 16, 0x90
+ .type mcl_fp_sub3L,@function
+mcl_fp_sub3L: # @mcl_fp_sub3L
+# BB#0:
+ movq 16(%rsi), %r8
+ movq (%rsi), %rax
+ movq 8(%rsi), %r9
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r9
+ sbbq 16(%rdx), %r8
+ movq %rax, (%rdi)
+ movq %r9, 8(%rdi)
+ movq %r8, 16(%rdi)
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB46_2
+# BB#1: # %nocarry
+ retq
+.LBB46_2: # %carry
+ movq 8(%rcx), %rdx
+ movq 16(%rcx), %rsi
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %r9, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r8, %rsi
+ movq %rsi, 16(%rdi)
+ retq
+.Lfunc_end46:
+ .size mcl_fp_sub3L, .Lfunc_end46-mcl_fp_sub3L
+
+ .globl mcl_fp_subNF3L
+ .align 16, 0x90
+ .type mcl_fp_subNF3L,@function
+mcl_fp_subNF3L: # @mcl_fp_subNF3L
+# BB#0:
+ movq 16(%rsi), %r10
+ movq (%rsi), %r8
+ movq 8(%rsi), %r9
+ subq (%rdx), %r8
+ sbbq 8(%rdx), %r9
+ sbbq 16(%rdx), %r10
+ movq %r10, %rdx
+ sarq $63, %rdx
+ movq %rdx, %rsi
+ shldq $1, %r10, %rsi
+ andq (%rcx), %rsi
+ movq 16(%rcx), %rax
+ andq %rdx, %rax
+ andq 8(%rcx), %rdx
+ addq %r8, %rsi
+ movq %rsi, (%rdi)
+ adcq %r9, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r10, %rax
+ movq %rax, 16(%rdi)
+ retq
+.Lfunc_end47:
+ .size mcl_fp_subNF3L, .Lfunc_end47-mcl_fp_subNF3L
+
+ .globl mcl_fpDbl_add3L
+ .align 16, 0x90
+ .type mcl_fpDbl_add3L,@function
+mcl_fpDbl_add3L: # @mcl_fpDbl_add3L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq 40(%rdx), %r10
+ movq 40(%rsi), %r8
+ movq 32(%rdx), %r11
+ movq 24(%rdx), %r14
+ movq 24(%rsi), %r15
+ movq 32(%rsi), %r9
+ movq 16(%rdx), %rbx
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rbx
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rbx, 16(%rdi)
+ adcq %r14, %r15
+ adcq %r11, %r9
+ adcq %r10, %r8
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %r15, %rdx
+ subq (%rcx), %rdx
+ movq %r9, %rsi
+ sbbq 8(%rcx), %rsi
+ movq %r8, %rbx
+ sbbq 16(%rcx), %rbx
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %r15, %rdx
+ movq %rdx, 24(%rdi)
+ testb %al, %al
+ cmovneq %r9, %rsi
+ movq %rsi, 32(%rdi)
+ cmovneq %r8, %rbx
+ movq %rbx, 40(%rdi)
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end48:
+ .size mcl_fpDbl_add3L, .Lfunc_end48-mcl_fpDbl_add3L
+
+ .globl mcl_fpDbl_sub3L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub3L,@function
+mcl_fpDbl_sub3L: # @mcl_fpDbl_sub3L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 40(%rdx), %r10
+ movq 40(%rsi), %r8
+ movq 32(%rsi), %r9
+ movq 24(%rsi), %r11
+ movq 16(%rsi), %r14
+ movq (%rsi), %rbx
+ movq 8(%rsi), %rax
+ xorl %esi, %esi
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %rax
+ movq 24(%rdx), %r15
+ movq 32(%rdx), %r12
+ sbbq 16(%rdx), %r14
+ movq %rbx, (%rdi)
+ movq %rax, 8(%rdi)
+ movq %r14, 16(%rdi)
+ sbbq %r15, %r11
+ sbbq %r12, %r9
+ sbbq %r10, %r8
+ movl $0, %eax
+ sbbq $0, %rax
+ andl $1, %eax
+ movq (%rcx), %rdx
+ cmoveq %rsi, %rdx
+ testb %al, %al
+ movq 16(%rcx), %rax
+ cmoveq %rsi, %rax
+ cmovneq 8(%rcx), %rsi
+ addq %r11, %rdx
+ movq %rdx, 24(%rdi)
+ adcq %r9, %rsi
+ movq %rsi, 32(%rdi)
+ adcq %r8, %rax
+ movq %rax, 40(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end49:
+ .size mcl_fpDbl_sub3L, .Lfunc_end49-mcl_fpDbl_sub3L
+
+ .globl mcl_fp_mulUnitPre4L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre4L,@function
+mcl_fp_mulUnitPre4L: # @mcl_fp_mulUnitPre4L
+# BB#0:
+ pushq %r14
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rcx, %rax
+ mulq 24(%rsi)
+ movq %rdx, %r8
+ movq %rax, %r9
+ movq %rcx, %rax
+ mulq 16(%rsi)
+ movq %rdx, %r10
+ movq %rax, %r11
+ movq %rcx, %rax
+ mulq 8(%rsi)
+ movq %rdx, %rbx
+ movq %rax, %r14
+ movq %rcx, %rax
+ mulq (%rsi)
+ movq %rax, (%rdi)
+ addq %r14, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r11, %rbx
+ movq %rbx, 16(%rdi)
+ adcq %r9, %r10
+ movq %r10, 24(%rdi)
+ adcq $0, %r8
+ movq %r8, 32(%rdi)
+ popq %rbx
+ popq %r14
+ retq
+.Lfunc_end50:
+ .size mcl_fp_mulUnitPre4L, .Lfunc_end50-mcl_fp_mulUnitPre4L
+
+ .globl mcl_fpDbl_mulPre4L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre4L,@function
+mcl_fpDbl_mulPre4L: # @mcl_fpDbl_mulPre4L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq (%rsi), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq 8(%rsi), %r8
+ movq %r8, -64(%rsp) # 8-byte Spill
+ movq (%rdx), %rbx
+ movq %rdx, %rbp
+ mulq %rbx
+ movq %rdx, %r15
+ movq 16(%rsi), %rcx
+ movq %rcx, -24(%rsp) # 8-byte Spill
+ movq 24(%rsi), %r11
+ movq %rax, (%rdi)
+ movq %r11, %rax
+ mulq %rbx
+ movq %rdx, %r12
+ movq %rax, %r14
+ movq %rcx, %rax
+ mulq %rbx
+ movq %rdx, %r10
+ movq %rax, %r9
+ movq %r8, %rax
+ mulq %rbx
+ movq %rdx, %r13
+ movq %rax, %r8
+ addq %r15, %r8
+ adcq %r9, %r13
+ adcq %r14, %r10
+ adcq $0, %r12
+ movq %rbp, %r9
+ movq 8(%r9), %rbp
+ movq %r11, %rax
+ mulq %rbp
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq %rcx, %rax
+ mulq %rbp
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rax, %rcx
+ movq -64(%rsp), %r14 # 8-byte Reload
+ movq %r14, %rax
+ mulq %rbp
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, %rbx
+ movq -8(%rsp), %rax # 8-byte Reload
+ mulq %rbp
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ addq %r8, %rax
+ movq %rax, 8(%rdi)
+ adcq %r13, %rbx
+ adcq %r10, %rcx
+ adcq %r12, %r15
+ sbbq %r13, %r13
+ andl $1, %r13d
+ movq 16(%r9), %rbp
+ movq %r14, %rax
+ mulq %rbp
+ movq %rax, %r12
+ movq %rdx, %r8
+ addq -56(%rsp), %rbx # 8-byte Folded Reload
+ adcq -48(%rsp), %rcx # 8-byte Folded Reload
+ adcq -40(%rsp), %r15 # 8-byte Folded Reload
+ adcq -32(%rsp), %r13 # 8-byte Folded Reload
+ movq %r11, %rax
+ mulq %rbp
+ movq %rdx, %r9
+ movq %rax, %r11
+ movq -24(%rsp), %rax # 8-byte Reload
+ mulq %rbp
+ movq %rdx, %r14
+ movq %rax, %r10
+ movq -8(%rsp), %rax # 8-byte Reload
+ mulq %rbp
+ addq %rbx, %rax
+ movq %rax, 16(%rdi)
+ adcq %r12, %rcx
+ adcq %r15, %r10
+ adcq %r13, %r11
+ sbbq %r13, %r13
+ andl $1, %r13d
+ addq %rdx, %rcx
+ adcq %r8, %r10
+ adcq %r14, %r11
+ adcq %r9, %r13
+ movq -16(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rbx
+ movq %rbx, %rax
+ mulq 24(%rsi)
+ movq %rdx, %r8
+ movq %rax, %r14
+ movq %rbx, %rax
+ mulq 16(%rsi)
+ movq %rdx, %r9
+ movq %rax, %r12
+ movq %rbx, %rax
+ mulq 8(%rsi)
+ movq %rdx, %r15
+ movq %rax, %rbp
+ movq %rbx, %rax
+ mulq (%rsi)
+ addq %rcx, %rax
+ movq %rax, 24(%rdi)
+ adcq %r10, %rbp
+ adcq %r11, %r12
+ adcq %r13, %r14
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %rdx, %rbp
+ movq %rbp, 32(%rdi)
+ adcq %r15, %r12
+ movq %r12, 40(%rdi)
+ adcq %r9, %r14
+ movq %r14, 48(%rdi)
+ adcq %r8, %rax
+ movq %rax, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end51:
+ .size mcl_fpDbl_mulPre4L, .Lfunc_end51-mcl_fpDbl_mulPre4L
+
+ .globl mcl_fpDbl_sqrPre4L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre4L,@function
+mcl_fpDbl_sqrPre4L: # @mcl_fpDbl_sqrPre4L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rsi, %r10
+ movq 16(%r10), %r9
+ movq 24(%r10), %r11
+ movq (%r10), %r15
+ movq 8(%r10), %r8
+ movq %r15, %rax
+ mulq %r15
+ movq %rdx, %rbp
+ movq %rax, (%rdi)
+ movq %r11, %rax
+ mulq %r8
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movq %rax, -32(%rsp) # 8-byte Spill
+ movq %r9, %rax
+ mulq %r8
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq %r11, %rax
+ mulq %r15
+ movq %rdx, %rbx
+ movq %rax, %rcx
+ movq %r9, %rax
+ mulq %r15
+ movq %rdx, %rsi
+ movq %rsi, -16(%rsp) # 8-byte Spill
+ movq %rax, %r12
+ movq %r8, %rax
+ mulq %r8
+ movq %rdx, %r13
+ movq %rax, %r14
+ movq %r8, %rax
+ mulq %r15
+ addq %rax, %rbp
+ movq %rdx, %r8
+ adcq %r12, %r8
+ adcq %rsi, %rcx
+ adcq $0, %rbx
+ addq %rax, %rbp
+ movq %rbp, 8(%rdi)
+ adcq %r14, %r8
+ movq -40(%rsp), %rsi # 8-byte Reload
+ adcq %rsi, %rcx
+ adcq -32(%rsp), %rbx # 8-byte Folded Reload
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ addq %rdx, %r8
+ adcq %r13, %rcx
+ movq -24(%rsp), %r15 # 8-byte Reload
+ adcq %r15, %rbx
+ adcq -8(%rsp), %rbp # 8-byte Folded Reload
+ movq %r11, %rax
+ mulq %r9
+ movq %rdx, %r14
+ movq %rax, %r11
+ movq %r9, %rax
+ mulq %r9
+ movq %rax, %r9
+ addq %r12, %r8
+ movq %r8, 16(%rdi)
+ adcq %rsi, %rcx
+ adcq %rbx, %r9
+ adcq %rbp, %r11
+ sbbq %r12, %r12
+ andl $1, %r12d
+ addq -16(%rsp), %rcx # 8-byte Folded Reload
+ adcq %r15, %r9
+ adcq %rdx, %r11
+ adcq %r14, %r12
+ movq 24(%r10), %rbp
+ movq %rbp, %rax
+ mulq 16(%r10)
+ movq %rdx, %r8
+ movq %rax, %r14
+ movq %rbp, %rax
+ mulq 8(%r10)
+ movq %rdx, %r13
+ movq %rax, %rbx
+ movq %rbp, %rax
+ mulq (%r10)
+ movq %rdx, %r15
+ movq %rax, %rsi
+ movq %rbp, %rax
+ mulq %rbp
+ addq %rcx, %rsi
+ movq %rsi, 24(%rdi)
+ adcq %r9, %rbx
+ adcq %r11, %r14
+ adcq %r12, %rax
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %r15, %rbx
+ movq %rbx, 32(%rdi)
+ adcq %r13, %r14
+ movq %r14, 40(%rdi)
+ adcq %r8, %rax
+ movq %rax, 48(%rdi)
+ adcq %rdx, %rcx
+ movq %rcx, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end52:
+ .size mcl_fpDbl_sqrPre4L, .Lfunc_end52-mcl_fpDbl_sqrPre4L
+
+ .globl mcl_fp_mont4L
+ .align 16, 0x90
+ .type mcl_fp_mont4L,@function
+mcl_fp_mont4L: # @mcl_fp_mont4L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rdi, -88(%rsp) # 8-byte Spill
+ movq 24(%rsi), %rax
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq (%rdx), %rdi
+ mulq %rdi
+ movq %rax, %r9
+ movq %rdx, %rbp
+ movq 16(%rsi), %rax
+ movq %rax, -64(%rsp) # 8-byte Spill
+ mulq %rdi
+ movq %rax, %r8
+ movq %rdx, %r10
+ movq (%rsi), %rbx
+ movq %rbx, -72(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -80(%rsp) # 8-byte Spill
+ mulq %rdi
+ movq %rdx, %r14
+ movq %rax, %rsi
+ movq %rbx, %rax
+ mulq %rdi
+ movq %rax, %r11
+ movq %rdx, %r13
+ addq %rsi, %r13
+ adcq %r8, %r14
+ adcq %r9, %r10
+ adcq $0, %rbp
+ movq %rbp, -96(%rsp) # 8-byte Spill
+ movq -8(%rcx), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq (%rcx), %r8
+ movq %r8, -32(%rsp) # 8-byte Spill
+ movq %r11, %rdi
+ imulq %rax, %rdi
+ movq 24(%rcx), %rdx
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movq 16(%rcx), %rsi
+ movq %rsi, -16(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, -40(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq %rdx
+ movq %rdx, %r9
+ movq %rax, %r12
+ movq %rdi, %rax
+ mulq %rsi
+ movq %rdx, %rbp
+ movq %rax, %rbx
+ movq %rdi, %rax
+ mulq %rcx
+ movq %rdx, %rsi
+ movq %rax, %r15
+ movq %rdi, %rax
+ mulq %r8
+ movq %rdx, %rcx
+ addq %r15, %rcx
+ adcq %rbx, %rsi
+ adcq %r12, %rbp
+ adcq $0, %r9
+ addq %r11, %rax
+ adcq %r13, %rcx
+ adcq %r14, %rsi
+ adcq %r10, %rbp
+ adcq -96(%rsp), %r9 # 8-byte Folded Reload
+ sbbq %r13, %r13
+ andl $1, %r13d
+ movq -48(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rdi
+ movq %rdi, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %r11
+ movq %rdi, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %r14
+ movq %rdi, %rax
+ mulq -80(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r15
+ movq %rdi, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rax, %r8
+ movq %rdx, %rdi
+ addq %r15, %rdi
+ adcq %r14, %rbx
+ adcq %r11, %r10
+ adcq $0, %r12
+ addq %rcx, %r8
+ adcq %rsi, %rdi
+ adcq %rbp, %rbx
+ adcq %r9, %r10
+ adcq %r13, %r12
+ sbbq %r15, %r15
+ andl $1, %r15d
+ movq %r8, %rsi
+ imulq -24(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r14
+ movq %rsi, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, %rbp
+ movq %rsi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ addq %rbp, %r11
+ adcq %r14, %r9
+ adcq -96(%rsp), %rcx # 8-byte Folded Reload
+ adcq $0, %r13
+ addq %r8, %rax
+ adcq %rdi, %r11
+ adcq %rbx, %r9
+ adcq %r10, %rcx
+ adcq %r12, %r13
+ adcq $0, %r15
+ movq -48(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rsi
+ movq %rsi, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %rbx
+ movq %rsi, %rax
+ mulq -80(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, %rdi
+ movq %rsi, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rax, %r8
+ movq %rdx, %rbp
+ addq %rdi, %rbp
+ adcq %rbx, %r14
+ adcq -96(%rsp), %r10 # 8-byte Folded Reload
+ adcq $0, %r12
+ addq %r11, %r8
+ adcq %r9, %rbp
+ adcq %rcx, %r14
+ adcq %r13, %r10
+ adcq %r15, %r12
+ sbbq %r13, %r13
+ movq %r8, %rsi
+ imulq -24(%rsp), %rsi # 8-byte Folded Reload
+ andl $1, %r13d
+ movq %rsi, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r9
+ movq %rsi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r11
+ movq %rsi, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r15
+ movq %rsi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ addq %r15, %rsi
+ adcq %r11, %rbx
+ adcq %r9, %rcx
+ adcq $0, %rdi
+ addq %r8, %rax
+ adcq %rbp, %rsi
+ adcq %r14, %rbx
+ adcq %r10, %rcx
+ adcq %r12, %rdi
+ adcq $0, %r13
+ movq -48(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rbp
+ movq %rbp, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r14
+ movq %rbp, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %r15
+ movq %rbp, %rax
+ mulq -80(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, %r12
+ movq %rbp, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rax, %r9
+ movq %rdx, %rbp
+ addq %r12, %rbp
+ adcq %r15, %r11
+ adcq %r14, %r10
+ adcq $0, %r8
+ addq %rsi, %r9
+ adcq %rbx, %rbp
+ adcq %rcx, %r11
+ adcq %rdi, %r10
+ adcq %r13, %r8
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq -24(%rsp), %rcx # 8-byte Reload
+ imulq %r9, %rcx
+ movq %rcx, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %r15
+ movq %rcx, %rax
+ movq -40(%rsp), %r14 # 8-byte Reload
+ mulq %r14
+ movq %rdx, %rdi
+ movq %rax, %r12
+ movq %rcx, %rax
+ movq -32(%rsp), %rcx # 8-byte Reload
+ mulq %rcx
+ addq %r12, %rdx
+ adcq %r15, %rdi
+ adcq -24(%rsp), %r13 # 8-byte Folded Reload
+ adcq $0, %rbx
+ addq %r9, %rax
+ adcq %rbp, %rdx
+ adcq %r11, %rdi
+ adcq %r10, %r13
+ adcq %r8, %rbx
+ adcq $0, %rsi
+ movq %rdx, %rax
+ subq %rcx, %rax
+ movq %rdi, %rcx
+ sbbq %r14, %rcx
+ movq %r13, %r8
+ sbbq -16(%rsp), %r8 # 8-byte Folded Reload
+ movq %rbx, %rbp
+ sbbq -8(%rsp), %rbp # 8-byte Folded Reload
+ sbbq $0, %rsi
+ andl $1, %esi
+ cmovneq %rbx, %rbp
+ testb %sil, %sil
+ cmovneq %rdx, %rax
+ movq -88(%rsp), %rdx # 8-byte Reload
+ movq %rax, (%rdx)
+ cmovneq %rdi, %rcx
+ movq %rcx, 8(%rdx)
+ cmovneq %r13, %r8
+ movq %r8, 16(%rdx)
+ movq %rbp, 24(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end53:
+ .size mcl_fp_mont4L, .Lfunc_end53-mcl_fp_mont4L
+
+ .globl mcl_fp_montNF4L
+ .align 16, 0x90
+ .type mcl_fp_montNF4L,@function
+mcl_fp_montNF4L: # @mcl_fp_montNF4L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq %rdi, -88(%rsp) # 8-byte Spill
+ movq 24(%rsi), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq (%rdx), %rdi
+ mulq %rdi
+ movq %rax, %r15
+ movq %rdx, %r12
+ movq 16(%rsi), %rax
+ movq %rax, -32(%rsp) # 8-byte Spill
+ mulq %rdi
+ movq %rax, %r8
+ movq %rdx, %r9
+ movq (%rsi), %rbp
+ movq %rbp, -40(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -48(%rsp) # 8-byte Spill
+ mulq %rdi
+ movq %rdx, %rbx
+ movq %rax, %rsi
+ movq %rbp, %rax
+ mulq %rdi
+ movq %rax, %r11
+ movq %rdx, %rdi
+ addq %rsi, %rdi
+ adcq %r8, %rbx
+ adcq %r15, %r9
+ adcq $0, %r12
+ movq -8(%rcx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq (%rcx), %r8
+ movq %r8, -64(%rsp) # 8-byte Spill
+ movq %r11, %rsi
+ imulq %rax, %rsi
+ movq 24(%rcx), %rdx
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq 16(%rcx), %rbp
+ movq %rbp, -72(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, -80(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq %rdx
+ movq %rdx, %r15
+ movq %rax, %r13
+ movq %rsi, %rax
+ mulq %rbp
+ movq %rdx, %r10
+ movq %rax, %rbp
+ movq %rsi, %rax
+ mulq %rcx
+ movq %rdx, %r14
+ movq %rax, %rcx
+ movq %rsi, %rax
+ mulq %r8
+ addq %r11, %rax
+ adcq %rdi, %rcx
+ adcq %rbx, %rbp
+ adcq %r9, %r13
+ adcq $0, %r12
+ addq %rdx, %rcx
+ adcq %r14, %rbp
+ adcq %r10, %r13
+ adcq %r15, %r12
+ movq -16(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rdi
+ movq %rdi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r10
+ movq %rdi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r11
+ movq %rdi, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r14
+ movq %rdi, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rax, %rdi
+ movq %rdx, %r9
+ addq %r14, %r9
+ adcq %r11, %r8
+ adcq %r10, %rsi
+ adcq $0, %rbx
+ addq %rcx, %rdi
+ adcq %rbp, %r9
+ adcq %r13, %r8
+ adcq %r12, %rsi
+ adcq $0, %rbx
+ movq %rdi, %rcx
+ imulq -8(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, %r13
+ movq %rcx, %rax
+ movq -80(%rsp), %r15 # 8-byte Reload
+ mulq %r15
+ movq %rdx, %r14
+ movq %rax, %rbp
+ movq %rcx, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ addq %rdi, %rax
+ adcq %r9, %rbp
+ adcq %r8, %r13
+ adcq %rsi, %r12
+ adcq $0, %rbx
+ addq %rdx, %rbp
+ adcq %r14, %r13
+ adcq %r11, %r12
+ adcq %r10, %rbx
+ movq -16(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rdi
+ movq %rdi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r10
+ movq %rdi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r11
+ movq %rdi, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r14
+ movq %rdi, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rax, %r9
+ movq %rdx, %rdi
+ addq %r14, %rdi
+ adcq %r11, %rcx
+ adcq %r10, %r8
+ adcq $0, %rsi
+ addq %rbp, %r9
+ adcq %r13, %rdi
+ adcq %r12, %rcx
+ adcq %rbx, %r8
+ adcq $0, %rsi
+ movq %r9, %rbx
+ imulq -8(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %r12
+ movq %rbx, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, %r13
+ movq %rbx, %rax
+ mulq %r15
+ movq %rdx, %r14
+ movq %rax, %rbp
+ movq %rbx, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ addq %r9, %rax
+ adcq %rdi, %rbp
+ adcq %rcx, %r13
+ adcq %r8, %r12
+ adcq $0, %rsi
+ addq %rdx, %rbp
+ adcq %r14, %r13
+ adcq %r11, %r12
+ adcq %r10, %rsi
+ movq -16(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rdi
+ movq %rdi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %rcx
+ movq %rdi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r11
+ movq %rdi, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %r14
+ movq %rdi, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rax, %r9
+ movq %rdx, %rdi
+ addq %r14, %rdi
+ adcq %r11, %r10
+ adcq %rcx, %r8
+ adcq $0, %rbx
+ addq %rbp, %r9
+ adcq %r13, %rdi
+ adcq %r12, %r10
+ adcq %rsi, %r8
+ adcq $0, %rbx
+ movq -8(%rsp), %rsi # 8-byte Reload
+ imulq %r9, %rsi
+ movq %rsi, %rax
+ movq -56(%rsp), %r12 # 8-byte Reload
+ mulq %r12
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movq %rax, %r13
+ movq %rsi, %rax
+ movq -72(%rsp), %r14 # 8-byte Reload
+ mulq %r14
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ movq %rsi, %rax
+ movq -64(%rsp), %r11 # 8-byte Reload
+ mulq %r11
+ movq %rdx, %r15
+ movq %rax, %rcx
+ movq %rsi, %rax
+ movq -80(%rsp), %rsi # 8-byte Reload
+ mulq %rsi
+ addq %r9, %rcx
+ adcq %rdi, %rax
+ adcq %r10, %rbp
+ adcq %r8, %r13
+ adcq $0, %rbx
+ addq %r15, %rax
+ adcq %rdx, %rbp
+ adcq -16(%rsp), %r13 # 8-byte Folded Reload
+ adcq -8(%rsp), %rbx # 8-byte Folded Reload
+ movq %rax, %rcx
+ subq %r11, %rcx
+ movq %rbp, %rdx
+ sbbq %rsi, %rdx
+ movq %r13, %rdi
+ sbbq %r14, %rdi
+ movq %rbx, %rsi
+ sbbq %r12, %rsi
+ cmovsq %rax, %rcx
+ movq -88(%rsp), %rax # 8-byte Reload
+ movq %rcx, (%rax)
+ cmovsq %rbp, %rdx
+ movq %rdx, 8(%rax)
+ cmovsq %r13, %rdi
+ movq %rdi, 16(%rax)
+ cmovsq %rbx, %rsi
+ movq %rsi, 24(%rax)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end54:
+ .size mcl_fp_montNF4L, .Lfunc_end54-mcl_fp_montNF4L
+
+ .globl mcl_fp_montRed4L
+ .align 16, 0x90
+ .type mcl_fp_montRed4L,@function
+mcl_fp_montRed4L: # @mcl_fp_montRed4L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rdi, -56(%rsp) # 8-byte Spill
+ movq -8(%rcx), %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq (%rcx), %rdi
+ movq %rdi, -48(%rsp) # 8-byte Spill
+ movq (%rsi), %r12
+ movq %r12, %rbx
+ imulq %rax, %rbx
+ movq %rax, %r9
+ movq 24(%rcx), %rdx
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq %rdx
+ movq %rax, %r11
+ movq %rdx, %r8
+ movq 16(%rcx), %rbp
+ movq %rbp, -32(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq %rbp
+ movq %rbp, %r13
+ movq %rax, %r14
+ movq %rdx, %r10
+ movq 8(%rcx), %rcx
+ movq %rcx, -24(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq %rcx
+ movq %rcx, %rbp
+ movq %rdx, %r15
+ movq %rax, %rcx
+ movq %rbx, %rax
+ mulq %rdi
+ movq %rdx, %rbx
+ addq %rcx, %rbx
+ adcq %r14, %r15
+ adcq %r11, %r10
+ adcq $0, %r8
+ movq 56(%rsi), %rcx
+ movq 48(%rsi), %rdx
+ addq %r12, %rax
+ movq 40(%rsi), %rax
+ adcq 8(%rsi), %rbx
+ adcq 16(%rsi), %r15
+ adcq 24(%rsi), %r10
+ adcq 32(%rsi), %r8
+ adcq $0, %rax
+ movq %rax, -64(%rsp) # 8-byte Spill
+ adcq $0, %rdx
+ movq %rdx, %r12
+ adcq $0, %rcx
+ movq %rcx, -16(%rsp) # 8-byte Spill
+ sbbq %rdi, %rdi
+ andl $1, %edi
+ movq %rbx, %rsi
+ imulq %r9, %rsi
+ movq %rsi, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -72(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq %r13
+ movq %rdx, %r14
+ movq %rax, %r9
+ movq %rsi, %rax
+ mulq %rbp
+ movq %rdx, %rcx
+ movq %rax, %rbp
+ movq %rsi, %rax
+ movq -48(%rsp), %r13 # 8-byte Reload
+ mulq %r13
+ movq %rdx, %rsi
+ addq %rbp, %rsi
+ adcq %r9, %rcx
+ adcq -72(%rsp), %r14 # 8-byte Folded Reload
+ adcq $0, %r11
+ addq %rbx, %rax
+ adcq %r15, %rsi
+ adcq %r10, %rcx
+ adcq %r8, %r14
+ adcq -64(%rsp), %r11 # 8-byte Folded Reload
+ adcq $0, %r12
+ movq %r12, -64(%rsp) # 8-byte Spill
+ movq -16(%rsp), %rbp # 8-byte Reload
+ adcq $0, %rbp
+ adcq $0, %rdi
+ movq %rsi, %rbx
+ imulq -40(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, %rax
+ movq -8(%rsp), %r12 # 8-byte Reload
+ mulq %r12
+ movq %rdx, %r8
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, -72(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %r9
+ movq %rbx, %rax
+ mulq %r13
+ movq %rdx, %rbx
+ addq %r9, %rbx
+ adcq -72(%rsp), %r15 # 8-byte Folded Reload
+ adcq -16(%rsp), %r10 # 8-byte Folded Reload
+ adcq $0, %r8
+ addq %rsi, %rax
+ adcq %rcx, %rbx
+ adcq %r14, %r15
+ adcq %r11, %r10
+ adcq -64(%rsp), %r8 # 8-byte Folded Reload
+ adcq $0, %rbp
+ movq %rbp, -16(%rsp) # 8-byte Spill
+ adcq $0, %rdi
+ movq -40(%rsp), %rcx # 8-byte Reload
+ imulq %rbx, %rcx
+ movq %rcx, %rax
+ mulq %r12
+ movq %rdx, %r13
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ movq -32(%rsp), %r14 # 8-byte Reload
+ mulq %r14
+ movq %rdx, %r11
+ movq %rax, %r12
+ movq %rcx, %rax
+ movq %rcx, %r9
+ movq -24(%rsp), %rsi # 8-byte Reload
+ mulq %rsi
+ movq %rdx, %rbp
+ movq %rax, %rcx
+ movq %r9, %rax
+ movq -48(%rsp), %r9 # 8-byte Reload
+ mulq %r9
+ addq %rcx, %rdx
+ adcq %r12, %rbp
+ adcq -40(%rsp), %r11 # 8-byte Folded Reload
+ adcq $0, %r13
+ addq %rbx, %rax
+ adcq %r15, %rdx
+ adcq %r10, %rbp
+ adcq %r8, %r11
+ adcq -16(%rsp), %r13 # 8-byte Folded Reload
+ adcq $0, %rdi
+ movq %rdx, %rax
+ subq %r9, %rax
+ movq %rbp, %rcx
+ sbbq %rsi, %rcx
+ movq %r11, %rbx
+ sbbq %r14, %rbx
+ movq %r13, %rsi
+ sbbq -8(%rsp), %rsi # 8-byte Folded Reload
+ sbbq $0, %rdi
+ andl $1, %edi
+ cmovneq %r13, %rsi
+ testb %dil, %dil
+ cmovneq %rdx, %rax
+ movq -56(%rsp), %rdx # 8-byte Reload
+ movq %rax, (%rdx)
+ cmovneq %rbp, %rcx
+ movq %rcx, 8(%rdx)
+ cmovneq %r11, %rbx
+ movq %rbx, 16(%rdx)
+ movq %rsi, 24(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end55:
+ .size mcl_fp_montRed4L, .Lfunc_end55-mcl_fp_montRed4L
+
+ .globl mcl_fp_addPre4L
+ .align 16, 0x90
+ .type mcl_fp_addPre4L,@function
+mcl_fp_addPre4L: # @mcl_fp_addPre4L
+# BB#0:
+ movq 24(%rdx), %r8
+ movq 24(%rsi), %r9
+ movq 16(%rdx), %rax
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rax
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ adcq %r8, %r9
+ movq %r9, 24(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end56:
+ .size mcl_fp_addPre4L, .Lfunc_end56-mcl_fp_addPre4L
+
+ .globl mcl_fp_subPre4L
+ .align 16, 0x90
+ .type mcl_fp_subPre4L,@function
+mcl_fp_subPre4L: # @mcl_fp_subPre4L
+# BB#0:
+ movq 24(%rdx), %r8
+ movq 24(%rsi), %r9
+ movq 16(%rsi), %r10
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %r10
+ movq %rcx, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %r10, 16(%rdi)
+ sbbq %r8, %r9
+ movq %r9, 24(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end57:
+ .size mcl_fp_subPre4L, .Lfunc_end57-mcl_fp_subPre4L
+
+ .globl mcl_fp_shr1_4L
+ .align 16, 0x90
+ .type mcl_fp_shr1_4L,@function
+mcl_fp_shr1_4L: # @mcl_fp_shr1_4L
+# BB#0:
+ movq 24(%rsi), %rax
+ movq 16(%rsi), %rcx
+ movq (%rsi), %rdx
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rdx
+ movq %rdx, (%rdi)
+ shrdq $1, %rcx, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rax, %rcx
+ movq %rcx, 16(%rdi)
+ shrq %rax
+ movq %rax, 24(%rdi)
+ retq
+.Lfunc_end58:
+ .size mcl_fp_shr1_4L, .Lfunc_end58-mcl_fp_shr1_4L
+
+ .globl mcl_fp_add4L
+ .align 16, 0x90
+ .type mcl_fp_add4L,@function
+mcl_fp_add4L: # @mcl_fp_add4L
+# BB#0:
+ movq 24(%rdx), %r10
+ movq 24(%rsi), %r8
+ movq 16(%rdx), %r9
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r9
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r9, 16(%rdi)
+ adcq %r10, %r8
+ movq %r8, 24(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r9
+ sbbq 24(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB59_2
+# BB#1: # %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r9, 16(%rdi)
+ movq %r8, 24(%rdi)
+.LBB59_2: # %carry
+ retq
+.Lfunc_end59:
+ .size mcl_fp_add4L, .Lfunc_end59-mcl_fp_add4L
+
+ .globl mcl_fp_addNF4L
+ .align 16, 0x90
+ .type mcl_fp_addNF4L,@function
+mcl_fp_addNF4L: # @mcl_fp_addNF4L
+# BB#0:
+ pushq %rbx
+ movq 24(%rdx), %r8
+ movq 16(%rdx), %r9
+ movq (%rdx), %r11
+ movq 8(%rdx), %r10
+ addq (%rsi), %r11
+ adcq 8(%rsi), %r10
+ adcq 16(%rsi), %r9
+ adcq 24(%rsi), %r8
+ movq %r11, %rsi
+ subq (%rcx), %rsi
+ movq %r10, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r9, %rax
+ sbbq 16(%rcx), %rax
+ movq %r8, %rbx
+ sbbq 24(%rcx), %rbx
+ testq %rbx, %rbx
+ cmovsq %r11, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r10, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r9, %rax
+ movq %rax, 16(%rdi)
+ cmovsq %r8, %rbx
+ movq %rbx, 24(%rdi)
+ popq %rbx
+ retq
+.Lfunc_end60:
+ .size mcl_fp_addNF4L, .Lfunc_end60-mcl_fp_addNF4L
+
+ .globl mcl_fp_sub4L
+ .align 16, 0x90
+ .type mcl_fp_sub4L,@function
+mcl_fp_sub4L: # @mcl_fp_sub4L
+# BB#0:
+ movq 24(%rdx), %r10
+ movq 24(%rsi), %r8
+ movq 16(%rsi), %r9
+ movq (%rsi), %rax
+ movq 8(%rsi), %r11
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r11
+ sbbq 16(%rdx), %r9
+ movq %rax, (%rdi)
+ movq %r11, 8(%rdi)
+ movq %r9, 16(%rdi)
+ sbbq %r10, %r8
+ movq %r8, 24(%rdi)
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB61_2
+# BB#1: # %nocarry
+ retq
+.LBB61_2: # %carry
+ movq 24(%rcx), %r10
+ movq 8(%rcx), %rsi
+ movq 16(%rcx), %rdx
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %r11, %rsi
+ movq %rsi, 8(%rdi)
+ adcq %r9, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r8, %r10
+ movq %r10, 24(%rdi)
+ retq
+.Lfunc_end61:
+ .size mcl_fp_sub4L, .Lfunc_end61-mcl_fp_sub4L
+
+ .globl mcl_fp_subNF4L
+ .align 16, 0x90
+ .type mcl_fp_subNF4L,@function
+mcl_fp_subNF4L: # @mcl_fp_subNF4L
+# BB#0:
+ pushq %rbx
+ movq 24(%rsi), %r11
+ movq 16(%rsi), %r8
+ movq (%rsi), %r9
+ movq 8(%rsi), %r10
+ subq (%rdx), %r9
+ sbbq 8(%rdx), %r10
+ sbbq 16(%rdx), %r8
+ sbbq 24(%rdx), %r11
+ movq %r11, %rdx
+ sarq $63, %rdx
+ movq 24(%rcx), %rsi
+ andq %rdx, %rsi
+ movq 16(%rcx), %rax
+ andq %rdx, %rax
+ movq 8(%rcx), %rbx
+ andq %rdx, %rbx
+ andq (%rcx), %rdx
+ addq %r9, %rdx
+ movq %rdx, (%rdi)
+ adcq %r10, %rbx
+ movq %rbx, 8(%rdi)
+ adcq %r8, %rax
+ movq %rax, 16(%rdi)
+ adcq %r11, %rsi
+ movq %rsi, 24(%rdi)
+ popq %rbx
+ retq
+.Lfunc_end62:
+ .size mcl_fp_subNF4L, .Lfunc_end62-mcl_fp_subNF4L
+
+ .globl mcl_fpDbl_add4L
+ .align 16, 0x90
+ .type mcl_fpDbl_add4L,@function
+mcl_fpDbl_add4L: # @mcl_fpDbl_add4L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r9
+ movq 56(%rsi), %r8
+ movq 48(%rdx), %r10
+ movq 48(%rsi), %r12
+ movq 40(%rdx), %r11
+ movq 32(%rdx), %r14
+ movq 24(%rdx), %r15
+ movq 16(%rdx), %rbx
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rbx
+ movq 40(%rsi), %r13
+ movq 24(%rsi), %rbp
+ movq 32(%rsi), %rsi
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rbx, 16(%rdi)
+ adcq %r15, %rbp
+ movq %rbp, 24(%rdi)
+ adcq %r14, %rsi
+ adcq %r11, %r13
+ adcq %r10, %r12
+ adcq %r9, %r8
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rsi, %rdx
+ subq (%rcx), %rdx
+ movq %r13, %rbp
+ sbbq 8(%rcx), %rbp
+ movq %r12, %rbx
+ sbbq 16(%rcx), %rbx
+ movq %r8, %r9
+ sbbq 24(%rcx), %r9
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %rsi, %rdx
+ movq %rdx, 32(%rdi)
+ testb %al, %al
+ cmovneq %r13, %rbp
+ movq %rbp, 40(%rdi)
+ cmovneq %r12, %rbx
+ movq %rbx, 48(%rdi)
+ cmovneq %r8, %r9
+ movq %r9, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end63:
+ .size mcl_fpDbl_add4L, .Lfunc_end63-mcl_fpDbl_add4L
+
+ .globl mcl_fpDbl_sub4L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub4L,@function
+mcl_fpDbl_sub4L: # @mcl_fpDbl_sub4L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r9
+ movq 56(%rsi), %r8
+ movq 48(%rdx), %r10
+ movq 24(%rdx), %r11
+ movq (%rsi), %rbx
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ movq %rbx, (%rdi)
+ movq 8(%rsi), %rbx
+ sbbq 8(%rdx), %rbx
+ movq %rbx, 8(%rdi)
+ movq 16(%rsi), %rbx
+ sbbq 16(%rdx), %rbx
+ movq %rbx, 16(%rdi)
+ movq 24(%rsi), %rbx
+ sbbq %r11, %rbx
+ movq 40(%rdx), %r11
+ movq 32(%rdx), %rdx
+ movq %rbx, 24(%rdi)
+ movq 32(%rsi), %r12
+ sbbq %rdx, %r12
+ movq 48(%rsi), %r14
+ movq 40(%rsi), %r15
+ sbbq %r11, %r15
+ sbbq %r10, %r14
+ sbbq %r9, %r8
+ movl $0, %edx
+ sbbq $0, %rdx
+ andl $1, %edx
+ movq (%rcx), %rsi
+ cmoveq %rax, %rsi
+ testb %dl, %dl
+ movq 16(%rcx), %rdx
+ cmoveq %rax, %rdx
+ movq 24(%rcx), %rbx
+ cmoveq %rax, %rbx
+ cmovneq 8(%rcx), %rax
+ addq %r12, %rsi
+ movq %rsi, 32(%rdi)
+ adcq %r15, %rax
+ movq %rax, 40(%rdi)
+ adcq %r14, %rdx
+ movq %rdx, 48(%rdi)
+ adcq %r8, %rbx
+ movq %rbx, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end64:
+ .size mcl_fpDbl_sub4L, .Lfunc_end64-mcl_fpDbl_sub4L
+
+ .globl mcl_fp_mulUnitPre5L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre5L,@function
+mcl_fp_mulUnitPre5L: # @mcl_fp_mulUnitPre5L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rcx, %rax
+ mulq 32(%rsi)
+ movq %rdx, %r8
+ movq %rax, %r9
+ movq %rcx, %rax
+ mulq 24(%rsi)
+ movq %rdx, %r10
+ movq %rax, %r11
+ movq %rcx, %rax
+ mulq 16(%rsi)
+ movq %rdx, %r15
+ movq %rax, %r14
+ movq %rcx, %rax
+ mulq 8(%rsi)
+ movq %rdx, %rbx
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq (%rsi)
+ movq %rax, (%rdi)
+ addq %r12, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r14, %rbx
+ movq %rbx, 16(%rdi)
+ adcq %r11, %r15
+ movq %r15, 24(%rdi)
+ adcq %r9, %r10
+ movq %r10, 32(%rdi)
+ adcq $0, %r8
+ movq %r8, 40(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end65:
+ .size mcl_fp_mulUnitPre5L, .Lfunc_end65-mcl_fp_mulUnitPre5L
+
+ .globl mcl_fpDbl_mulPre5L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre5L,@function
+mcl_fpDbl_mulPre5L: # @mcl_fpDbl_mulPre5L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq %rsi, %r9
+ movq %rdi, -8(%rsp) # 8-byte Spill
+ movq (%r9), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq 8(%r9), %rbx
+ movq %rbx, -48(%rsp) # 8-byte Spill
+ movq (%rdx), %rbp
+ movq %rdx, %r8
+ mulq %rbp
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq 16(%r9), %r13
+ movq 24(%r9), %r14
+ movq 32(%r9), %r15
+ movq %rax, (%rdi)
+ movq %r15, %rax
+ mulq %rbp
+ movq %rdx, %r10
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq %r14, %rax
+ mulq %rbp
+ movq %rdx, %r12
+ movq %rax, %r11
+ movq %r13, %rax
+ mulq %rbp
+ movq %rdx, %rcx
+ movq %rax, %rsi
+ movq %rbx, %rax
+ mulq %rbp
+ movq %rdx, %rbp
+ movq %rax, %rdi
+ addq -32(%rsp), %rdi # 8-byte Folded Reload
+ adcq %rsi, %rbp
+ adcq %r11, %rcx
+ adcq -40(%rsp), %r12 # 8-byte Folded Reload
+ adcq $0, %r10
+ movq 8(%r8), %r11
+ movq %r15, %rax
+ mulq %r11
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rax, %rsi
+ movq %r14, %rax
+ mulq %r11
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq %r13, %rax
+ mulq %r11
+ movq %rdx, %r8
+ movq %rax, %r13
+ movq -48(%rsp), %rax # 8-byte Reload
+ mulq %r11
+ movq %rdx, %r14
+ movq %rax, %rbx
+ movq -24(%rsp), %rax # 8-byte Reload
+ mulq %r11
+ addq %rdi, %rax
+ movq -8(%rsp), %rdi # 8-byte Reload
+ movq %rax, 8(%rdi)
+ adcq %rbp, %rbx
+ adcq %rcx, %r13
+ adcq %r12, %r15
+ adcq %r10, %rsi
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %rdx, %rbx
+ adcq %r14, %r13
+ adcq %r8, %r15
+ adcq -40(%rsp), %rsi # 8-byte Folded Reload
+ adcq -32(%rsp), %rcx # 8-byte Folded Reload
+ movq 32(%r9), %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq -16(%rsp), %rdi # 8-byte Reload
+ movq 16(%rdi), %r12
+ mulq %r12
+ movq %rax, %r11
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq 24(%r9), %rax
+ movq %rax, -72(%rsp) # 8-byte Spill
+ mulq %r12
+ movq %rax, %r10
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq 16(%r9), %rax
+ movq %rax, -80(%rsp) # 8-byte Spill
+ mulq %r12
+ movq %rax, %r8
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq (%r9), %r14
+ movq 8(%r9), %rax
+ movq %rax, -56(%rsp) # 8-byte Spill
+ mulq %r12
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ movq %r14, %rax
+ mulq %r12
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ addq %rbx, %rax
+ movq -8(%rsp), %rbx # 8-byte Reload
+ movq %rax, 16(%rbx)
+ adcq %r13, %rbp
+ adcq %r15, %r8
+ adcq %rsi, %r10
+ adcq %rcx, %r11
+ sbbq %rcx, %rcx
+ movq 24(%rdi), %rsi
+ movq -40(%rsp), %rax # 8-byte Reload
+ mulq %rsi
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rax, %r13
+ movq -56(%rsp), %rax # 8-byte Reload
+ mulq %rsi
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, %r12
+ movq %r14, %rax
+ mulq %rsi
+ movq %rdx, %r15
+ movq %rax, %rdi
+ movq -72(%rsp), %rax # 8-byte Reload
+ mulq %rsi
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, %r14
+ movq -80(%rsp), %rax # 8-byte Reload
+ mulq %rsi
+ andl $1, %ecx
+ addq -88(%rsp), %rbp # 8-byte Folded Reload
+ adcq -64(%rsp), %r8 # 8-byte Folded Reload
+ adcq -48(%rsp), %r10 # 8-byte Folded Reload
+ adcq -32(%rsp), %r11 # 8-byte Folded Reload
+ adcq -24(%rsp), %rcx # 8-byte Folded Reload
+ addq %rdi, %rbp
+ movq %rbp, 24(%rbx)
+ adcq %r12, %r8
+ adcq %rax, %r10
+ adcq %r14, %r11
+ adcq %r13, %rcx
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ addq %r15, %r8
+ adcq -56(%rsp), %r10 # 8-byte Folded Reload
+ adcq %rdx, %r11
+ adcq -72(%rsp), %rcx # 8-byte Folded Reload
+ adcq -40(%rsp), %rsi # 8-byte Folded Reload
+ movq -16(%rsp), %rax # 8-byte Reload
+ movq 32(%rax), %rdi
+ movq %rdi, %rax
+ mulq 32(%r9)
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq %rdi, %rax
+ mulq 24(%r9)
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq %rax, %r13
+ movq %rdi, %rax
+ mulq 16(%r9)
+ movq %rdx, %r14
+ movq %rax, %rbx
+ movq %rdi, %rax
+ mulq 8(%r9)
+ movq %rdx, %r12
+ movq %rax, %rbp
+ movq %rdi, %rax
+ mulq (%r9)
+ addq %r8, %rax
+ movq -8(%rsp), %rdi # 8-byte Reload
+ movq %rax, 32(%rdi)
+ adcq %r10, %rbp
+ adcq %r11, %rbx
+ adcq %rcx, %r13
+ adcq %rsi, %r15
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %rdx, %rbp
+ movq %rbp, 40(%rdi)
+ adcq %r12, %rbx
+ movq %rbx, 48(%rdi)
+ adcq %r14, %r13
+ movq %r13, 56(%rdi)
+ adcq -24(%rsp), %r15 # 8-byte Folded Reload
+ movq %r15, 64(%rdi)
+ adcq -16(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 72(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end66:
+ .size mcl_fpDbl_mulPre5L, .Lfunc_end66-mcl_fpDbl_mulPre5L
+
+ .globl mcl_fpDbl_sqrPre5L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre5L,@function
+mcl_fpDbl_sqrPre5L: # @mcl_fpDbl_sqrPre5L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, -8(%rsp) # 8-byte Spill
+ movq 32(%rsi), %r11
+ movq (%rsi), %r13
+ movq 8(%rsi), %rbx
+ movq %r11, %rax
+ mulq %rbx
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq 24(%rsi), %rbp
+ movq %rbp, %rax
+ mulq %rbx
+ movq %rax, -48(%rsp) # 8-byte Spill
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq 16(%rsi), %rcx
+ movq %rcx, %rax
+ mulq %rbx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq %r11, %rax
+ mulq %r13
+ movq %rdx, %r8
+ movq %rax, -72(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq %r13
+ movq %rdx, %r9
+ movq %rax, %rbp
+ movq %rcx, %rax
+ mulq %r13
+ movq %rdx, %r10
+ movq %rax, %r12
+ movq %rbx, %rax
+ mulq %rbx
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq %rbx, %rax
+ mulq %r13
+ movq %rdx, %rbx
+ movq %rax, %r14
+ movq %r13, %rax
+ mulq %r13
+ movq %rax, (%rdi)
+ addq %r14, %rdx
+ adcq %rbx, %r12
+ adcq %rbp, %r10
+ adcq -72(%rsp), %r9 # 8-byte Folded Reload
+ adcq $0, %r8
+ addq %r14, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r15, %r12
+ adcq -56(%rsp), %r10 # 8-byte Folded Reload
+ adcq -48(%rsp), %r9 # 8-byte Folded Reload
+ adcq -40(%rsp), %r8 # 8-byte Folded Reload
+ sbbq %rdi, %rdi
+ andl $1, %edi
+ addq %rbx, %r12
+ adcq -64(%rsp), %r10 # 8-byte Folded Reload
+ adcq -32(%rsp), %r9 # 8-byte Folded Reload
+ adcq -24(%rsp), %r8 # 8-byte Folded Reload
+ adcq -16(%rsp), %rdi # 8-byte Folded Reload
+ movq %r11, %rax
+ mulq %rcx
+ movq %rax, %r11
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq 24(%rsi), %rbx
+ movq %rbx, %rax
+ mulq %rcx
+ movq %rax, %r14
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq (%rsi), %rbp
+ movq %rbp, -32(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ mulq %rcx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq %rbp, %rax
+ mulq %rcx
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ movq %rcx, %rax
+ mulq %rcx
+ movq %rax, %r13
+ addq %r12, %rbp
+ movq -8(%rsp), %rax # 8-byte Reload
+ movq %rbp, 16(%rax)
+ adcq %r10, %r15
+ adcq %r9, %r13
+ adcq %r8, %r14
+ adcq %rdi, %r11
+ sbbq %r10, %r10
+ andl $1, %r10d
+ addq -56(%rsp), %r15 # 8-byte Folded Reload
+ adcq -48(%rsp), %r13 # 8-byte Folded Reload
+ adcq %rdx, %r14
+ adcq -24(%rsp), %r11 # 8-byte Folded Reload
+ adcq -16(%rsp), %r10 # 8-byte Folded Reload
+ movq -40(%rsp), %rax # 8-byte Reload
+ mulq %rbx
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq %rax, %r8
+ movq -32(%rsp), %rax # 8-byte Reload
+ mulq %rbx
+ movq %rax, %rdi
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq 16(%rsi), %rbp
+ movq %rbp, -16(%rsp) # 8-byte Spill
+ movq 32(%rsi), %rcx
+ movq %rcx, %rax
+ mulq %rbx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rax, %r9
+ movq %rbp, %rax
+ mulq %rbx
+ movq %rdx, %rbp
+ movq %rax, %r12
+ movq %rbx, %rax
+ mulq %rbx
+ movq %rax, %rbx
+ addq %r15, %rdi
+ movq -8(%rsp), %r15 # 8-byte Reload
+ movq %rdi, 24(%r15)
+ adcq %r13, %r8
+ adcq %r14, %r12
+ adcq %r11, %rbx
+ adcq %r10, %r9
+ sbbq %r10, %r10
+ andl $1, %r10d
+ addq -40(%rsp), %r8 # 8-byte Folded Reload
+ adcq -24(%rsp), %r12 # 8-byte Folded Reload
+ adcq %rbp, %rbx
+ adcq %rdx, %r9
+ adcq -32(%rsp), %r10 # 8-byte Folded Reload
+ movq %rcx, %rax
+ mulq 24(%rsi)
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ movq %rcx, %rax
+ mulq 8(%rsi)
+ movq %rdx, %r14
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq (%rsi)
+ movq %rdx, %r13
+ movq %rax, %rsi
+ movq %rcx, %rax
+ mulq %rcx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rax, %r11
+ movq -16(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ addq %r8, %rsi
+ movq %rsi, 32(%r15)
+ adcq %r12, %rdi
+ adcq %rbx, %rax
+ adcq %r9, %rbp
+ adcq %r10, %r11
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %r13, %rdi
+ movq %rdi, 40(%r15)
+ adcq %r14, %rax
+ movq %rax, 48(%r15)
+ adcq %rdx, %rbp
+ movq %rbp, 56(%r15)
+ adcq -24(%rsp), %r11 # 8-byte Folded Reload
+ movq %r11, 64(%r15)
+ adcq -32(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 72(%r15)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end67:
+ .size mcl_fpDbl_sqrPre5L, .Lfunc_end67-mcl_fpDbl_sqrPre5L
+
+ .globl mcl_fp_mont5L
+ .align 16, 0x90
+ .type mcl_fp_mont5L,@function
+mcl_fp_mont5L: # @mcl_fp_mont5L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ pushq %rax
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rdi, -104(%rsp) # 8-byte Spill
+ movq 32(%rsi), %rax
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq (%rdx), %rdi
+ mulq %rdi
+ movq %rax, %r8
+ movq %rdx, %r14
+ movq 24(%rsi), %rax
+ movq %rax, -64(%rsp) # 8-byte Spill
+ mulq %rdi
+ movq %rax, %r9
+ movq %rdx, %r12
+ movq 16(%rsi), %rax
+ movq %rax, -72(%rsp) # 8-byte Spill
+ mulq %rdi
+ movq %rax, %r10
+ movq %rdx, %rbp
+ movq (%rsi), %rbx
+ movq %rbx, -80(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -88(%rsp) # 8-byte Spill
+ mulq %rdi
+ movq %rdx, %r11
+ movq %rax, %rsi
+ movq %rbx, %rax
+ mulq %rdi
+ movq %rax, -128(%rsp) # 8-byte Spill
+ movq %rdx, %r15
+ addq %rsi, %r15
+ adcq %r10, %r11
+ adcq %r9, %rbp
+ movq %rbp, -96(%rsp) # 8-byte Spill
+ adcq %r8, %r12
+ movq %r12, -112(%rsp) # 8-byte Spill
+ adcq $0, %r14
+ movq %r14, -120(%rsp) # 8-byte Spill
+ movq -8(%rcx), %rdx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ imulq %rdx, %rbp
+ movq (%rcx), %r9
+ movq %r9, -32(%rsp) # 8-byte Spill
+ movq 32(%rcx), %rdx
+ movq %rdx, (%rsp) # 8-byte Spill
+ movq 24(%rcx), %rsi
+ movq %rsi, -8(%rsp) # 8-byte Spill
+ movq 16(%rcx), %rbx
+ movq %rbx, -16(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, -24(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdx
+ movq %rdx, %r14
+ movq %rax, %r13
+ movq %rbp, %rax
+ mulq %rsi
+ movq %rdx, %rdi
+ movq %rax, %r10
+ movq %rbp, %rax
+ mulq %rbx
+ movq %rdx, %rbx
+ movq %rax, %r8
+ movq %rbp, %rax
+ mulq %rcx
+ movq %rdx, %rsi
+ movq %rax, %r12
+ movq %rbp, %rax
+ mulq %r9
+ movq %rdx, %rbp
+ addq %r12, %rbp
+ adcq %r8, %rsi
+ adcq %r10, %rbx
+ adcq %r13, %rdi
+ adcq $0, %r14
+ addq -128(%rsp), %rax # 8-byte Folded Reload
+ adcq %r15, %rbp
+ adcq %r11, %rsi
+ adcq -96(%rsp), %rbx # 8-byte Folded Reload
+ adcq -112(%rsp), %rdi # 8-byte Folded Reload
+ adcq -120(%rsp), %r14 # 8-byte Folded Reload
+ sbbq %r9, %r9
+ andl $1, %r9d
+ movq -48(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rcx
+ movq %rcx, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -88(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r10
+ movq %rcx, %rax
+ mulq -80(%rsp) # 8-byte Folded Reload
+ movq %rax, %r15
+ movq %rdx, %rcx
+ addq %r10, %rcx
+ adcq -120(%rsp), %r8 # 8-byte Folded Reload
+ adcq -112(%rsp), %r12 # 8-byte Folded Reload
+ adcq -96(%rsp), %r11 # 8-byte Folded Reload
+ adcq $0, %r13
+ addq %rbp, %r15
+ adcq %rsi, %rcx
+ adcq %rbx, %r8
+ adcq %rdi, %r12
+ adcq %r14, %r11
+ adcq %r9, %r13
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %r15, %rsi
+ imulq -40(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -128(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %rdi
+ movq %rsi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ addq %rdi, %rbx
+ adcq -128(%rsp), %r10 # 8-byte Folded Reload
+ adcq -120(%rsp), %r9 # 8-byte Folded Reload
+ adcq -112(%rsp), %rbp # 8-byte Folded Reload
+ adcq $0, %r14
+ addq %r15, %rax
+ adcq %rcx, %rbx
+ adcq %r8, %r10
+ adcq %r12, %r9
+ adcq %r11, %rbp
+ adcq %r13, %r14
+ adcq $0, -96(%rsp) # 8-byte Folded Spill
+ movq -48(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rcx
+ movq %rcx, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, -128(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -88(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r8
+ movq %rcx, %rax
+ mulq -80(%rsp) # 8-byte Folded Reload
+ movq %rax, %r12
+ movq %rdx, %r15
+ addq %r8, %r15
+ adcq -128(%rsp), %rdi # 8-byte Folded Reload
+ adcq -120(%rsp), %rsi # 8-byte Folded Reload
+ adcq -112(%rsp), %r11 # 8-byte Folded Reload
+ adcq $0, %r13
+ addq %rbx, %r12
+ adcq %r10, %r15
+ adcq %r9, %rdi
+ adcq %rbp, %rsi
+ adcq %r14, %r11
+ adcq -96(%rsp), %r13 # 8-byte Folded Reload
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %r12, %rbp
+ imulq -40(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r10
+ movq %rbp, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r14
+ movq %rbp, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbp
+ addq %r14, %rbp
+ adcq %r10, %rbx
+ adcq -120(%rsp), %rcx # 8-byte Folded Reload
+ adcq -112(%rsp), %r9 # 8-byte Folded Reload
+ adcq $0, %r8
+ addq %r12, %rax
+ adcq %r15, %rbp
+ adcq %rdi, %rbx
+ adcq %rsi, %rcx
+ adcq %r11, %r9
+ adcq %r13, %r8
+ adcq $0, -96(%rsp) # 8-byte Folded Spill
+ movq -48(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rsi
+ movq %rsi, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, %r15
+ movq %rsi, %rax
+ mulq -88(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r12
+ movq %rsi, %rax
+ mulq -80(%rsp) # 8-byte Folded Reload
+ movq %rax, %r14
+ movq %rdx, %rsi
+ addq %r12, %rsi
+ adcq %r15, %rdi
+ adcq -120(%rsp), %r11 # 8-byte Folded Reload
+ adcq -112(%rsp), %r10 # 8-byte Folded Reload
+ adcq $0, %r13
+ addq %rbp, %r14
+ adcq %rbx, %rsi
+ adcq %rcx, %rdi
+ adcq %r9, %r11
+ adcq %r8, %r10
+ adcq -96(%rsp), %r13 # 8-byte Folded Reload
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %r14, %rbp
+ imulq -40(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r8
+ movq %rbp, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r12
+ movq %rbp, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbp
+ addq %r12, %rbp
+ adcq %r8, %rbx
+ adcq -120(%rsp), %rcx # 8-byte Folded Reload
+ adcq -112(%rsp), %r15 # 8-byte Folded Reload
+ adcq $0, %r9
+ addq %r14, %rax
+ adcq %rsi, %rbp
+ adcq %rdi, %rbx
+ adcq %r11, %rcx
+ adcq %r10, %r15
+ adcq %r13, %r9
+ movq -96(%rsp), %r14 # 8-byte Reload
+ adcq $0, %r14
+ movq -48(%rsp), %rax # 8-byte Reload
+ movq 32(%rax), %rsi
+ movq %rsi, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -64(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -72(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -88(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %rdi
+ movq %rsi, %rax
+ mulq -80(%rsp) # 8-byte Folded Reload
+ movq %rax, %r10
+ movq %rdx, %r8
+ addq %rdi, %r8
+ adcq -72(%rsp), %r12 # 8-byte Folded Reload
+ adcq -64(%rsp), %r11 # 8-byte Folded Reload
+ adcq -56(%rsp), %r13 # 8-byte Folded Reload
+ movq -48(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ addq %rbp, %r10
+ adcq %rbx, %r8
+ adcq %rcx, %r12
+ adcq %r15, %r11
+ adcq %r9, %r13
+ adcq %r14, %rax
+ movq %rax, -48(%rsp) # 8-byte Spill
+ sbbq %rcx, %rcx
+ movq -40(%rsp), %rsi # 8-byte Reload
+ imulq %r10, %rsi
+ movq %rsi, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r15
+ movq %rsi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r9
+ movq %rsi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ addq %r9, %rdx
+ adcq %r15, %rdi
+ adcq -56(%rsp), %rbp # 8-byte Folded Reload
+ adcq -40(%rsp), %rbx # 8-byte Folded Reload
+ adcq $0, %r14
+ andl $1, %ecx
+ addq %r10, %rax
+ adcq %r8, %rdx
+ adcq %r12, %rdi
+ adcq %r11, %rbp
+ adcq %r13, %rbx
+ adcq -48(%rsp), %r14 # 8-byte Folded Reload
+ adcq $0, %rcx
+ movq %rdx, %rax
+ subq -32(%rsp), %rax # 8-byte Folded Reload
+ movq %rdi, %r8
+ sbbq -24(%rsp), %r8 # 8-byte Folded Reload
+ movq %rbp, %r9
+ sbbq -16(%rsp), %r9 # 8-byte Folded Reload
+ movq %rbx, %r10
+ sbbq -8(%rsp), %r10 # 8-byte Folded Reload
+ movq %r14, %r11
+ sbbq (%rsp), %r11 # 8-byte Folded Reload
+ sbbq $0, %rcx
+ andl $1, %ecx
+ cmovneq %rbx, %r10
+ testb %cl, %cl
+ cmovneq %rdx, %rax
+ movq -104(%rsp), %rcx # 8-byte Reload
+ movq %rax, (%rcx)
+ cmovneq %rdi, %r8
+ movq %r8, 8(%rcx)
+ cmovneq %rbp, %r9
+ movq %r9, 16(%rcx)
+ movq %r10, 24(%rcx)
+ cmovneq %r14, %r11
+ movq %r11, 32(%rcx)
+ addq $8, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end68:
+ .size mcl_fp_mont5L, .Lfunc_end68-mcl_fp_mont5L
+
+ .globl mcl_fp_montNF5L
+ .align 16, 0x90
+ .type mcl_fp_montNF5L,@function
+mcl_fp_montNF5L: # @mcl_fp_montNF5L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rdi, -104(%rsp) # 8-byte Spill
+ movq 32(%rsi), %rax
+ movq %rax, -48(%rsp) # 8-byte Spill
+ movq (%rdx), %rbx
+ mulq %rbx
+ movq %rax, %r15
+ movq %rdx, %r10
+ movq 24(%rsi), %rax
+ movq %rax, -56(%rsp) # 8-byte Spill
+ mulq %rbx
+ movq %rax, %r13
+ movq %rdx, %r14
+ movq 16(%rsi), %rax
+ movq %rax, -64(%rsp) # 8-byte Spill
+ mulq %rbx
+ movq %rax, %r8
+ movq %rdx, %r9
+ movq (%rsi), %rbp
+ movq %rbp, -80(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -88(%rsp) # 8-byte Spill
+ mulq %rbx
+ movq %rdx, %r11
+ movq %rax, %rdi
+ movq %rbp, %rax
+ mulq %rbx
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rdx, %r12
+ addq %rdi, %r12
+ adcq %r8, %r11
+ adcq %r13, %r9
+ adcq %r15, %r14
+ adcq $0, %r10
+ movq -8(%rcx), %rdx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rax, %rsi
+ imulq %rdx, %rsi
+ movq (%rcx), %r8
+ movq %r8, -96(%rsp) # 8-byte Spill
+ movq 32(%rcx), %rdx
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movq 24(%rcx), %rdi
+ movq %rdi, -16(%rsp) # 8-byte Spill
+ movq 16(%rcx), %rbx
+ movq %rbx, -24(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rbp
+ movq %rbp, -72(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq %rdx
+ movq %rdx, -120(%rsp) # 8-byte Spill
+ movq %rax, %rcx
+ movq %rsi, %rax
+ mulq %rdi
+ movq %rdx, -128(%rsp) # 8-byte Spill
+ movq %rax, %rdi
+ movq %rsi, %rax
+ mulq %rbx
+ movq %rdx, %r15
+ movq %rax, %rbx
+ movq %rsi, %rax
+ mulq %rbp
+ movq %rdx, %r13
+ movq %rax, %rbp
+ movq %rsi, %rax
+ mulq %r8
+ addq -112(%rsp), %rax # 8-byte Folded Reload
+ adcq %r12, %rbp
+ adcq %r11, %rbx
+ adcq %r9, %rdi
+ adcq %r14, %rcx
+ adcq $0, %r10
+ addq %rdx, %rbp
+ adcq %r13, %rbx
+ adcq %r15, %rdi
+ adcq -128(%rsp), %rcx # 8-byte Folded Reload
+ adcq -120(%rsp), %r10 # 8-byte Folded Reload
+ movq -40(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rsi
+ movq %rsi, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, %r8
+ movq %rsi, %rax
+ mulq -88(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, %r14
+ movq %rsi, %rax
+ mulq -80(%rsp) # 8-byte Folded Reload
+ movq %rax, %rsi
+ movq %rdx, %r12
+ addq %r14, %r12
+ adcq %r8, %r11
+ adcq -120(%rsp), %r9 # 8-byte Folded Reload
+ adcq -112(%rsp), %r15 # 8-byte Folded Reload
+ adcq $0, %r13
+ addq %rbp, %rsi
+ adcq %rbx, %r12
+ adcq %rdi, %r11
+ adcq %rcx, %r9
+ adcq %r10, %r15
+ adcq $0, %r13
+ movq %rsi, %rdi
+ imulq -32(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ movq %rdi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -120(%rsp) # 8-byte Spill
+ movq %rax, %r14
+ movq %rdi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r8
+ movq %rdi, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r10
+ movq %rdi, %rax
+ mulq -96(%rsp) # 8-byte Folded Reload
+ addq %rsi, %rax
+ adcq %r12, %r10
+ adcq %r11, %r8
+ adcq %r9, %r14
+ adcq %r15, %rbp
+ adcq $0, %r13
+ addq %rdx, %r10
+ adcq %rbx, %r8
+ adcq %rcx, %r14
+ adcq -120(%rsp), %rbp # 8-byte Folded Reload
+ adcq -112(%rsp), %r13 # 8-byte Folded Reload
+ movq -40(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rsi
+ movq %rsi, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %rbx
+ movq %rsi, %rax
+ mulq -88(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r12
+ movq %rsi, %rax
+ mulq -80(%rsp) # 8-byte Folded Reload
+ movq %rax, %r11
+ movq %rdx, %rsi
+ addq %r12, %rsi
+ adcq %rbx, %rcx
+ adcq -120(%rsp), %rdi # 8-byte Folded Reload
+ adcq -112(%rsp), %r9 # 8-byte Folded Reload
+ adcq $0, %r15
+ addq %r10, %r11
+ adcq %r8, %rsi
+ adcq %r14, %rcx
+ adcq %rbp, %rdi
+ adcq %r13, %r9
+ adcq $0, %r15
+ movq %r11, %rbx
+ imulq -32(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ movq %rax, %r13
+ movq %rbx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -120(%rsp) # 8-byte Spill
+ movq %rax, %r8
+ movq %rbx, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, %r10
+ movq %rbx, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %rbp
+ movq %rbx, %rax
+ mulq -96(%rsp) # 8-byte Folded Reload
+ addq %r11, %rax
+ adcq %rsi, %rbp
+ adcq %rcx, %r10
+ adcq %rdi, %r8
+ adcq %r9, %r13
+ adcq $0, %r15
+ addq %rdx, %rbp
+ adcq %r12, %r10
+ adcq %r14, %r8
+ adcq -120(%rsp), %r13 # 8-byte Folded Reload
+ adcq -112(%rsp), %r15 # 8-byte Folded Reload
+ movq -40(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rsi
+ movq %rsi, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %rbx
+ movq %rsi, %rax
+ mulq -88(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r12
+ movq %rsi, %rax
+ mulq -80(%rsp) # 8-byte Folded Reload
+ movq %rax, %r14
+ movq %rdx, %rsi
+ addq %r12, %rsi
+ adcq %rbx, %rcx
+ adcq -120(%rsp), %rdi # 8-byte Folded Reload
+ adcq -112(%rsp), %r9 # 8-byte Folded Reload
+ adcq $0, %r11
+ addq %rbp, %r14
+ adcq %r10, %rsi
+ adcq %r8, %rcx
+ adcq %r13, %rdi
+ adcq %r15, %r9
+ adcq $0, %r11
+ movq %r14, %rbx
+ imulq -32(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ movq %rax, %r13
+ movq %rbx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -120(%rsp) # 8-byte Spill
+ movq %rax, %r8
+ movq %rbx, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %r10
+ movq %rbx, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %rbp
+ movq %rbx, %rax
+ mulq -96(%rsp) # 8-byte Folded Reload
+ addq %r14, %rax
+ adcq %rsi, %rbp
+ adcq %rcx, %r10
+ adcq %rdi, %r8
+ adcq %r9, %r13
+ adcq $0, %r11
+ addq %rdx, %rbp
+ adcq %r12, %r10
+ adcq %r15, %r8
+ adcq -120(%rsp), %r13 # 8-byte Folded Reload
+ adcq -112(%rsp), %r11 # 8-byte Folded Reload
+ movq -40(%rsp), %rax # 8-byte Reload
+ movq 32(%rax), %rcx
+ movq %rcx, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -48(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -88(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %rsi
+ movq %rcx, %rax
+ mulq -80(%rsp) # 8-byte Folded Reload
+ movq %rax, %r12
+ movq %rdx, %rdi
+ addq %rsi, %rdi
+ adcq -56(%rsp), %r15 # 8-byte Folded Reload
+ adcq -48(%rsp), %r14 # 8-byte Folded Reload
+ adcq -40(%rsp), %r9 # 8-byte Folded Reload
+ adcq $0, %rbx
+ addq %rbp, %r12
+ adcq %r10, %rdi
+ adcq %r8, %r15
+ adcq %r13, %r14
+ adcq %r11, %r9
+ adcq $0, %rbx
+ movq -32(%rsp), %r8 # 8-byte Reload
+ imulq %r12, %r8
+ movq %r8, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rax, %rcx
+ movq %r8, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ movq %r8, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, %rsi
+ movq %r8, %rax
+ movq %r8, %r13
+ movq -96(%rsp), %r10 # 8-byte Reload
+ mulq %r10
+ movq %rdx, %r11
+ movq %rax, %r8
+ movq %r13, %rax
+ movq -72(%rsp), %r13 # 8-byte Reload
+ mulq %r13
+ addq %r12, %r8
+ adcq %rdi, %rax
+ adcq %r15, %rsi
+ adcq %r14, %rbp
+ adcq %r9, %rcx
+ adcq $0, %rbx
+ addq %r11, %rax
+ adcq %rdx, %rsi
+ adcq -48(%rsp), %rbp # 8-byte Folded Reload
+ adcq -40(%rsp), %rcx # 8-byte Folded Reload
+ adcq -32(%rsp), %rbx # 8-byte Folded Reload
+ movq %rax, %r11
+ subq %r10, %r11
+ movq %rsi, %r10
+ sbbq %r13, %r10
+ movq %rbp, %r8
+ sbbq -24(%rsp), %r8 # 8-byte Folded Reload
+ movq %rcx, %r9
+ sbbq -16(%rsp), %r9 # 8-byte Folded Reload
+ movq %rbx, %rdx
+ sbbq -8(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, %rdi
+ sarq $63, %rdi
+ cmovsq %rax, %r11
+ movq -104(%rsp), %rax # 8-byte Reload
+ movq %r11, (%rax)
+ cmovsq %rsi, %r10
+ movq %r10, 8(%rax)
+ cmovsq %rbp, %r8
+ movq %r8, 16(%rax)
+ cmovsq %rcx, %r9
+ movq %r9, 24(%rax)
+ cmovsq %rbx, %rdx
+ movq %rdx, 32(%rax)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end69:
+ .size mcl_fp_montNF5L, .Lfunc_end69-mcl_fp_montNF5L
+
+ .globl mcl_fp_montRed5L
+ .align 16, 0x90
+ .type mcl_fp_montRed5L,@function
+mcl_fp_montRed5L: # @mcl_fp_montRed5L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rdi, -80(%rsp) # 8-byte Spill
+ movq -8(%rcx), %rax
+ movq %rax, -64(%rsp) # 8-byte Spill
+ movq (%rcx), %rdi
+ movq %rdi, -24(%rsp) # 8-byte Spill
+ movq (%rsi), %r9
+ movq %r9, %rbp
+ imulq %rax, %rbp
+ movq 32(%rcx), %rdx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdx
+ movq %rax, %r10
+ movq %rdx, %r13
+ movq 24(%rcx), %rdx
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdx
+ movq %rax, %r14
+ movq %rdx, %r11
+ movq 16(%rcx), %rdx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, -32(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdx
+ movq %rdx, %r15
+ movq %rax, %r12
+ movq %rbp, %rax
+ mulq %rcx
+ movq %rdx, %r8
+ movq %rax, %rbx
+ movq %rbp, %rax
+ mulq %rdi
+ movq %rdx, %rcx
+ addq %rbx, %rcx
+ adcq %r12, %r8
+ adcq %r14, %r15
+ adcq %r10, %r11
+ adcq $0, %r13
+ addq %r9, %rax
+ movq 72(%rsi), %rax
+ movq 64(%rsi), %rdx
+ adcq 8(%rsi), %rcx
+ adcq 16(%rsi), %r8
+ adcq 24(%rsi), %r15
+ adcq 32(%rsi), %r11
+ adcq 40(%rsi), %r13
+ movq %r13, -88(%rsp) # 8-byte Spill
+ movq 56(%rsi), %rdi
+ movq 48(%rsi), %rsi
+ adcq $0, %rsi
+ movq %rsi, -96(%rsp) # 8-byte Spill
+ adcq $0, %rdi
+ movq %rdi, -72(%rsp) # 8-byte Spill
+ adcq $0, %rdx
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ adcq $0, %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ sbbq %rdi, %rdi
+ andl $1, %edi
+ movq %rcx, %rsi
+ movq -64(%rsp), %r9 # 8-byte Reload
+ imulq %r9, %rsi
+ movq %rsi, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %rbx
+ movq %rsi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %rbp
+ movq %rsi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ addq %rbp, %rsi
+ adcq %rbx, %r13
+ adcq -112(%rsp), %r12 # 8-byte Folded Reload
+ adcq -104(%rsp), %r14 # 8-byte Folded Reload
+ adcq $0, %r10
+ addq %rcx, %rax
+ adcq %r8, %rsi
+ adcq %r15, %r13
+ adcq %r11, %r12
+ adcq -88(%rsp), %r14 # 8-byte Folded Reload
+ adcq -96(%rsp), %r10 # 8-byte Folded Reload
+ adcq $0, -72(%rsp) # 8-byte Folded Spill
+ adcq $0, -56(%rsp) # 8-byte Folded Spill
+ adcq $0, -40(%rsp) # 8-byte Folded Spill
+ adcq $0, %rdi
+ movq %rsi, %rcx
+ imulq %r9, %rcx
+ movq %rcx, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r8
+ movq %rcx, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbp
+ addq %r8, %rbp
+ adcq -104(%rsp), %rbx # 8-byte Folded Reload
+ adcq -96(%rsp), %r15 # 8-byte Folded Reload
+ adcq -88(%rsp), %r11 # 8-byte Folded Reload
+ adcq $0, %r9
+ addq %rsi, %rax
+ adcq %r13, %rbp
+ adcq %r12, %rbx
+ adcq %r14, %r15
+ adcq %r10, %r11
+ adcq -72(%rsp), %r9 # 8-byte Folded Reload
+ adcq $0, -56(%rsp) # 8-byte Folded Spill
+ adcq $0, -40(%rsp) # 8-byte Folded Spill
+ adcq $0, %rdi
+ movq %rbp, %rcx
+ imulq -64(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, %rax
+ movq -48(%rsp), %rsi # 8-byte Reload
+ mulq %rsi
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %r10
+ movq %rcx, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %r8
+ movq %rcx, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ addq %r8, %rcx
+ adcq %r10, %r13
+ adcq -96(%rsp), %r12 # 8-byte Folded Reload
+ adcq -88(%rsp), %r14 # 8-byte Folded Reload
+ movq -72(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq %rbp, %rax
+ adcq %rbx, %rcx
+ adcq %r15, %r13
+ adcq %r11, %r12
+ adcq %r9, %r14
+ adcq -56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ adcq $0, -40(%rsp) # 8-byte Folded Spill
+ adcq $0, %rdi
+ movq -64(%rsp), %rbx # 8-byte Reload
+ imulq %rcx, %rbx
+ movq %rbx, %rax
+ mulq %rsi
+ movq %rdx, %rsi
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -64(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r15
+ movq %rbx, %rax
+ movq %rbx, %r10
+ movq -32(%rsp), %r11 # 8-byte Reload
+ mulq %r11
+ movq %rdx, %rbx
+ movq %rax, %r8
+ movq %r10, %rax
+ movq -24(%rsp), %r10 # 8-byte Reload
+ mulq %r10
+ addq %r8, %rdx
+ adcq %r15, %rbx
+ adcq -64(%rsp), %rbp # 8-byte Folded Reload
+ adcq -56(%rsp), %r9 # 8-byte Folded Reload
+ adcq $0, %rsi
+ addq %rcx, %rax
+ adcq %r13, %rdx
+ adcq %r12, %rbx
+ adcq %r14, %rbp
+ adcq -72(%rsp), %r9 # 8-byte Folded Reload
+ adcq -40(%rsp), %rsi # 8-byte Folded Reload
+ adcq $0, %rdi
+ movq %rdx, %rax
+ subq %r10, %rax
+ movq %rbx, %rcx
+ sbbq %r11, %rcx
+ movq %rbp, %r8
+ sbbq -16(%rsp), %r8 # 8-byte Folded Reload
+ movq %r9, %r10
+ sbbq -8(%rsp), %r10 # 8-byte Folded Reload
+ movq %rsi, %r11
+ sbbq -48(%rsp), %r11 # 8-byte Folded Reload
+ sbbq $0, %rdi
+ andl $1, %edi
+ cmovneq %rsi, %r11
+ testb %dil, %dil
+ cmovneq %rdx, %rax
+ movq -80(%rsp), %rdx # 8-byte Reload
+ movq %rax, (%rdx)
+ cmovneq %rbx, %rcx
+ movq %rcx, 8(%rdx)
+ cmovneq %rbp, %r8
+ movq %r8, 16(%rdx)
+ cmovneq %r9, %r10
+ movq %r10, 24(%rdx)
+ movq %r11, 32(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end70:
+ .size mcl_fp_montRed5L, .Lfunc_end70-mcl_fp_montRed5L
+
+ .globl mcl_fp_addPre5L
+ .align 16, 0x90
+ .type mcl_fp_addPre5L,@function
+mcl_fp_addPre5L: # @mcl_fp_addPre5L
+# BB#0:
+ movq 32(%rdx), %r8
+ movq 24(%rdx), %r9
+ movq 24(%rsi), %r11
+ movq 32(%rsi), %r10
+ movq 16(%rdx), %rcx
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rcx
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rcx, 16(%rdi)
+ adcq %r9, %r11
+ movq %r11, 24(%rdi)
+ adcq %r8, %r10
+ movq %r10, 32(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end71:
+ .size mcl_fp_addPre5L, .Lfunc_end71-mcl_fp_addPre5L
+
+ .globl mcl_fp_subPre5L
+ .align 16, 0x90
+ .type mcl_fp_subPre5L,@function
+mcl_fp_subPre5L: # @mcl_fp_subPre5L
+# BB#0:
+ pushq %rbx
+ movq 32(%rsi), %r10
+ movq 24(%rdx), %r8
+ movq 32(%rdx), %r9
+ movq 24(%rsi), %r11
+ movq 16(%rsi), %rcx
+ movq (%rsi), %rbx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %rcx
+ movq %rbx, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %rcx, 16(%rdi)
+ sbbq %r8, %r11
+ movq %r11, 24(%rdi)
+ sbbq %r9, %r10
+ movq %r10, 32(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ popq %rbx
+ retq
+.Lfunc_end72:
+ .size mcl_fp_subPre5L, .Lfunc_end72-mcl_fp_subPre5L
+
+ .globl mcl_fp_shr1_5L
+ .align 16, 0x90
+ .type mcl_fp_shr1_5L,@function
+mcl_fp_shr1_5L: # @mcl_fp_shr1_5L
+# BB#0:
+ movq 32(%rsi), %r8
+ movq 24(%rsi), %rcx
+ movq 16(%rsi), %rdx
+ movq (%rsi), %rax
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rax
+ movq %rax, (%rdi)
+ shrdq $1, %rdx, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rcx, %rdx
+ movq %rdx, 16(%rdi)
+ shrdq $1, %r8, %rcx
+ movq %rcx, 24(%rdi)
+ shrq %r8
+ movq %r8, 32(%rdi)
+ retq
+.Lfunc_end73:
+ .size mcl_fp_shr1_5L, .Lfunc_end73-mcl_fp_shr1_5L
+
+ .globl mcl_fp_add5L
+ .align 16, 0x90
+ .type mcl_fp_add5L,@function
+mcl_fp_add5L: # @mcl_fp_add5L
+# BB#0:
+ pushq %rbx
+ movq 32(%rdx), %r11
+ movq 24(%rdx), %rbx
+ movq 24(%rsi), %r9
+ movq 32(%rsi), %r8
+ movq 16(%rdx), %r10
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r10
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r10, 16(%rdi)
+ adcq %rbx, %r9
+ movq %r9, 24(%rdi)
+ adcq %r11, %r8
+ movq %r8, 32(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r10
+ sbbq 24(%rcx), %r9
+ sbbq 32(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB74_2
+# BB#1: # %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r10, 16(%rdi)
+ movq %r9, 24(%rdi)
+ movq %r8, 32(%rdi)
+.LBB74_2: # %carry
+ popq %rbx
+ retq
+.Lfunc_end74:
+ .size mcl_fp_add5L, .Lfunc_end74-mcl_fp_add5L
+
+ .globl mcl_fp_addNF5L
+ .align 16, 0x90
+ .type mcl_fp_addNF5L,@function
+mcl_fp_addNF5L: # @mcl_fp_addNF5L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq 32(%rdx), %r8
+ movq 24(%rdx), %r9
+ movq 16(%rdx), %r10
+ movq (%rdx), %r14
+ movq 8(%rdx), %r11
+ addq (%rsi), %r14
+ adcq 8(%rsi), %r11
+ adcq 16(%rsi), %r10
+ adcq 24(%rsi), %r9
+ adcq 32(%rsi), %r8
+ movq %r14, %rsi
+ subq (%rcx), %rsi
+ movq %r11, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r10, %rbx
+ sbbq 16(%rcx), %rbx
+ movq %r9, %r15
+ sbbq 24(%rcx), %r15
+ movq %r8, %rax
+ sbbq 32(%rcx), %rax
+ movq %rax, %rcx
+ sarq $63, %rcx
+ cmovsq %r14, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r11, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r10, %rbx
+ movq %rbx, 16(%rdi)
+ cmovsq %r9, %r15
+ movq %r15, 24(%rdi)
+ cmovsq %r8, %rax
+ movq %rax, 32(%rdi)
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end75:
+ .size mcl_fp_addNF5L, .Lfunc_end75-mcl_fp_addNF5L
+
+ .globl mcl_fp_sub5L
+ .align 16, 0x90
+ .type mcl_fp_sub5L,@function
+mcl_fp_sub5L: # @mcl_fp_sub5L
+# BB#0:
+ pushq %r14
+ pushq %rbx
+ movq 32(%rsi), %r8
+ movq 24(%rdx), %r11
+ movq 32(%rdx), %r14
+ movq 24(%rsi), %r9
+ movq 16(%rsi), %r10
+ movq (%rsi), %rax
+ movq 8(%rsi), %rsi
+ xorl %ebx, %ebx
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %r10
+ movq %rax, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %r10, 16(%rdi)
+ sbbq %r11, %r9
+ movq %r9, 24(%rdi)
+ sbbq %r14, %r8
+ movq %r8, 32(%rdi)
+ sbbq $0, %rbx
+ testb $1, %bl
+ je .LBB76_2
+# BB#1: # %carry
+ movq 32(%rcx), %r11
+ movq 24(%rcx), %r14
+ movq 8(%rcx), %rdx
+ movq 16(%rcx), %rbx
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %rsi, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r10, %rbx
+ movq %rbx, 16(%rdi)
+ adcq %r9, %r14
+ movq %r14, 24(%rdi)
+ adcq %r8, %r11
+ movq %r11, 32(%rdi)
+.LBB76_2: # %nocarry
+ popq %rbx
+ popq %r14
+ retq
+.Lfunc_end76:
+ .size mcl_fp_sub5L, .Lfunc_end76-mcl_fp_sub5L
+
+ .globl mcl_fp_subNF5L
+ .align 16, 0x90
+ .type mcl_fp_subNF5L,@function
+mcl_fp_subNF5L: # @mcl_fp_subNF5L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq 32(%rsi), %r14
+ movq 24(%rsi), %r8
+ movq 16(%rsi), %r9
+ movq (%rsi), %r10
+ movq 8(%rsi), %r11
+ subq (%rdx), %r10
+ sbbq 8(%rdx), %r11
+ sbbq 16(%rdx), %r9
+ sbbq 24(%rdx), %r8
+ sbbq 32(%rdx), %r14
+ movq %r14, %rdx
+ sarq $63, %rdx
+ movq %rdx, %rsi
+ shldq $1, %r14, %rsi
+ movq 8(%rcx), %rbx
+ andq %rsi, %rbx
+ andq (%rcx), %rsi
+ movq 32(%rcx), %r15
+ andq %rdx, %r15
+ movq 24(%rcx), %rax
+ andq %rdx, %rax
+ rolq %rdx
+ andq 16(%rcx), %rdx
+ addq %r10, %rsi
+ movq %rsi, (%rdi)
+ adcq %r11, %rbx
+ movq %rbx, 8(%rdi)
+ adcq %r9, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r8, %rax
+ movq %rax, 24(%rdi)
+ adcq %r14, %r15
+ movq %r15, 32(%rdi)
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end77:
+ .size mcl_fp_subNF5L, .Lfunc_end77-mcl_fp_subNF5L
+
+ .globl mcl_fpDbl_add5L
+ .align 16, 0x90
+ .type mcl_fpDbl_add5L,@function
+mcl_fpDbl_add5L: # @mcl_fpDbl_add5L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 72(%rdx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq 64(%rdx), %r11
+ movq 56(%rdx), %r14
+ movq 48(%rdx), %r15
+ movq 24(%rsi), %rbp
+ movq 32(%rsi), %r13
+ movq 16(%rdx), %r12
+ movq (%rdx), %rbx
+ movq 8(%rdx), %rax
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %rax
+ adcq 16(%rsi), %r12
+ adcq 24(%rdx), %rbp
+ adcq 32(%rdx), %r13
+ movq 40(%rdx), %r9
+ movq %rbx, (%rdi)
+ movq 72(%rsi), %r8
+ movq %rax, 8(%rdi)
+ movq 64(%rsi), %r10
+ movq %r12, 16(%rdi)
+ movq 56(%rsi), %r12
+ movq %rbp, 24(%rdi)
+ movq 48(%rsi), %rbp
+ movq 40(%rsi), %rbx
+ movq %r13, 32(%rdi)
+ adcq %r9, %rbx
+ adcq %r15, %rbp
+ adcq %r14, %r12
+ adcq %r11, %r10
+ adcq -8(%rsp), %r8 # 8-byte Folded Reload
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq %rbx, %rax
+ subq (%rcx), %rax
+ movq %rbp, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r12, %r9
+ sbbq 16(%rcx), %r9
+ movq %r10, %r11
+ sbbq 24(%rcx), %r11
+ movq %r8, %r14
+ sbbq 32(%rcx), %r14
+ sbbq $0, %rsi
+ andl $1, %esi
+ cmovneq %rbx, %rax
+ movq %rax, 40(%rdi)
+ testb %sil, %sil
+ cmovneq %rbp, %rdx
+ movq %rdx, 48(%rdi)
+ cmovneq %r12, %r9
+ movq %r9, 56(%rdi)
+ cmovneq %r10, %r11
+ movq %r11, 64(%rdi)
+ cmovneq %r8, %r14
+ movq %r14, 72(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end78:
+ .size mcl_fpDbl_add5L, .Lfunc_end78-mcl_fpDbl_add5L
+
+ .globl mcl_fpDbl_sub5L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub5L,@function
+mcl_fpDbl_sub5L: # @mcl_fpDbl_sub5L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 72(%rdx), %r9
+ movq 64(%rdx), %r10
+ movq 56(%rdx), %r14
+ movq 16(%rsi), %r8
+ movq (%rsi), %r15
+ movq 8(%rsi), %r11
+ xorl %eax, %eax
+ subq (%rdx), %r15
+ sbbq 8(%rdx), %r11
+ sbbq 16(%rdx), %r8
+ movq 24(%rsi), %r12
+ sbbq 24(%rdx), %r12
+ movq %r15, (%rdi)
+ movq 32(%rsi), %rbx
+ sbbq 32(%rdx), %rbx
+ movq %r11, 8(%rdi)
+ movq 48(%rdx), %r15
+ movq 40(%rdx), %rdx
+ movq %r8, 16(%rdi)
+ movq 72(%rsi), %r8
+ movq %r12, 24(%rdi)
+ movq 64(%rsi), %r11
+ movq %rbx, 32(%rdi)
+ movq 40(%rsi), %rbp
+ sbbq %rdx, %rbp
+ movq 56(%rsi), %r12
+ movq 48(%rsi), %r13
+ sbbq %r15, %r13
+ sbbq %r14, %r12
+ sbbq %r10, %r11
+ sbbq %r9, %r8
+ movl $0, %edx
+ sbbq $0, %rdx
+ andl $1, %edx
+ movq (%rcx), %rsi
+ cmoveq %rax, %rsi
+ testb %dl, %dl
+ movq 16(%rcx), %rdx
+ cmoveq %rax, %rdx
+ movq 8(%rcx), %rbx
+ cmoveq %rax, %rbx
+ movq 32(%rcx), %r9
+ cmoveq %rax, %r9
+ cmovneq 24(%rcx), %rax
+ addq %rbp, %rsi
+ movq %rsi, 40(%rdi)
+ adcq %r13, %rbx
+ movq %rbx, 48(%rdi)
+ adcq %r12, %rdx
+ movq %rdx, 56(%rdi)
+ adcq %r11, %rax
+ movq %rax, 64(%rdi)
+ adcq %r8, %r9
+ movq %r9, 72(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end79:
+ .size mcl_fpDbl_sub5L, .Lfunc_end79-mcl_fpDbl_sub5L
+
+ .globl mcl_fp_mulUnitPre6L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre6L,@function
+mcl_fp_mulUnitPre6L: # @mcl_fp_mulUnitPre6L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rcx, %rax
+ mulq 40(%rsi)
+ movq %rdx, %r9
+ movq %rax, %r8
+ movq %rcx, %rax
+ mulq 32(%rsi)
+ movq %rdx, %r10
+ movq %rax, %r11
+ movq %rcx, %rax
+ mulq 24(%rsi)
+ movq %rdx, %r15
+ movq %rax, %r14
+ movq %rcx, %rax
+ mulq 16(%rsi)
+ movq %rdx, %r13
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq 8(%rsi)
+ movq %rdx, %rbx
+ movq %rax, %rbp
+ movq %rcx, %rax
+ mulq (%rsi)
+ movq %rax, (%rdi)
+ addq %rbp, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r12, %rbx
+ movq %rbx, 16(%rdi)
+ adcq %r14, %r13
+ movq %r13, 24(%rdi)
+ adcq %r11, %r15
+ movq %r15, 32(%rdi)
+ adcq %r8, %r10
+ movq %r10, 40(%rdi)
+ adcq $0, %r9
+ movq %r9, 48(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end80:
+ .size mcl_fp_mulUnitPre6L, .Lfunc_end80-mcl_fp_mulUnitPre6L
+
+ .globl mcl_fpDbl_mulPre6L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre6L,@function
+mcl_fpDbl_mulPre6L: # @mcl_fpDbl_mulPre6L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq %rsi, %r8
+ movq %rdi, -8(%rsp) # 8-byte Spill
+ movq (%r8), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq 8(%r8), %r13
+ movq %r13, -72(%rsp) # 8-byte Spill
+ movq (%rdx), %rbx
+ mulq %rbx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq 16(%r8), %rbp
+ movq %rbp, -64(%rsp) # 8-byte Spill
+ movq 24(%r8), %rsi
+ movq %rsi, -48(%rsp) # 8-byte Spill
+ movq 32(%r8), %r10
+ movq 40(%r8), %r11
+ movq %rax, (%rdi)
+ movq %r11, %rax
+ mulq %rbx
+ movq %rdx, %rcx
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq %r10, %rax
+ mulq %rbx
+ movq %rdx, %r12
+ movq %rax, %rdi
+ movq %rsi, %rax
+ mulq %rbx
+ movq %rdx, %r9
+ movq %rax, %r14
+ movq %rbp, %rax
+ mulq %rbx
+ movq %rdx, %rbp
+ movq %rax, %r15
+ movq %r13, %rax
+ mulq %rbx
+ movq %rdx, %r13
+ movq %rax, %rsi
+ addq -32(%rsp), %rsi # 8-byte Folded Reload
+ adcq %r15, %r13
+ adcq %r14, %rbp
+ adcq %rdi, %r9
+ adcq -40(%rsp), %r12 # 8-byte Folded Reload
+ movq %r12, %rdi
+ adcq $0, %rcx
+ movq %rcx, -56(%rsp) # 8-byte Spill
+ movq -16(%rsp), %r15 # 8-byte Reload
+ movq 8(%r15), %rcx
+ movq %r11, %rax
+ mulq %rcx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rax, %r11
+ movq %r10, %rax
+ mulq %rcx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rax, %r12
+ movq -48(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, %r14
+ movq -64(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, %rbx
+ movq -72(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, %r10
+ movq -24(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ addq %rsi, %rax
+ movq -8(%rsp), %rcx # 8-byte Reload
+ movq %rax, 8(%rcx)
+ adcq %r13, %r10
+ adcq %rbp, %rbx
+ adcq %r9, %r14
+ adcq %rdi, %r12
+ adcq -56(%rsp), %r11 # 8-byte Folded Reload
+ sbbq %rdi, %rdi
+ andl $1, %edi
+ addq %rdx, %r10
+ adcq -72(%rsp), %rbx # 8-byte Folded Reload
+ adcq -64(%rsp), %r14 # 8-byte Folded Reload
+ adcq -48(%rsp), %r12 # 8-byte Folded Reload
+ adcq -40(%rsp), %r11 # 8-byte Folded Reload
+ movq %r11, -96(%rsp) # 8-byte Spill
+ adcq -32(%rsp), %rdi # 8-byte Folded Reload
+ movq 40(%r8), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq 16(%r15), %rcx
+ mulq %rcx
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq 32(%r8), %rax
+ movq %rax, -48(%rsp) # 8-byte Spill
+ mulq %rcx
+ movq %rax, %r15
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ movq 24(%r8), %rax
+ movq %rax, -56(%rsp) # 8-byte Spill
+ mulq %rcx
+ movq %rax, %r11
+ movq %rdx, -104(%rsp) # 8-byte Spill
+ movq 16(%r8), %rax
+ movq %rax, -64(%rsp) # 8-byte Spill
+ mulq %rcx
+ movq %rax, %rbp
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ movq (%r8), %rsi
+ movq %rsi, -72(%rsp) # 8-byte Spill
+ movq 8(%r8), %rax
+ movq %rax, -80(%rsp) # 8-byte Spill
+ mulq %rcx
+ movq %rdx, %r13
+ movq %rax, %r9
+ movq %rsi, %rax
+ mulq %rcx
+ addq %r10, %rax
+ movq -8(%rsp), %r10 # 8-byte Reload
+ movq %rax, 16(%r10)
+ adcq %rbx, %r9
+ adcq %r14, %rbp
+ adcq %r12, %r11
+ adcq -96(%rsp), %r15 # 8-byte Folded Reload
+ movq -40(%rsp), %rax # 8-byte Reload
+ adcq %rdi, %rax
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %rdx, %r9
+ adcq %r13, %rbp
+ adcq -112(%rsp), %r11 # 8-byte Folded Reload
+ adcq -104(%rsp), %r15 # 8-byte Folded Reload
+ adcq -88(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -40(%rsp) # 8-byte Spill
+ adcq -32(%rsp), %rcx # 8-byte Folded Reload
+ movq -16(%rsp), %rdi # 8-byte Reload
+ movq 24(%rdi), %rbx
+ movq -24(%rsp), %rax # 8-byte Reload
+ mulq %rbx
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ movq %rax, -32(%rsp) # 8-byte Spill
+ movq -48(%rsp), %rax # 8-byte Reload
+ mulq %rbx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq -56(%rsp), %rax # 8-byte Reload
+ mulq %rbx
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, %r14
+ movq -64(%rsp), %rax # 8-byte Reload
+ mulq %rbx
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, %r12
+ movq -80(%rsp), %rax # 8-byte Reload
+ mulq %rbx
+ movq %rdx, %rsi
+ movq %rax, %r13
+ movq -72(%rsp), %rax # 8-byte Reload
+ mulq %rbx
+ addq %r9, %rax
+ movq %rax, 24(%r10)
+ adcq %rbp, %r13
+ adcq %r11, %r12
+ adcq %r15, %r14
+ movq -24(%rsp), %rbp # 8-byte Reload
+ adcq -40(%rsp), %rbp # 8-byte Folded Reload
+ movq -32(%rsp), %rax # 8-byte Reload
+ adcq %rcx, %rax
+ sbbq %r10, %r10
+ andl $1, %r10d
+ addq %rdx, %r13
+ adcq %rsi, %r12
+ adcq -64(%rsp), %r14 # 8-byte Folded Reload
+ adcq -56(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, -24(%rsp) # 8-byte Spill
+ adcq -48(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -32(%rsp) # 8-byte Spill
+ adcq -88(%rsp), %r10 # 8-byte Folded Reload
+ movq 40(%r8), %rax
+ movq %rax, -72(%rsp) # 8-byte Spill
+ movq 32(%rdi), %rcx
+ movq 32(%r8), %rbx
+ movq %rbx, -112(%rsp) # 8-byte Spill
+ movq 24(%r8), %rsi
+ movq %rsi, -64(%rsp) # 8-byte Spill
+ movq 16(%r8), %rdi
+ movq %rdi, -104(%rsp) # 8-byte Spill
+ movq (%r8), %r15
+ movq 8(%r8), %r9
+ mulq %rcx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rax, %r11
+ movq %rbx, %rax
+ mulq %rcx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, %r8
+ movq %rsi, %rax
+ mulq %rcx
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, %rbx
+ movq %rdi, %rax
+ mulq %rcx
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq %rax, %rdi
+ movq %r9, %rax
+ mulq %rcx
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ movq %r15, %rax
+ mulq %rcx
+ movq %rdx, -96(%rsp) # 8-byte Spill
+ addq %r13, %rax
+ movq -8(%rsp), %r13 # 8-byte Reload
+ movq %rax, 32(%r13)
+ adcq %r12, %rbp
+ adcq %r14, %rdi
+ adcq -24(%rsp), %rbx # 8-byte Folded Reload
+ adcq -32(%rsp), %r8 # 8-byte Folded Reload
+ adcq %r10, %r11
+ movq -16(%rsp), %rax # 8-byte Reload
+ movq 40(%rax), %rcx
+ sbbq %rsi, %rsi
+ movq -72(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq %rax, -72(%rsp) # 8-byte Spill
+ movq -112(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq %rax, %r14
+ movq %r9, %rax
+ mulq %rcx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rax, %r10
+ movq %r15, %rax
+ mulq %rcx
+ movq %rdx, %r12
+ movq %rax, %r9
+ movq -64(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq -104(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ andl $1, %esi
+ addq -96(%rsp), %rbp # 8-byte Folded Reload
+ adcq -88(%rsp), %rdi # 8-byte Folded Reload
+ adcq -80(%rsp), %rbx # 8-byte Folded Reload
+ adcq -56(%rsp), %r8 # 8-byte Folded Reload
+ adcq -48(%rsp), %r11 # 8-byte Folded Reload
+ adcq -40(%rsp), %rsi # 8-byte Folded Reload
+ addq %r9, %rbp
+ movq %rbp, 40(%r13)
+ adcq %r10, %rdi
+ adcq %rax, %rbx
+ adcq %r15, %r8
+ adcq %r14, %r11
+ adcq -72(%rsp), %rsi # 8-byte Folded Reload
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %r12, %rdi
+ movq %rdi, 48(%r13)
+ adcq -32(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, 56(%r13)
+ adcq %rdx, %r8
+ movq %r8, 64(%r13)
+ adcq -64(%rsp), %r11 # 8-byte Folded Reload
+ movq %r11, 72(%r13)
+ adcq -24(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 80(%r13)
+ adcq -16(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 88(%r13)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end81:
+ .size mcl_fpDbl_mulPre6L, .Lfunc_end81-mcl_fpDbl_mulPre6L
+
+ .globl mcl_fpDbl_sqrPre6L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre6L,@function
+mcl_fpDbl_sqrPre6L: # @mcl_fpDbl_sqrPre6L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, -8(%rsp) # 8-byte Spill
+ movq 16(%rsi), %r8
+ movq %r8, -56(%rsp) # 8-byte Spill
+ movq 24(%rsi), %r10
+ movq %r10, -40(%rsp) # 8-byte Spill
+ movq 32(%rsi), %r9
+ movq %r9, -32(%rsp) # 8-byte Spill
+ movq 40(%rsi), %r11
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rbx
+ movq %rcx, %rax
+ mulq %rcx
+ movq %rdx, %rbp
+ movq %rax, (%rdi)
+ movq %r11, %rax
+ mulq %rcx
+ movq %rdx, %rdi
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq %r9, %rax
+ mulq %rcx
+ movq %rdx, %r14
+ movq %rax, %r9
+ movq %r10, %rax
+ mulq %rcx
+ movq %rdx, %r12
+ movq %rax, %r10
+ movq %r8, %rax
+ mulq %rcx
+ movq %rdx, %r13
+ movq %rax, %r15
+ movq %rbx, %rax
+ mulq %rcx
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq %rax, %r8
+ addq %r8, %rbp
+ adcq %rdx, %r15
+ adcq %r10, %r13
+ adcq %r9, %r12
+ adcq -16(%rsp), %r14 # 8-byte Folded Reload
+ adcq $0, %rdi
+ movq %rdi, -48(%rsp) # 8-byte Spill
+ movq %r11, %rax
+ mulq %rbx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq %rax, %rcx
+ movq -32(%rsp), %rax # 8-byte Reload
+ mulq %rbx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rax, %r9
+ movq -40(%rsp), %rax # 8-byte Reload
+ mulq %rbx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rax, %r10
+ movq -56(%rsp), %rax # 8-byte Reload
+ mulq %rbx
+ movq %rdx, %rdi
+ movq %rax, %r11
+ movq %rbx, %rax
+ mulq %rbx
+ movq %rax, %rbx
+ addq %r8, %rbp
+ movq -8(%rsp), %rax # 8-byte Reload
+ movq %rbp, 8(%rax)
+ adcq %r15, %rbx
+ adcq %r13, %r11
+ adcq %r12, %r10
+ adcq %r14, %r9
+ movq %rcx, %rax
+ adcq -48(%rsp), %rax # 8-byte Folded Reload
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq -24(%rsp), %rbx # 8-byte Folded Reload
+ adcq %rdx, %r11
+ adcq %rdi, %r10
+ adcq -40(%rsp), %r9 # 8-byte Folded Reload
+ adcq -32(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -72(%rsp) # 8-byte Spill
+ adcq -16(%rsp), %rcx # 8-byte Folded Reload
+ movq 40(%rsi), %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq 16(%rsi), %rdi
+ mulq %rdi
+ movq %rax, -80(%rsp) # 8-byte Spill
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq 32(%rsi), %rax
+ movq %rax, -32(%rsp) # 8-byte Spill
+ mulq %rdi
+ movq %rax, %r12
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq 24(%rsi), %rbp
+ movq %rbp, %rax
+ mulq %rdi
+ movq %rax, %r14
+ movq %r14, -96(%rsp) # 8-byte Spill
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq (%rsi), %r15
+ movq %r15, -48(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -64(%rsp) # 8-byte Spill
+ mulq %rdi
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ movq %rax, %r8
+ movq %r15, %rax
+ mulq %rdi
+ movq %rdx, -104(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq %rdi, %rax
+ mulq %rdi
+ movq %rax, %r13
+ addq %rbx, %r15
+ movq -8(%rsp), %rbx # 8-byte Reload
+ movq %r15, 16(%rbx)
+ adcq %r11, %r8
+ adcq %r10, %r13
+ adcq %r14, %r9
+ adcq -72(%rsp), %r12 # 8-byte Folded Reload
+ movq -80(%rsp), %r14 # 8-byte Reload
+ adcq %rcx, %r14
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq -104(%rsp), %r8 # 8-byte Folded Reload
+ adcq -88(%rsp), %r13 # 8-byte Folded Reload
+ adcq %rdx, %r9
+ adcq -24(%rsp), %r12 # 8-byte Folded Reload
+ adcq -56(%rsp), %r14 # 8-byte Folded Reload
+ adcq -40(%rsp), %rcx # 8-byte Folded Reload
+ movq -16(%rsp), %rax # 8-byte Reload
+ mulq %rbp
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq -32(%rsp), %rax # 8-byte Reload
+ mulq %rbp
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, %r10
+ movq -64(%rsp), %rax # 8-byte Reload
+ mulq %rbp
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq %rax, %r11
+ movq -48(%rsp), %rax # 8-byte Reload
+ mulq %rbp
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ movq %rax, %rdi
+ movq %rbp, %rax
+ mulq %rbp
+ movq %rax, %r15
+ movq %rdx, -104(%rsp) # 8-byte Spill
+ addq %r8, %rdi
+ movq %rdi, 24(%rbx)
+ adcq %r13, %r11
+ adcq -96(%rsp), %r9 # 8-byte Folded Reload
+ adcq %r12, %r15
+ adcq %r14, %r10
+ movq -16(%rsp), %r12 # 8-byte Reload
+ adcq %rcx, %r12
+ sbbq %rcx, %rcx
+ movq (%rsi), %r8
+ andl $1, %ecx
+ movq 8(%rsi), %rbx
+ movq 40(%rsi), %rdi
+ movq %rbx, %rax
+ mulq %rdi
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rax, -48(%rsp) # 8-byte Spill
+ movq %r8, %rax
+ mulq %rdi
+ movq %rax, -64(%rsp) # 8-byte Spill
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq 32(%rsi), %rbp
+ movq %rbx, %rax
+ mulq %rbp
+ movq %rdx, -96(%rsp) # 8-byte Spill
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %r8, %rax
+ mulq %rbp
+ movq %rax, %r14
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ addq -88(%rsp), %r11 # 8-byte Folded Reload
+ adcq -80(%rsp), %r9 # 8-byte Folded Reload
+ adcq -24(%rsp), %r15 # 8-byte Folded Reload
+ adcq -104(%rsp), %r10 # 8-byte Folded Reload
+ adcq -72(%rsp), %r12 # 8-byte Folded Reload
+ movq %r12, -16(%rsp) # 8-byte Spill
+ adcq -56(%rsp), %rcx # 8-byte Folded Reload
+ movq 24(%rsi), %rbx
+ movq 16(%rsi), %r8
+ movq %rbx, %rax
+ mulq %rdi
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq %rax, -80(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq %rbp
+ movq %rdx, -104(%rsp) # 8-byte Spill
+ movq %rax, %rsi
+ movq %r8, %rax
+ mulq %rdi
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %r8, %rax
+ mulq %rbp
+ movq %rdx, %rbx
+ movq %rax, %r13
+ movq %rdi, %rax
+ mulq %rbp
+ movq %rdx, %r12
+ movq %rax, %r8
+ movq %rdi, %rax
+ mulq %rdi
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, %rdi
+ movq %rbp, %rax
+ mulq %rbp
+ addq %r14, %r11
+ movq -8(%rsp), %r14 # 8-byte Reload
+ movq %r11, 32(%r14)
+ adcq -120(%rsp), %r9 # 8-byte Folded Reload
+ adcq %r15, %r13
+ adcq %r10, %rsi
+ adcq -16(%rsp), %rax # 8-byte Folded Reload
+ adcq %r8, %rcx
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ addq -112(%rsp), %r9 # 8-byte Folded Reload
+ adcq -96(%rsp), %r13 # 8-byte Folded Reload
+ adcq %rbx, %rsi
+ adcq -104(%rsp), %rax # 8-byte Folded Reload
+ adcq %rdx, %rcx
+ adcq %r12, %rbp
+ addq -64(%rsp), %r9 # 8-byte Folded Reload
+ movq %r14, %rbx
+ movq %r9, 40(%rbx)
+ adcq -48(%rsp), %r13 # 8-byte Folded Reload
+ adcq -88(%rsp), %rsi # 8-byte Folded Reload
+ adcq -80(%rsp), %rax # 8-byte Folded Reload
+ adcq %r8, %rcx
+ adcq %rdi, %rbp
+ sbbq %rdx, %rdx
+ andl $1, %edx
+ addq -40(%rsp), %r13 # 8-byte Folded Reload
+ movq %r13, 48(%rbx)
+ adcq -32(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 56(%rbx)
+ adcq -72(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 64(%rbx)
+ adcq -24(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 72(%rbx)
+ adcq %r12, %rbp
+ movq %rbp, 80(%rbx)
+ adcq -56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 88(%rbx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end82:
+ .size mcl_fpDbl_sqrPre6L, .Lfunc_end82-mcl_fpDbl_sqrPre6L
+
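+# Annotation (not in the generated source): mcl_fp_mont6L is Montgomery
+# multiplication over a 6-limb modulus; it appears to compute
+# z = x*y*R^{-1} mod p (R = 2^384) with an interleaved, word-by-word
+# reduction and a final conditional subtraction of p.
+# SysV args: %rdi = z, %rsi = x, %rdx = y, %rcx = p; the word at -8(%rcx)
+# is used as the Montgomery constant (presumably -p^{-1} mod 2^64).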
+ .globl mcl_fp_mont6L
+ .align 16, 0x90
+ .type mcl_fp_mont6L,@function
+mcl_fp_mont6L: # @mcl_fp_mont6L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $56, %rsp
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movq %rdi, -96(%rsp) # 8-byte Spill
+ movq 40(%rsi), %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq (%rdx), %rbx
+ mulq %rbx
+ movq %rax, %r8
+ movq %rdx, %r14
+ movq 32(%rsi), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ mulq %rbx
+ movq %rax, %r9
+ movq %rdx, %r15
+ movq 24(%rsi), %rax
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq 16(%rsi), %rbp
+ movq %rbp, -48(%rsp) # 8-byte Spill
+ movq (%rsi), %r12
+ movq %r12, -32(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rsi
+ movq %rsi, -40(%rsp) # 8-byte Spill
+ mulq %rbx
+ movq %rdx, %rdi
+ movq %rax, %r10
+ movq %rbp, %rax
+ mulq %rbx
+ movq %rdx, %rbp
+ movq %rax, %r11
+ movq %rsi, %rax
+ mulq %rbx
+ movq %rdx, %rsi
+ movq %rax, %r13
+ movq %r12, %rax
+ mulq %rbx
+ movq %rax, -120(%rsp) # 8-byte Spill
+ addq %r13, %rdx
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ adcq %r11, %rsi
+ movq %rsi, -104(%rsp) # 8-byte Spill
+ adcq %r10, %rbp
+ movq %rbp, -88(%rsp) # 8-byte Spill
+ adcq %r9, %rdi
+ movq %rdi, -80(%rsp) # 8-byte Spill
+ adcq %r8, %r15
+ movq %r15, -72(%rsp) # 8-byte Spill
+ adcq $0, %r14
+ movq %r14, -64(%rsp) # 8-byte Spill
+ movq -8(%rcx), %rdx
+ movq %rdx, (%rsp) # 8-byte Spill
+ movq %rax, %rdi
+ imulq %rdx, %rdi
+ movq (%rcx), %r9
+ movq %r9, 8(%rsp) # 8-byte Spill
+ movq 40(%rcx), %rdx
+ movq %rdx, 48(%rsp) # 8-byte Spill
+ movq 32(%rcx), %rbp
+ movq %rbp, 32(%rsp) # 8-byte Spill
+ movq 24(%rcx), %rbx
+ movq %rbx, 40(%rsp) # 8-byte Spill
+ movq 16(%rcx), %rsi
+ movq %rsi, 16(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, 24(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq %rdx
+ movq %rdx, %r11
+ movq %rax, -128(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq %rbp
+ movq %rdx, %r13
+ movq %rax, %r10
+ movq %rdi, %rax
+ mulq %rbx
+ movq %rdx, %rbp
+ movq %rax, %r14
+ movq %rdi, %rax
+ mulq %rsi
+ movq %rdx, %rbx
+ movq %rax, %r12
+ movq %rdi, %rax
+ mulq %rcx
+ movq %rdx, %r8
+ movq %rax, %r15
+ movq %rdi, %rax
+ mulq %r9
+ movq %rdx, %r9
+ addq %r15, %r9
+ adcq %r12, %r8
+ adcq %r14, %rbx
+ adcq %r10, %rbp
+ adcq -128(%rsp), %r13 # 8-byte Folded Reload
+ adcq $0, %r11
+ addq -120(%rsp), %rax # 8-byte Folded Reload
+ adcq -112(%rsp), %r9 # 8-byte Folded Reload
+ adcq -104(%rsp), %r8 # 8-byte Folded Reload
+ adcq -88(%rsp), %rbx # 8-byte Folded Reload
+ adcq -80(%rsp), %rbp # 8-byte Folded Reload
+ adcq -72(%rsp), %r13 # 8-byte Folded Reload
+ adcq -64(%rsp), %r11 # 8-byte Folded Reload
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -80(%rsp) # 8-byte Spill
+ movq -8(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rdi
+ movq %rdi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %rcx
+ movq %rdi, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r10
+ movq %rdi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rax, %r12
+ movq %rdx, %rdi
+ addq %r10, %rdi
+ adcq %rcx, %rsi
+ adcq -112(%rsp), %r15 # 8-byte Folded Reload
+ adcq -104(%rsp), %r14 # 8-byte Folded Reload
+ movq -72(%rsp), %rcx # 8-byte Reload
+ adcq -88(%rsp), %rcx # 8-byte Folded Reload
+ movq -64(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ addq %r9, %r12
+ adcq %r8, %rdi
+ adcq %rbx, %rsi
+ adcq %rbp, %r15
+ adcq %r13, %r14
+ adcq %r11, %rcx
+ movq %rcx, -72(%rsp) # 8-byte Spill
+ adcq -80(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -64(%rsp) # 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %r12, %rbx
+ imulq (%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq 16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r13
+ movq %rbx, %rax
+ mulq 24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r11
+ movq %rbx, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ addq %r11, %r9
+ adcq %r13, %rbp
+ adcq -120(%rsp), %rcx # 8-byte Folded Reload
+ adcq -112(%rsp), %r10 # 8-byte Folded Reload
+ adcq -104(%rsp), %r8 # 8-byte Folded Reload
+ movq -80(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq %r12, %rax
+ adcq %rdi, %r9
+ adcq %rsi, %rbp
+ adcq %r15, %rcx
+ adcq %r14, %r10
+ adcq -72(%rsp), %r8 # 8-byte Folded Reload
+ adcq -64(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ adcq $0, -88(%rsp) # 8-byte Folded Spill
+ movq -8(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rdi
+ movq %rdi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, %rbx
+ movq %rdi, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %r11
+ movq %rdi, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r15
+ movq %rdi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rax, %r13
+ movq %rdx, %rdi
+ addq %r15, %rdi
+ adcq %r11, %rsi
+ adcq %rbx, %r12
+ adcq -112(%rsp), %r14 # 8-byte Folded Reload
+ movq -72(%rsp), %rdx # 8-byte Reload
+ adcq -104(%rsp), %rdx # 8-byte Folded Reload
+ movq -64(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ addq %r9, %r13
+ adcq %rbp, %rdi
+ adcq %rcx, %rsi
+ adcq %r10, %r12
+ adcq %r8, %r14
+ adcq -80(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ adcq -88(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -64(%rsp) # 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %r13, %rbp
+ imulq (%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq 16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r10
+ movq %rbp, %rax
+ mulq 24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r8
+ movq %rbp, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ addq %r8, %r9
+ adcq %r10, %rcx
+ adcq -120(%rsp), %rbx # 8-byte Folded Reload
+ adcq -112(%rsp), %r15 # 8-byte Folded Reload
+ adcq -104(%rsp), %r11 # 8-byte Folded Reload
+ movq -80(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq %r13, %rax
+ adcq %rdi, %r9
+ adcq %rsi, %rcx
+ adcq %r12, %rbx
+ adcq %r14, %r15
+ adcq -72(%rsp), %r11 # 8-byte Folded Reload
+ adcq -64(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq -88(%rsp), %rbp # 8-byte Reload
+ adcq $0, %rbp
+ movq -8(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rdi
+ movq %rdi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %r8
+ movq %rdi, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r13
+ movq %rdi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rax, %r12
+ movq %rdx, %rdi
+ addq %r13, %rdi
+ adcq %r8, %rsi
+ adcq -112(%rsp), %r10 # 8-byte Folded Reload
+ adcq -104(%rsp), %r14 # 8-byte Folded Reload
+ movq -72(%rsp), %rdx # 8-byte Reload
+ adcq -88(%rsp), %rdx # 8-byte Folded Reload
+ movq -64(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ addq %r9, %r12
+ adcq %rcx, %rdi
+ adcq %rbx, %rsi
+ adcq %r15, %r10
+ adcq %r11, %r14
+ adcq -80(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ adcq %rbp, %rax
+ movq %rax, -64(%rsp) # 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %r12, %rbp
+ imulq (%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq 16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r13
+ movq %rbp, %rax
+ mulq 24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r9
+ movq %rbp, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ addq %r9, %r8
+ adcq %r13, %rcx
+ adcq -120(%rsp), %rbx # 8-byte Folded Reload
+ adcq -112(%rsp), %r15 # 8-byte Folded Reload
+ adcq -104(%rsp), %r11 # 8-byte Folded Reload
+ movq -80(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq %r12, %rax
+ adcq %rdi, %r8
+ adcq %rsi, %rcx
+ adcq %r10, %rbx
+ adcq %r14, %r15
+ adcq -72(%rsp), %r11 # 8-byte Folded Reload
+ adcq -64(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ adcq $0, -88(%rsp) # 8-byte Folded Spill
+ movq -8(%rsp), %rax # 8-byte Reload
+ movq 32(%rax), %rsi
+ movq %rsi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r12
+ movq %rsi, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, %r10
+ movq %rsi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rax, %r9
+ movq %rdx, %r13
+ addq %r10, %r13
+ adcq %r12, %r14
+ adcq -120(%rsp), %rdi # 8-byte Folded Reload
+ adcq -112(%rsp), %rbp # 8-byte Folded Reload
+ movq -72(%rsp), %rdx # 8-byte Reload
+ adcq -104(%rsp), %rdx # 8-byte Folded Reload
+ movq -64(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ addq %r8, %r9
+ adcq %rcx, %r13
+ adcq %rbx, %r14
+ adcq %r15, %rdi
+ adcq %r11, %rbp
+ adcq -80(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ adcq -88(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -64(%rsp) # 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %r9, %rsi
+ imulq (%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq 16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %rbx
+ movq %rsi, %rax
+ mulq 24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %rcx
+ movq %rsi, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ addq %rcx, %r8
+ adcq %rbx, %r12
+ adcq -120(%rsp), %r15 # 8-byte Folded Reload
+ adcq -112(%rsp), %r11 # 8-byte Folded Reload
+ adcq -104(%rsp), %r10 # 8-byte Folded Reload
+ movq -80(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq %r9, %rax
+ adcq %r13, %r8
+ adcq %r14, %r12
+ adcq %rdi, %r15
+ adcq %rbp, %r11
+ adcq -72(%rsp), %r10 # 8-byte Folded Reload
+ movq %r10, -72(%rsp) # 8-byte Spill
+ adcq -64(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq -88(%rsp), %rbx # 8-byte Reload
+ adcq $0, %rbx
+ movq -8(%rsp), %rax # 8-byte Reload
+ movq 40(%rax), %rcx
+ movq %rcx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movq %rax, %r9
+ movq %rcx, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %rsi
+ movq %rcx, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rax, %r13
+ movq %rdx, %rbp
+ addq %rdi, %rbp
+ adcq %rsi, %r14
+ adcq %r9, %r10
+ movq -8(%rsp), %rax # 8-byte Reload
+ adcq -24(%rsp), %rax # 8-byte Folded Reload
+ movq -16(%rsp), %rdx # 8-byte Reload
+ adcq -88(%rsp), %rdx # 8-byte Folded Reload
+ movq -64(%rsp), %rsi # 8-byte Reload
+ adcq $0, %rsi
+ addq %r8, %r13
+ movq %r13, -40(%rsp) # 8-byte Spill
+ adcq %r12, %rbp
+ adcq %r15, %r14
+ movq %r14, -24(%rsp) # 8-byte Spill
+ adcq %r11, %r10
+ movq %r10, -32(%rsp) # 8-byte Spill
+ adcq -72(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -8(%rsp) # 8-byte Spill
+ adcq -80(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ adcq %rbx, %rsi
+ movq %rsi, -64(%rsp) # 8-byte Spill
+ sbbq %rcx, %rcx
+ movq (%rsp), %r9 # 8-byte Reload
+ imulq %r13, %r9
+ andl $1, %ecx
+ movq %r9, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, (%rsp) # 8-byte Spill
+ movq %r9, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, -48(%rsp) # 8-byte Spill
+ movq %r9, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq %r9, %rax
+ movq 8(%rsp), %r13 # 8-byte Reload
+ mulq %r13
+ movq %rdx, %r15
+ movq %rax, %r12
+ movq %r9, %rax
+ movq 16(%rsp), %r14 # 8-byte Reload
+ mulq %r14
+ movq %rdx, %rsi
+ movq %rax, %r11
+ movq %r9, %rax
+ movq 24(%rsp), %r10 # 8-byte Reload
+ mulq %r10
+ addq %r15, %rax
+ adcq %r11, %rdx
+ adcq -56(%rsp), %rsi # 8-byte Folded Reload
+ adcq -48(%rsp), %rdi # 8-byte Folded Reload
+ adcq (%rsp), %rbx # 8-byte Folded Reload
+ adcq $0, %r8
+ addq -40(%rsp), %r12 # 8-byte Folded Reload
+ adcq %rbp, %rax
+ adcq -24(%rsp), %rdx # 8-byte Folded Reload
+ adcq -32(%rsp), %rsi # 8-byte Folded Reload
+ adcq -8(%rsp), %rdi # 8-byte Folded Reload
+ adcq -16(%rsp), %rbx # 8-byte Folded Reload
+ adcq -64(%rsp), %r8 # 8-byte Folded Reload
+ adcq $0, %rcx
+ movq %rax, %rbp
+ subq %r13, %rbp
+ movq %rdx, %r9
+ sbbq %r10, %r9
+ movq %rsi, %r10
+ sbbq %r14, %r10
+ movq %rdi, %r11
+ sbbq 40(%rsp), %r11 # 8-byte Folded Reload
+ movq %rbx, %r14
+ sbbq 32(%rsp), %r14 # 8-byte Folded Reload
+ movq %r8, %r15
+ sbbq 48(%rsp), %r15 # 8-byte Folded Reload
+ sbbq $0, %rcx
+ andl $1, %ecx
+ cmovneq %rdi, %r11
+ testb %cl, %cl
+ cmovneq %rax, %rbp
+ movq -96(%rsp), %rax # 8-byte Reload
+ movq %rbp, (%rax)
+ cmovneq %rdx, %r9
+ movq %r9, 8(%rax)
+ cmovneq %rsi, %r10
+ movq %r10, 16(%rax)
+ movq %r11, 24(%rax)
+ cmovneq %rbx, %r14
+ movq %r14, 32(%rax)
+ cmovneq %r8, %r15
+ movq %r15, 40(%rax)
+ addq $56, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end83:
+ .size mcl_fp_mont6L, .Lfunc_end83-mcl_fp_mont6L
+
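+# Annotation (not in the generated source): mcl_fp_montNF6L is the "NF"
+# variant of the Montgomery multiply above; the final correction is chosen
+# by the sign of the trial subtraction (sarq/cmovs) instead of a carry,
+# presumably valid only for moduli that leave a spare top bit.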
+ .globl mcl_fp_montNF6L
+ .align 16, 0x90
+ .type mcl_fp_montNF6L,@function
+mcl_fp_montNF6L: # @mcl_fp_montNF6L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $40, %rsp
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movq %rdi, -88(%rsp) # 8-byte Spill
+ movq 40(%rsi), %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq (%rdx), %rbx
+ mulq %rbx
+ movq %rax, 32(%rsp) # 8-byte Spill
+ movq %rdx, %r13
+ movq 32(%rsi), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ mulq %rbx
+ movq %rax, %r10
+ movq %rdx, %r9
+ movq 24(%rsi), %rax
+ movq %rax, -72(%rsp) # 8-byte Spill
+ movq 16(%rsi), %rbp
+ movq %rbp, -64(%rsp) # 8-byte Spill
+ movq (%rsi), %rdi
+ movq %rdi, -48(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rsi
+ movq %rsi, -56(%rsp) # 8-byte Spill
+ mulq %rbx
+ movq %rdx, %r11
+ movq %rax, %r8
+ movq %rbp, %rax
+ mulq %rbx
+ movq %rdx, %r14
+ movq %rax, %r15
+ movq %rsi, %rax
+ mulq %rbx
+ movq %rdx, %r12
+ movq %rax, %rbp
+ movq %rdi, %rax
+ mulq %rbx
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rdx, %rbx
+ addq %rbp, %rbx
+ adcq %r15, %r12
+ adcq %r8, %r14
+ adcq %r10, %r11
+ adcq 32(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, -96(%rsp) # 8-byte Spill
+ adcq $0, %r13
+ movq %r13, -80(%rsp) # 8-byte Spill
+ movq -8(%rcx), %rdx
+ movq %rdx, (%rsp) # 8-byte Spill
+ movq %rax, %r9
+ imulq %rdx, %r9
+ movq (%rcx), %r8
+ movq %r8, 8(%rsp) # 8-byte Spill
+ movq 40(%rcx), %rdx
+ movq %rdx, 32(%rsp) # 8-byte Spill
+ movq 32(%rcx), %rsi
+ movq %rsi, 24(%rsp) # 8-byte Spill
+ movq 24(%rcx), %rbp
+ movq %rbp, 16(%rsp) # 8-byte Spill
+ movq 16(%rcx), %rdi
+ movq %rdi, -40(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, -32(%rsp) # 8-byte Spill
+ movq %r9, %rax
+ mulq %rdx
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ movq %rax, %r10
+ movq %r9, %rax
+ mulq %rsi
+ movq %rdx, -120(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq %r9, %rax
+ mulq %rbp
+ movq %rdx, -128(%rsp) # 8-byte Spill
+ movq %rax, %rsi
+ movq %r9, %rax
+ mulq %rdi
+ movq %rdx, %r13
+ movq %rax, %rdi
+ movq %r9, %rax
+ mulq %rcx
+ movq %rdx, %rcx
+ movq %rax, %rbp
+ movq %r9, %rax
+ mulq %r8
+ addq -104(%rsp), %rax # 8-byte Folded Reload
+ adcq %rbx, %rbp
+ adcq %r12, %rdi
+ adcq %r14, %rsi
+ adcq %r11, %r15
+ adcq -96(%rsp), %r10 # 8-byte Folded Reload
+ movq -80(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ addq %rdx, %rbp
+ adcq %rcx, %rdi
+ adcq %r13, %rsi
+ adcq -128(%rsp), %r15 # 8-byte Folded Reload
+ adcq -120(%rsp), %r10 # 8-byte Folded Reload
+ adcq -112(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -80(%rsp) # 8-byte Spill
+ movq -8(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rcx
+ movq %rcx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r8
+ movq %rcx, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rax, %r13
+ movq %rdx, %rcx
+ addq %r8, %rcx
+ adcq -120(%rsp), %rbx # 8-byte Folded Reload
+ adcq -112(%rsp), %r12 # 8-byte Folded Reload
+ adcq -104(%rsp), %r11 # 8-byte Folded Reload
+ adcq -96(%rsp), %r9 # 8-byte Folded Reload
+ adcq $0, %r14
+ addq %rbp, %r13
+ adcq %rdi, %rcx
+ adcq %rsi, %rbx
+ adcq %r15, %r12
+ adcq %r10, %r11
+ adcq -80(%rsp), %r9 # 8-byte Folded Reload
+ adcq $0, %r14
+ movq %r13, %rsi
+ imulq (%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ movq %rdx, -96(%rsp) # 8-byte Spill
+ movq %rax, %rdi
+ movq %rsi, %rax
+ mulq 24(%rsp) # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ movq %rsi, %rax
+ mulq 16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -104(%rsp) # 8-byte Spill
+ movq %rax, %r8
+ movq %rsi, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ movq %rax, %r10
+ movq %rsi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, -120(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq %rsi, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ addq %r13, %rax
+ adcq %rcx, %r15
+ adcq %rbx, %r10
+ adcq %r12, %r8
+ adcq %r11, %rbp
+ adcq %r9, %rdi
+ adcq $0, %r14
+ addq %rdx, %r15
+ adcq -120(%rsp), %r10 # 8-byte Folded Reload
+ adcq -112(%rsp), %r8 # 8-byte Folded Reload
+ adcq -104(%rsp), %rbp # 8-byte Folded Reload
+ adcq -80(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, -80(%rsp) # 8-byte Spill
+ adcq -96(%rsp), %r14 # 8-byte Folded Reload
+ movq -8(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rsi
+ movq %rsi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %rdi
+ movq %rsi, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rax, %r9
+ movq %rdx, %rsi
+ addq %rdi, %rsi
+ adcq -120(%rsp), %rbx # 8-byte Folded Reload
+ adcq -112(%rsp), %r12 # 8-byte Folded Reload
+ adcq -104(%rsp), %rcx # 8-byte Folded Reload
+ adcq -96(%rsp), %r11 # 8-byte Folded Reload
+ adcq $0, %r13
+ addq %r15, %r9
+ adcq %r10, %rsi
+ adcq %r8, %rbx
+ adcq %rbp, %r12
+ adcq -80(%rsp), %rcx # 8-byte Folded Reload
+ adcq %r14, %r11
+ adcq $0, %r13
+ movq %r9, %r8
+ imulq (%rsp), %r8 # 8-byte Folded Reload
+ movq %r8, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ movq %rdx, -104(%rsp) # 8-byte Spill
+ movq %rax, -80(%rsp) # 8-byte Spill
+ movq %r8, %rax
+ mulq 24(%rsp) # 8-byte Folded Reload
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq %r8, %rax
+ mulq 16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -96(%rsp) # 8-byte Spill
+ movq %rax, %r10
+ movq %r8, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, -120(%rsp) # 8-byte Spill
+ movq %rax, %r14
+ movq %r8, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %rdi
+ movq %r8, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ addq %r9, %rax
+ adcq %rsi, %rdi
+ adcq %rbx, %r14
+ adcq %r12, %r10
+ adcq %rcx, %r15
+ movq -80(%rsp), %rax # 8-byte Reload
+ adcq %r11, %rax
+ adcq $0, %r13
+ addq %rdx, %rdi
+ adcq %rbp, %r14
+ adcq -120(%rsp), %r10 # 8-byte Folded Reload
+ adcq -96(%rsp), %r15 # 8-byte Folded Reload
+ movq %r15, -96(%rsp) # 8-byte Spill
+ adcq -112(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -80(%rsp) # 8-byte Spill
+ adcq -104(%rsp), %r13 # 8-byte Folded Reload
+ movq -8(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rbp
+ movq %rbp, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r12
+ movq %rbp, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r9
+ movq %rbp, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rax, %r8
+ movq %rdx, %rbp
+ addq %r9, %rbp
+ adcq %r12, %rbx
+ adcq -120(%rsp), %rsi # 8-byte Folded Reload
+ adcq -112(%rsp), %rcx # 8-byte Folded Reload
+ adcq -104(%rsp), %r11 # 8-byte Folded Reload
+ adcq $0, %r15
+ addq %rdi, %r8
+ adcq %r14, %rbp
+ adcq %r10, %rbx
+ adcq -96(%rsp), %rsi # 8-byte Folded Reload
+ adcq -80(%rsp), %rcx # 8-byte Folded Reload
+ adcq %r13, %r11
+ adcq $0, %r15
+ movq %r8, %r14
+ imulq (%rsp), %r14 # 8-byte Folded Reload
+ movq %r14, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ movq %rdx, -104(%rsp) # 8-byte Spill
+ movq %rax, %r9
+ movq %r14, %rax
+ mulq 24(%rsp) # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq %rax, %r13
+ movq %r14, %rax
+ mulq 16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -96(%rsp) # 8-byte Spill
+ movq %rax, %r10
+ movq %r14, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ movq %rax, %r12
+ movq %r14, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, -120(%rsp) # 8-byte Spill
+ movq %rax, %rdi
+ movq %r14, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ addq %r8, %rax
+ adcq %rbp, %rdi
+ adcq %rbx, %r12
+ adcq %rsi, %r10
+ adcq %rcx, %r13
+ adcq %r11, %r9
+ adcq $0, %r15
+ addq %rdx, %rdi
+ adcq -120(%rsp), %r12 # 8-byte Folded Reload
+ adcq -112(%rsp), %r10 # 8-byte Folded Reload
+ adcq -96(%rsp), %r13 # 8-byte Folded Reload
+ movq %r13, -96(%rsp) # 8-byte Spill
+ adcq -80(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, -80(%rsp) # 8-byte Spill
+ adcq -104(%rsp), %r15 # 8-byte Folded Reload
+ movq -8(%rsp), %rax # 8-byte Reload
+ movq 32(%rax), %rcx
+ movq %rcx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, -128(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r13
+ movq %rcx, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rax, %r11
+ movq %rdx, %rbp
+ addq %r13, %rbp
+ adcq -128(%rsp), %rbx # 8-byte Folded Reload
+ adcq -120(%rsp), %rsi # 8-byte Folded Reload
+ adcq -112(%rsp), %r8 # 8-byte Folded Reload
+ adcq -104(%rsp), %r9 # 8-byte Folded Reload
+ adcq $0, %r14
+ addq %rdi, %r11
+ adcq %r12, %rbp
+ adcq %r10, %rbx
+ adcq -96(%rsp), %rsi # 8-byte Folded Reload
+ adcq -80(%rsp), %r8 # 8-byte Folded Reload
+ adcq %r15, %r9
+ adcq $0, %r14
+ movq %r11, %rcx
+ imulq (%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ movq %rax, -80(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq 24(%rsp) # 8-byte Folded Reload
+ movq %rdx, -120(%rsp) # 8-byte Spill
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq 16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -96(%rsp) # 8-byte Spill
+ movq %rax, %r10
+ movq %rcx, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, -104(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq %rcx, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ addq %r11, %rax
+ adcq %rbp, %rdi
+ adcq %rbx, %r15
+ adcq %rsi, %r10
+ adcq %r8, %r12
+ movq -80(%rsp), %rcx # 8-byte Reload
+ adcq %r9, %rcx
+ adcq $0, %r14
+ addq %rdx, %rdi
+ adcq %r13, %r15
+ adcq -104(%rsp), %r10 # 8-byte Folded Reload
+ movq %r10, -104(%rsp) # 8-byte Spill
+ adcq -96(%rsp), %r12 # 8-byte Folded Reload
+ movq %r12, -96(%rsp) # 8-byte Spill
+ adcq -120(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -80(%rsp) # 8-byte Spill
+ adcq -112(%rsp), %r14 # 8-byte Folded Reload
+ movq -8(%rsp), %rax # 8-byte Reload
+ movq 40(%rax), %rcx
+ movq %rcx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %rbp
+ movq %rcx, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %rsi
+ movq %rcx, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rax, %r11
+ movq %rdx, %r8
+ addq %rsi, %r8
+ adcq %rbp, %r10
+ adcq -24(%rsp), %r13 # 8-byte Folded Reload
+ adcq -16(%rsp), %r12 # 8-byte Folded Reload
+ adcq -8(%rsp), %r9 # 8-byte Folded Reload
+ adcq $0, %rbx
+ addq %rdi, %r11
+ adcq %r15, %r8
+ adcq -104(%rsp), %r10 # 8-byte Folded Reload
+ adcq -96(%rsp), %r13 # 8-byte Folded Reload
+ adcq -80(%rsp), %r12 # 8-byte Folded Reload
+ adcq %r14, %r9
+ movq %r9, -16(%rsp) # 8-byte Spill
+ adcq $0, %rbx
+ movq (%rsp), %r9 # 8-byte Reload
+ imulq %r11, %r9
+ movq %r9, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ movq %rdx, (%rsp) # 8-byte Spill
+ movq %rax, %rsi
+ movq %r9, %rax
+ mulq 24(%rsp) # 8-byte Folded Reload
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movq %rax, %rdi
+ movq %r9, %rax
+ mulq 16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ movq %r9, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, %r14
+ movq %r9, %rax
+ movq -40(%rsp), %r15 # 8-byte Reload
+ mulq %r15
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, %rcx
+ movq %r9, %rax
+ movq -32(%rsp), %r9 # 8-byte Reload
+ mulq %r9
+ addq %r11, %r14
+ adcq %r8, %rax
+ adcq %r10, %rcx
+ adcq %r13, %rbp
+ adcq %r12, %rdi
+ adcq -16(%rsp), %rsi # 8-byte Folded Reload
+ adcq $0, %rbx
+ addq -48(%rsp), %rax # 8-byte Folded Reload
+ adcq %rdx, %rcx
+ adcq -56(%rsp), %rbp # 8-byte Folded Reload
+ adcq -24(%rsp), %rdi # 8-byte Folded Reload
+ adcq -8(%rsp), %rsi # 8-byte Folded Reload
+ adcq (%rsp), %rbx # 8-byte Folded Reload
+ movq %rax, %r14
+ subq 8(%rsp), %r14 # 8-byte Folded Reload
+ movq %rcx, %r8
+ sbbq %r9, %r8
+ movq %rbp, %r9
+ sbbq %r15, %r9
+ movq %rdi, %r10
+ sbbq 16(%rsp), %r10 # 8-byte Folded Reload
+ movq %rsi, %r11
+ sbbq 24(%rsp), %r11 # 8-byte Folded Reload
+ movq %rbx, %r15
+ sbbq 32(%rsp), %r15 # 8-byte Folded Reload
+ movq %r15, %rdx
+ sarq $63, %rdx
+ cmovsq %rax, %r14
+ movq -88(%rsp), %rax # 8-byte Reload
+ movq %r14, (%rax)
+ cmovsq %rcx, %r8
+ movq %r8, 8(%rax)
+ cmovsq %rbp, %r9
+ movq %r9, 16(%rax)
+ cmovsq %rdi, %r10
+ movq %r10, 24(%rax)
+ cmovsq %rsi, %r11
+ movq %r11, 32(%rax)
+ cmovsq %rbx, %r15
+ movq %r15, 40(%rax)
+ addq $40, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end84:
+ .size mcl_fp_montNF6L, .Lfunc_end84-mcl_fp_montNF6L
+
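+# Annotation (not in the generated source): mcl_fp_montRed6L appears to be
+# Montgomery reduction: a 12-limb input is reduced to a 6-limb result
+# z = xy*R^{-1} mod p, with a final conditional subtraction of p.
+# SysV args: %rdi = z, %rsi = 12-limb input, %rdx = p (Montgomery constant
+# read from -8(p)).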
+ .globl mcl_fp_montRed6L
+ .align 16, 0x90
+ .type mcl_fp_montRed6L,@function
+mcl_fp_montRed6L: # @mcl_fp_montRed6L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $16, %rsp
+ movq %rdx, %rcx
+ movq %rdi, -104(%rsp) # 8-byte Spill
+ movq -8(%rcx), %rax
+ movq %rax, -32(%rsp) # 8-byte Spill
+ movq (%rcx), %r11
+ movq %r11, -24(%rsp) # 8-byte Spill
+ movq (%rsi), %r9
+ movq %r9, %rbp
+ imulq %rax, %rbp
+ movq 40(%rcx), %rdx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdx
+ movq %rax, %r12
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq 32(%rcx), %rdx
+ movq %rdx, 8(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdx
+ movq %rax, %r15
+ movq %rdx, %r8
+ movq 24(%rcx), %rdx
+ movq %rdx, (%rsp) # 8-byte Spill
+ movq 16(%rcx), %rdi
+ movq %rdi, -8(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, -16(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdx
+ movq %rdx, %r10
+ movq %rax, %r14
+ movq %rbp, %rax
+ mulq %rdi
+ movq %rdx, %r13
+ movq %rax, %rbx
+ movq %rbp, %rax
+ mulq %rcx
+ movq %rdx, %rcx
+ movq %rax, %rdi
+ movq %rbp, %rax
+ mulq %r11
+ movq %rdx, %rbp
+ addq %rdi, %rbp
+ adcq %rbx, %rcx
+ adcq %r14, %r13
+ adcq %r15, %r10
+ adcq %r12, %r8
+ movq -72(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq %r9, %rax
+ adcq 8(%rsi), %rbp
+ adcq 16(%rsi), %rcx
+ adcq 24(%rsi), %r13
+ adcq 32(%rsi), %r10
+ adcq 40(%rsi), %r8
+ movq %r8, -112(%rsp) # 8-byte Spill
+ adcq 48(%rsi), %rdx
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq 88(%rsi), %rax
+ movq 80(%rsi), %rdx
+ movq 72(%rsi), %rdi
+ movq 64(%rsi), %rbx
+ movq 56(%rsi), %r15
+ adcq $0, %r15
+ adcq $0, %rbx
+ movq %rbx, -96(%rsp) # 8-byte Spill
+ adcq $0, %rdi
+ movq %rdi, -64(%rsp) # 8-byte Spill
+ adcq $0, %rdx
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ adcq $0, %rax
+ movq %rax, -48(%rsp) # 8-byte Spill
+ sbbq %r12, %r12
+ andl $1, %r12d
+ movq %rbp, %rdi
+ imulq -32(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ movq %rax, -128(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, %r8
+ movq %rdi, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r9
+ movq %rdi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r11
+ movq %rdi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ addq %r11, %rdi
+ adcq %r9, %rsi
+ adcq %r8, %rbx
+ adcq -128(%rsp), %r14 # 8-byte Folded Reload
+ movq -88(%rsp), %r8 # 8-byte Reload
+ adcq -120(%rsp), %r8 # 8-byte Folded Reload
+ movq -80(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq %rbp, %rax
+ adcq %rcx, %rdi
+ adcq %r13, %rsi
+ adcq %r10, %rbx
+ adcq -112(%rsp), %r14 # 8-byte Folded Reload
+ adcq -72(%rsp), %r8 # 8-byte Folded Reload
+ movq %r8, -88(%rsp) # 8-byte Spill
+ adcq %r15, %rdx
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ adcq $0, -96(%rsp) # 8-byte Folded Spill
+ adcq $0, -64(%rsp) # 8-byte Folded Spill
+ adcq $0, -56(%rsp) # 8-byte Folded Spill
+ adcq $0, -48(%rsp) # 8-byte Folded Spill
+ adcq $0, %r12
+ movq %rdi, %rcx
+ imulq -32(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -128(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %r8
+ movq %rcx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r10
+ movq %rcx, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ addq %r10, %r9
+ adcq %r8, %rbp
+ adcq -128(%rsp), %r13 # 8-byte Folded Reload
+ adcq -120(%rsp), %r11 # 8-byte Folded Reload
+ adcq -112(%rsp), %r15 # 8-byte Folded Reload
+ movq -72(%rsp), %rcx # 8-byte Reload
+ adcq $0, %rcx
+ addq %rdi, %rax
+ adcq %rsi, %r9
+ adcq %rbx, %rbp
+ adcq %r14, %r13
+ adcq -88(%rsp), %r11 # 8-byte Folded Reload
+ adcq -80(%rsp), %r15 # 8-byte Folded Reload
+ adcq -96(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -72(%rsp) # 8-byte Spill
+ adcq $0, -64(%rsp) # 8-byte Folded Spill
+ adcq $0, -56(%rsp) # 8-byte Folded Spill
+ adcq $0, -48(%rsp) # 8-byte Folded Spill
+ adcq $0, %r12
+ movq %r9, %rsi
+ imulq -32(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r10
+ movq %rsi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %rbx
+ movq %rsi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ addq %rbx, %rdi
+ adcq %r10, %rcx
+ adcq -120(%rsp), %r8 # 8-byte Folded Reload
+ adcq -112(%rsp), %r14 # 8-byte Folded Reload
+ movq -88(%rsp), %rsi # 8-byte Reload
+ adcq -96(%rsp), %rsi # 8-byte Folded Reload
+ movq -80(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq %r9, %rax
+ adcq %rbp, %rdi
+ adcq %r13, %rcx
+ adcq %r11, %r8
+ adcq %r15, %r14
+ adcq -72(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, -88(%rsp) # 8-byte Spill
+ adcq -64(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ adcq $0, -56(%rsp) # 8-byte Folded Spill
+ movq -48(%rsp), %rbp # 8-byte Reload
+ adcq $0, %rbp
+ adcq $0, %r12
+ movq %rdi, %rsi
+ imulq -32(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, %rax
+ movq -40(%rsp), %r11 # 8-byte Reload
+ mulq %r11
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, -48(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %r15
+ movq %rsi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %rbx
+ movq %rsi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ addq %rbx, %rsi
+ adcq %r15, %r10
+ adcq -112(%rsp), %r13 # 8-byte Folded Reload
+ adcq -96(%rsp), %r9 # 8-byte Folded Reload
+ movq -72(%rsp), %rbx # 8-byte Reload
+ adcq -48(%rsp), %rbx # 8-byte Folded Reload
+ movq -64(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq %rdi, %rax
+ adcq %rcx, %rsi
+ adcq %r8, %r10
+ adcq %r14, %r13
+ adcq -88(%rsp), %r9 # 8-byte Folded Reload
+ adcq -80(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, -72(%rsp) # 8-byte Spill
+ adcq -56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ adcq $0, %rbp
+ movq %rbp, -48(%rsp) # 8-byte Spill
+ adcq $0, %r12
+ movq -32(%rsp), %r8 # 8-byte Reload
+ imulq %rsi, %r8
+ movq %r8, %rax
+ mulq %r11
+ movq %rdx, %rdi
+ movq %rax, -32(%rsp) # 8-byte Spill
+ movq %r8, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq %r8, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, -80(%rsp) # 8-byte Spill
+ movq %r8, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r11
+ movq %r8, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r14
+ movq %r8, %rax
+ movq -24(%rsp), %r8 # 8-byte Reload
+ mulq %r8
+ addq %r14, %rdx
+ adcq %r11, %rbp
+ adcq -80(%rsp), %rbx # 8-byte Folded Reload
+ adcq -56(%rsp), %rcx # 8-byte Folded Reload
+ adcq -32(%rsp), %r15 # 8-byte Folded Reload
+ adcq $0, %rdi
+ addq %rsi, %rax
+ adcq %r10, %rdx
+ adcq %r13, %rbp
+ adcq %r9, %rbx
+ adcq -72(%rsp), %rcx # 8-byte Folded Reload
+ adcq -64(%rsp), %r15 # 8-byte Folded Reload
+ adcq -48(%rsp), %rdi # 8-byte Folded Reload
+ adcq $0, %r12
+ movq %rdx, %rax
+ subq %r8, %rax
+ movq %rbp, %rsi
+ sbbq -16(%rsp), %rsi # 8-byte Folded Reload
+ movq %rbx, %r9
+ sbbq -8(%rsp), %r9 # 8-byte Folded Reload
+ movq %rcx, %r10
+ sbbq (%rsp), %r10 # 8-byte Folded Reload
+ movq %r15, %r11
+ sbbq 8(%rsp), %r11 # 8-byte Folded Reload
+ movq %rdi, %r14
+ sbbq -40(%rsp), %r14 # 8-byte Folded Reload
+ sbbq $0, %r12
+ andl $1, %r12d
+ cmovneq %rdi, %r14
+ testb %r12b, %r12b
+ cmovneq %rdx, %rax
+ movq -104(%rsp), %rdx # 8-byte Reload
+ movq %rax, (%rdx)
+ cmovneq %rbp, %rsi
+ movq %rsi, 8(%rdx)
+ cmovneq %rbx, %r9
+ movq %r9, 16(%rdx)
+ cmovneq %rcx, %r10
+ movq %r10, 24(%rdx)
+ cmovneq %r15, %r11
+ movq %r11, 32(%rdx)
+ movq %r14, 40(%rdx)
+ addq $16, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end85:
+ .size mcl_fp_montRed6L, .Lfunc_end85-mcl_fp_montRed6L
+
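+# Annotation (not in the generated source): mcl_fp_addPre6L adds two 6-limb
+# values without reduction; the carry out is returned in %rax.
+# SysV args: %rdi = z, %rsi = x, %rdx = y.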
+ .globl mcl_fp_addPre6L
+ .align 16, 0x90
+ .type mcl_fp_addPre6L,@function
+mcl_fp_addPre6L: # @mcl_fp_addPre6L
+# BB#0:
+ pushq %r14
+ pushq %rbx
+ movq 40(%rdx), %r8
+ movq 40(%rsi), %r11
+ movq 32(%rdx), %r9
+ movq 24(%rdx), %r10
+ movq 24(%rsi), %rax
+ movq 32(%rsi), %r14
+ movq 16(%rdx), %rbx
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rbx
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rbx, 16(%rdi)
+ adcq %r10, %rax
+ movq %rax, 24(%rdi)
+ adcq %r9, %r14
+ movq %r14, 32(%rdi)
+ adcq %r8, %r11
+ movq %r11, 40(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r14
+ retq
+.Lfunc_end86:
+ .size mcl_fp_addPre6L, .Lfunc_end86-mcl_fp_addPre6L
+
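+# Annotation (not in the generated source): mcl_fp_subPre6L computes the
+# 6-limb difference z = x - y without reduction; the borrow out is returned
+# in %rax. SysV args: %rdi = z, %rsi = x, %rdx = y.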
+ .globl mcl_fp_subPre6L
+ .align 16, 0x90
+ .type mcl_fp_subPre6L,@function
+mcl_fp_subPre6L: # @mcl_fp_subPre6L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq 40(%rdx), %r8
+ movq 40(%rsi), %r9
+ movq 32(%rsi), %r10
+ movq 24(%rsi), %r11
+ movq 16(%rsi), %rcx
+ movq (%rsi), %rbx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %rsi
+ movq 24(%rdx), %r14
+ movq 32(%rdx), %r15
+ sbbq 16(%rdx), %rcx
+ movq %rbx, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %rcx, 16(%rdi)
+ sbbq %r14, %r11
+ movq %r11, 24(%rdi)
+ sbbq %r15, %r10
+ movq %r10, 32(%rdi)
+ sbbq %r8, %r9
+ movq %r9, 40(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end87:
+ .size mcl_fp_subPre6L, .Lfunc_end87-mcl_fp_subPre6L
+
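+# Annotation (not in the generated source): mcl_fp_shr1_6L shifts a 6-limb
+# value right by one bit using a shrdq chain; %rdi = destination,
+# %rsi = source.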
+ .globl mcl_fp_shr1_6L
+ .align 16, 0x90
+ .type mcl_fp_shr1_6L,@function
+mcl_fp_shr1_6L: # @mcl_fp_shr1_6L
+# BB#0:
+ movq 40(%rsi), %r8
+ movq 32(%rsi), %r9
+ movq 24(%rsi), %rdx
+ movq 16(%rsi), %rax
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rcx
+ movq %rcx, (%rdi)
+ shrdq $1, %rax, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rdx, %rax
+ movq %rax, 16(%rdi)
+ shrdq $1, %r9, %rdx
+ movq %rdx, 24(%rdi)
+ shrdq $1, %r8, %r9
+ movq %r9, 32(%rdi)
+ shrq %r8
+ movq %r8, 40(%rdi)
+ retq
+.Lfunc_end88:
+ .size mcl_fp_shr1_6L, .Lfunc_end88-mcl_fp_shr1_6L
+
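+# Annotation (not in the generated source): mcl_fp_add6L is modular
+# addition; it stores x + y, then overwrites it with x + y - p when that
+# trial subtraction does not borrow (the %nocarry path).
+# SysV args: %rdi = z, %rsi = x, %rdx = y, %rcx = p.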
+ .globl mcl_fp_add6L
+ .align 16, 0x90
+ .type mcl_fp_add6L,@function
+mcl_fp_add6L: # @mcl_fp_add6L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq 40(%rdx), %r14
+ movq 40(%rsi), %r8
+ movq 32(%rdx), %r15
+ movq 24(%rdx), %rbx
+ movq 24(%rsi), %r10
+ movq 32(%rsi), %r9
+ movq 16(%rdx), %r11
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r11
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r11, 16(%rdi)
+ adcq %rbx, %r10
+ movq %r10, 24(%rdi)
+ adcq %r15, %r9
+ movq %r9, 32(%rdi)
+ adcq %r14, %r8
+ movq %r8, 40(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r11
+ sbbq 24(%rcx), %r10
+ sbbq 32(%rcx), %r9
+ sbbq 40(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB89_2
+# BB#1: # %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r11, 16(%rdi)
+ movq %r10, 24(%rdi)
+ movq %r9, 32(%rdi)
+ movq %r8, 40(%rdi)
+.LBB89_2: # %carry
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end89:
+ .size mcl_fp_add6L, .Lfunc_end89-mcl_fp_add6L
+
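+# Annotation (not in the generated source): mcl_fp_addNF6L is the "NF"
+# modular addition; it selects between x + y and x + y - p by the sign of
+# the trial subtraction (cmovs), presumably relying on a spare top bit in
+# the modulus so the raw sum cannot carry out.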
+ .globl mcl_fp_addNF6L
+ .align 16, 0x90
+ .type mcl_fp_addNF6L,@function
+mcl_fp_addNF6L: # @mcl_fp_addNF6L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 40(%rdx), %r8
+ movq 32(%rdx), %r9
+ movq 24(%rdx), %r10
+ movq 16(%rdx), %r11
+ movq (%rdx), %r15
+ movq 8(%rdx), %r14
+ addq (%rsi), %r15
+ adcq 8(%rsi), %r14
+ adcq 16(%rsi), %r11
+ adcq 24(%rsi), %r10
+ adcq 32(%rsi), %r9
+ adcq 40(%rsi), %r8
+ movq %r15, %rsi
+ subq (%rcx), %rsi
+ movq %r14, %rbx
+ sbbq 8(%rcx), %rbx
+ movq %r11, %rdx
+ sbbq 16(%rcx), %rdx
+ movq %r10, %r13
+ sbbq 24(%rcx), %r13
+ movq %r9, %r12
+ sbbq 32(%rcx), %r12
+ movq %r8, %rax
+ sbbq 40(%rcx), %rax
+ movq %rax, %rcx
+ sarq $63, %rcx
+ cmovsq %r15, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r14, %rbx
+ movq %rbx, 8(%rdi)
+ cmovsq %r11, %rdx
+ movq %rdx, 16(%rdi)
+ cmovsq %r10, %r13
+ movq %r13, 24(%rdi)
+ cmovsq %r9, %r12
+ movq %r12, 32(%rdi)
+ cmovsq %r8, %rax
+ movq %rax, 40(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end90:
+ .size mcl_fp_addNF6L, .Lfunc_end90-mcl_fp_addNF6L
+
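+# Annotation (not in the generated source): mcl_fp_sub6L is modular
+# subtraction; it computes x - y and, on borrow (the %carry path), adds the
+# modulus p back. SysV args: %rdi = z, %rsi = x, %rdx = y, %rcx = p.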
+ .globl mcl_fp_sub6L
+ .align 16, 0x90
+ .type mcl_fp_sub6L,@function
+mcl_fp_sub6L: # @mcl_fp_sub6L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 40(%rdx), %r14
+ movq 40(%rsi), %r8
+ movq 32(%rsi), %r9
+ movq 24(%rsi), %r10
+ movq 16(%rsi), %r11
+ movq (%rsi), %rax
+ movq 8(%rsi), %rsi
+ xorl %ebx, %ebx
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %rsi
+ movq 24(%rdx), %r15
+ movq 32(%rdx), %r12
+ sbbq 16(%rdx), %r11
+ movq %rax, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %r11, 16(%rdi)
+ sbbq %r15, %r10
+ movq %r10, 24(%rdi)
+ sbbq %r12, %r9
+ movq %r9, 32(%rdi)
+ sbbq %r14, %r8
+ movq %r8, 40(%rdi)
+ sbbq $0, %rbx
+ testb $1, %bl
+ je .LBB91_2
+# BB#1: # %carry
+ movq 40(%rcx), %r14
+ movq 32(%rcx), %r15
+ movq 24(%rcx), %r12
+ movq 8(%rcx), %rbx
+ movq 16(%rcx), %rdx
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %rsi, %rbx
+ movq %rbx, 8(%rdi)
+ adcq %r11, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r10, %r12
+ movq %r12, 24(%rdi)
+ adcq %r9, %r15
+ movq %r15, 32(%rdi)
+ adcq %r8, %r14
+ movq %r14, 40(%rdi)
+.LBB91_2: # %nocarry
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end91:
+ .size mcl_fp_sub6L, .Lfunc_end91-mcl_fp_sub6L
+
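+# Annotation (not in the generated source): mcl_fp_subNF6L is branch-free
+# modular subtraction; the borrow is stretched into a sign mask (sarq $63)
+# and p AND mask is added back to the raw difference.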
+ .globl mcl_fp_subNF6L
+ .align 16, 0x90
+ .type mcl_fp_subNF6L,@function
+mcl_fp_subNF6L: # @mcl_fp_subNF6L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 40(%rsi), %r15
+ movq 32(%rsi), %r8
+ movq 24(%rsi), %r9
+ movq 16(%rsi), %r10
+ movq (%rsi), %r11
+ movq 8(%rsi), %r14
+ subq (%rdx), %r11
+ sbbq 8(%rdx), %r14
+ sbbq 16(%rdx), %r10
+ sbbq 24(%rdx), %r9
+ sbbq 32(%rdx), %r8
+ sbbq 40(%rdx), %r15
+ movq %r15, %rdx
+ sarq $63, %rdx
+ movq %rdx, %rbx
+ addq %rbx, %rbx
+ movq %rdx, %rsi
+ adcq %rsi, %rsi
+ andq 8(%rcx), %rsi
+ movq %r15, %rax
+ shrq $63, %rax
+ orq %rbx, %rax
+ andq (%rcx), %rax
+ movq 40(%rcx), %r12
+ andq %rdx, %r12
+ movq 32(%rcx), %r13
+ andq %rdx, %r13
+ movq 24(%rcx), %rbx
+ andq %rdx, %rbx
+ andq 16(%rcx), %rdx
+ addq %r11, %rax
+ movq %rax, (%rdi)
+ adcq %r14, %rsi
+ movq %rsi, 8(%rdi)
+ adcq %r10, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r9, %rbx
+ movq %rbx, 24(%rdi)
+ adcq %r8, %r13
+ movq %r13, 32(%rdi)
+ adcq %r15, %r12
+ movq %r12, 40(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end92:
+ .size mcl_fp_subNF6L, .Lfunc_end92-mcl_fp_subNF6L
+
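+# Annotation (not in the generated source): mcl_fpDbl_add6L adds two
+# 12-limb (double-width) values; the low 6 limbs are stored directly and
+# the high half is conditionally reduced by p.
+# SysV args: %rdi = z, %rsi = x, %rdx = y, %rcx = p.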
+ .globl mcl_fpDbl_add6L
+ .align 16, 0x90
+ .type mcl_fpDbl_add6L,@function
+mcl_fpDbl_add6L: # @mcl_fpDbl_add6L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 88(%rdx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq 80(%rdx), %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq 72(%rdx), %r14
+ movq 64(%rdx), %r15
+ movq 24(%rsi), %rbp
+ movq 32(%rsi), %r13
+ movq 16(%rdx), %r12
+ movq (%rdx), %rbx
+ movq 8(%rdx), %rax
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %rax
+ adcq 16(%rsi), %r12
+ adcq 24(%rdx), %rbp
+ adcq 32(%rdx), %r13
+ movq 56(%rdx), %r11
+ movq 48(%rdx), %r9
+ movq 40(%rdx), %rdx
+ movq %rbx, (%rdi)
+ movq 88(%rsi), %r8
+ movq %rax, 8(%rdi)
+ movq 80(%rsi), %r10
+ movq %r12, 16(%rdi)
+ movq 72(%rsi), %r12
+ movq %rbp, 24(%rdi)
+ movq 40(%rsi), %rax
+ adcq %rdx, %rax
+ movq 64(%rsi), %rdx
+ movq %r13, 32(%rdi)
+ movq 56(%rsi), %r13
+ movq 48(%rsi), %rbp
+ adcq %r9, %rbp
+ movq %rax, 40(%rdi)
+ adcq %r11, %r13
+ adcq %r15, %rdx
+ adcq %r14, %r12
+ adcq -16(%rsp), %r10 # 8-byte Folded Reload
+ adcq -8(%rsp), %r8 # 8-byte Folded Reload
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rbp, %rsi
+ subq (%rcx), %rsi
+ movq %r13, %rbx
+ sbbq 8(%rcx), %rbx
+ movq %rdx, %r9
+ sbbq 16(%rcx), %r9
+ movq %r12, %r11
+ sbbq 24(%rcx), %r11
+ movq %r10, %r14
+ sbbq 32(%rcx), %r14
+ movq %r8, %r15
+ sbbq 40(%rcx), %r15
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %rbp, %rsi
+ movq %rsi, 48(%rdi)
+ testb %al, %al
+ cmovneq %r13, %rbx
+ movq %rbx, 56(%rdi)
+ cmovneq %rdx, %r9
+ movq %r9, 64(%rdi)
+ cmovneq %r12, %r11
+ movq %r11, 72(%rdi)
+ cmovneq %r10, %r14
+ movq %r14, 80(%rdi)
+ cmovneq %r8, %r15
+ movq %r15, 88(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end93:
+ .size mcl_fpDbl_add6L, .Lfunc_end93-mcl_fpDbl_add6L
+
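+# Annotation (not in the generated source): mcl_fpDbl_sub6L subtracts two
+# 12-limb values; when the high half borrows, the limbs of p are masked in
+# via cmove and added back to correct it.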
+ .globl mcl_fpDbl_sub6L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub6L,@function
+mcl_fpDbl_sub6L: # @mcl_fpDbl_sub6L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 88(%rdx), %r9
+ movq 80(%rdx), %r10
+ movq 72(%rdx), %r14
+ movq 16(%rsi), %r8
+ movq (%rsi), %r15
+ movq 8(%rsi), %r11
+ xorl %eax, %eax
+ subq (%rdx), %r15
+ sbbq 8(%rdx), %r11
+ sbbq 16(%rdx), %r8
+ movq 24(%rsi), %rbx
+ sbbq 24(%rdx), %rbx
+ movq 32(%rsi), %r12
+ sbbq 32(%rdx), %r12
+ movq 64(%rdx), %r13
+ movq %r15, (%rdi)
+ movq 56(%rdx), %rbp
+ movq %r11, 8(%rdi)
+ movq 48(%rdx), %r15
+ movq 40(%rdx), %rdx
+ movq %r8, 16(%rdi)
+ movq 88(%rsi), %r8
+ movq %rbx, 24(%rdi)
+ movq 40(%rsi), %rbx
+ sbbq %rdx, %rbx
+ movq 80(%rsi), %r11
+ movq %r12, 32(%rdi)
+ movq 48(%rsi), %rdx
+ sbbq %r15, %rdx
+ movq 72(%rsi), %r15
+ movq %rbx, 40(%rdi)
+ movq 64(%rsi), %r12
+ movq 56(%rsi), %rsi
+ sbbq %rbp, %rsi
+ sbbq %r13, %r12
+ sbbq %r14, %r15
+ sbbq %r10, %r11
+ sbbq %r9, %r8
+ movl $0, %ebp
+ sbbq $0, %rbp
+ andl $1, %ebp
+ movq (%rcx), %r14
+ cmoveq %rax, %r14
+ testb %bpl, %bpl
+ movq 16(%rcx), %r9
+ cmoveq %rax, %r9
+ movq 8(%rcx), %rbp
+ cmoveq %rax, %rbp
+ movq 40(%rcx), %r10
+ cmoveq %rax, %r10
+ movq 32(%rcx), %rbx
+ cmoveq %rax, %rbx
+ cmovneq 24(%rcx), %rax
+ addq %rdx, %r14
+ movq %r14, 48(%rdi)
+ adcq %rsi, %rbp
+ movq %rbp, 56(%rdi)
+ adcq %r12, %r9
+ movq %r9, 64(%rdi)
+ adcq %r15, %rax
+ movq %rax, 72(%rdi)
+ adcq %r11, %rbx
+ movq %rbx, 80(%rdi)
+ adcq %r8, %r10
+ movq %r10, 88(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end94:
+ .size mcl_fpDbl_sub6L, .Lfunc_end94-mcl_fpDbl_sub6L
+
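+# Annotation (not in the generated source): mcl_fp_mulUnitPre7L multiplies
+# a 7-limb operand by a single 64-bit word, producing an 8-limb result with
+# no reduction. SysV args: %rdi = 8-limb destination, %rsi = 7-limb source,
+# %rdx = 64-bit multiplier.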
+ .globl mcl_fp_mulUnitPre7L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre7L,@function
+mcl_fp_mulUnitPre7L: # @mcl_fp_mulUnitPre7L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rcx, %rax
+ mulq 48(%rsi)
+ movq %rdx, %r10
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq 40(%rsi)
+ movq %rdx, %r11
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq 32(%rsi)
+ movq %rdx, %r15
+ movq %rax, %r14
+ movq %rcx, %rax
+ mulq 24(%rsi)
+ movq %rdx, %r13
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq 16(%rsi)
+ movq %rdx, %rbx
+ movq %rax, %rbp
+ movq %rcx, %rax
+ mulq 8(%rsi)
+ movq %rdx, %r8
+ movq %rax, %r9
+ movq %rcx, %rax
+ mulq (%rsi)
+ movq %rax, (%rdi)
+ addq %r9, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %rbp, %r8
+ movq %r8, 16(%rdi)
+ adcq %r12, %rbx
+ movq %rbx, 24(%rdi)
+ adcq %r14, %r13
+ movq %r13, 32(%rdi)
+ adcq -16(%rsp), %r15 # 8-byte Folded Reload
+ movq %r15, 40(%rdi)
+ adcq -8(%rsp), %r11 # 8-byte Folded Reload
+ movq %r11, 48(%rdi)
+ adcq $0, %r10
+ movq %r10, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end95:
+ .size mcl_fp_mulUnitPre7L, .Lfunc_end95-mcl_fp_mulUnitPre7L
+
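+# Annotation (not in the generated source): mcl_fpDbl_mulPre7L appears to
+# be schoolbook multiplication of two 7-limb operands into a 14-limb
+# result, with no modular reduction.
+# SysV args: %rdi = destination, %rsi = x, %rdx = y.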
+ .globl mcl_fpDbl_mulPre7L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre7L,@function
+mcl_fpDbl_mulPre7L: # @mcl_fpDbl_mulPre7L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $24, %rsp
+ movq %rdx, 8(%rsp) # 8-byte Spill
+ movq %rsi, %r9
+ movq %rdi, 16(%rsp) # 8-byte Spill
+ movq (%r9), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq 8(%r9), %r10
+ movq %r10, -64(%rsp) # 8-byte Spill
+ movq (%rdx), %rsi
+ mulq %rsi
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq 16(%r9), %r11
+ movq %r11, -72(%rsp) # 8-byte Spill
+ movq 24(%r9), %rbx
+ movq %rbx, -56(%rsp) # 8-byte Spill
+ movq 32(%r9), %rbp
+ movq %rbp, -24(%rsp) # 8-byte Spill
+ movq 40(%r9), %rcx
+ movq %rcx, -16(%rsp) # 8-byte Spill
+ movq 48(%r9), %r14
+ movq %rax, (%rdi)
+ movq %r14, %rax
+ mulq %rsi
+ movq %rdx, %rdi
+ movq %rax, (%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq %rsi
+ movq %rdx, %rcx
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq %rsi
+ movq %rdx, %rbp
+ movq %rax, %r15
+ movq %rbx, %rax
+ mulq %rsi
+ movq %rdx, %rbx
+ movq %rax, %r8
+ movq %r11, %rax
+ mulq %rsi
+ movq %rdx, %r12
+ movq %rax, %r13
+ movq %r10, %rax
+ mulq %rsi
+ movq %rdx, %rsi
+ movq %rax, %r10
+ addq -32(%rsp), %r10 # 8-byte Folded Reload
+ adcq %r13, %rsi
+ adcq %r8, %r12
+ adcq %r15, %rbx
+ adcq -40(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, -48(%rsp) # 8-byte Spill
+ adcq (%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -40(%rsp) # 8-byte Spill
+ adcq $0, %rdi
+ movq %rdi, -32(%rsp) # 8-byte Spill
+ movq 8(%rsp), %r11 # 8-byte Reload
+ movq 8(%r11), %rcx
+ movq %r14, %rax
+ mulq %rcx
+ movq %rdx, %r14
+ movq %rax, (%rsp) # 8-byte Spill
+ movq -16(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq %rax, %r8
+ movq -24(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq %rax, %r13
+ movq -56(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq -72(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ movq -64(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, %rdi
+ movq -8(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ addq %r10, %rax
+ movq 16(%rsp), %r10 # 8-byte Reload
+ movq %rax, 8(%r10)
+ adcq %rsi, %rdi
+ adcq %r12, %rbp
+ adcq %rbx, %r15
+ adcq -48(%rsp), %r13 # 8-byte Folded Reload
+ movq %r8, %rcx
+ adcq -40(%rsp), %rcx # 8-byte Folded Reload
+ movq (%rsp), %rax # 8-byte Reload
+ adcq -32(%rsp), %rax # 8-byte Folded Reload
+ sbbq %r8, %r8
+ andl $1, %r8d
+ addq %rdx, %rdi
+ adcq -64(%rsp), %rbp # 8-byte Folded Reload
+ adcq -72(%rsp), %r15 # 8-byte Folded Reload
+ adcq -56(%rsp), %r13 # 8-byte Folded Reload
+ adcq -24(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -88(%rsp) # 8-byte Spill
+ adcq -16(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, (%rsp) # 8-byte Spill
+ adcq %r14, %r8
+ movq 48(%r9), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq 16(%r11), %rcx
+ mulq %rcx
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq 40(%r9), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ mulq %rcx
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq 32(%r9), %rax
+ movq %rax, -32(%rsp) # 8-byte Spill
+ mulq %rcx
+ movq %rax, %r12
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq 24(%r9), %rax
+ movq %rax, -48(%rsp) # 8-byte Spill
+ mulq %rcx
+ movq %rax, %r14
+ movq %rdx, -104(%rsp) # 8-byte Spill
+ movq 16(%r9), %rax
+ movq %rax, -56(%rsp) # 8-byte Spill
+ mulq %rcx
+ movq %rax, %rbx
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ movq (%r9), %rsi
+ movq 8(%r9), %rax
+ movq %rax, -64(%rsp) # 8-byte Spill
+ mulq %rcx
+ movq %rdx, -120(%rsp) # 8-byte Spill
+ movq %rax, %r11
+ movq %rsi, %rax
+ mulq %rcx
+ addq %rdi, %rax
+ movq %rax, 16(%r10)
+ adcq %rbp, %r11
+ adcq %r15, %rbx
+ adcq %r13, %r14
+ adcq -88(%rsp), %r12 # 8-byte Folded Reload
+ movq -16(%rsp), %rdi # 8-byte Reload
+ adcq (%rsp), %rdi # 8-byte Folded Reload
+ movq -96(%rsp), %rax # 8-byte Reload
+ adcq %r8, %rax
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %rdx, %r11
+ adcq -120(%rsp), %rbx # 8-byte Folded Reload
+ adcq -112(%rsp), %r14 # 8-byte Folded Reload
+ adcq -104(%rsp), %r12 # 8-byte Folded Reload
+ adcq -80(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, -16(%rsp) # 8-byte Spill
+ adcq -72(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, %rdi
+ adcq -40(%rsp), %rcx # 8-byte Folded Reload
+ movq 8(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rbp
+ movq -8(%rsp), %rax # 8-byte Reload
+ mulq %rbp
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rax, (%rsp) # 8-byte Spill
+ movq -24(%rsp), %rax # 8-byte Reload
+ mulq %rbp
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq -32(%rsp), %rax # 8-byte Reload
+ mulq %rbp
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rax, %r13
+ movq -48(%rsp), %rax # 8-byte Reload
+ mulq %rbp
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq -56(%rsp), %rax # 8-byte Reload
+ mulq %rbp
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, %r8
+ movq -64(%rsp), %rax # 8-byte Reload
+ mulq %rbp
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, %r10
+ movq %rsi, %rax
+ mulq %rbp
+ addq %r11, %rax
+ movq 16(%rsp), %rsi # 8-byte Reload
+ movq %rax, 24(%rsi)
+ adcq %rbx, %r10
+ adcq %r14, %r8
+ adcq %r12, %r15
+ adcq -16(%rsp), %r13 # 8-byte Folded Reload
+ movq -8(%rsp), %rsi # 8-byte Reload
+ adcq %rdi, %rsi
+ movq (%rsp), %rax # 8-byte Reload
+ adcq %rcx, %rax
+ sbbq %rdi, %rdi
+ andl $1, %edi
+ addq %rdx, %r10
+ adcq -64(%rsp), %r8 # 8-byte Folded Reload
+ adcq -56(%rsp), %r15 # 8-byte Folded Reload
+ adcq -48(%rsp), %r13 # 8-byte Folded Reload
+ adcq -32(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, -8(%rsp) # 8-byte Spill
+ adcq -24(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, (%rsp) # 8-byte Spill
+ adcq -40(%rsp), %rdi # 8-byte Folded Reload
+ movq 48(%r9), %rax
+ movq %rax, -32(%rsp) # 8-byte Spill
+ movq 8(%rsp), %rbx # 8-byte Reload
+ movq 32(%rbx), %rcx
+ mulq %rcx
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq 40(%r9), %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ mulq %rcx
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq %rdx, -96(%rsp) # 8-byte Spill
+ movq 32(%r9), %rax
+ movq %rax, -48(%rsp) # 8-byte Spill
+ mulq %rcx
+ movq %rax, %r12
+ movq %rdx, -104(%rsp) # 8-byte Spill
+ movq 24(%r9), %rax
+ movq %rax, -64(%rsp) # 8-byte Spill
+ mulq %rcx
+ movq %rax, %rbp
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ movq 16(%r9), %rax
+ movq %rax, -72(%rsp) # 8-byte Spill
+ mulq %rcx
+ movq %rax, %r14
+ movq %rdx, -120(%rsp) # 8-byte Spill
+ movq (%r9), %rsi
+ movq %rsi, -80(%rsp) # 8-byte Spill
+ movq 8(%r9), %rax
+ movq %rax, -88(%rsp) # 8-byte Spill
+ mulq %rcx
+ movq %rdx, -128(%rsp) # 8-byte Spill
+ movq %rax, %r11
+ movq %rsi, %rax
+ mulq %rcx
+ addq %r10, %rax
+ movq 16(%rsp), %rcx # 8-byte Reload
+ movq %rax, 32(%rcx)
+ adcq %r8, %r11
+ adcq %r15, %r14
+ adcq %r13, %rbp
+ adcq -8(%rsp), %r12 # 8-byte Folded Reload
+ movq -24(%rsp), %rcx # 8-byte Reload
+ adcq (%rsp), %rcx # 8-byte Folded Reload
+ movq -16(%rsp), %rax # 8-byte Reload
+ adcq %rdi, %rax
+ sbbq %r13, %r13
+ andl $1, %r13d
+ addq %rdx, %r11
+ adcq -128(%rsp), %r14 # 8-byte Folded Reload
+ adcq -120(%rsp), %rbp # 8-byte Folded Reload
+ adcq -112(%rsp), %r12 # 8-byte Folded Reload
+ adcq -104(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -24(%rsp) # 8-byte Spill
+ adcq -96(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -16(%rsp) # 8-byte Spill
+ adcq -56(%rsp), %r13 # 8-byte Folded Reload
+ movq 40(%rbx), %rcx
+ movq -32(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, (%rsp) # 8-byte Spill
+ movq %rax, %rdi
+ movq -40(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movq %rax, %r10
+ movq -48(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq -64(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rax, %rbx
+ movq -72(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, %rsi
+ movq -88(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, %r8
+ movq -80(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -96(%rsp) # 8-byte Spill
+ addq %r11, %rax
+ movq 16(%rsp), %rcx # 8-byte Reload
+ movq %rax, 40(%rcx)
+ adcq %r14, %r8
+ adcq %rbp, %rsi
+ adcq %r12, %rbx
+ adcq -24(%rsp), %r15 # 8-byte Folded Reload
+ adcq -16(%rsp), %r10 # 8-byte Folded Reload
+ adcq %r13, %rdi
+ movq 8(%rsp), %rax # 8-byte Reload
+ movq 48(%rax), %r11
+ sbbq %rcx, %rcx
+ movq %r11, %rax
+ mulq 48(%r9)
+ movq %rdx, 8(%rsp) # 8-byte Spill
+ movq %rax, -64(%rsp) # 8-byte Spill
+ movq %r11, %rax
+ mulq 40(%r9)
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %r11, %rax
+ mulq 32(%r9)
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq %rax, %r13
+ movq %r11, %rax
+ mulq 24(%r9)
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ movq %r11, %rax
+ mulq 16(%r9)
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq %rax, %r14
+ movq %r11, %rax
+ mulq 8(%r9)
+ movq %rdx, -104(%rsp) # 8-byte Spill
+ movq %rax, %r12
+ movq %r11, %rax
+ mulq (%r9)
+ andl $1, %ecx
+ addq -96(%rsp), %r8 # 8-byte Folded Reload
+ adcq -72(%rsp), %rsi # 8-byte Folded Reload
+ adcq -48(%rsp), %rbx # 8-byte Folded Reload
+ adcq -40(%rsp), %r15 # 8-byte Folded Reload
+ adcq -32(%rsp), %r10 # 8-byte Folded Reload
+ adcq -8(%rsp), %rdi # 8-byte Folded Reload
+ adcq (%rsp), %rcx # 8-byte Folded Reload
+ addq %rax, %r8
+ movq 16(%rsp), %r9 # 8-byte Reload
+ movq %r8, 48(%r9)
+ adcq %r12, %rsi
+ adcq %r14, %rbx
+ adcq %rbp, %r15
+ adcq %r13, %r10
+ adcq -88(%rsp), %rdi # 8-byte Folded Reload
+ adcq -64(%rsp), %rcx # 8-byte Folded Reload
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %rdx, %rsi
+ adcq -104(%rsp), %rbx # 8-byte Folded Reload
+ movq %r9, %rdx
+ movq %rsi, 56(%rdx)
+ movq %rbx, 64(%rdx)
+ adcq -80(%rsp), %r15 # 8-byte Folded Reload
+ movq %r15, 72(%rdx)
+ adcq -56(%rsp), %r10 # 8-byte Folded Reload
+ movq %r10, 80(%rdx)
+ adcq -24(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 88(%rdx)
+ adcq -16(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 96(%rdx)
+ adcq 8(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 104(%rdx)
+ addq $24, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end96:
+ .size mcl_fpDbl_mulPre7L, .Lfunc_end96-mcl_fpDbl_mulPre7L
+
+ .globl mcl_fpDbl_sqrPre7L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre7L,@function
+mcl_fpDbl_sqrPre7L: # @mcl_fpDbl_sqrPre7L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $16, %rsp
+ movq %rdi, 8(%rsp) # 8-byte Spill
+ movq 16(%rsi), %r11
+ movq %r11, -64(%rsp) # 8-byte Spill
+ movq 24(%rsi), %r14
+ movq %r14, -48(%rsp) # 8-byte Spill
+ movq 32(%rsi), %r9
+ movq %r9, -24(%rsp) # 8-byte Spill
+ movq 40(%rsi), %r10
+ movq %r10, -16(%rsp) # 8-byte Spill
+ movq 48(%rsi), %r8
+ movq (%rsi), %rbp
+ movq 8(%rsi), %rbx
+ movq %rbp, %rax
+ mulq %rbp
+ movq %rdx, %rcx
+ movq %rax, (%rdi)
+ movq %r8, %rax
+ mulq %rbp
+ movq %rdx, %r15
+ movq %rax, (%rsp) # 8-byte Spill
+ movq %r10, %rax
+ mulq %rbp
+ movq %rdx, %rdi
+ movq %rax, -32(%rsp) # 8-byte Spill
+ movq %r9, %rax
+ mulq %rbp
+ movq %rdx, %r9
+ movq %rax, %r10
+ movq %r14, %rax
+ mulq %rbp
+ movq %rdx, %r13
+ movq %rax, %r14
+ movq %r11, %rax
+ mulq %rbp
+ movq %rdx, %r12
+ movq %rax, %r11
+ movq %rbx, %rax
+ mulq %rbp
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movq %rax, -56(%rsp) # 8-byte Spill
+ addq %rax, %rcx
+ adcq %rdx, %r11
+ adcq %r14, %r12
+ adcq %r10, %r13
+ adcq -32(%rsp), %r9 # 8-byte Folded Reload
+ adcq (%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, -40(%rsp) # 8-byte Spill
+ adcq $0, %r15
+ movq %r15, -32(%rsp) # 8-byte Spill
+ movq %r8, %rax
+ mulq %rbx
+ movq %rdx, (%rsp) # 8-byte Spill
+ movq %rax, %rdi
+ movq -16(%rsp), %rax # 8-byte Reload
+ mulq %rbx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq -24(%rsp), %rax # 8-byte Reload
+ mulq %rbx
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq %rax, %r10
+ movq -48(%rsp), %rax # 8-byte Reload
+ mulq %rbx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, %r14
+ movq -64(%rsp), %rax # 8-byte Reload
+ mulq %rbx
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ movq %rbx, %rax
+ mulq %rbx
+ movq %rax, %rbx
+ addq -56(%rsp), %rcx # 8-byte Folded Reload
+ movq 8(%rsp), %rax # 8-byte Reload
+ movq %rcx, 8(%rax)
+ adcq %r11, %rbx
+ adcq %r12, %rbp
+ adcq %r13, %r14
+ adcq %r9, %r10
+ adcq -40(%rsp), %r15 # 8-byte Folded Reload
+ adcq -32(%rsp), %rdi # 8-byte Folded Reload
+ sbbq %r8, %r8
+ andl $1, %r8d
+ addq -8(%rsp), %rbx # 8-byte Folded Reload
+ adcq %rdx, %rbp
+ adcq -64(%rsp), %r14 # 8-byte Folded Reload
+ adcq -48(%rsp), %r10 # 8-byte Folded Reload
+ adcq -24(%rsp), %r15 # 8-byte Folded Reload
+ adcq -16(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, -72(%rsp) # 8-byte Spill
+ adcq (%rsp), %r8 # 8-byte Folded Reload
+ movq 48(%rsi), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq 16(%rsi), %rdi
+ mulq %rdi
+ movq %rax, (%rsp) # 8-byte Spill
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq 40(%rsi), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ mulq %rdi
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq 32(%rsi), %rax
+ movq %rax, -32(%rsp) # 8-byte Spill
+ mulq %rdi
+ movq %rax, %r13
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq 24(%rsi), %rcx
+ movq %rcx, %rax
+ mulq %rdi
+ movq %rax, %r9
+ movq %r9, -104(%rsp) # 8-byte Spill
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq (%rsi), %r12
+ movq %r12, -48(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -80(%rsp) # 8-byte Spill
+ mulq %rdi
+ movq %rdx, -96(%rsp) # 8-byte Spill
+ movq %rax, %r11
+ movq %r12, %rax
+ mulq %rdi
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ movq %rax, %r12
+ movq %rdi, %rax
+ mulq %rdi
+ movq %rax, %rdi
+ addq %rbx, %r12
+ movq 8(%rsp), %rax # 8-byte Reload
+ movq %r12, 16(%rax)
+ adcq %rbp, %r11
+ adcq %r14, %rdi
+ adcq %r9, %r10
+ adcq %r15, %r13
+ movq -88(%rsp), %r14 # 8-byte Reload
+ adcq -72(%rsp), %r14 # 8-byte Folded Reload
+ movq (%rsp), %rax # 8-byte Reload
+ adcq %r8, %rax
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ addq -112(%rsp), %r11 # 8-byte Folded Reload
+ adcq -96(%rsp), %rdi # 8-byte Folded Reload
+ adcq %rdx, %r10
+ adcq -16(%rsp), %r13 # 8-byte Folded Reload
+ adcq -64(%rsp), %r14 # 8-byte Folded Reload
+ adcq -56(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, (%rsp) # 8-byte Spill
+ adcq -40(%rsp), %rbx # 8-byte Folded Reload
+ movq -8(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, %r8
+ movq -24(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq -32(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, %r9
+ movq -80(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq -48(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ movq %rcx, %rax
+ mulq %rcx
+ movq %rax, %r12
+ movq %rdx, -96(%rsp) # 8-byte Spill
+ addq %r11, %rbp
+ movq 8(%rsp), %rax # 8-byte Reload
+ movq %rbp, 24(%rax)
+ adcq %rdi, %r15
+ adcq -104(%rsp), %r10 # 8-byte Folded Reload
+ adcq %r13, %r12
+ movq %r9, %rcx
+ adcq %r14, %rcx
+ movq -8(%rsp), %rdi # 8-byte Reload
+ adcq (%rsp), %rdi # 8-byte Folded Reload
+ adcq %rbx, %r8
+ sbbq %r14, %r14
+ andl $1, %r14d
+ movq (%rsi), %r9
+ movq 8(%rsi), %rbp
+ movq 40(%rsi), %r11
+ movq %rbp, %rax
+ mulq %r11
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq %r9, %rax
+ mulq %r11
+ movq %rax, -48(%rsp) # 8-byte Spill
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq 32(%rsi), %rbx
+ movq %rbp, %rax
+ mulq %rbx
+ movq %rdx, (%rsp) # 8-byte Spill
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %r9, %rax
+ mulq %rbx
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rdx, -104(%rsp) # 8-byte Spill
+ addq -88(%rsp), %r15 # 8-byte Folded Reload
+ adcq -80(%rsp), %r10 # 8-byte Folded Reload
+ adcq -16(%rsp), %r12 # 8-byte Folded Reload
+ adcq -96(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -96(%rsp) # 8-byte Spill
+ adcq -72(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, -8(%rsp) # 8-byte Spill
+ adcq -64(%rsp), %r8 # 8-byte Folded Reload
+ adcq -56(%rsp), %r14 # 8-byte Folded Reload
+ movq 48(%rsi), %rax
+ movq %rax, -56(%rsp) # 8-byte Spill
+ mulq %rbx
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, %rcx
+ movq %r11, %rax
+ mulq %rbx
+ movq %rax, %rbp
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq 24(%rsi), %rax
+ movq %rax, -72(%rsp) # 8-byte Spill
+ mulq %rbx
+ movq %rax, %rdi
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ movq 16(%rsi), %rax
+ movq %rax, -80(%rsp) # 8-byte Spill
+ mulq %rbx
+ movq %rdx, -128(%rsp) # 8-byte Spill
+ movq %rax, %r9
+ movq %rbx, %rax
+ mulq %rbx
+ movq %rax, %r13
+ addq -120(%rsp), %r15 # 8-byte Folded Reload
+ movq 8(%rsp), %rax # 8-byte Reload
+ movq %r15, 32(%rax)
+ adcq -112(%rsp), %r10 # 8-byte Folded Reload
+ adcq %r12, %r9
+ adcq -96(%rsp), %rdi # 8-byte Folded Reload
+ adcq -8(%rsp), %r13 # 8-byte Folded Reload
+ adcq %rbp, %r8
+ adcq %r14, %rcx
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ addq -104(%rsp), %r10 # 8-byte Folded Reload
+ adcq (%rsp), %r9 # 8-byte Folded Reload
+ adcq -128(%rsp), %rdi # 8-byte Folded Reload
+ adcq -88(%rsp), %r13 # 8-byte Folded Reload
+ adcq %rdx, %r8
+ adcq -16(%rsp), %rcx # 8-byte Folded Reload
+ adcq -64(%rsp), %rbx # 8-byte Folded Reload
+ movq -56(%rsp), %rax # 8-byte Reload
+ mulq %r11
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movq %rax, (%rsp) # 8-byte Spill
+ movq -72(%rsp), %rax # 8-byte Reload
+ mulq %r11
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq -80(%rsp), %rax # 8-byte Reload
+ mulq %r11
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, %r14
+ movq %r11, %rax
+ mulq %r11
+ movq %rax, %r12
+ addq -48(%rsp), %r10 # 8-byte Folded Reload
+ movq 8(%rsp), %rax # 8-byte Reload
+ movq %r10, 40(%rax)
+ adcq -40(%rsp), %r9 # 8-byte Folded Reload
+ adcq %rdi, %r14
+ adcq %r13, %r15
+ adcq %rbp, %r8
+ adcq %rcx, %r12
+ movq (%rsp), %rax # 8-byte Reload
+ adcq %rbx, %rax
+ sbbq %r11, %r11
+ andl $1, %r11d
+ addq -32(%rsp), %r9 # 8-byte Folded Reload
+ adcq -24(%rsp), %r14 # 8-byte Folded Reload
+ adcq -64(%rsp), %r15 # 8-byte Folded Reload
+ adcq -56(%rsp), %r8 # 8-byte Folded Reload
+ movq %r8, -32(%rsp) # 8-byte Spill
+ adcq -16(%rsp), %r12 # 8-byte Folded Reload
+ adcq %rdx, %rax
+ movq %rax, (%rsp) # 8-byte Spill
+ adcq -8(%rsp), %r11 # 8-byte Folded Reload
+ movq 48(%rsi), %rcx
+ movq %rcx, %rax
+ mulq 40(%rsi)
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movq %rax, %r8
+ movq %rcx, %rax
+ mulq 32(%rsi)
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq %rax, %rbx
+ movq %rcx, %rax
+ mulq 24(%rsi)
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ movq %rcx, %rax
+ mulq 16(%rsi)
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rax, %r10
+ movq %rcx, %rax
+ mulq 8(%rsi)
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq (%rsi)
+ movq %rdx, %r13
+ movq %rax, %rsi
+ movq %rcx, %rax
+ mulq %rcx
+ addq %r9, %rsi
+ movq 8(%rsp), %r9 # 8-byte Reload
+ movq %rsi, 48(%r9)
+ adcq %r14, %rdi
+ adcq %r15, %r10
+ adcq -32(%rsp), %rbp # 8-byte Folded Reload
+ adcq %r12, %rbx
+ adcq (%rsp), %r8 # 8-byte Folded Reload
+ adcq %r11, %rax
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %r13, %rdi
+ adcq -48(%rsp), %r10 # 8-byte Folded Reload
+ movq %r9, %rsi
+ movq %rdi, 56(%rsi)
+ movq %r10, 64(%rsi)
+ adcq -40(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 72(%rsi)
+ adcq -24(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, 80(%rsi)
+ adcq -16(%rsp), %r8 # 8-byte Folded Reload
+ movq %r8, 88(%rsi)
+ adcq -8(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 96(%rsi)
+ adcq %rdx, %rcx
+ movq %rcx, 104(%rsi)
+ addq $16, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end97:
+ .size mcl_fpDbl_sqrPre7L, .Lfunc_end97-mcl_fpDbl_sqrPre7L
+
+ .globl mcl_fp_mont7L
+ .align 16, 0x90
+ .type mcl_fp_mont7L,@function
+mcl_fp_mont7L: # @mcl_fp_mont7L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $96, %rsp
+ movq %rdx, 24(%rsp) # 8-byte Spill
+ movq %rdi, -96(%rsp) # 8-byte Spill
+ movq 48(%rsi), %rax
+ movq %rax, 16(%rsp) # 8-byte Spill
+ movq (%rdx), %rbx
+ mulq %rbx
+ movq %rax, 88(%rsp) # 8-byte Spill
+ movq %rdx, %r15
+ movq 40(%rsi), %rax
+ movq %rax, 8(%rsp) # 8-byte Spill
+ mulq %rbx
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq %rdx, %r12
+ movq 32(%rsi), %rax
+ movq %rax, -32(%rsp) # 8-byte Spill
+ movq 24(%rsi), %r9
+ movq %r9, -24(%rsp) # 8-byte Spill
+ movq 16(%rsi), %r10
+ movq %r10, -16(%rsp) # 8-byte Spill
+ movq (%rsi), %r13
+ movq %r13, (%rsp) # 8-byte Spill
+ movq 8(%rsi), %rsi
+ movq %rsi, -8(%rsp) # 8-byte Spill
+ mulq %rbx
+ movq %rdx, %r14
+ movq %rax, %r8
+ movq %r9, %rax
+ mulq %rbx
+ movq %rdx, %rdi
+ movq %rax, %r9
+ movq %r10, %rax
+ mulq %rbx
+ movq %rdx, %rbp
+ movq %rax, %r10
+ movq %rsi, %rax
+ mulq %rbx
+ movq %rdx, %rsi
+ movq %rax, %r11
+ movq %r13, %rax
+ mulq %rbx
+ movq %rax, -112(%rsp) # 8-byte Spill
+ addq %r11, %rdx
+ movq %rdx, -104(%rsp) # 8-byte Spill
+ adcq %r10, %rsi
+ movq %rsi, -88(%rsp) # 8-byte Spill
+ adcq %r9, %rbp
+ movq %rbp, -80(%rsp) # 8-byte Spill
+ adcq %r8, %rdi
+ movq %rdi, -72(%rsp) # 8-byte Spill
+ adcq 80(%rsp), %r14 # 8-byte Folded Reload
+ movq %r14, -64(%rsp) # 8-byte Spill
+ adcq 88(%rsp), %r12 # 8-byte Folded Reload
+ movq %r12, -48(%rsp) # 8-byte Spill
+ adcq $0, %r15
+ movq %r15, -40(%rsp) # 8-byte Spill
+ movq -8(%rcx), %rdx
+ movq %rdx, 32(%rsp) # 8-byte Spill
+ movq %rax, %rdi
+ imulq %rdx, %rdi
+ movq (%rcx), %r12
+ movq %r12, 40(%rsp) # 8-byte Spill
+ movq 48(%rcx), %rdx
+ movq %rdx, 64(%rsp) # 8-byte Spill
+ movq 40(%rcx), %r9
+ movq %r9, 88(%rsp) # 8-byte Spill
+ movq 32(%rcx), %rbx
+ movq %rbx, 80(%rsp) # 8-byte Spill
+ movq 24(%rcx), %rsi
+ movq %rsi, 72(%rsp) # 8-byte Spill
+ movq 16(%rcx), %rbp
+ movq %rbp, 48(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq %rdx
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq %r9
+ movq %rdx, %r14
+ movq %rax, -128(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq %rbx
+ movq %rdx, %r11
+ movq %rax, %r15
+ movq %rdi, %rax
+ mulq %rsi
+ movq %rdx, %rbx
+ movq %rax, %r10
+ movq %rdi, %rax
+ mulq %rbp
+ movq %rdx, %r8
+ movq %rax, %r13
+ movq %rdi, %rax
+ mulq %rcx
+ movq %rdx, %rbp
+ movq %rax, %r9
+ movq %rdi, %rax
+ mulq %r12
+ movq %rdx, %r12
+ addq %r9, %r12
+ adcq %r13, %rbp
+ adcq %r10, %r8
+ adcq %r15, %rbx
+ adcq -128(%rsp), %r11 # 8-byte Folded Reload
+ adcq -120(%rsp), %r14 # 8-byte Folded Reload
+ movq -56(%rsp), %rcx # 8-byte Reload
+ adcq $0, %rcx
+ addq -112(%rsp), %rax # 8-byte Folded Reload
+ adcq -104(%rsp), %r12 # 8-byte Folded Reload
+ adcq -88(%rsp), %rbp # 8-byte Folded Reload
+ adcq -80(%rsp), %r8 # 8-byte Folded Reload
+ adcq -72(%rsp), %rbx # 8-byte Folded Reload
+ adcq -64(%rsp), %r11 # 8-byte Folded Reload
+ adcq -48(%rsp), %r14 # 8-byte Folded Reload
+ adcq -40(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -56(%rsp) # 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -72(%rsp) # 8-byte Spill
+ movq 24(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rdi
+ movq %rdi, %rax
+ mulq 16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rax, -80(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, %r13
+ movq %rdi, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %r10
+ movq %rdi, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rax, -64(%rsp) # 8-byte Spill
+ movq %rdx, %rdi
+ addq %r10, %rdi
+ adcq %r13, %r15
+ adcq -112(%rsp), %r9 # 8-byte Folded Reload
+ movq %rcx, %rdx
+ adcq -104(%rsp), %rdx # 8-byte Folded Reload
+ adcq -88(%rsp), %rsi # 8-byte Folded Reload
+ movq -48(%rsp), %rax # 8-byte Reload
+ adcq -80(%rsp), %rax # 8-byte Folded Reload
+ movq -40(%rsp), %rcx # 8-byte Reload
+ adcq $0, %rcx
+ movq -64(%rsp), %r10 # 8-byte Reload
+ addq %r12, %r10
+ movq %r10, -64(%rsp) # 8-byte Spill
+ adcq %rbp, %rdi
+ adcq %r8, %r15
+ adcq %rbx, %r9
+ adcq %r11, %rdx
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ adcq %r14, %rsi
+ movq %rsi, -80(%rsp) # 8-byte Spill
+ adcq -56(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -48(%rsp) # 8-byte Spill
+ adcq -72(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -40(%rsp) # 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq %r10, %rbp
+ imulq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, %rax
+ mulq 64(%rsp) # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq 88(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq 80(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq 72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %rbx
+ movq %rbp, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r10
+ movq %rbp, %rax
+ mulq 56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r14
+ movq %rbp, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ addq %r14, %r11
+ adcq %r10, %rsi
+ adcq %rbx, %rcx
+ adcq -120(%rsp), %r13 # 8-byte Folded Reload
+ adcq -112(%rsp), %r12 # 8-byte Folded Reload
+ adcq -104(%rsp), %r8 # 8-byte Folded Reload
+ movq -72(%rsp), %rbp # 8-byte Reload
+ adcq $0, %rbp
+ addq -64(%rsp), %rax # 8-byte Folded Reload
+ adcq %rdi, %r11
+ adcq %r15, %rsi
+ adcq %r9, %rcx
+ adcq -88(%rsp), %r13 # 8-byte Folded Reload
+ adcq -80(%rsp), %r12 # 8-byte Folded Reload
+ adcq -48(%rsp), %r8 # 8-byte Folded Reload
+ movq %r8, -80(%rsp) # 8-byte Spill
+ adcq -40(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, -72(%rsp) # 8-byte Spill
+ movq -56(%rsp), %rbp # 8-byte Reload
+ adcq $0, %rbp
+ movq 24(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rdi
+ movq %rdi, %rax
+ mulq 16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %r9
+ movq %rdi, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r14
+ movq %rdi, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rax, %r10
+ movq %rdx, %r8
+ addq %r14, %r8
+ adcq %r9, %rbx
+ adcq -120(%rsp), %r15 # 8-byte Folded Reload
+ movq -64(%rsp), %r9 # 8-byte Reload
+ adcq -112(%rsp), %r9 # 8-byte Folded Reload
+ movq -56(%rsp), %rdi # 8-byte Reload
+ adcq -104(%rsp), %rdi # 8-byte Folded Reload
+ movq -48(%rsp), %rdx # 8-byte Reload
+ adcq -88(%rsp), %rdx # 8-byte Folded Reload
+ movq -40(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ addq %r11, %r10
+ adcq %rsi, %r8
+ adcq %rcx, %rbx
+ adcq %r13, %r15
+ adcq %r12, %r9
+ movq %r9, -64(%rsp) # 8-byte Spill
+ adcq -80(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, -56(%rsp) # 8-byte Spill
+ adcq -72(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ adcq %rbp, %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %r10, %rbp
+ imulq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, %rax
+ mulq 64(%rsp) # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq 88(%rsp) # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq 80(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq 72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, %rcx
+ movq %rbp, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r13
+ movq %rbp, %rax
+ mulq 56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r12
+ movq %rbp, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ addq %r12, %r14
+ adcq %r13, %rsi
+ adcq %rcx, %rdi
+ adcq -120(%rsp), %r9 # 8-byte Folded Reload
+ adcq -112(%rsp), %r11 # 8-byte Folded Reload
+ movq -80(%rsp), %rdx # 8-byte Reload
+ adcq -104(%rsp), %rdx # 8-byte Folded Reload
+ movq -72(%rsp), %rcx # 8-byte Reload
+ adcq $0, %rcx
+ addq %r10, %rax
+ adcq %r8, %r14
+ adcq %rbx, %rsi
+ adcq %r15, %rdi
+ adcq -64(%rsp), %r9 # 8-byte Folded Reload
+ adcq -56(%rsp), %r11 # 8-byte Folded Reload
+ adcq -48(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ adcq -40(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -72(%rsp) # 8-byte Spill
+ adcq $0, -88(%rsp) # 8-byte Folded Spill
+ movq 24(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rcx
+ movq %rcx, %rax
+ mulq 16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rax, -64(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r8
+ movq %rcx, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rax, %r10
+ movq %rdx, %r13
+ addq %r12, %r13
+ adcq %r8, %rbp
+ adcq -120(%rsp), %rbx # 8-byte Folded Reload
+ adcq -112(%rsp), %r15 # 8-byte Folded Reload
+ movq -56(%rsp), %rdx # 8-byte Reload
+ adcq -104(%rsp), %rdx # 8-byte Folded Reload
+ movq -48(%rsp), %rcx # 8-byte Reload
+ adcq -64(%rsp), %rcx # 8-byte Folded Reload
+ movq -40(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ addq %r14, %r10
+ adcq %rsi, %r13
+ adcq %rdi, %rbp
+ adcq %r9, %rbx
+ adcq %r11, %r15
+ adcq -80(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ adcq -72(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -48(%rsp) # 8-byte Spill
+ adcq -88(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -40(%rsp) # 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -64(%rsp) # 8-byte Spill
+ movq %r10, %rsi
+ imulq 32(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, %rax
+ mulq 64(%rsp) # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq 88(%rsp) # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq 80(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq 72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r9
+ movq %rsi, %rax
+ mulq 56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r11
+ movq %rsi, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ addq %r11, %r14
+ adcq %r9, %r8
+ adcq -120(%rsp), %rcx # 8-byte Folded Reload
+ adcq -112(%rsp), %rdi # 8-byte Folded Reload
+ adcq -104(%rsp), %r12 # 8-byte Folded Reload
+ movq -80(%rsp), %rsi # 8-byte Reload
+ adcq -88(%rsp), %rsi # 8-byte Folded Reload
+ movq -72(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq %r10, %rax
+ adcq %r13, %r14
+ adcq %rbp, %r8
+ adcq %rbx, %rcx
+ adcq %r15, %rdi
+ adcq -56(%rsp), %r12 # 8-byte Folded Reload
+ adcq -48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, -80(%rsp) # 8-byte Spill
+ adcq -40(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq -64(%rsp), %r11 # 8-byte Reload
+ adcq $0, %r11
+ movq 24(%rsp), %rax # 8-byte Reload
+ movq 32(%rax), %rbp
+ movq %rbp, %rax
+ mulq 16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r9
+ movq %rbp, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %r15
+ movq %rbp, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rax, %rsi
+ movq %rdx, %r10
+ addq %r15, %r10
+ adcq %r9, %r13
+ adcq -120(%rsp), %rbx # 8-byte Folded Reload
+ movq -64(%rsp), %r15 # 8-byte Reload
+ adcq -112(%rsp), %r15 # 8-byte Folded Reload
+ movq -56(%rsp), %rbp # 8-byte Reload
+ adcq -104(%rsp), %rbp # 8-byte Folded Reload
+ movq -48(%rsp), %rdx # 8-byte Reload
+ adcq -88(%rsp), %rdx # 8-byte Folded Reload
+ movq -40(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ movq %rsi, %r9
+ addq %r14, %r9
+ adcq %r8, %r10
+ adcq %rcx, %r13
+ adcq %rdi, %rbx
+ adcq %r12, %r15
+ movq %r15, -64(%rsp) # 8-byte Spill
+ adcq -80(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, -56(%rsp) # 8-byte Spill
+ adcq -72(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ adcq %r11, %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %r9, %rsi
+ movq %r9, %r11
+ imulq 32(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, %rax
+ mulq 64(%rsp) # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq 88(%rsp) # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq 80(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq 72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, %rdi
+ movq %rsi, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r12
+ movq %rsi, %rax
+ mulq 56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r15
+ movq %rsi, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ addq %r15, %r14
+ adcq %r12, %rcx
+ adcq %rdi, %rbp
+ adcq -120(%rsp), %r9 # 8-byte Folded Reload
+ adcq -112(%rsp), %r8 # 8-byte Folded Reload
+ movq -80(%rsp), %rsi # 8-byte Reload
+ adcq -104(%rsp), %rsi # 8-byte Folded Reload
+ movq -72(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq %r11, %rax
+ adcq %r10, %r14
+ adcq %r13, %rcx
+ adcq %rbx, %rbp
+ adcq -64(%rsp), %r9 # 8-byte Folded Reload
+ adcq -56(%rsp), %r8 # 8-byte Folded Reload
+ adcq -48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, -80(%rsp) # 8-byte Spill
+ adcq -40(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ adcq $0, -88(%rsp) # 8-byte Folded Spill
+ movq 24(%rsp), %rax # 8-byte Reload
+ movq 40(%rax), %rdi
+ movq %rdi, %rax
+ mulq 16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rax, -64(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %r10
+ movq %rdi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r15
+ movq %rdi, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r13
+ movq %rdi, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rax, %rdi
+ movq %rdx, %r11
+ addq %r13, %r11
+ adcq %r15, %rsi
+ adcq %r10, %rbx
+ adcq -112(%rsp), %r12 # 8-byte Folded Reload
+ movq -56(%rsp), %r10 # 8-byte Reload
+ adcq -104(%rsp), %r10 # 8-byte Folded Reload
+ movq -48(%rsp), %rdx # 8-byte Reload
+ adcq -64(%rsp), %rdx # 8-byte Folded Reload
+ movq -40(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ addq %r14, %rdi
+ adcq %rcx, %r11
+ adcq %rbp, %rsi
+ adcq %r9, %rbx
+ adcq %r8, %r12
+ adcq -80(%rsp), %r10 # 8-byte Folded Reload
+ movq %r10, -56(%rsp) # 8-byte Spill
+ adcq -72(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ adcq -88(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -40(%rsp) # 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -80(%rsp) # 8-byte Spill
+ movq %rdi, %rbp
+ imulq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, %rax
+ mulq 64(%rsp) # 8-byte Folded Reload
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq 88(%rsp) # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq 80(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq 72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %r8
+ movq %rbp, %rax
+ mulq 56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %r9
+ movq %rbp, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ addq %r9, %r15
+ adcq %r8, %r13
+ adcq -120(%rsp), %r10 # 8-byte Folded Reload
+ adcq -112(%rsp), %rcx # 8-byte Folded Reload
+ adcq -104(%rsp), %r14 # 8-byte Folded Reload
+ movq -72(%rsp), %rdx # 8-byte Reload
+ adcq -88(%rsp), %rdx # 8-byte Folded Reload
+ movq -64(%rsp), %r8 # 8-byte Reload
+ adcq $0, %r8
+ addq %rdi, %rax
+ adcq %r11, %r15
+ adcq %rsi, %r13
+ adcq %rbx, %r10
+ adcq %r12, %rcx
+ adcq -56(%rsp), %r14 # 8-byte Folded Reload
+ movq %r14, -56(%rsp) # 8-byte Spill
+ adcq -48(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ adcq -40(%rsp), %r8 # 8-byte Folded Reload
+ movq %r8, -64(%rsp) # 8-byte Spill
+ movq -80(%rsp), %rsi # 8-byte Reload
+ adcq $0, %rsi
+ movq 24(%rsp), %rax # 8-byte Reload
+ movq 48(%rax), %rdi
+ movq %rdi, %rax
+ mulq 16(%rsp) # 8-byte Folded Reload
+ movq %rdx, 16(%rsp) # 8-byte Spill
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, 24(%rsp) # 8-byte Spill
+ movq %rax, 8(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -32(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r12
+ movq %rdi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, %rbx
+ movq %rdi, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, %rbp
+ movq %rdi, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ addq %rbp, %rdx
+ movq %rdx, %rbp
+ adcq %rbx, %r9
+ adcq %r12, %r14
+ movq %r8, %rdi
+ adcq -32(%rsp), %rdi # 8-byte Folded Reload
+ adcq 8(%rsp), %r11 # 8-byte Folded Reload
+ movq 24(%rsp), %rbx # 8-byte Reload
+ adcq -40(%rsp), %rbx # 8-byte Folded Reload
+ movq 16(%rsp), %r8 # 8-byte Reload
+ adcq $0, %r8
+ addq %r15, %rax
+ movq %rax, -32(%rsp) # 8-byte Spill
+ adcq %r13, %rbp
+ movq %rbp, 8(%rsp) # 8-byte Spill
+ adcq %r10, %r9
+ movq %r9, (%rsp) # 8-byte Spill
+ adcq %rcx, %r14
+ movq %r14, -8(%rsp) # 8-byte Spill
+ adcq -56(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, -16(%rsp) # 8-byte Spill
+ adcq -72(%rsp), %r11 # 8-byte Folded Reload
+ movq %r11, -24(%rsp) # 8-byte Spill
+ adcq -64(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, 24(%rsp) # 8-byte Spill
+ adcq %rsi, %r8
+ movq %r8, 16(%rsp) # 8-byte Spill
+ sbbq %rcx, %rcx
+ movq 32(%rsp), %r10 # 8-byte Reload
+ imulq %rax, %r10
+ andl $1, %ecx
+ movq %r10, %rax
+ mulq 64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, 32(%rsp) # 8-byte Spill
+ movq %r10, %rax
+ mulq 88(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq %r10, %rax
+ mulq 80(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, -48(%rsp) # 8-byte Spill
+ movq %r10, %rax
+ mulq 72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq %r10, %rax
+ movq 48(%rsp), %r13 # 8-byte Reload
+ mulq %r13
+ movq %rdx, %rbp
+ movq %rax, %r12
+ movq %r10, %rax
+ movq 40(%rsp), %r15 # 8-byte Reload
+ mulq %r15
+ movq %rdx, %r11
+ movq %rax, %r8
+ movq %r10, %rax
+ movq 56(%rsp), %r14 # 8-byte Reload
+ mulq %r14
+ addq %r11, %rax
+ adcq %r12, %rdx
+ adcq -56(%rsp), %rbp # 8-byte Folded Reload
+ adcq -48(%rsp), %rsi # 8-byte Folded Reload
+ adcq -40(%rsp), %rdi # 8-byte Folded Reload
+ adcq 32(%rsp), %r9 # 8-byte Folded Reload
+ adcq $0, %rbx
+ addq -32(%rsp), %r8 # 8-byte Folded Reload
+ adcq 8(%rsp), %rax # 8-byte Folded Reload
+ adcq (%rsp), %rdx # 8-byte Folded Reload
+ adcq -8(%rsp), %rbp # 8-byte Folded Reload
+ adcq -16(%rsp), %rsi # 8-byte Folded Reload
+ adcq -24(%rsp), %rdi # 8-byte Folded Reload
+ adcq 24(%rsp), %r9 # 8-byte Folded Reload
+ adcq 16(%rsp), %rbx # 8-byte Folded Reload
+ adcq $0, %rcx
+ movq %rax, %r8
+ subq %r15, %r8
+ movq %rdx, %r10
+ sbbq %r14, %r10
+ movq %rbp, %r11
+ sbbq %r13, %r11
+ movq %rsi, %r14
+ sbbq 72(%rsp), %r14 # 8-byte Folded Reload
+ movq %rdi, %r15
+ sbbq 80(%rsp), %r15 # 8-byte Folded Reload
+ movq %r9, %r12
+ sbbq 88(%rsp), %r12 # 8-byte Folded Reload
+ movq %rbx, %r13
+ sbbq 64(%rsp), %r13 # 8-byte Folded Reload
+ sbbq $0, %rcx
+ andl $1, %ecx
+ cmovneq %rbx, %r13
+ testb %cl, %cl
+ cmovneq %rax, %r8
+ movq -96(%rsp), %rax # 8-byte Reload
+ movq %r8, (%rax)
+ cmovneq %rdx, %r10
+ movq %r10, 8(%rax)
+ cmovneq %rbp, %r11
+ movq %r11, 16(%rax)
+ cmovneq %rsi, %r14
+ movq %r14, 24(%rax)
+ cmovneq %rdi, %r15
+ movq %r15, 32(%rax)
+ cmovneq %r9, %r12
+ movq %r12, 40(%rax)
+ movq %r13, 48(%rax)
+ addq $96, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end98:
+ .size mcl_fp_mont7L, .Lfunc_end98-mcl_fp_mont7L
+
+ .globl mcl_fp_montNF7L
+ .align 16, 0x90
+ .type mcl_fp_montNF7L,@function
+mcl_fp_montNF7L: # @mcl_fp_montNF7L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $80, %rsp
+ movq %rdx, 16(%rsp) # 8-byte Spill
+ movq %rdi, -96(%rsp) # 8-byte Spill
+ movq 48(%rsi), %rax
+ movq %rax, 8(%rsp) # 8-byte Spill
+ movq (%rdx), %rbp
+ mulq %rbp
+ movq %rax, 72(%rsp) # 8-byte Spill
+ movq %rdx, %r9
+ movq 40(%rsi), %rax
+ movq %rax, (%rsp) # 8-byte Spill
+ mulq %rbp
+ movq %rax, 64(%rsp) # 8-byte Spill
+ movq %rdx, %r11
+ movq 32(%rsi), %rax
+ movq %rax, -48(%rsp) # 8-byte Spill
+ movq 24(%rsi), %r8
+ movq %r8, -40(%rsp) # 8-byte Spill
+ movq 16(%rsi), %rbx
+ movq %rbx, -32(%rsp) # 8-byte Spill
+ movq (%rsi), %r10
+ movq %r10, -16(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rsi
+ movq %rsi, -24(%rsp) # 8-byte Spill
+ mulq %rbp
+ movq %rdx, %rdi
+ movq %rax, 56(%rsp) # 8-byte Spill
+ movq %r8, %rax
+ mulq %rbp
+ movq %rdx, %r14
+ movq %rax, %r15
+ movq %rbx, %rax
+ mulq %rbp
+ movq %rdx, %rbx
+ movq %rax, %r13
+ movq %rsi, %rax
+ mulq %rbp
+ movq %rdx, %rsi
+ movq %rax, %r12
+ movq %r10, %rax
+ mulq %rbp
+ movq %rdx, %r8
+ addq %r12, %r8
+ adcq %r13, %rsi
+ movq %rsi, -104(%rsp) # 8-byte Spill
+ adcq %r15, %rbx
+ movq %rbx, -88(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %r14 # 8-byte Folded Reload
+ movq %r14, %r12
+ adcq 64(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, -80(%rsp) # 8-byte Spill
+ adcq 72(%rsp), %r11 # 8-byte Folded Reload
+ movq %r11, -56(%rsp) # 8-byte Spill
+ adcq $0, %r9
+ movq %r9, -64(%rsp) # 8-byte Spill
+ movq -8(%rcx), %rdx
+ movq %rdx, 24(%rsp) # 8-byte Spill
+ movq %rax, %r9
+ movq %rax, %r14
+ imulq %rdx, %r9
+ movq (%rcx), %r11
+ movq %r11, 32(%rsp) # 8-byte Spill
+ movq 48(%rcx), %rdx
+ movq %rdx, 72(%rsp) # 8-byte Spill
+ movq 40(%rcx), %r10
+ movq %r10, 64(%rsp) # 8-byte Spill
+ movq 32(%rcx), %rbp
+ movq %rbp, 56(%rsp) # 8-byte Spill
+ movq 24(%rcx), %rbx
+ movq %rbx, 48(%rsp) # 8-byte Spill
+ movq 16(%rcx), %rdi
+ movq %rdi, 40(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rsi
+ movq %rsi, -8(%rsp) # 8-byte Spill
+ movq %r9, %rax
+ mulq %rdx
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ movq %rax, -72(%rsp) # 8-byte Spill
+ movq %r9, %rax
+ mulq %r10
+ movq %rdx, -120(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq %r9, %rax
+ mulq %rbp
+ movq %rdx, -128(%rsp) # 8-byte Spill
+ movq %rax, %r13
+ movq %r9, %rax
+ mulq %rbx
+ movq %rdx, %rbx
+ movq %rax, %rbp
+ movq %r9, %rax
+ mulq %rdi
+ movq %rdx, %rcx
+ movq %rax, %rdi
+ movq %r9, %rax
+ mulq %rsi
+ movq %rdx, %r10
+ movq %rax, %rsi
+ movq %r9, %rax
+ mulq %r11
+ addq %r14, %rax
+ adcq %r8, %rsi
+ adcq -104(%rsp), %rdi # 8-byte Folded Reload
+ adcq -88(%rsp), %rbp # 8-byte Folded Reload
+ adcq %r12, %r13
+ adcq -80(%rsp), %r15 # 8-byte Folded Reload
+ movq -72(%rsp), %r8 # 8-byte Reload
+ adcq -56(%rsp), %r8 # 8-byte Folded Reload
+ movq -64(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ addq %rdx, %rsi
+ adcq %r10, %rdi
+ adcq %rcx, %rbp
+ adcq %rbx, %r13
+ adcq -128(%rsp), %r15 # 8-byte Folded Reload
+ adcq -120(%rsp), %r8 # 8-byte Folded Reload
+ movq %r8, -72(%rsp) # 8-byte Spill
+ adcq -112(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -64(%rsp) # 8-byte Spill
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rbx
+ movq %rbx, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -128(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r14
+ movq %rbx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rax, %r10
+ movq %rdx, %r12
+ addq %r14, %r12
+ adcq -128(%rsp), %rcx # 8-byte Folded Reload
+ adcq -120(%rsp), %r9 # 8-byte Folded Reload
+ adcq -112(%rsp), %r8 # 8-byte Folded Reload
+ adcq -104(%rsp), %r11 # 8-byte Folded Reload
+ movq -80(%rsp), %rdx # 8-byte Reload
+ adcq -88(%rsp), %rdx # 8-byte Folded Reload
+ movq -56(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ addq %rsi, %r10
+ adcq %rdi, %r12
+ adcq %rbp, %rcx
+ adcq %r13, %r9
+ adcq %r15, %r8
+ adcq -72(%rsp), %r11 # 8-byte Folded Reload
+ adcq -64(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ adcq $0, %rax
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq %r10, %rbx
+ imulq 24(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, %rax
+ mulq 72(%rsp) # 8-byte Folded Reload
+ movq %rdx, -104(%rsp) # 8-byte Spill
+ movq %rax, -64(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq 64(%rsp) # 8-byte Folded Reload
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ movq %rax, -72(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq 56(%rsp) # 8-byte Folded Reload
+ movq %rdx, -120(%rsp) # 8-byte Spill
+ movq %rax, %r14
+ movq %rbx, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, -128(%rsp) # 8-byte Spill
+ movq %rax, %rdi
+ movq %rbx, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %rbp
+ movq %rbx, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %rsi
+ movq %rbx, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ addq %r10, %rax
+ adcq %r12, %rsi
+ adcq %rcx, %rbp
+ adcq %r9, %rdi
+ adcq %r8, %r14
+ movq -72(%rsp), %rcx # 8-byte Reload
+ adcq %r11, %rcx
+ movq -64(%rsp), %r8 # 8-byte Reload
+ adcq -80(%rsp), %r8 # 8-byte Folded Reload
+ movq -56(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ addq %rdx, %rsi
+ adcq %r13, %rbp
+ adcq %r15, %rdi
+ movq %rdi, -88(%rsp) # 8-byte Spill
+ adcq -128(%rsp), %r14 # 8-byte Folded Reload
+ movq %r14, -80(%rsp) # 8-byte Spill
+ adcq -120(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -72(%rsp) # 8-byte Spill
+ adcq -112(%rsp), %r8 # 8-byte Folded Reload
+ movq %r8, -64(%rsp) # 8-byte Spill
+ adcq -104(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rcx
+ movq %rcx, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r9
+ movq %rcx, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %r11
+ movq %rcx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rax, %r13
+ movq %rdx, %rcx
+ addq %r11, %rcx
+ adcq %r9, %r15
+ adcq %r12, %rbx
+ adcq -120(%rsp), %rdi # 8-byte Folded Reload
+ adcq -112(%rsp), %r14 # 8-byte Folded Reload
+ adcq -104(%rsp), %r10 # 8-byte Folded Reload
+ adcq $0, %r8
+ addq %rsi, %r13
+ adcq %rbp, %rcx
+ adcq -88(%rsp), %r15 # 8-byte Folded Reload
+ adcq -80(%rsp), %rbx # 8-byte Folded Reload
+ adcq -72(%rsp), %rdi # 8-byte Folded Reload
+ adcq -64(%rsp), %r14 # 8-byte Folded Reload
+ adcq -56(%rsp), %r10 # 8-byte Folded Reload
+ adcq $0, %r8
+ movq %r13, %r9
+ imulq 24(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, %rax
+ mulq 72(%rsp) # 8-byte Folded Reload
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, -64(%rsp) # 8-byte Spill
+ movq %r9, %rax
+ mulq 64(%rsp) # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq %rax, -72(%rsp) # 8-byte Spill
+ movq %r9, %rax
+ mulq 56(%rsp) # 8-byte Folded Reload
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ movq %r9, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, -104(%rsp) # 8-byte Spill
+ movq %rax, %r11
+ movq %r9, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ movq %rax, %r12
+ movq %r9, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, -120(%rsp) # 8-byte Spill
+ movq %rax, %rsi
+ movq %r9, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ addq %r13, %rax
+ adcq %rcx, %rsi
+ adcq %r15, %r12
+ adcq %rbx, %r11
+ adcq %rdi, %rbp
+ movq -72(%rsp), %rcx # 8-byte Reload
+ adcq %r14, %rcx
+ movq -64(%rsp), %rax # 8-byte Reload
+ adcq %r10, %rax
+ adcq $0, %r8
+ addq %rdx, %rsi
+ adcq -120(%rsp), %r12 # 8-byte Folded Reload
+ adcq -112(%rsp), %r11 # 8-byte Folded Reload
+ adcq -104(%rsp), %rbp # 8-byte Folded Reload
+ adcq -88(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -72(%rsp) # 8-byte Spill
+ adcq -80(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -64(%rsp) # 8-byte Spill
+ adcq -56(%rsp), %r8 # 8-byte Folded Reload
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rdi
+ movq %rdi, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, -80(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %rbx
+ movq %rdi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r9
+ movq %rdi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rax, %r14
+ movq %rdx, %rdi
+ addq %r9, %rdi
+ adcq %rbx, %rcx
+ adcq -120(%rsp), %r10 # 8-byte Folded Reload
+ adcq -112(%rsp), %r13 # 8-byte Folded Reload
+ adcq -104(%rsp), %r15 # 8-byte Folded Reload
+ movq -88(%rsp), %rdx # 8-byte Reload
+ adcq -80(%rsp), %rdx # 8-byte Folded Reload
+ movq -56(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ addq %rsi, %r14
+ adcq %r12, %rdi
+ adcq %r11, %rcx
+ adcq %rbp, %r10
+ adcq -72(%rsp), %r13 # 8-byte Folded Reload
+ adcq -64(%rsp), %r15 # 8-byte Folded Reload
+ adcq %r8, %rdx
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ adcq $0, %rax
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq %r14, %rsi
+ imulq 24(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, %rax
+ mulq 72(%rsp) # 8-byte Folded Reload
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, -72(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq 64(%rsp) # 8-byte Folded Reload
+ movq %rdx, -104(%rsp) # 8-byte Spill
+ movq %rax, -80(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq 56(%rsp) # 8-byte Folded Reload
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ movq %rax, %r12
+ movq %rsi, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, -120(%rsp) # 8-byte Spill
+ movq %rax, %r8
+ movq %rsi, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, %rbp
+ movq %rsi, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, %rbx
+ movq %rsi, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ addq %r14, %rax
+ adcq %rdi, %rbx
+ adcq %rcx, %rbp
+ adcq %r10, %r8
+ adcq %r13, %r12
+ movq -80(%rsp), %rsi # 8-byte Reload
+ adcq %r15, %rsi
+ movq -72(%rsp), %rcx # 8-byte Reload
+ adcq -88(%rsp), %rcx # 8-byte Folded Reload
+ movq -56(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ addq %rdx, %rbx
+ adcq %r9, %rbp
+ adcq %r11, %r8
+ adcq -120(%rsp), %r12 # 8-byte Folded Reload
+ adcq -112(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, -80(%rsp) # 8-byte Spill
+ adcq -104(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -72(%rsp) # 8-byte Spill
+ adcq -64(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 32(%rax), %rsi
+ movq %rsi, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -128(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r14
+ movq %rsi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r15
+ movq %rsi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rax, %r11
+ movq %rdx, %r10
+ addq %r15, %r10
+ adcq %r14, %rdi
+ adcq -128(%rsp), %rcx # 8-byte Folded Reload
+ adcq -120(%rsp), %r9 # 8-byte Folded Reload
+ adcq -112(%rsp), %r13 # 8-byte Folded Reload
+ movq -88(%rsp), %rdx # 8-byte Reload
+ adcq -104(%rsp), %rdx # 8-byte Folded Reload
+ movq -64(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ addq %rbx, %r11
+ adcq %rbp, %r10
+ adcq %r8, %rdi
+ adcq %r12, %rcx
+ adcq -80(%rsp), %r9 # 8-byte Folded Reload
+ adcq -72(%rsp), %r13 # 8-byte Folded Reload
+ adcq -56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ adcq $0, %rax
+ movq %rax, -64(%rsp) # 8-byte Spill
+ movq %r11, %rsi
+ imulq 24(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, %rax
+ mulq 72(%rsp) # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq 64(%rsp) # 8-byte Folded Reload
+ movq %rdx, -104(%rsp) # 8-byte Spill
+ movq %rax, -72(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq 56(%rsp) # 8-byte Folded Reload
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ movq %rax, %r14
+ movq %rsi, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, -120(%rsp) # 8-byte Spill
+ movq %rax, %r12
+ movq %rsi, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %rbp
+ movq %rsi, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %rbx
+ movq %rsi, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ addq %r11, %rax
+ adcq %r10, %rbx
+ adcq %rdi, %rbp
+ adcq %rcx, %r12
+ adcq %r9, %r14
+ movq -72(%rsp), %rdi # 8-byte Reload
+ adcq %r13, %rdi
+ movq -56(%rsp), %rcx # 8-byte Reload
+ adcq -88(%rsp), %rcx # 8-byte Folded Reload
+ movq -64(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ addq %rdx, %rbx
+ adcq %r8, %rbp
+ adcq %r15, %r12
+ adcq -120(%rsp), %r14 # 8-byte Folded Reload
+ movq %r14, -88(%rsp) # 8-byte Spill
+ adcq -112(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, -72(%rsp) # 8-byte Spill
+ adcq -104(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -56(%rsp) # 8-byte Spill
+ adcq -80(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -64(%rsp) # 8-byte Spill
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 40(%rax), %rcx
+ movq %rcx, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, -128(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r15
+ movq %rcx, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r14
+ movq %rcx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rax, %r11
+ movq %rdx, %r10
+ addq %r14, %r10
+ adcq %r15, %r8
+ adcq -128(%rsp), %rdi # 8-byte Folded Reload
+ adcq -120(%rsp), %rsi # 8-byte Folded Reload
+ adcq -112(%rsp), %r13 # 8-byte Folded Reload
+ movq -80(%rsp), %rax # 8-byte Reload
+ adcq -104(%rsp), %rax # 8-byte Folded Reload
+ adcq $0, %r9
+ addq %rbx, %r11
+ adcq %rbp, %r10
+ adcq %r12, %r8
+ adcq -88(%rsp), %rdi # 8-byte Folded Reload
+ adcq -72(%rsp), %rsi # 8-byte Folded Reload
+ adcq -56(%rsp), %r13 # 8-byte Folded Reload
+ adcq -64(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -80(%rsp) # 8-byte Spill
+ adcq $0, %r9
+ movq %r11, %rbx
+ imulq 24(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, %rax
+ mulq 72(%rsp) # 8-byte Folded Reload
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq 64(%rsp) # 8-byte Folded Reload
+ movq %rdx, -104(%rsp) # 8-byte Spill
+ movq %rax, -64(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq 56(%rsp) # 8-byte Folded Reload
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ movq %rax, %r12
+ movq %rbx, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq %rbx, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, -120(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ movq %rbx, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, %rcx
+ movq %rbx, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ addq %r11, %rax
+ adcq %r10, %rcx
+ adcq %r8, %rbp
+ adcq %rdi, %r15
+ adcq %rsi, %r12
+ movq -64(%rsp), %rsi # 8-byte Reload
+ adcq %r13, %rsi
+ movq -56(%rsp), %rax # 8-byte Reload
+ adcq -80(%rsp), %rax # 8-byte Folded Reload
+ adcq $0, %r9
+ addq %rdx, %rcx
+ adcq %r14, %rbp
+ adcq -120(%rsp), %r15 # 8-byte Folded Reload
+ adcq -72(%rsp), %r12 # 8-byte Folded Reload
+ movq %r12, -72(%rsp) # 8-byte Spill
+ adcq -112(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, -64(%rsp) # 8-byte Spill
+ adcq -104(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -56(%rsp) # 8-byte Spill
+ adcq -88(%rsp), %r9 # 8-byte Folded Reload
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 48(%rax), %rdi
+ movq %rdi, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -80(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, 16(%rsp) # 8-byte Spill
+ movq %rax, (%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, 8(%rsp) # 8-byte Spill
+ movq %rax, -48(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, %rsi
+ movq %rdi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %rbx
+ movq %rdi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rax, %r12
+ movq %rdx, %r8
+ addq %rbx, %r8
+ adcq %rsi, %r10
+ adcq -40(%rsp), %r11 # 8-byte Folded Reload
+ adcq -48(%rsp), %r13 # 8-byte Folded Reload
+ movq 8(%rsp), %rdx # 8-byte Reload
+ adcq (%rsp), %rdx # 8-byte Folded Reload
+ movq 16(%rsp), %rax # 8-byte Reload
+ adcq -80(%rsp), %rax # 8-byte Folded Reload
+ adcq $0, %r14
+ addq %rcx, %r12
+ adcq %rbp, %r8
+ adcq %r15, %r10
+ adcq -72(%rsp), %r11 # 8-byte Folded Reload
+ adcq -64(%rsp), %r13 # 8-byte Folded Reload
+ adcq -56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 8(%rsp) # 8-byte Spill
+ adcq %r9, %rax
+ movq %rax, 16(%rsp) # 8-byte Spill
+ adcq $0, %r14
+ movq 24(%rsp), %rdi # 8-byte Reload
+ imulq %r12, %rdi
+ movq %rdi, %rax
+ mulq 72(%rsp) # 8-byte Folded Reload
+ movq %rdx, 24(%rsp) # 8-byte Spill
+ movq %rax, %r9
+ movq %rdi, %rax
+ mulq 64(%rsp) # 8-byte Folded Reload
+ movq %rdx, (%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ movq %rdi, %rax
+ mulq 56(%rsp) # 8-byte Folded Reload
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq %rax, %rsi
+ movq %rdi, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq %rax, %rcx
+ movq %rdi, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq %rdi, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rax, %rbx
+ movq %rdi, %rax
+ movq -8(%rsp), %rdi # 8-byte Reload
+ mulq %rdi
+ addq %r12, %r15
+ adcq %r8, %rax
+ adcq %r10, %rbx
+ adcq %r11, %rcx
+ adcq %r13, %rsi
+ adcq 8(%rsp), %rbp # 8-byte Folded Reload
+ adcq 16(%rsp), %r9 # 8-byte Folded Reload
+ adcq $0, %r14
+ addq -32(%rsp), %rax # 8-byte Folded Reload
+ adcq %rdx, %rbx
+ adcq -40(%rsp), %rcx # 8-byte Folded Reload
+ adcq -24(%rsp), %rsi # 8-byte Folded Reload
+ adcq -16(%rsp), %rbp # 8-byte Folded Reload
+ adcq (%rsp), %r9 # 8-byte Folded Reload
+ adcq 24(%rsp), %r14 # 8-byte Folded Reload
+ movq %rax, %r13
+ subq 32(%rsp), %r13 # 8-byte Folded Reload
+ movq %rbx, %r12
+ sbbq %rdi, %r12
+ movq %rcx, %r8
+ sbbq 40(%rsp), %r8 # 8-byte Folded Reload
+ movq %rsi, %r10
+ sbbq 48(%rsp), %r10 # 8-byte Folded Reload
+ movq %rbp, %r11
+ sbbq 56(%rsp), %r11 # 8-byte Folded Reload
+ movq %r9, %r15
+ sbbq 64(%rsp), %r15 # 8-byte Folded Reload
+ movq %r14, %rdx
+ sbbq 72(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, %rdi
+ sarq $63, %rdi
+ cmovsq %rax, %r13
+ movq -96(%rsp), %rax # 8-byte Reload
+ movq %r13, (%rax)
+ cmovsq %rbx, %r12
+ movq %r12, 8(%rax)
+ cmovsq %rcx, %r8
+ movq %r8, 16(%rax)
+ cmovsq %rsi, %r10
+ movq %r10, 24(%rax)
+ cmovsq %rbp, %r11
+ movq %r11, 32(%rax)
+ cmovsq %r9, %r15
+ movq %r15, 40(%rax)
+ cmovsq %r14, %rdx
+ movq %rdx, 48(%rax)
+ addq $80, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end99:
+ .size mcl_fp_montNF7L, .Lfunc_end99-mcl_fp_montNF7L
+
+ .globl mcl_fp_montRed7L
+ .align 16, 0x90
+ .type mcl_fp_montRed7L,@function
+mcl_fp_montRed7L: # @mcl_fp_montRed7L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $64, %rsp
+ movq %rdx, %rcx
+ movq %rdi, -104(%rsp) # 8-byte Spill
+ movq -8(%rcx), %rax
+ movq %rax, 8(%rsp) # 8-byte Spill
+ movq (%rcx), %rdx
+ movq %rdx, 32(%rsp) # 8-byte Spill
+ movq (%rsi), %rbp
+ movq %rbp, 24(%rsp) # 8-byte Spill
+ imulq %rax, %rbp
+ movq 48(%rcx), %rdx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdx
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq 40(%rcx), %rdx
+ movq %rdx, (%rsp) # 8-byte Spill
+ movq 32(%rcx), %r10
+ movq %r10, 56(%rsp) # 8-byte Spill
+ movq 24(%rcx), %rdi
+ movq %rdi, 48(%rsp) # 8-byte Spill
+ movq 16(%rcx), %rbx
+ movq %rbx, 40(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, 16(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdx
+ movq %rdx, %r13
+ movq %rax, %r9
+ movq %rbp, %rax
+ mulq %r10
+ movq %rdx, %r15
+ movq %rax, %r11
+ movq %rbp, %rax
+ mulq %rdi
+ movq %rdx, %r10
+ movq %rax, %r8
+ movq %rbp, %rax
+ mulq %rbx
+ movq %rdx, %r14
+ movq %rax, %rbx
+ movq %rbp, %rax
+ mulq %rcx
+ movq %rdx, %r12
+ movq %rax, %rdi
+ movq %rbp, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbp
+ addq %rdi, %rbp
+ adcq %rbx, %r12
+ adcq %r8, %r14
+ adcq %r11, %r10
+ adcq %r9, %r15
+ adcq -8(%rsp), %r13 # 8-byte Folded Reload
+ movq -48(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq 24(%rsp), %rax # 8-byte Folded Reload
+ adcq 8(%rsi), %rbp
+ adcq 16(%rsi), %r12
+ adcq 24(%rsi), %r14
+ adcq 32(%rsi), %r10
+ adcq 40(%rsi), %r15
+ adcq 48(%rsi), %r13
+ movq %r13, -80(%rsp) # 8-byte Spill
+ adcq 56(%rsi), %rdx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq 104(%rsi), %r8
+ movq 96(%rsi), %rdx
+ movq 88(%rsi), %rdi
+ movq 80(%rsi), %rbx
+ movq 72(%rsi), %rax
+ movq 64(%rsi), %rsi
+ adcq $0, %rsi
+ movq %rsi, -88(%rsp) # 8-byte Spill
+ adcq $0, %rax
+ movq %rax, -96(%rsp) # 8-byte Spill
+ adcq $0, %rbx
+ movq %rbx, -40(%rsp) # 8-byte Spill
+ adcq $0, %rdi
+ movq %rdi, -32(%rsp) # 8-byte Spill
+ adcq $0, %rdx
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, -8(%rsp) # 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, 24(%rsp) # 8-byte Spill
+ movq %rbp, %rdi
+ imulq 8(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq 56(%rsp) # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, -128(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %rbx
+ movq %rdi, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r11
+ movq %rdi, %rax
+ mulq %rcx
+ movq %rdx, %r8
+ movq %rax, %rcx
+ movq %rdi, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ addq %rcx, %r9
+ adcq %r11, %r8
+ adcq %rbx, %rsi
+ adcq -128(%rsp), %r13 # 8-byte Folded Reload
+ movq -72(%rsp), %rdi # 8-byte Reload
+ adcq -120(%rsp), %rdi # 8-byte Folded Reload
+ movq -64(%rsp), %rdx # 8-byte Reload
+ adcq -112(%rsp), %rdx # 8-byte Folded Reload
+ movq -56(%rsp), %rcx # 8-byte Reload
+ adcq $0, %rcx
+ addq %rbp, %rax
+ adcq %r12, %r9
+ adcq %r14, %r8
+ adcq %r10, %rsi
+ adcq %r15, %r13
+ adcq -80(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, -72(%rsp) # 8-byte Spill
+ adcq -48(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ adcq -88(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -56(%rsp) # 8-byte Spill
+ adcq $0, -96(%rsp) # 8-byte Folded Spill
+ adcq $0, -40(%rsp) # 8-byte Folded Spill
+ adcq $0, -32(%rsp) # 8-byte Folded Spill
+ adcq $0, -24(%rsp) # 8-byte Folded Spill
+ movq -8(%rsp), %rbx # 8-byte Reload
+ adcq $0, %rbx
+ adcq $0, 24(%rsp) # 8-byte Folded Spill
+ movq %r9, %rcx
+ imulq 8(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq 56(%rsp) # 8-byte Folded Reload
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %r11
+ movq %rcx, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq 16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r14
+ movq %rcx, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ addq %r14, %r10
+ adcq %r12, %rdi
+ adcq %r11, %rbp
+ adcq -120(%rsp), %r15 # 8-byte Folded Reload
+ movq -88(%rsp), %r11 # 8-byte Reload
+ adcq -112(%rsp), %r11 # 8-byte Folded Reload
+ movq -80(%rsp), %rdx # 8-byte Reload
+ adcq -8(%rsp), %rdx # 8-byte Folded Reload
+ movq -48(%rsp), %rcx # 8-byte Reload
+ adcq $0, %rcx
+ addq %r9, %rax
+ adcq %r8, %r10
+ adcq %rsi, %rdi
+ adcq %r13, %rbp
+ adcq -72(%rsp), %r15 # 8-byte Folded Reload
+ adcq -64(%rsp), %r11 # 8-byte Folded Reload
+ movq %r11, -88(%rsp) # 8-byte Spill
+ adcq -56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ adcq -96(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -48(%rsp) # 8-byte Spill
+ adcq $0, -40(%rsp) # 8-byte Folded Spill
+ adcq $0, -32(%rsp) # 8-byte Folded Spill
+ adcq $0, -24(%rsp) # 8-byte Folded Spill
+ adcq $0, %rbx
+ movq %rbx, -8(%rsp) # 8-byte Spill
+ adcq $0, 24(%rsp) # 8-byte Folded Spill
+ movq %r10, %rbx
+ imulq 8(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, %rax
+ movq -16(%rsp), %r12 # 8-byte Reload
+ mulq %r12
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq 56(%rsp) # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r14
+ movq %rbx, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r13
+ movq %rbx, %rax
+ mulq 16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r11
+ movq %rbx, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ addq %r11, %r9
+ adcq %r13, %rcx
+ adcq %r14, %rsi
+ adcq -120(%rsp), %r8 # 8-byte Folded Reload
+ movq -72(%rsp), %r11 # 8-byte Reload
+ adcq -112(%rsp), %r11 # 8-byte Folded Reload
+ movq -64(%rsp), %rbx # 8-byte Reload
+ adcq -96(%rsp), %rbx # 8-byte Folded Reload
+ movq -56(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq %r10, %rax
+ adcq %rdi, %r9
+ adcq %rbp, %rcx
+ adcq %r15, %rsi
+ adcq -88(%rsp), %r8 # 8-byte Folded Reload
+ adcq -80(%rsp), %r11 # 8-byte Folded Reload
+ movq %r11, -72(%rsp) # 8-byte Spill
+ adcq -48(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, -64(%rsp) # 8-byte Spill
+ adcq -40(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ adcq $0, -32(%rsp) # 8-byte Folded Spill
+ adcq $0, -24(%rsp) # 8-byte Folded Spill
+ adcq $0, -8(%rsp) # 8-byte Folded Spill
+ adcq $0, 24(%rsp) # 8-byte Folded Spill
+ movq %r9, %rbp
+ imulq 8(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, %rax
+ mulq %r12
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rax, -80(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq 56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %r14
+ movq %rbp, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r11
+ movq %rbp, %rax
+ mulq 16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r12
+ movq %rbp, %rax
+ movq 32(%rsp), %rbp # 8-byte Reload
+ mulq %rbp
+ movq %rdx, %r10
+ addq %r12, %r10
+ adcq %r11, %rbx
+ adcq %r14, %rdi
+ adcq -96(%rsp), %r13 # 8-byte Folded Reload
+ adcq -88(%rsp), %r15 # 8-byte Folded Reload
+ movq -48(%rsp), %r11 # 8-byte Reload
+ adcq -80(%rsp), %r11 # 8-byte Folded Reload
+ movq -40(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq %r9, %rax
+ adcq %rcx, %r10
+ adcq %rsi, %rbx
+ adcq %r8, %rdi
+ adcq -72(%rsp), %r13 # 8-byte Folded Reload
+ adcq -64(%rsp), %r15 # 8-byte Folded Reload
+ adcq -56(%rsp), %r11 # 8-byte Folded Reload
+ movq %r11, -48(%rsp) # 8-byte Spill
+ adcq -32(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ adcq $0, -24(%rsp) # 8-byte Folded Spill
+ adcq $0, -8(%rsp) # 8-byte Folded Spill
+ adcq $0, 24(%rsp) # 8-byte Folded Spill
+ movq %r10, %rsi
+ imulq 8(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rax, -72(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ movq (%rsp), %r8 # 8-byte Reload
+ mulq %r8
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, -80(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq 56(%rsp) # 8-byte Folded Reload
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, %r11
+ movq %rsi, %rax
+ mulq 16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, %rcx
+ movq %rsi, %rax
+ mulq %rbp
+ movq %rdx, %rbp
+ addq %rcx, %rbp
+ adcq %r11, %r14
+ adcq -96(%rsp), %r9 # 8-byte Folded Reload
+ adcq -88(%rsp), %r12 # 8-byte Folded Reload
+ movq -64(%rsp), %rsi # 8-byte Reload
+ adcq -80(%rsp), %rsi # 8-byte Folded Reload
+ movq -56(%rsp), %rdx # 8-byte Reload
+ adcq -72(%rsp), %rdx # 8-byte Folded Reload
+ movq -32(%rsp), %rcx # 8-byte Reload
+ adcq $0, %rcx
+ addq %r10, %rax
+ adcq %rbx, %rbp
+ adcq %rdi, %r14
+ adcq %r13, %r9
+ adcq %r15, %r12
+ adcq -48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, -64(%rsp) # 8-byte Spill
+ adcq -40(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ adcq -24(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -32(%rsp) # 8-byte Spill
+ adcq $0, -8(%rsp) # 8-byte Folded Spill
+ adcq $0, 24(%rsp) # 8-byte Folded Spill
+ movq 8(%rsp), %rcx # 8-byte Reload
+ imulq %rbp, %rcx
+ movq %rcx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, 8(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq %r8
+ movq %rdx, %r13
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq 56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, -48(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r8
+ movq %rcx, %rax
+ mulq 16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r11
+ movq %rcx, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ addq %r11, %rdx
+ adcq %r8, %rbx
+ adcq -48(%rsp), %rdi # 8-byte Folded Reload
+ adcq -40(%rsp), %r10 # 8-byte Folded Reload
+ adcq -24(%rsp), %r15 # 8-byte Folded Reload
+ adcq 8(%rsp), %r13 # 8-byte Folded Reload
+ adcq $0, %rsi
+ addq %rbp, %rax
+ adcq %r14, %rdx
+ adcq %r9, %rbx
+ adcq %r12, %rdi
+ adcq -64(%rsp), %r10 # 8-byte Folded Reload
+ adcq -56(%rsp), %r15 # 8-byte Folded Reload
+ adcq -32(%rsp), %r13 # 8-byte Folded Reload
+ adcq -8(%rsp), %rsi # 8-byte Folded Reload
+ movq 24(%rsp), %rcx # 8-byte Reload
+ adcq $0, %rcx
+ movq %rdx, %rax
+ subq 32(%rsp), %rax # 8-byte Folded Reload
+ movq %rbx, %rbp
+ sbbq 16(%rsp), %rbp # 8-byte Folded Reload
+ movq %rdi, %r8
+ sbbq 40(%rsp), %r8 # 8-byte Folded Reload
+ movq %r10, %r9
+ sbbq 48(%rsp), %r9 # 8-byte Folded Reload
+ movq %r15, %r11
+ sbbq 56(%rsp), %r11 # 8-byte Folded Reload
+ movq %r13, %r14
+ sbbq (%rsp), %r14 # 8-byte Folded Reload
+ movq %rsi, %r12
+ sbbq -16(%rsp), %r12 # 8-byte Folded Reload
+ sbbq $0, %rcx
+ andl $1, %ecx
+ cmovneq %rsi, %r12
+ testb %cl, %cl
+ cmovneq %rdx, %rax
+ movq -104(%rsp), %rcx # 8-byte Reload
+ movq %rax, (%rcx)
+ cmovneq %rbx, %rbp
+ movq %rbp, 8(%rcx)
+ cmovneq %rdi, %r8
+ movq %r8, 16(%rcx)
+ cmovneq %r10, %r9
+ movq %r9, 24(%rcx)
+ cmovneq %r15, %r11
+ movq %r11, 32(%rcx)
+ cmovneq %r13, %r14
+ movq %r14, 40(%rcx)
+ movq %r12, 48(%rcx)
+ addq $64, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end100:
+ .size mcl_fp_montRed7L, .Lfunc_end100-mcl_fp_montRed7L
+
+ .globl mcl_fp_addPre7L
+ .align 16, 0x90
+ .type mcl_fp_addPre7L,@function
+mcl_fp_addPre7L: # @mcl_fp_addPre7L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 48(%rdx), %r8
+ movq 48(%rsi), %r14
+ movq 40(%rdx), %r9
+ movq 40(%rsi), %r15
+ movq 32(%rdx), %r10
+ movq 24(%rdx), %r11
+ movq 16(%rdx), %r12
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ movq 24(%rsi), %rax
+ movq 32(%rsi), %rbx
+ adcq 16(%rsi), %r12
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r12, 16(%rdi)
+ adcq %r11, %rax
+ movq %rax, 24(%rdi)
+ adcq %r10, %rbx
+ movq %rbx, 32(%rdi)
+ adcq %r9, %r15
+ movq %r15, 40(%rdi)
+ adcq %r8, %r14
+ movq %r14, 48(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end101:
+ .size mcl_fp_addPre7L, .Lfunc_end101-mcl_fp_addPre7L
+
+ .globl mcl_fp_subPre7L
+ .align 16, 0x90
+ .type mcl_fp_subPre7L,@function
+mcl_fp_subPre7L: # @mcl_fp_subPre7L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 48(%rdx), %r8
+ movq 48(%rsi), %r10
+ movq 40(%rdx), %r9
+ movq 40(%rsi), %r15
+ movq 24(%rdx), %r11
+ movq 32(%rdx), %r14
+ movq (%rsi), %rbx
+ movq 8(%rsi), %r12
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %r12
+ movq 16(%rsi), %rcx
+ sbbq 16(%rdx), %rcx
+ movq 32(%rsi), %rdx
+ movq 24(%rsi), %rsi
+ movq %rbx, (%rdi)
+ movq %r12, 8(%rdi)
+ movq %rcx, 16(%rdi)
+ sbbq %r11, %rsi
+ movq %rsi, 24(%rdi)
+ sbbq %r14, %rdx
+ movq %rdx, 32(%rdi)
+ sbbq %r9, %r15
+ movq %r15, 40(%rdi)
+ sbbq %r8, %r10
+ movq %r10, 48(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end102:
+ .size mcl_fp_subPre7L, .Lfunc_end102-mcl_fp_subPre7L
+
+ .globl mcl_fp_shr1_7L
+ .align 16, 0x90
+ .type mcl_fp_shr1_7L,@function
+mcl_fp_shr1_7L: # @mcl_fp_shr1_7L
+# BB#0:
+ movq 48(%rsi), %r8
+ movq 40(%rsi), %r9
+ movq 32(%rsi), %r10
+ movq 24(%rsi), %rax
+ movq 16(%rsi), %rcx
+ movq (%rsi), %rdx
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rdx
+ movq %rdx, (%rdi)
+ shrdq $1, %rcx, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rax, %rcx
+ movq %rcx, 16(%rdi)
+ shrdq $1, %r10, %rax
+ movq %rax, 24(%rdi)
+ shrdq $1, %r9, %r10
+ movq %r10, 32(%rdi)
+ shrdq $1, %r8, %r9
+ movq %r9, 40(%rdi)
+ shrq %r8
+ movq %r8, 48(%rdi)
+ retq
+.Lfunc_end103:
+ .size mcl_fp_shr1_7L, .Lfunc_end103-mcl_fp_shr1_7L
+
+ .globl mcl_fp_add7L
+ .align 16, 0x90
+ .type mcl_fp_add7L,@function
+mcl_fp_add7L: # @mcl_fp_add7L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 48(%rdx), %r14
+ movq 48(%rsi), %r8
+ movq 40(%rdx), %r15
+ movq 40(%rsi), %r9
+ movq 32(%rdx), %r12
+ movq 24(%rdx), %r13
+ movq 16(%rdx), %r10
+ movq (%rdx), %r11
+ movq 8(%rdx), %rdx
+ addq (%rsi), %r11
+ adcq 8(%rsi), %rdx
+ movq 24(%rsi), %rax
+ movq 32(%rsi), %rbx
+ adcq 16(%rsi), %r10
+ movq %r11, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r10, 16(%rdi)
+ adcq %r13, %rax
+ movq %rax, 24(%rdi)
+ adcq %r12, %rbx
+ movq %rbx, 32(%rdi)
+ adcq %r15, %r9
+ movq %r9, 40(%rdi)
+ adcq %r14, %r8
+ movq %r8, 48(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %r11
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r10
+ sbbq 24(%rcx), %rax
+ sbbq 32(%rcx), %rbx
+ sbbq 40(%rcx), %r9
+ sbbq 48(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB104_2
+# BB#1: # %nocarry
+ movq %r11, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r10, 16(%rdi)
+ movq %rax, 24(%rdi)
+ movq %rbx, 32(%rdi)
+ movq %r9, 40(%rdi)
+ movq %r8, 48(%rdi)
+.LBB104_2: # %carry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end104:
+ .size mcl_fp_add7L, .Lfunc_end104-mcl_fp_add7L
+
+ .globl mcl_fp_addNF7L
+ .align 16, 0x90
+ .type mcl_fp_addNF7L,@function
+mcl_fp_addNF7L: # @mcl_fp_addNF7L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 48(%rdx), %r9
+ movq 40(%rdx), %rbp
+ movq 32(%rdx), %r10
+ movq 24(%rdx), %r11
+ movq 16(%rdx), %r14
+ movq (%rdx), %r12
+ movq 8(%rdx), %r15
+ addq (%rsi), %r12
+ adcq 8(%rsi), %r15
+ adcq 16(%rsi), %r14
+ adcq 24(%rsi), %r11
+ adcq 32(%rsi), %r10
+ adcq 40(%rsi), %rbp
+ movq %rbp, -8(%rsp) # 8-byte Spill
+ adcq 48(%rsi), %r9
+ movq %r12, %rsi
+ subq (%rcx), %rsi
+ movq %r15, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r14, %rax
+ sbbq 16(%rcx), %rax
+ movq %r11, %rbx
+ sbbq 24(%rcx), %rbx
+ movq %r10, %r13
+ sbbq 32(%rcx), %r13
+ sbbq 40(%rcx), %rbp
+ movq %r9, %r8
+ sbbq 48(%rcx), %r8
+ movq %r8, %rcx
+ sarq $63, %rcx
+ cmovsq %r12, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r15, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r14, %rax
+ movq %rax, 16(%rdi)
+ cmovsq %r11, %rbx
+ movq %rbx, 24(%rdi)
+ cmovsq %r10, %r13
+ movq %r13, 32(%rdi)
+ cmovsq -8(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 40(%rdi)
+ cmovsq %r9, %r8
+ movq %r8, 48(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end105:
+ .size mcl_fp_addNF7L, .Lfunc_end105-mcl_fp_addNF7L
+
+ .globl mcl_fp_sub7L
+ .align 16, 0x90
+ .type mcl_fp_sub7L,@function
+mcl_fp_sub7L: # @mcl_fp_sub7L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 48(%rdx), %r14
+ movq 48(%rsi), %r8
+ movq 40(%rdx), %r15
+ movq 40(%rsi), %r9
+ movq 32(%rdx), %r12
+ movq (%rsi), %rax
+ movq 8(%rsi), %r11
+ xorl %ebx, %ebx
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r11
+ movq 16(%rsi), %r13
+ sbbq 16(%rdx), %r13
+ movq 32(%rsi), %r10
+ movq 24(%rsi), %rsi
+ sbbq 24(%rdx), %rsi
+ movq %rax, (%rdi)
+ movq %r11, 8(%rdi)
+ movq %r13, 16(%rdi)
+ movq %rsi, 24(%rdi)
+ sbbq %r12, %r10
+ movq %r10, 32(%rdi)
+ sbbq %r15, %r9
+ movq %r9, 40(%rdi)
+ sbbq %r14, %r8
+ movq %r8, 48(%rdi)
+ sbbq $0, %rbx
+ testb $1, %bl
+ je .LBB106_2
+# BB#1: # %carry
+ movq 48(%rcx), %r14
+ movq 40(%rcx), %r15
+ movq 32(%rcx), %r12
+ movq 24(%rcx), %rbx
+ movq 8(%rcx), %rdx
+ movq 16(%rcx), %rbp
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %r11, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r13, %rbp
+ movq %rbp, 16(%rdi)
+ adcq %rsi, %rbx
+ movq %rbx, 24(%rdi)
+ adcq %r10, %r12
+ movq %r12, 32(%rdi)
+ adcq %r9, %r15
+ movq %r15, 40(%rdi)
+ adcq %r8, %r14
+ movq %r14, 48(%rdi)
+.LBB106_2: # %nocarry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end106:
+ .size mcl_fp_sub7L, .Lfunc_end106-mcl_fp_sub7L
+
+ .globl mcl_fp_subNF7L
+ .align 16, 0x90
+ .type mcl_fp_subNF7L,@function
+mcl_fp_subNF7L: # @mcl_fp_subNF7L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq 48(%rsi), %r12
+ movq 40(%rsi), %rax
+ movq 32(%rsi), %r9
+ movq 24(%rsi), %r10
+ movq 16(%rsi), %r11
+ movq (%rsi), %r14
+ movq 8(%rsi), %r15
+ subq (%rdx), %r14
+ sbbq 8(%rdx), %r15
+ sbbq 16(%rdx), %r11
+ sbbq 24(%rdx), %r10
+ sbbq 32(%rdx), %r9
+ sbbq 40(%rdx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ sbbq 48(%rdx), %r12
+ movq %r12, %rax
+ sarq $63, %rax
+ movq %rax, %rsi
+ shldq $1, %r12, %rsi
+ andq (%r8), %rsi
+ movq 48(%r8), %r13
+ andq %rax, %r13
+ movq 40(%r8), %rbx
+ andq %rax, %rbx
+ movq 32(%r8), %rdx
+ andq %rax, %rdx
+ movq 24(%r8), %rbp
+ andq %rax, %rbp
+ movq 16(%r8), %rcx
+ andq %rax, %rcx
+ andq 8(%r8), %rax
+ addq %r14, %rsi
+ adcq %r15, %rax
+ movq %rsi, (%rdi)
+ movq %rax, 8(%rdi)
+ adcq %r11, %rcx
+ movq %rcx, 16(%rdi)
+ adcq %r10, %rbp
+ movq %rbp, 24(%rdi)
+ adcq %r9, %rdx
+ movq %rdx, 32(%rdi)
+ adcq -8(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, 40(%rdi)
+ adcq %r12, %r13
+ movq %r13, 48(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end107:
+ .size mcl_fp_subNF7L, .Lfunc_end107-mcl_fp_subNF7L
+
+ .globl mcl_fpDbl_add7L
+ .align 16, 0x90
+ .type mcl_fpDbl_add7L,@function
+mcl_fpDbl_add7L: # @mcl_fpDbl_add7L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq 104(%rdx), %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq 96(%rdx), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq 88(%rdx), %r11
+ movq 80(%rdx), %r14
+ movq 24(%rsi), %r15
+ movq 32(%rsi), %r12
+ movq 16(%rdx), %r9
+ movq (%rdx), %rax
+ movq 8(%rdx), %rbx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rbx
+ adcq 16(%rsi), %r9
+ adcq 24(%rdx), %r15
+ adcq 32(%rdx), %r12
+ movq 72(%rdx), %r13
+ movq 64(%rdx), %rbp
+ movq %rax, (%rdi)
+ movq 56(%rdx), %r10
+ movq %rbx, 8(%rdi)
+ movq 48(%rdx), %rcx
+ movq 40(%rdx), %rdx
+ movq %r9, 16(%rdi)
+ movq 104(%rsi), %r9
+ movq %r15, 24(%rdi)
+ movq 40(%rsi), %rbx
+ adcq %rdx, %rbx
+ movq 96(%rsi), %r15
+ movq %r12, 32(%rdi)
+ movq 48(%rsi), %rdx
+ adcq %rcx, %rdx
+ movq 88(%rsi), %rax
+ movq %rbx, 40(%rdi)
+ movq 56(%rsi), %rcx
+ adcq %r10, %rcx
+ movq 80(%rsi), %r12
+ movq %rdx, 48(%rdi)
+ movq 72(%rsi), %rdx
+ movq 64(%rsi), %rsi
+ adcq %rbp, %rsi
+ adcq %r13, %rdx
+ adcq %r14, %r12
+ adcq %r11, %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ adcq -24(%rsp), %r15 # 8-byte Folded Reload
+ movq %r15, -24(%rsp) # 8-byte Spill
+ adcq -16(%rsp), %r9 # 8-byte Folded Reload
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ movq %rcx, %rbx
+ subq (%r8), %rbx
+ movq %rsi, %r10
+ sbbq 8(%r8), %r10
+ movq %rdx, %r11
+ sbbq 16(%r8), %r11
+ movq %r12, %r14
+ sbbq 24(%r8), %r14
+ movq -8(%rsp), %r13 # 8-byte Reload
+ sbbq 32(%r8), %r13
+ sbbq 40(%r8), %r15
+ movq %r9, %rax
+ sbbq 48(%r8), %rax
+ sbbq $0, %rbp
+ andl $1, %ebp
+ cmovneq %rcx, %rbx
+ movq %rbx, 56(%rdi)
+ testb %bpl, %bpl
+ cmovneq %rsi, %r10
+ movq %r10, 64(%rdi)
+ cmovneq %rdx, %r11
+ movq %r11, 72(%rdi)
+ cmovneq %r12, %r14
+ movq %r14, 80(%rdi)
+ cmovneq -8(%rsp), %r13 # 8-byte Folded Reload
+ movq %r13, 88(%rdi)
+ cmovneq -24(%rsp), %r15 # 8-byte Folded Reload
+ movq %r15, 96(%rdi)
+ cmovneq %r9, %rax
+ movq %rax, 104(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end108:
+ .size mcl_fpDbl_add7L, .Lfunc_end108-mcl_fpDbl_add7L
+
+ .globl mcl_fpDbl_sub7L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub7L,@function
+mcl_fpDbl_sub7L: # @mcl_fpDbl_sub7L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq 104(%rdx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq 96(%rdx), %r10
+ movq 88(%rdx), %r14
+ movq 16(%rsi), %rax
+ movq (%rsi), %r15
+ movq 8(%rsi), %r11
+ xorl %ecx, %ecx
+ subq (%rdx), %r15
+ sbbq 8(%rdx), %r11
+ sbbq 16(%rdx), %rax
+ movq 24(%rsi), %rbx
+ sbbq 24(%rdx), %rbx
+ movq 32(%rsi), %r12
+ sbbq 32(%rdx), %r12
+ movq 80(%rdx), %r13
+ movq 72(%rdx), %rbp
+ movq %r15, (%rdi)
+ movq 64(%rdx), %r9
+ movq %r11, 8(%rdi)
+ movq 56(%rdx), %r15
+ movq %rax, 16(%rdi)
+ movq 48(%rdx), %r11
+ movq 40(%rdx), %rdx
+ movq %rbx, 24(%rdi)
+ movq 40(%rsi), %rbx
+ sbbq %rdx, %rbx
+ movq 104(%rsi), %rax
+ movq %r12, 32(%rdi)
+ movq 48(%rsi), %r12
+ sbbq %r11, %r12
+ movq 96(%rsi), %r11
+ movq %rbx, 40(%rdi)
+ movq 56(%rsi), %rdx
+ sbbq %r15, %rdx
+ movq 88(%rsi), %r15
+ movq %r12, 48(%rdi)
+ movq 64(%rsi), %rbx
+ sbbq %r9, %rbx
+ movq 80(%rsi), %r12
+ movq 72(%rsi), %r9
+ sbbq %rbp, %r9
+ sbbq %r13, %r12
+ sbbq %r14, %r15
+ sbbq %r10, %r11
+ sbbq -8(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movl $0, %ebp
+ sbbq $0, %rbp
+ andl $1, %ebp
+ movq (%r8), %r10
+ cmoveq %rcx, %r10
+ testb %bpl, %bpl
+ movq 16(%r8), %rbp
+ cmoveq %rcx, %rbp
+ movq 8(%r8), %rsi
+ cmoveq %rcx, %rsi
+ movq 48(%r8), %r14
+ cmoveq %rcx, %r14
+ movq 40(%r8), %r13
+ cmoveq %rcx, %r13
+ movq 32(%r8), %rax
+ cmoveq %rcx, %rax
+ cmovneq 24(%r8), %rcx
+ addq %rdx, %r10
+ adcq %rbx, %rsi
+ movq %r10, 56(%rdi)
+ movq %rsi, 64(%rdi)
+ adcq %r9, %rbp
+ movq %rbp, 72(%rdi)
+ adcq %r12, %rcx
+ movq %rcx, 80(%rdi)
+ adcq %r15, %rax
+ movq %rax, 88(%rdi)
+ adcq %r11, %r13
+ movq %r13, 96(%rdi)
+ adcq -8(%rsp), %r14 # 8-byte Folded Reload
+ movq %r14, 104(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end109:
+ .size mcl_fpDbl_sub7L, .Lfunc_end109-mcl_fpDbl_sub7L
+
+ .align 16, 0x90
+ .type .LmulPv512x64,@function
+.LmulPv512x64: # @mulPv512x64
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rcx, %rax
+ mulq (%rsi)
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq %rax, (%rdi)
+ movq %rcx, %rax
+ mulq 56(%rsi)
+ movq %rdx, %r10
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq 48(%rsi)
+ movq %rdx, %r11
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq 40(%rsi)
+ movq %rdx, %r12
+ movq %rax, %r15
+ movq %rcx, %rax
+ mulq 32(%rsi)
+ movq %rdx, %rbx
+ movq %rax, %r13
+ movq %rcx, %rax
+ mulq 24(%rsi)
+ movq %rdx, %rbp
+ movq %rax, %r8
+ movq %rcx, %rax
+ mulq 16(%rsi)
+ movq %rdx, %r9
+ movq %rax, %r14
+ movq %rcx, %rax
+ mulq 8(%rsi)
+ addq -24(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 8(%rdi)
+ adcq %r14, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r8, %r9
+ movq %r9, 24(%rdi)
+ adcq %r13, %rbp
+ movq %rbp, 32(%rdi)
+ adcq %r15, %rbx
+ movq %rbx, 40(%rdi)
+ adcq -16(%rsp), %r12 # 8-byte Folded Reload
+ movq %r12, 48(%rdi)
+ adcq -8(%rsp), %r11 # 8-byte Folded Reload
+ movq %r11, 56(%rdi)
+ adcq $0, %r10
+ movq %r10, 64(%rdi)
+ movq %rdi, %rax
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end110:
+ .size .LmulPv512x64, .Lfunc_end110-.LmulPv512x64
+
+ .globl mcl_fp_mulUnitPre8L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre8L,@function
+mcl_fp_mulUnitPre8L: # @mcl_fp_mulUnitPre8L
+# BB#0:
+ pushq %rbx
+ subq $80, %rsp
+ movq %rdi, %rbx
+ leaq 8(%rsp), %rdi
+ callq .LmulPv512x64
+ movq 72(%rsp), %r8
+ movq 64(%rsp), %r9
+ movq 56(%rsp), %r10
+ movq 48(%rsp), %r11
+ movq 40(%rsp), %rdi
+ movq 32(%rsp), %rax
+ movq 24(%rsp), %rcx
+ movq 8(%rsp), %rdx
+ movq 16(%rsp), %rsi
+ movq %rdx, (%rbx)
+ movq %rsi, 8(%rbx)
+ movq %rcx, 16(%rbx)
+ movq %rax, 24(%rbx)
+ movq %rdi, 32(%rbx)
+ movq %r11, 40(%rbx)
+ movq %r10, 48(%rbx)
+ movq %r9, 56(%rbx)
+ movq %r8, 64(%rbx)
+ addq $80, %rsp
+ popq %rbx
+ retq
+.Lfunc_end111:
+ .size mcl_fp_mulUnitPre8L, .Lfunc_end111-mcl_fp_mulUnitPre8L
+
+ .globl mcl_fpDbl_mulPre8L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre8L,@function
+mcl_fpDbl_mulPre8L: # @mcl_fpDbl_mulPre8L
+# BB#0:
+ pushq %rbp
+ movq %rsp, %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $200, %rsp
+ movq %rdx, %rbx
+ movq %rsi, %r15
+ movq %rdi, %r14
+ callq mcl_fpDbl_mulPre4L@PLT
+ leaq 64(%r14), %rdi
+ leaq 32(%r15), %rsi
+ leaq 32(%rbx), %rdx
+ callq mcl_fpDbl_mulPre4L@PLT
+ movq 56(%rbx), %r10
+ movq 48(%rbx), %rcx
+ movq (%rbx), %rdx
+ movq 8(%rbx), %rsi
+ addq 32(%rbx), %rdx
+ adcq 40(%rbx), %rsi
+ adcq 16(%rbx), %rcx
+ adcq 24(%rbx), %r10
+ pushfq
+ popq %r8
+ xorl %r9d, %r9d
+ movq 56(%r15), %rdi
+ movq 48(%r15), %r13
+ movq (%r15), %r12
+ movq 8(%r15), %rbx
+ addq 32(%r15), %r12
+ adcq 40(%r15), %rbx
+ adcq 16(%r15), %r13
+ adcq 24(%r15), %rdi
+ movl $0, %eax
+ cmovbq %r10, %rax
+ movq %rax, -176(%rbp) # 8-byte Spill
+ movl $0, %eax
+ cmovbq %rcx, %rax
+ movq %rax, -184(%rbp) # 8-byte Spill
+ movl $0, %eax
+ cmovbq %rsi, %rax
+ movq %rax, -192(%rbp) # 8-byte Spill
+ movl $0, %eax
+ cmovbq %rdx, %rax
+ movq %rax, -200(%rbp) # 8-byte Spill
+ sbbq %r15, %r15
+ movq %r12, -136(%rbp)
+ movq %rbx, -128(%rbp)
+ movq %r13, -120(%rbp)
+ movq %rdi, -112(%rbp)
+ movq %rdx, -168(%rbp)
+ movq %rsi, -160(%rbp)
+ movq %rcx, -152(%rbp)
+ movq %r10, -144(%rbp)
+ pushq %r8
+ popfq
+ cmovaeq %r9, %rdi
+ movq %rdi, -216(%rbp) # 8-byte Spill
+ cmovaeq %r9, %r13
+ cmovaeq %r9, %rbx
+ cmovaeq %r9, %r12
+ sbbq %rax, %rax
+ movq %rax, -208(%rbp) # 8-byte Spill
+ leaq -104(%rbp), %rdi
+ leaq -136(%rbp), %rsi
+ leaq -168(%rbp), %rdx
+ callq mcl_fpDbl_mulPre4L@PLT
+ addq -200(%rbp), %r12 # 8-byte Folded Reload
+ adcq -192(%rbp), %rbx # 8-byte Folded Reload
+ adcq -184(%rbp), %r13 # 8-byte Folded Reload
+ movq -216(%rbp), %r10 # 8-byte Reload
+ adcq -176(%rbp), %r10 # 8-byte Folded Reload
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq -208(%rbp), %rdx # 8-byte Reload
+ andl %edx, %r15d
+ andl $1, %r15d
+ addq -72(%rbp), %r12
+ adcq -64(%rbp), %rbx
+ adcq -56(%rbp), %r13
+ adcq -48(%rbp), %r10
+ adcq %rax, %r15
+ movq -80(%rbp), %rax
+ movq -88(%rbp), %rcx
+ movq -104(%rbp), %rsi
+ movq -96(%rbp), %rdx
+ subq (%r14), %rsi
+ sbbq 8(%r14), %rdx
+ sbbq 16(%r14), %rcx
+ sbbq 24(%r14), %rax
+ movq 32(%r14), %rdi
+ movq %rdi, -184(%rbp) # 8-byte Spill
+ movq 40(%r14), %r8
+ movq %r8, -176(%rbp) # 8-byte Spill
+ sbbq %rdi, %r12
+ sbbq %r8, %rbx
+ movq 48(%r14), %rdi
+ movq %rdi, -192(%rbp) # 8-byte Spill
+ sbbq %rdi, %r13
+ movq 56(%r14), %rdi
+ movq %rdi, -200(%rbp) # 8-byte Spill
+ sbbq %rdi, %r10
+ sbbq $0, %r15
+ movq 64(%r14), %r11
+ subq %r11, %rsi
+ movq 72(%r14), %rdi
+ movq %rdi, -208(%rbp) # 8-byte Spill
+ sbbq %rdi, %rdx
+ movq 80(%r14), %rdi
+ movq %rdi, -216(%rbp) # 8-byte Spill
+ sbbq %rdi, %rcx
+ movq 88(%r14), %rdi
+ movq %rdi, -224(%rbp) # 8-byte Spill
+ sbbq %rdi, %rax
+ movq 96(%r14), %rdi
+ movq %rdi, -232(%rbp) # 8-byte Spill
+ sbbq %rdi, %r12
+ movq 104(%r14), %rdi
+ sbbq %rdi, %rbx
+ movq 112(%r14), %r8
+ sbbq %r8, %r13
+ movq 120(%r14), %r9
+ sbbq %r9, %r10
+ sbbq $0, %r15
+ addq -184(%rbp), %rsi # 8-byte Folded Reload
+ adcq -176(%rbp), %rdx # 8-byte Folded Reload
+ movq %rsi, 32(%r14)
+ adcq -192(%rbp), %rcx # 8-byte Folded Reload
+ movq %rdx, 40(%r14)
+ adcq -200(%rbp), %rax # 8-byte Folded Reload
+ movq %rcx, 48(%r14)
+ adcq %r11, %r12
+ movq %rax, 56(%r14)
+ movq %r12, 64(%r14)
+ adcq -208(%rbp), %rbx # 8-byte Folded Reload
+ movq %rbx, 72(%r14)
+ adcq -216(%rbp), %r13 # 8-byte Folded Reload
+ movq %r13, 80(%r14)
+ adcq -224(%rbp), %r10 # 8-byte Folded Reload
+ movq %r10, 88(%r14)
+ adcq -232(%rbp), %r15 # 8-byte Folded Reload
+ movq %r15, 96(%r14)
+ adcq $0, %rdi
+ movq %rdi, 104(%r14)
+ adcq $0, %r8
+ movq %r8, 112(%r14)
+ adcq $0, %r9
+ movq %r9, 120(%r14)
+ addq $200, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end112:
+ .size mcl_fpDbl_mulPre8L, .Lfunc_end112-mcl_fpDbl_mulPre8L
+
+ .globl mcl_fpDbl_sqrPre8L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre8L,@function
+mcl_fpDbl_sqrPre8L: # @mcl_fpDbl_sqrPre8L
+# BB#0:
+ pushq %rbp
+ movq %rsp, %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $200, %rsp
+ movq %rsi, %r14
+ movq %rdi, %rbx
+ movq %r14, %rdx
+ callq mcl_fpDbl_mulPre4L@PLT
+ leaq 64(%rbx), %rdi
+ leaq 32(%r14), %rsi
+ movq %rsi, %rdx
+ callq mcl_fpDbl_mulPre4L@PLT
+ movq (%r14), %r12
+ movq 8(%r14), %r15
+ addq 32(%r14), %r12
+ adcq 40(%r14), %r15
+ pushfq
+ popq %rax
+ movq %r12, -136(%rbp)
+ movq %r12, -168(%rbp)
+ addq %r12, %r12
+ movq %r15, -128(%rbp)
+ movq %r15, -160(%rbp)
+ adcq %r15, %r15
+ pushfq
+ popq %rcx
+ movq 56(%r14), %r13
+ movq 48(%r14), %rdx
+ pushq %rax
+ popfq
+ adcq 16(%r14), %rdx
+ adcq 24(%r14), %r13
+ pushfq
+ popq %r8
+ pushfq
+ popq %rsi
+ pushfq
+ popq %rdi
+ sbbq %rax, %rax
+ movq %rax, -184(%rbp) # 8-byte Spill
+ xorl %eax, %eax
+ pushq %rdi
+ popfq
+ cmovaeq %rax, %r15
+ movq %r15, -176(%rbp) # 8-byte Spill
+ cmovaeq %rax, %r12
+ movq %rdx, -120(%rbp)
+ movq %rdx, -152(%rbp)
+ movq %rdx, %r15
+ pushq %rcx
+ popfq
+ adcq %r15, %r15
+ movq %r13, %r14
+ movq %r13, -112(%rbp)
+ movq %r13, -144(%rbp)
+ adcq %r13, %r13
+ pushq %rsi
+ popfq
+ cmovaeq %rax, %r13
+ cmovaeq %rax, %r15
+ shrq $63, %r14
+ pushq %r8
+ popfq
+ cmovaeq %rax, %r14
+ leaq -104(%rbp), %rdi
+ leaq -136(%rbp), %rsi
+ leaq -168(%rbp), %rdx
+ callq mcl_fpDbl_mulPre4L@PLT
+ movq -184(%rbp), %rax # 8-byte Reload
+ andl $1, %eax
+ addq -72(%rbp), %r12
+ movq -176(%rbp), %r8 # 8-byte Reload
+ adcq -64(%rbp), %r8
+ adcq -56(%rbp), %r15
+ adcq -48(%rbp), %r13
+ adcq %r14, %rax
+ movq %rax, %rdi
+ movq -80(%rbp), %rax
+ movq -88(%rbp), %rcx
+ movq -104(%rbp), %rsi
+ movq -96(%rbp), %rdx
+ subq (%rbx), %rsi
+ sbbq 8(%rbx), %rdx
+ sbbq 16(%rbx), %rcx
+ sbbq 24(%rbx), %rax
+ movq 32(%rbx), %r10
+ movq %r10, -184(%rbp) # 8-byte Spill
+ movq 40(%rbx), %r9
+ movq %r9, -176(%rbp) # 8-byte Spill
+ sbbq %r10, %r12
+ sbbq %r9, %r8
+ movq %r8, %r10
+ movq 48(%rbx), %r8
+ movq %r8, -192(%rbp) # 8-byte Spill
+ sbbq %r8, %r15
+ movq 56(%rbx), %r8
+ movq %r8, -200(%rbp) # 8-byte Spill
+ sbbq %r8, %r13
+ sbbq $0, %rdi
+ movq 64(%rbx), %r11
+ subq %r11, %rsi
+ movq 72(%rbx), %r8
+ movq %r8, -208(%rbp) # 8-byte Spill
+ sbbq %r8, %rdx
+ movq 80(%rbx), %r8
+ movq %r8, -216(%rbp) # 8-byte Spill
+ sbbq %r8, %rcx
+ movq 88(%rbx), %r8
+ movq %r8, -224(%rbp) # 8-byte Spill
+ sbbq %r8, %rax
+ movq 96(%rbx), %r8
+ movq %r8, -232(%rbp) # 8-byte Spill
+ sbbq %r8, %r12
+ movq 104(%rbx), %r14
+ sbbq %r14, %r10
+ movq 112(%rbx), %r8
+ sbbq %r8, %r15
+ movq 120(%rbx), %r9
+ sbbq %r9, %r13
+ sbbq $0, %rdi
+ addq -184(%rbp), %rsi # 8-byte Folded Reload
+ adcq -176(%rbp), %rdx # 8-byte Folded Reload
+ movq %rsi, 32(%rbx)
+ adcq -192(%rbp), %rcx # 8-byte Folded Reload
+ movq %rdx, 40(%rbx)
+ adcq -200(%rbp), %rax # 8-byte Folded Reload
+ movq %rcx, 48(%rbx)
+ adcq %r11, %r12
+ movq %rax, 56(%rbx)
+ movq %r12, 64(%rbx)
+ adcq -208(%rbp), %r10 # 8-byte Folded Reload
+ movq %r10, 72(%rbx)
+ adcq -216(%rbp), %r15 # 8-byte Folded Reload
+ movq %r15, 80(%rbx)
+ adcq -224(%rbp), %r13 # 8-byte Folded Reload
+ movq %r13, 88(%rbx)
+ adcq -232(%rbp), %rdi # 8-byte Folded Reload
+ movq %rdi, 96(%rbx)
+ adcq $0, %r14
+ movq %r14, 104(%rbx)
+ adcq $0, %r8
+ movq %r8, 112(%rbx)
+ adcq $0, %r9
+ movq %r9, 120(%rbx)
+ addq $200, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end113:
+ .size mcl_fpDbl_sqrPre8L, .Lfunc_end113-mcl_fpDbl_sqrPre8L
+
+ .globl mcl_fp_mont8L
+ .align 16, 0x90
+ .type mcl_fp_mont8L,@function
+mcl_fp_mont8L: # @mcl_fp_mont8L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $1256, %rsp # imm = 0x4E8
+ movq %rcx, %r13
+ movq %r13, 40(%rsp) # 8-byte Spill
+ movq %rdx, 16(%rsp) # 8-byte Spill
+ movq %rsi, 24(%rsp) # 8-byte Spill
+ movq %rdi, (%rsp) # 8-byte Spill
+ movq -8(%r13), %rbx
+ movq %rbx, 32(%rsp) # 8-byte Spill
+ movq (%rdx), %rdx
+ leaq 1184(%rsp), %rdi
+ callq .LmulPv512x64
+ movq 1184(%rsp), %r15
+ movq 1192(%rsp), %r14
+ movq %r15, %rdx
+ imulq %rbx, %rdx
+ movq 1248(%rsp), %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ movq 1240(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq 1232(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ movq 1224(%rsp), %rax
+ movq %rax, 72(%rsp) # 8-byte Spill
+ movq 1216(%rsp), %r12
+ movq 1208(%rsp), %rbx
+ movq 1200(%rsp), %rbp
+ leaq 1112(%rsp), %rdi
+ movq %r13, %rsi
+ callq .LmulPv512x64
+ addq 1112(%rsp), %r15
+ adcq 1120(%rsp), %r14
+ adcq 1128(%rsp), %rbp
+ movq %rbp, 8(%rsp) # 8-byte Spill
+ adcq 1136(%rsp), %rbx
+ movq %rbx, 48(%rsp) # 8-byte Spill
+ adcq 1144(%rsp), %r12
+ movq %r12, 64(%rsp) # 8-byte Spill
+ movq 72(%rsp), %r13 # 8-byte Reload
+ adcq 1152(%rsp), %r13
+ movq 88(%rsp), %rbx # 8-byte Reload
+ adcq 1160(%rsp), %rbx
+ movq 80(%rsp), %rbp # 8-byte Reload
+ adcq 1168(%rsp), %rbp
+ movq 96(%rsp), %rax # 8-byte Reload
+ adcq 1176(%rsp), %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ sbbq %r15, %r15
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rdx
+ leaq 1040(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ andl $1, %r15d
+ addq 1040(%rsp), %r14
+ movq 8(%rsp), %rax # 8-byte Reload
+ adcq 1048(%rsp), %rax
+ movq %rax, 8(%rsp) # 8-byte Spill
+ movq 48(%rsp), %rax # 8-byte Reload
+ adcq 1056(%rsp), %rax
+ movq %rax, %r12
+ movq 64(%rsp), %rax # 8-byte Reload
+ adcq 1064(%rsp), %rax
+ movq %rax, 64(%rsp) # 8-byte Spill
+ adcq 1072(%rsp), %r13
+ movq %r13, 72(%rsp) # 8-byte Spill
+ adcq 1080(%rsp), %rbx
+ movq %rbx, 88(%rsp) # 8-byte Spill
+ adcq 1088(%rsp), %rbp
+ movq 96(%rsp), %rax # 8-byte Reload
+ adcq 1096(%rsp), %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ adcq 1104(%rsp), %r15
+ movq %r15, 56(%rsp) # 8-byte Spill
+ sbbq %r15, %r15
+ movq %r14, %rdx
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 968(%rsp), %rdi
+ movq 40(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ andl $1, %r15d
+ addq 968(%rsp), %r14
+ movq 8(%rsp), %r13 # 8-byte Reload
+ adcq 976(%rsp), %r13
+ adcq 984(%rsp), %r12
+ movq %r12, 48(%rsp) # 8-byte Spill
+ movq 64(%rsp), %r14 # 8-byte Reload
+ adcq 992(%rsp), %r14
+ movq 72(%rsp), %rbx # 8-byte Reload
+ adcq 1000(%rsp), %rbx
+ movq 88(%rsp), %rax # 8-byte Reload
+ adcq 1008(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ adcq 1016(%rsp), %rbp
+ movq %rbp, %r12
+ movq 96(%rsp), %rbp # 8-byte Reload
+ adcq 1024(%rsp), %rbp
+ movq 56(%rsp), %rax # 8-byte Reload
+ adcq 1032(%rsp), %rax
+ movq %rax, 56(%rsp) # 8-byte Spill
+ adcq $0, %r15
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rdx
+ leaq 896(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq %r13, %rcx
+ addq 896(%rsp), %rcx
+ movq 48(%rsp), %r13 # 8-byte Reload
+ adcq 904(%rsp), %r13
+ adcq 912(%rsp), %r14
+ adcq 920(%rsp), %rbx
+ movq %rbx, 72(%rsp) # 8-byte Spill
+ movq 88(%rsp), %rax # 8-byte Reload
+ adcq 928(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ adcq 936(%rsp), %r12
+ movq %r12, 80(%rsp) # 8-byte Spill
+ adcq 944(%rsp), %rbp
+ movq %rbp, 96(%rsp) # 8-byte Spill
+ movq 56(%rsp), %r12 # 8-byte Reload
+ adcq 952(%rsp), %r12
+ adcq 960(%rsp), %r15
+ sbbq %rbx, %rbx
+ movq %rcx, %rdx
+ movq %rcx, %rbp
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 824(%rsp), %rdi
+ movq 40(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ andl $1, %ebx
+ addq 824(%rsp), %rbp
+ adcq 832(%rsp), %r13
+ movq %r13, 48(%rsp) # 8-byte Spill
+ adcq 840(%rsp), %r14
+ movq %r14, 64(%rsp) # 8-byte Spill
+ movq 72(%rsp), %r13 # 8-byte Reload
+ adcq 848(%rsp), %r13
+ movq 88(%rsp), %rbp # 8-byte Reload
+ adcq 856(%rsp), %rbp
+ movq 80(%rsp), %r14 # 8-byte Reload
+ adcq 864(%rsp), %r14
+ movq 96(%rsp), %rax # 8-byte Reload
+ adcq 872(%rsp), %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ adcq 880(%rsp), %r12
+ adcq 888(%rsp), %r15
+ adcq $0, %rbx
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rdx
+ leaq 752(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 48(%rsp), %rax # 8-byte Reload
+ addq 752(%rsp), %rax
+ movq 64(%rsp), %rcx # 8-byte Reload
+ adcq 760(%rsp), %rcx
+ movq %rcx, 64(%rsp) # 8-byte Spill
+ adcq 768(%rsp), %r13
+ movq %r13, 72(%rsp) # 8-byte Spill
+ adcq 776(%rsp), %rbp
+ movq %rbp, 88(%rsp) # 8-byte Spill
+ adcq 784(%rsp), %r14
+ movq %r14, 80(%rsp) # 8-byte Spill
+ movq 96(%rsp), %rbp # 8-byte Reload
+ adcq 792(%rsp), %rbp
+ adcq 800(%rsp), %r12
+ adcq 808(%rsp), %r15
+ adcq 816(%rsp), %rbx
+ movq %rbx, 48(%rsp) # 8-byte Spill
+ sbbq %r13, %r13
+ movq %rax, %rdx
+ movq %rax, %rbx
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 680(%rsp), %rdi
+ movq 40(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq %r13, %rax
+ andl $1, %eax
+ addq 680(%rsp), %rbx
+ movq 64(%rsp), %r14 # 8-byte Reload
+ adcq 688(%rsp), %r14
+ movq 72(%rsp), %rcx # 8-byte Reload
+ adcq 696(%rsp), %rcx
+ movq %rcx, 72(%rsp) # 8-byte Spill
+ movq 88(%rsp), %r13 # 8-byte Reload
+ adcq 704(%rsp), %r13
+ movq 80(%rsp), %rbx # 8-byte Reload
+ adcq 712(%rsp), %rbx
+ adcq 720(%rsp), %rbp
+ movq %rbp, 96(%rsp) # 8-byte Spill
+ movq %r12, %rbp
+ adcq 728(%rsp), %rbp
+ adcq 736(%rsp), %r15
+ movq 48(%rsp), %r12 # 8-byte Reload
+ adcq 744(%rsp), %r12
+ adcq $0, %rax
+ movq %rax, 64(%rsp) # 8-byte Spill
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 32(%rax), %rdx
+ leaq 608(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq %r14, %rax
+ addq 608(%rsp), %rax
+ movq 72(%rsp), %r14 # 8-byte Reload
+ adcq 616(%rsp), %r14
+ adcq 624(%rsp), %r13
+ movq %r13, 88(%rsp) # 8-byte Spill
+ adcq 632(%rsp), %rbx
+ movq %rbx, %r13
+ movq 96(%rsp), %rcx # 8-byte Reload
+ adcq 640(%rsp), %rcx
+ movq %rcx, 96(%rsp) # 8-byte Spill
+ adcq 648(%rsp), %rbp
+ movq %rbp, 56(%rsp) # 8-byte Spill
+ adcq 656(%rsp), %r15
+ adcq 664(%rsp), %r12
+ movq %r12, 48(%rsp) # 8-byte Spill
+ movq 64(%rsp), %rcx # 8-byte Reload
+ adcq 672(%rsp), %rcx
+ movq %rcx, 64(%rsp) # 8-byte Spill
+ sbbq %rbp, %rbp
+ movq %rax, %rdx
+ movq %rax, %rbx
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 536(%rsp), %rdi
+ movq 40(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq %rbp, %rax
+ andl $1, %eax
+ addq 536(%rsp), %rbx
+ adcq 544(%rsp), %r14
+ movq %r14, 72(%rsp) # 8-byte Spill
+ movq 88(%rsp), %rbx # 8-byte Reload
+ adcq 552(%rsp), %rbx
+ adcq 560(%rsp), %r13
+ movq 96(%rsp), %rbp # 8-byte Reload
+ adcq 568(%rsp), %rbp
+ movq 56(%rsp), %r12 # 8-byte Reload
+ adcq 576(%rsp), %r12
+ adcq 584(%rsp), %r15
+ movq 48(%rsp), %rcx # 8-byte Reload
+ adcq 592(%rsp), %rcx
+ movq %rcx, 48(%rsp) # 8-byte Spill
+ movq 64(%rsp), %r14 # 8-byte Reload
+ adcq 600(%rsp), %r14
+ adcq $0, %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 40(%rax), %rdx
+ leaq 464(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 72(%rsp), %rax # 8-byte Reload
+ addq 464(%rsp), %rax
+ adcq 472(%rsp), %rbx
+ adcq 480(%rsp), %r13
+ movq %r13, 80(%rsp) # 8-byte Spill
+ adcq 488(%rsp), %rbp
+ movq %rbp, 96(%rsp) # 8-byte Spill
+ adcq 496(%rsp), %r12
+ adcq 504(%rsp), %r15
+ movq %r15, 72(%rsp) # 8-byte Spill
+ movq 48(%rsp), %r15 # 8-byte Reload
+ adcq 512(%rsp), %r15
+ adcq 520(%rsp), %r14
+ movq %r14, 64(%rsp) # 8-byte Spill
+ movq 88(%rsp), %r14 # 8-byte Reload
+ adcq 528(%rsp), %r14
+ sbbq %r13, %r13
+ movq %rax, %rdx
+ movq %rax, %rbp
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 392(%rsp), %rdi
+ movq 40(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq %r13, %rax
+ andl $1, %eax
+ addq 392(%rsp), %rbp
+ adcq 400(%rsp), %rbx
+ movq %rbx, 88(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rbp # 8-byte Reload
+ adcq 408(%rsp), %rbp
+ movq 96(%rsp), %rbx # 8-byte Reload
+ adcq 416(%rsp), %rbx
+ adcq 424(%rsp), %r12
+ movq 72(%rsp), %r13 # 8-byte Reload
+ adcq 432(%rsp), %r13
+ adcq 440(%rsp), %r15
+ movq %r15, 48(%rsp) # 8-byte Spill
+ movq 64(%rsp), %r15 # 8-byte Reload
+ adcq 448(%rsp), %r15
+ adcq 456(%rsp), %r14
+ adcq $0, %rax
+ movq %rax, 72(%rsp) # 8-byte Spill
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 48(%rax), %rdx
+ leaq 320(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 88(%rsp), %rax # 8-byte Reload
+ addq 320(%rsp), %rax
+ adcq 328(%rsp), %rbp
+ movq %rbp, 80(%rsp) # 8-byte Spill
+ adcq 336(%rsp), %rbx
+ movq %rbx, 96(%rsp) # 8-byte Spill
+ movq %r12, %rbp
+ adcq 344(%rsp), %rbp
+ adcq 352(%rsp), %r13
+ movq 48(%rsp), %r12 # 8-byte Reload
+ adcq 360(%rsp), %r12
+ adcq 368(%rsp), %r15
+ movq %r15, 64(%rsp) # 8-byte Spill
+ adcq 376(%rsp), %r14
+ movq %r14, 88(%rsp) # 8-byte Spill
+ movq 72(%rsp), %rcx # 8-byte Reload
+ adcq 384(%rsp), %rcx
+ movq %rcx, 72(%rsp) # 8-byte Spill
+ sbbq %r15, %r15
+ movq %rax, %rdx
+ movq %rax, %rbx
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 248(%rsp), %rdi
+ movq 40(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ andl $1, %r15d
+ addq 248(%rsp), %rbx
+ movq 80(%rsp), %rax # 8-byte Reload
+ adcq 256(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq 96(%rsp), %r14 # 8-byte Reload
+ adcq 264(%rsp), %r14
+ adcq 272(%rsp), %rbp
+ movq %rbp, 56(%rsp) # 8-byte Spill
+ movq %r13, %rbx
+ adcq 280(%rsp), %rbx
+ movq %r12, %rbp
+ adcq 288(%rsp), %rbp
+ movq 64(%rsp), %r13 # 8-byte Reload
+ adcq 296(%rsp), %r13
+ movq 88(%rsp), %rax # 8-byte Reload
+ adcq 304(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ movq 72(%rsp), %r12 # 8-byte Reload
+ adcq 312(%rsp), %r12
+ adcq $0, %r15
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 56(%rax), %rdx
+ leaq 176(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 80(%rsp), %rax # 8-byte Reload
+ addq 176(%rsp), %rax
+ adcq 184(%rsp), %r14
+ movq %r14, 96(%rsp) # 8-byte Spill
+ movq 56(%rsp), %rcx # 8-byte Reload
+ adcq 192(%rsp), %rcx
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ adcq 200(%rsp), %rbx
+ movq %rbx, 72(%rsp) # 8-byte Spill
+ adcq 208(%rsp), %rbp
+ adcq 216(%rsp), %r13
+ movq %r13, 64(%rsp) # 8-byte Spill
+ movq 88(%rsp), %r14 # 8-byte Reload
+ adcq 224(%rsp), %r14
+ adcq 232(%rsp), %r12
+ adcq 240(%rsp), %r15
+ sbbq %rbx, %rbx
+ movq 32(%rsp), %rdx # 8-byte Reload
+ imulq %rax, %rdx
+ movq %rax, %r13
+ leaq 104(%rsp), %rdi
+ movq 40(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ andl $1, %ebx
+ addq 104(%rsp), %r13
+ movq 96(%rsp), %rcx # 8-byte Reload
+ adcq 112(%rsp), %rcx
+ movq 56(%rsp), %rdx # 8-byte Reload
+ adcq 120(%rsp), %rdx
+ movq 72(%rsp), %rsi # 8-byte Reload
+ adcq 128(%rsp), %rsi
+ movq %rbp, %rdi
+ adcq 136(%rsp), %rdi
+ movq %rdi, 48(%rsp) # 8-byte Spill
+ movq 64(%rsp), %r8 # 8-byte Reload
+ adcq 144(%rsp), %r8
+ movq %r8, 64(%rsp) # 8-byte Spill
+ movq %r14, %r9
+ adcq 152(%rsp), %r9
+ movq %r9, 88(%rsp) # 8-byte Spill
+ adcq 160(%rsp), %r12
+ adcq 168(%rsp), %r15
+ adcq $0, %rbx
+ movq %rcx, %rax
+ movq %rcx, %r11
+ movq 40(%rsp), %rbp # 8-byte Reload
+ subq (%rbp), %rax
+ movq %rdx, %rcx
+ movq %rdx, %r14
+ sbbq 8(%rbp), %rcx
+ movq %rsi, %rdx
+ movq %rsi, %r13
+ sbbq 16(%rbp), %rdx
+ movq %rdi, %rsi
+ sbbq 24(%rbp), %rsi
+ movq %r8, %rdi
+ sbbq 32(%rbp), %rdi
+ movq %r9, %r10
+ sbbq 40(%rbp), %r10
+ movq %r12, %r8
+ sbbq 48(%rbp), %r8
+ movq %r15, %r9
+ sbbq 56(%rbp), %r9
+ sbbq $0, %rbx
+ andl $1, %ebx
+ cmovneq %r15, %r9
+ testb %bl, %bl
+ cmovneq %r11, %rax
+ movq (%rsp), %rbx # 8-byte Reload
+ movq %rax, (%rbx)
+ cmovneq %r14, %rcx
+ movq %rcx, 8(%rbx)
+ cmovneq %r13, %rdx
+ movq %rdx, 16(%rbx)
+ cmovneq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 24(%rbx)
+ cmovneq 64(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rbx)
+ cmovneq 88(%rsp), %r10 # 8-byte Folded Reload
+ movq %r10, 40(%rbx)
+ cmovneq %r12, %r8
+ movq %r8, 48(%rbx)
+ movq %r9, 56(%rbx)
+ addq $1256, %rsp # imm = 0x4E8
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end114:
+ .size mcl_fp_mont8L, .Lfunc_end114-mcl_fp_mont8L
+
+ .globl mcl_fp_montNF8L
+ .align 16, 0x90
+ .type mcl_fp_montNF8L,@function
+mcl_fp_montNF8L: # @mcl_fp_montNF8L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $1240, %rsp # imm = 0x4D8
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ movq %rdx, 16(%rsp) # 8-byte Spill
+ movq %rsi, 24(%rsp) # 8-byte Spill
+ movq %rdi, (%rsp) # 8-byte Spill
+ movq -8(%rcx), %rbx
+ movq %rbx, 32(%rsp) # 8-byte Spill
+ movq (%rdx), %rdx
+ leaq 1168(%rsp), %rdi
+ callq .LmulPv512x64
+ movq 1168(%rsp), %r15
+ movq 1176(%rsp), %r12
+ movq %r15, %rdx
+ imulq %rbx, %rdx
+ movq 1232(%rsp), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ movq 1224(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq 1216(%rsp), %r13
+ movq 1208(%rsp), %rax
+ movq %rax, 72(%rsp) # 8-byte Spill
+ movq 1200(%rsp), %r14
+ movq 1192(%rsp), %rbp
+ movq 1184(%rsp), %rbx
+ leaq 1096(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 1096(%rsp), %r15
+ adcq 1104(%rsp), %r12
+ movq %r12, 64(%rsp) # 8-byte Spill
+ adcq 1112(%rsp), %rbx
+ adcq 1120(%rsp), %rbp
+ adcq 1128(%rsp), %r14
+ movq %r14, %r12
+ movq 72(%rsp), %r14 # 8-byte Reload
+ adcq 1136(%rsp), %r14
+ adcq 1144(%rsp), %r13
+ movq 80(%rsp), %rax # 8-byte Reload
+ adcq 1152(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq 48(%rsp), %rax # 8-byte Reload
+ adcq 1160(%rsp), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rdx
+ leaq 1024(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 1088(%rsp), %r15
+ movq 64(%rsp), %rax # 8-byte Reload
+ addq 1024(%rsp), %rax
+ adcq 1032(%rsp), %rbx
+ movq %rbx, 8(%rsp) # 8-byte Spill
+ movq %rbp, %rbx
+ adcq 1040(%rsp), %rbx
+ adcq 1048(%rsp), %r12
+ adcq 1056(%rsp), %r14
+ movq %r14, 72(%rsp) # 8-byte Spill
+ movq %r13, %rbp
+ adcq 1064(%rsp), %rbp
+ movq 80(%rsp), %rcx # 8-byte Reload
+ adcq 1072(%rsp), %rcx
+ movq %rcx, 80(%rsp) # 8-byte Spill
+ movq 48(%rsp), %r14 # 8-byte Reload
+ adcq 1080(%rsp), %r14
+ adcq $0, %r15
+ movq %rax, %rdx
+ movq %rax, %r13
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 952(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 952(%rsp), %r13
+ movq 8(%rsp), %rax # 8-byte Reload
+ adcq 960(%rsp), %rax
+ movq %rax, 8(%rsp) # 8-byte Spill
+ adcq 968(%rsp), %rbx
+ movq %rbx, 64(%rsp) # 8-byte Spill
+ movq %r12, %rbx
+ adcq 976(%rsp), %rbx
+ movq 72(%rsp), %r12 # 8-byte Reload
+ adcq 984(%rsp), %r12
+ adcq 992(%rsp), %rbp
+ movq %rbp, 40(%rsp) # 8-byte Spill
+ movq 80(%rsp), %r13 # 8-byte Reload
+ adcq 1000(%rsp), %r13
+ movq %r14, %rbp
+ adcq 1008(%rsp), %rbp
+ adcq 1016(%rsp), %r15
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rdx
+ leaq 880(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 944(%rsp), %r14
+ movq 8(%rsp), %rax # 8-byte Reload
+ addq 880(%rsp), %rax
+ movq 64(%rsp), %rcx # 8-byte Reload
+ adcq 888(%rsp), %rcx
+ movq %rcx, 64(%rsp) # 8-byte Spill
+ adcq 896(%rsp), %rbx
+ adcq 904(%rsp), %r12
+ movq %r12, 72(%rsp) # 8-byte Spill
+ movq 40(%rsp), %rcx # 8-byte Reload
+ adcq 912(%rsp), %rcx
+ movq %rcx, 40(%rsp) # 8-byte Spill
+ adcq 920(%rsp), %r13
+ movq %r13, 80(%rsp) # 8-byte Spill
+ adcq 928(%rsp), %rbp
+ movq %rbp, 48(%rsp) # 8-byte Spill
+ adcq 936(%rsp), %r15
+ adcq $0, %r14
+ movq %rax, %rdx
+ movq %rax, %rbp
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 808(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 808(%rsp), %rbp
+ movq 64(%rsp), %r13 # 8-byte Reload
+ adcq 816(%rsp), %r13
+ movq %rbx, %r12
+ adcq 824(%rsp), %r12
+ movq 72(%rsp), %rbx # 8-byte Reload
+ adcq 832(%rsp), %rbx
+ movq 40(%rsp), %rbp # 8-byte Reload
+ adcq 840(%rsp), %rbp
+ movq 80(%rsp), %rax # 8-byte Reload
+ adcq 848(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq 48(%rsp), %rax # 8-byte Reload
+ adcq 856(%rsp), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ adcq 864(%rsp), %r15
+ adcq 872(%rsp), %r14
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rdx
+ leaq 736(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 800(%rsp), %rax
+ movq %r13, %rcx
+ addq 736(%rsp), %rcx
+ adcq 744(%rsp), %r12
+ movq %r12, 40(%rsp) # 8-byte Spill
+ adcq 752(%rsp), %rbx
+ movq %rbx, 72(%rsp) # 8-byte Spill
+ adcq 760(%rsp), %rbp
+ movq %rbp, %r13
+ movq 80(%rsp), %rbp # 8-byte Reload
+ adcq 768(%rsp), %rbp
+ movq 48(%rsp), %rbx # 8-byte Reload
+ adcq 776(%rsp), %rbx
+ adcq 784(%rsp), %r15
+ adcq 792(%rsp), %r14
+ adcq $0, %rax
+ movq %rax, 64(%rsp) # 8-byte Spill
+ movq %rcx, %rdx
+ movq %rcx, %r12
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 664(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 664(%rsp), %r12
+ movq 40(%rsp), %rax # 8-byte Reload
+ adcq 672(%rsp), %rax
+ movq %rax, 40(%rsp) # 8-byte Spill
+ movq 72(%rsp), %rax # 8-byte Reload
+ adcq 680(%rsp), %rax
+ movq %rax, 72(%rsp) # 8-byte Spill
+ adcq 688(%rsp), %r13
+ adcq 696(%rsp), %rbp
+ movq %rbp, 80(%rsp) # 8-byte Spill
+ adcq 704(%rsp), %rbx
+ adcq 712(%rsp), %r15
+ adcq 720(%rsp), %r14
+ movq 64(%rsp), %r12 # 8-byte Reload
+ adcq 728(%rsp), %r12
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 32(%rax), %rdx
+ leaq 592(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 656(%rsp), %rcx
+ movq 40(%rsp), %rax # 8-byte Reload
+ addq 592(%rsp), %rax
+ movq 72(%rsp), %rbp # 8-byte Reload
+ adcq 600(%rsp), %rbp
+ adcq 608(%rsp), %r13
+ movq %r13, 40(%rsp) # 8-byte Spill
+ movq 80(%rsp), %r13 # 8-byte Reload
+ adcq 616(%rsp), %r13
+ adcq 624(%rsp), %rbx
+ adcq 632(%rsp), %r15
+ adcq 640(%rsp), %r14
+ adcq 648(%rsp), %r12
+ movq %r12, 64(%rsp) # 8-byte Spill
+ adcq $0, %rcx
+ movq %rcx, 80(%rsp) # 8-byte Spill
+ movq %rax, %rdx
+ movq %rax, %r12
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 520(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 520(%rsp), %r12
+ adcq 528(%rsp), %rbp
+ movq %rbp, 72(%rsp) # 8-byte Spill
+ movq 40(%rsp), %r12 # 8-byte Reload
+ adcq 536(%rsp), %r12
+ movq %r13, %rbp
+ adcq 544(%rsp), %rbp
+ adcq 552(%rsp), %rbx
+ adcq 560(%rsp), %r15
+ adcq 568(%rsp), %r14
+ movq 64(%rsp), %r13 # 8-byte Reload
+ adcq 576(%rsp), %r13
+ movq 80(%rsp), %rax # 8-byte Reload
+ adcq 584(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 40(%rax), %rdx
+ leaq 448(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 512(%rsp), %rcx
+ movq 72(%rsp), %rax # 8-byte Reload
+ addq 448(%rsp), %rax
+ adcq 456(%rsp), %r12
+ movq %r12, 40(%rsp) # 8-byte Spill
+ adcq 464(%rsp), %rbp
+ adcq 472(%rsp), %rbx
+ adcq 480(%rsp), %r15
+ adcq 488(%rsp), %r14
+ adcq 496(%rsp), %r13
+ movq %r13, 64(%rsp) # 8-byte Spill
+ movq 80(%rsp), %r13 # 8-byte Reload
+ adcq 504(%rsp), %r13
+ adcq $0, %rcx
+ movq %rcx, 72(%rsp) # 8-byte Spill
+ movq %rax, %rdx
+ movq %rax, %r12
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 376(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 376(%rsp), %r12
+ movq 40(%rsp), %rax # 8-byte Reload
+ adcq 384(%rsp), %rax
+ movq %rax, 40(%rsp) # 8-byte Spill
+ adcq 392(%rsp), %rbp
+ adcq 400(%rsp), %rbx
+ adcq 408(%rsp), %r15
+ adcq 416(%rsp), %r14
+ movq 64(%rsp), %r12 # 8-byte Reload
+ adcq 424(%rsp), %r12
+ adcq 432(%rsp), %r13
+ movq 72(%rsp), %rax # 8-byte Reload
+ adcq 440(%rsp), %rax
+ movq %rax, 72(%rsp) # 8-byte Spill
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 48(%rax), %rdx
+ leaq 304(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 368(%rsp), %rcx
+ movq 40(%rsp), %rax # 8-byte Reload
+ addq 304(%rsp), %rax
+ adcq 312(%rsp), %rbp
+ movq %rbp, 80(%rsp) # 8-byte Spill
+ adcq 320(%rsp), %rbx
+ adcq 328(%rsp), %r15
+ adcq 336(%rsp), %r14
+ adcq 344(%rsp), %r12
+ movq %r12, 64(%rsp) # 8-byte Spill
+ adcq 352(%rsp), %r13
+ movq 72(%rsp), %rbp # 8-byte Reload
+ adcq 360(%rsp), %rbp
+ adcq $0, %rcx
+ movq %rcx, 48(%rsp) # 8-byte Spill
+ movq %rax, %rdx
+ movq %rax, %r12
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 232(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 232(%rsp), %r12
+ movq 80(%rsp), %rax # 8-byte Reload
+ adcq 240(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ adcq 248(%rsp), %rbx
+ adcq 256(%rsp), %r15
+ adcq 264(%rsp), %r14
+ movq 64(%rsp), %r12 # 8-byte Reload
+ adcq 272(%rsp), %r12
+ adcq 280(%rsp), %r13
+ adcq 288(%rsp), %rbp
+ movq %rbp, 72(%rsp) # 8-byte Spill
+ movq 48(%rsp), %rbp # 8-byte Reload
+ adcq 296(%rsp), %rbp
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 56(%rax), %rdx
+ leaq 160(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 224(%rsp), %rcx
+ movq 80(%rsp), %rax # 8-byte Reload
+ addq 160(%rsp), %rax
+ adcq 168(%rsp), %rbx
+ movq %rbx, 48(%rsp) # 8-byte Spill
+ adcq 176(%rsp), %r15
+ adcq 184(%rsp), %r14
+ adcq 192(%rsp), %r12
+ movq %r12, 64(%rsp) # 8-byte Spill
+ adcq 200(%rsp), %r13
+ movq 72(%rsp), %rbx # 8-byte Reload
+ adcq 208(%rsp), %rbx
+ adcq 216(%rsp), %rbp
+ movq %rbp, %r12
+ adcq $0, %rcx
+ movq %rcx, 80(%rsp) # 8-byte Spill
+ movq 32(%rsp), %rdx # 8-byte Reload
+ imulq %rax, %rdx
+ movq %rax, %rbp
+ leaq 88(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 88(%rsp), %rbp
+ movq 48(%rsp), %r11 # 8-byte Reload
+ adcq 96(%rsp), %r11
+ adcq 104(%rsp), %r15
+ adcq 112(%rsp), %r14
+ movq 64(%rsp), %rsi # 8-byte Reload
+ adcq 120(%rsp), %rsi
+ movq %rsi, 64(%rsp) # 8-byte Spill
+ adcq 128(%rsp), %r13
+ adcq 136(%rsp), %rbx
+ movq %rbx, 72(%rsp) # 8-byte Spill
+ adcq 144(%rsp), %r12
+ movq 80(%rsp), %r8 # 8-byte Reload
+ adcq 152(%rsp), %r8
+ movq %r11, %rax
+ movq 56(%rsp), %rbp # 8-byte Reload
+ subq (%rbp), %rax
+ movq %r15, %rcx
+ sbbq 8(%rbp), %rcx
+ movq %r14, %rdx
+ sbbq 16(%rbp), %rdx
+ sbbq 24(%rbp), %rsi
+ movq %r13, %rdi
+ sbbq 32(%rbp), %rdi
+ movq %rbx, %r9
+ sbbq 40(%rbp), %r9
+ movq %r12, %r10
+ sbbq 48(%rbp), %r10
+ movq %rbp, %rbx
+ movq %r8, %rbp
+ sbbq 56(%rbx), %rbp
+ testq %rbp, %rbp
+ cmovsq %r11, %rax
+ movq (%rsp), %rbx # 8-byte Reload
+ movq %rax, (%rbx)
+ cmovsq %r15, %rcx
+ movq %rcx, 8(%rbx)
+ cmovsq %r14, %rdx
+ movq %rdx, 16(%rbx)
+ cmovsq 64(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 24(%rbx)
+ cmovsq %r13, %rdi
+ movq %rdi, 32(%rbx)
+ cmovsq 72(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, 40(%rbx)
+ cmovsq %r12, %r10
+ movq %r10, 48(%rbx)
+ cmovsq %r8, %rbp
+ movq %rbp, 56(%rbx)
+ addq $1240, %rsp # imm = 0x4D8
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end115:
+ .size mcl_fp_montNF8L, .Lfunc_end115-mcl_fp_montNF8L
+
+ .globl mcl_fp_montRed8L
+ .align 16, 0x90
+ .type mcl_fp_montRed8L,@function
+mcl_fp_montRed8L: # @mcl_fp_montRed8L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $776, %rsp # imm = 0x308
+ movq %rdx, %rax
+ movq %rax, 112(%rsp) # 8-byte Spill
+ movq %rdi, 72(%rsp) # 8-byte Spill
+ movq -8(%rax), %rcx
+ movq %rcx, 128(%rsp) # 8-byte Spill
+ movq (%rsi), %r15
+ movq 8(%rsi), %rdx
+ movq %rdx, 184(%rsp) # 8-byte Spill
+ movq %r15, %rdx
+ imulq %rcx, %rdx
+ movq 120(%rsi), %rcx
+ movq %rcx, 96(%rsp) # 8-byte Spill
+ movq 112(%rsi), %rcx
+ movq %rcx, 136(%rsp) # 8-byte Spill
+ movq 104(%rsi), %rcx
+ movq %rcx, 120(%rsp) # 8-byte Spill
+ movq 96(%rsi), %rcx
+ movq %rcx, 168(%rsp) # 8-byte Spill
+ movq 88(%rsi), %rcx
+ movq %rcx, 176(%rsp) # 8-byte Spill
+ movq 80(%rsi), %rcx
+ movq %rcx, 160(%rsp) # 8-byte Spill
+ movq 72(%rsi), %rcx
+ movq %rcx, 192(%rsp) # 8-byte Spill
+ movq 64(%rsi), %r13
+ movq 56(%rsi), %rcx
+ movq %rcx, 144(%rsp) # 8-byte Spill
+ movq 48(%rsi), %r14
+ movq 40(%rsi), %rcx
+ movq %rcx, 152(%rsp) # 8-byte Spill
+ movq 32(%rsi), %r12
+ movq 24(%rsi), %rbx
+ movq 16(%rsi), %rbp
+ movq %rax, %rcx
+ movq (%rcx), %rax
+ movq %rax, 16(%rsp) # 8-byte Spill
+ movq 56(%rcx), %rax
+ movq %rax, 64(%rsp) # 8-byte Spill
+ movq 48(%rcx), %rax
+ movq %rax, 56(%rsp) # 8-byte Spill
+ movq 40(%rcx), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ movq 32(%rcx), %rax
+ movq %rax, 40(%rsp) # 8-byte Spill
+ movq 24(%rcx), %rax
+ movq %rax, 32(%rsp) # 8-byte Spill
+ movq 16(%rcx), %rax
+ movq %rax, 24(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rax
+ movq %rax, 8(%rsp) # 8-byte Spill
+ movq %rcx, %rsi
+ leaq 704(%rsp), %rdi
+ callq .LmulPv512x64
+ addq 704(%rsp), %r15
+ movq 184(%rsp), %rcx # 8-byte Reload
+ adcq 712(%rsp), %rcx
+ adcq 720(%rsp), %rbp
+ movq %rbp, 80(%rsp) # 8-byte Spill
+ adcq 728(%rsp), %rbx
+ movq %rbx, 88(%rsp) # 8-byte Spill
+ adcq 736(%rsp), %r12
+ movq %r12, 104(%rsp) # 8-byte Spill
+ movq 152(%rsp), %rax # 8-byte Reload
+ adcq 744(%rsp), %rax
+ movq %rax, 152(%rsp) # 8-byte Spill
+ adcq 752(%rsp), %r14
+ movq %r14, %r12
+ movq 144(%rsp), %rax # 8-byte Reload
+ adcq 760(%rsp), %rax
+ movq %rax, 144(%rsp) # 8-byte Spill
+ adcq 768(%rsp), %r13
+ movq %r13, 184(%rsp) # 8-byte Spill
+ adcq $0, 192(%rsp) # 8-byte Folded Spill
+ movq 160(%rsp), %r15 # 8-byte Reload
+ adcq $0, %r15
+ adcq $0, 176(%rsp) # 8-byte Folded Spill
+ adcq $0, 168(%rsp) # 8-byte Folded Spill
+ adcq $0, 120(%rsp) # 8-byte Folded Spill
+ movq 136(%rsp), %r13 # 8-byte Reload
+ adcq $0, %r13
+ movq 96(%rsp), %r14 # 8-byte Reload
+ adcq $0, %r14
+ sbbq %rbx, %rbx
+ movq %rcx, %rbp
+ movq %rbp, %rdx
+ imulq 128(%rsp), %rdx # 8-byte Folded Reload
+ leaq 632(%rsp), %rdi
+ movq 112(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ andl $1, %ebx
+ movq %rbx, %rax
+ addq 632(%rsp), %rbp
+ movq 80(%rsp), %rsi # 8-byte Reload
+ adcq 640(%rsp), %rsi
+ movq 88(%rsp), %rcx # 8-byte Reload
+ adcq 648(%rsp), %rcx
+ movq %rcx, 88(%rsp) # 8-byte Spill
+ movq 104(%rsp), %rcx # 8-byte Reload
+ adcq 656(%rsp), %rcx
+ movq %rcx, 104(%rsp) # 8-byte Spill
+ movq 152(%rsp), %rcx # 8-byte Reload
+ adcq 664(%rsp), %rcx
+ movq %rcx, 152(%rsp) # 8-byte Spill
+ adcq 672(%rsp), %r12
+ movq 144(%rsp), %rcx # 8-byte Reload
+ adcq 680(%rsp), %rcx
+ movq %rcx, 144(%rsp) # 8-byte Spill
+ movq 184(%rsp), %rcx # 8-byte Reload
+ adcq 688(%rsp), %rcx
+ movq %rcx, 184(%rsp) # 8-byte Spill
+ movq 192(%rsp), %rcx # 8-byte Reload
+ adcq 696(%rsp), %rcx
+ movq %rcx, 192(%rsp) # 8-byte Spill
+ adcq $0, %r15
+ movq %r15, 160(%rsp) # 8-byte Spill
+ movq 176(%rsp), %rbx # 8-byte Reload
+ adcq $0, %rbx
+ movq 168(%rsp), %r15 # 8-byte Reload
+ adcq $0, %r15
+ adcq $0, 120(%rsp) # 8-byte Folded Spill
+ adcq $0, %r13
+ movq %r13, 136(%rsp) # 8-byte Spill
+ adcq $0, %r14
+ movq %r14, 96(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ adcq $0, %rbp
+ movq %rsi, %rdx
+ movq %rsi, %r14
+ imulq 128(%rsp), %rdx # 8-byte Folded Reload
+ leaq 560(%rsp), %rdi
+ movq 112(%rsp), %r13 # 8-byte Reload
+ movq %r13, %rsi
+ callq .LmulPv512x64
+ addq 560(%rsp), %r14
+ movq 88(%rsp), %rcx # 8-byte Reload
+ adcq 568(%rsp), %rcx
+ movq 104(%rsp), %rax # 8-byte Reload
+ adcq 576(%rsp), %rax
+ movq %rax, 104(%rsp) # 8-byte Spill
+ movq 152(%rsp), %rax # 8-byte Reload
+ adcq 584(%rsp), %rax
+ movq %rax, 152(%rsp) # 8-byte Spill
+ adcq 592(%rsp), %r12
+ movq %r12, 88(%rsp) # 8-byte Spill
+ movq 144(%rsp), %r14 # 8-byte Reload
+ adcq 600(%rsp), %r14
+ movq 184(%rsp), %rax # 8-byte Reload
+ adcq 608(%rsp), %rax
+ movq %rax, 184(%rsp) # 8-byte Spill
+ movq 192(%rsp), %rax # 8-byte Reload
+ adcq 616(%rsp), %rax
+ movq %rax, 192(%rsp) # 8-byte Spill
+ movq 160(%rsp), %rax # 8-byte Reload
+ adcq 624(%rsp), %rax
+ movq %rax, 160(%rsp) # 8-byte Spill
+ adcq $0, %rbx
+ movq %rbx, 176(%rsp) # 8-byte Spill
+ adcq $0, %r15
+ movq %r15, 168(%rsp) # 8-byte Spill
+ movq 120(%rsp), %rbx # 8-byte Reload
+ adcq $0, %rbx
+ movq 136(%rsp), %r15 # 8-byte Reload
+ adcq $0, %r15
+ adcq $0, 96(%rsp) # 8-byte Folded Spill
+ adcq $0, %rbp
+ movq %rbp, 80(%rsp) # 8-byte Spill
+ movq %rcx, %rbp
+ movq %rbp, %rdx
+ movq 128(%rsp), %r12 # 8-byte Reload
+ imulq %r12, %rdx
+ leaq 488(%rsp), %rdi
+ movq %r13, %rsi
+ callq .LmulPv512x64
+ addq 488(%rsp), %rbp
+ movq 104(%rsp), %rax # 8-byte Reload
+ adcq 496(%rsp), %rax
+ movq 152(%rsp), %rbp # 8-byte Reload
+ adcq 504(%rsp), %rbp
+ movq 88(%rsp), %rcx # 8-byte Reload
+ adcq 512(%rsp), %rcx
+ movq %rcx, 88(%rsp) # 8-byte Spill
+ adcq 520(%rsp), %r14
+ movq 184(%rsp), %rcx # 8-byte Reload
+ adcq 528(%rsp), %rcx
+ movq %rcx, 184(%rsp) # 8-byte Spill
+ movq 192(%rsp), %rcx # 8-byte Reload
+ adcq 536(%rsp), %rcx
+ movq %rcx, 192(%rsp) # 8-byte Spill
+ movq 160(%rsp), %r13 # 8-byte Reload
+ adcq 544(%rsp), %r13
+ movq 176(%rsp), %rcx # 8-byte Reload
+ adcq 552(%rsp), %rcx
+ movq %rcx, 176(%rsp) # 8-byte Spill
+ adcq $0, 168(%rsp) # 8-byte Folded Spill
+ adcq $0, %rbx
+ movq %rbx, 120(%rsp) # 8-byte Spill
+ movq %r15, %rbx
+ adcq $0, %rbx
+ adcq $0, 96(%rsp) # 8-byte Folded Spill
+ adcq $0, 80(%rsp) # 8-byte Folded Spill
+ movq %rax, %rdx
+ movq %rax, %r15
+ imulq %r12, %rdx
+ leaq 416(%rsp), %rdi
+ movq 112(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 416(%rsp), %r15
+ adcq 424(%rsp), %rbp
+ movq %rbp, %rax
+ movq 88(%rsp), %rcx # 8-byte Reload
+ adcq 432(%rsp), %rcx
+ movq %rcx, 88(%rsp) # 8-byte Spill
+ movq %r14, %r12
+ adcq 440(%rsp), %r12
+ movq 184(%rsp), %r14 # 8-byte Reload
+ adcq 448(%rsp), %r14
+ movq 192(%rsp), %rbp # 8-byte Reload
+ adcq 456(%rsp), %rbp
+ adcq 464(%rsp), %r13
+ movq 176(%rsp), %rcx # 8-byte Reload
+ adcq 472(%rsp), %rcx
+ movq %rcx, 176(%rsp) # 8-byte Spill
+ movq 168(%rsp), %rcx # 8-byte Reload
+ adcq 480(%rsp), %rcx
+ movq %rcx, 168(%rsp) # 8-byte Spill
+ adcq $0, 120(%rsp) # 8-byte Folded Spill
+ adcq $0, %rbx
+ movq %rbx, 136(%rsp) # 8-byte Spill
+ movq 96(%rsp), %r15 # 8-byte Reload
+ adcq $0, %r15
+ adcq $0, 80(%rsp) # 8-byte Folded Spill
+ movq %rax, %rbx
+ movq %rbx, %rdx
+ imulq 128(%rsp), %rdx # 8-byte Folded Reload
+ leaq 344(%rsp), %rdi
+ movq 112(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 344(%rsp), %rbx
+ movq 88(%rsp), %rax # 8-byte Reload
+ adcq 352(%rsp), %rax
+ adcq 360(%rsp), %r12
+ movq %r12, 144(%rsp) # 8-byte Spill
+ adcq 368(%rsp), %r14
+ movq %r14, 184(%rsp) # 8-byte Spill
+ adcq 376(%rsp), %rbp
+ movq %rbp, 192(%rsp) # 8-byte Spill
+ adcq 384(%rsp), %r13
+ movq %r13, 160(%rsp) # 8-byte Spill
+ movq 176(%rsp), %r13 # 8-byte Reload
+ adcq 392(%rsp), %r13
+ movq 168(%rsp), %r12 # 8-byte Reload
+ adcq 400(%rsp), %r12
+ movq 120(%rsp), %r14 # 8-byte Reload
+ adcq 408(%rsp), %r14
+ movq 136(%rsp), %rbp # 8-byte Reload
+ adcq $0, %rbp
+ movq %r15, %rbx
+ adcq $0, %rbx
+ adcq $0, 80(%rsp) # 8-byte Folded Spill
+ movq %rax, %rdx
+ movq %rax, %r15
+ imulq 128(%rsp), %rdx # 8-byte Folded Reload
+ leaq 272(%rsp), %rdi
+ movq 112(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 272(%rsp), %r15
+ movq 144(%rsp), %rcx # 8-byte Reload
+ adcq 280(%rsp), %rcx
+ movq 184(%rsp), %rax # 8-byte Reload
+ adcq 288(%rsp), %rax
+ movq %rax, 184(%rsp) # 8-byte Spill
+ movq 192(%rsp), %rax # 8-byte Reload
+ adcq 296(%rsp), %rax
+ movq %rax, 192(%rsp) # 8-byte Spill
+ movq 160(%rsp), %rax # 8-byte Reload
+ adcq 304(%rsp), %rax
+ movq %rax, 160(%rsp) # 8-byte Spill
+ adcq 312(%rsp), %r13
+ movq %r13, 176(%rsp) # 8-byte Spill
+ adcq 320(%rsp), %r12
+ movq %r12, 168(%rsp) # 8-byte Spill
+ adcq 328(%rsp), %r14
+ movq %r14, %r13
+ adcq 336(%rsp), %rbp
+ movq %rbp, %r12
+ adcq $0, %rbx
+ movq %rbx, %r14
+ movq 80(%rsp), %r15 # 8-byte Reload
+ adcq $0, %r15
+ movq 128(%rsp), %rdx # 8-byte Reload
+ movq %rcx, %rbx
+ imulq %rbx, %rdx
+ leaq 200(%rsp), %rdi
+ movq 112(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 200(%rsp), %rbx
+ movq 184(%rsp), %rax # 8-byte Reload
+ adcq 208(%rsp), %rax
+ movq %rax, 184(%rsp) # 8-byte Spill
+ movq 192(%rsp), %r8 # 8-byte Reload
+ adcq 216(%rsp), %r8
+ movq %r8, 192(%rsp) # 8-byte Spill
+ movq 160(%rsp), %rdx # 8-byte Reload
+ adcq 224(%rsp), %rdx
+ movq 176(%rsp), %rsi # 8-byte Reload
+ adcq 232(%rsp), %rsi
+ movq 168(%rsp), %rdi # 8-byte Reload
+ adcq 240(%rsp), %rdi
+ movq %r13, %rbp
+ adcq 248(%rsp), %rbp
+ movq %r12, %rbx
+ adcq 256(%rsp), %rbx
+ movq %rbx, 136(%rsp) # 8-byte Spill
+ movq %r14, %r9
+ adcq 264(%rsp), %r9
+ adcq $0, %r15
+ movq %r15, %r10
+ subq 16(%rsp), %rax # 8-byte Folded Reload
+ movq %r8, %rcx
+ sbbq 8(%rsp), %rcx # 8-byte Folded Reload
+ movq %rdx, %r13
+ sbbq 24(%rsp), %r13 # 8-byte Folded Reload
+ movq %rsi, %r12
+ sbbq 32(%rsp), %r12 # 8-byte Folded Reload
+ movq %rdi, %r14
+ sbbq 40(%rsp), %r14 # 8-byte Folded Reload
+ movq %rbp, %r11
+ sbbq 48(%rsp), %r11 # 8-byte Folded Reload
+ movq %rbx, %r8
+ sbbq 56(%rsp), %r8 # 8-byte Folded Reload
+ movq %r9, %r15
+ sbbq 64(%rsp), %r9 # 8-byte Folded Reload
+ sbbq $0, %r10
+ andl $1, %r10d
+ cmovneq %r15, %r9
+ testb %r10b, %r10b
+ cmovneq 184(%rsp), %rax # 8-byte Folded Reload
+ movq 72(%rsp), %rbx # 8-byte Reload
+ movq %rax, (%rbx)
+ cmovneq 192(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 8(%rbx)
+ cmovneq %rdx, %r13
+ movq %r13, 16(%rbx)
+ cmovneq %rsi, %r12
+ movq %r12, 24(%rbx)
+ cmovneq %rdi, %r14
+ movq %r14, 32(%rbx)
+ cmovneq %rbp, %r11
+ movq %r11, 40(%rbx)
+ cmovneq 136(%rsp), %r8 # 8-byte Folded Reload
+ movq %r8, 48(%rbx)
+ movq %r9, 56(%rbx)
+ addq $776, %rsp # imm = 0x308
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end116:
+ .size mcl_fp_montRed8L, .Lfunc_end116-mcl_fp_montRed8L
+
+ .globl mcl_fp_addPre8L
+ .align 16, 0x90
+ .type mcl_fp_addPre8L,@function
+mcl_fp_addPre8L: # @mcl_fp_addPre8L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r8
+ movq 56(%rsi), %r15
+ movq 48(%rdx), %r9
+ movq 48(%rsi), %r12
+ movq 40(%rdx), %r10
+ movq 32(%rdx), %r11
+ movq 24(%rdx), %r14
+ movq 16(%rdx), %rbx
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rbx
+ movq 40(%rsi), %r13
+ movq 24(%rsi), %rax
+ movq 32(%rsi), %rsi
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rbx, 16(%rdi)
+ adcq %r14, %rax
+ movq %rax, 24(%rdi)
+ adcq %r11, %rsi
+ movq %rsi, 32(%rdi)
+ adcq %r10, %r13
+ movq %r13, 40(%rdi)
+ adcq %r9, %r12
+ movq %r12, 48(%rdi)
+ adcq %r8, %r15
+ movq %r15, 56(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end117:
+ .size mcl_fp_addPre8L, .Lfunc_end117-mcl_fp_addPre8L
+
+ .globl mcl_fp_subPre8L
+ .align 16, 0x90
+ .type mcl_fp_subPre8L,@function
+mcl_fp_subPre8L: # @mcl_fp_subPre8L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r8
+ movq 56(%rsi), %r15
+ movq 48(%rdx), %r9
+ movq 40(%rdx), %r10
+ movq 24(%rdx), %r11
+ movq 32(%rdx), %r14
+ movq (%rsi), %rbx
+ movq 8(%rsi), %r12
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %r12
+ movq 16(%rsi), %rcx
+ sbbq 16(%rdx), %rcx
+ movq 48(%rsi), %r13
+ movq 40(%rsi), %rdx
+ movq 32(%rsi), %rbp
+ movq 24(%rsi), %rsi
+ movq %rbx, (%rdi)
+ movq %r12, 8(%rdi)
+ movq %rcx, 16(%rdi)
+ sbbq %r11, %rsi
+ movq %rsi, 24(%rdi)
+ sbbq %r14, %rbp
+ movq %rbp, 32(%rdi)
+ sbbq %r10, %rdx
+ movq %rdx, 40(%rdi)
+ sbbq %r9, %r13
+ movq %r13, 48(%rdi)
+ sbbq %r8, %r15
+ movq %r15, 56(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end118:
+ .size mcl_fp_subPre8L, .Lfunc_end118-mcl_fp_subPre8L
+
+ .globl mcl_fp_shr1_8L
+ .align 16, 0x90
+ .type mcl_fp_shr1_8L,@function
+mcl_fp_shr1_8L: # @mcl_fp_shr1_8L
+# BB#0:
+ movq 56(%rsi), %r8
+ movq 48(%rsi), %r9
+ movq 40(%rsi), %r10
+ movq 32(%rsi), %r11
+ movq 24(%rsi), %rcx
+ movq 16(%rsi), %rdx
+ movq (%rsi), %rax
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rax
+ movq %rax, (%rdi)
+ shrdq $1, %rdx, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rcx, %rdx
+ movq %rdx, 16(%rdi)
+ shrdq $1, %r11, %rcx
+ movq %rcx, 24(%rdi)
+ shrdq $1, %r10, %r11
+ movq %r11, 32(%rdi)
+ shrdq $1, %r9, %r10
+ movq %r10, 40(%rdi)
+ shrdq $1, %r8, %r9
+ movq %r9, 48(%rdi)
+ shrq %r8
+ movq %r8, 56(%rdi)
+ retq
+.Lfunc_end119:
+ .size mcl_fp_shr1_8L, .Lfunc_end119-mcl_fp_shr1_8L
+
+ .globl mcl_fp_add8L
+ .align 16, 0x90
+ .type mcl_fp_add8L,@function
+mcl_fp_add8L: # @mcl_fp_add8L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r15
+ movq 56(%rsi), %r8
+ movq 48(%rdx), %r12
+ movq 48(%rsi), %r9
+ movq 40(%rsi), %r13
+ movq 24(%rsi), %r11
+ movq 32(%rsi), %r10
+ movq (%rdx), %r14
+ movq 8(%rdx), %rbx
+ addq (%rsi), %r14
+ adcq 8(%rsi), %rbx
+ movq 16(%rdx), %rax
+ adcq 16(%rsi), %rax
+ adcq 24(%rdx), %r11
+ movq 40(%rdx), %rsi
+ adcq 32(%rdx), %r10
+ movq %r14, (%rdi)
+ movq %rbx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ movq %r11, 24(%rdi)
+ movq %r10, 32(%rdi)
+ adcq %r13, %rsi
+ movq %rsi, 40(%rdi)
+ adcq %r12, %r9
+ movq %r9, 48(%rdi)
+ adcq %r15, %r8
+ movq %r8, 56(%rdi)
+ sbbq %rdx, %rdx
+ andl $1, %edx
+ subq (%rcx), %r14
+ sbbq 8(%rcx), %rbx
+ sbbq 16(%rcx), %rax
+ sbbq 24(%rcx), %r11
+ sbbq 32(%rcx), %r10
+ sbbq 40(%rcx), %rsi
+ sbbq 48(%rcx), %r9
+ sbbq 56(%rcx), %r8
+ sbbq $0, %rdx
+ testb $1, %dl
+ jne .LBB120_2
+# BB#1: # %nocarry
+ movq %r14, (%rdi)
+ movq %rbx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ movq %r11, 24(%rdi)
+ movq %r10, 32(%rdi)
+ movq %rsi, 40(%rdi)
+ movq %r9, 48(%rdi)
+ movq %r8, 56(%rdi)
+.LBB120_2: # %carry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end120:
+ .size mcl_fp_add8L, .Lfunc_end120-mcl_fp_add8L
+
+ .globl mcl_fp_addNF8L
+ .align 16, 0x90
+ .type mcl_fp_addNF8L,@function
+mcl_fp_addNF8L: # @mcl_fp_addNF8L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r8
+ movq 48(%rdx), %rbp
+ movq 40(%rdx), %rbx
+ movq 32(%rdx), %rax
+ movq 24(%rdx), %r11
+ movq 16(%rdx), %r15
+ movq (%rdx), %r13
+ movq 8(%rdx), %r12
+ addq (%rsi), %r13
+ adcq 8(%rsi), %r12
+ adcq 16(%rsi), %r15
+ adcq 24(%rsi), %r11
+ adcq 32(%rsi), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq %rax, %r10
+ adcq 40(%rsi), %rbx
+ movq %rbx, -16(%rsp) # 8-byte Spill
+ movq %rbx, %r9
+ adcq 48(%rsi), %rbp
+ movq %rbp, -8(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ adcq 56(%rsi), %r8
+ movq %r13, %rsi
+ subq (%rcx), %rsi
+ movq %r12, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r15, %rbx
+ sbbq 16(%rcx), %rbx
+ movq %r11, %r14
+ sbbq 24(%rcx), %r14
+ movq %r10, %rbp
+ sbbq 32(%rcx), %rbp
+ movq %r9, %r10
+ sbbq 40(%rcx), %r10
+ movq %rax, %r9
+ sbbq 48(%rcx), %r9
+ movq %r8, %rax
+ sbbq 56(%rcx), %rax
+ testq %rax, %rax
+ cmovsq %r13, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r12, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r15, %rbx
+ movq %rbx, 16(%rdi)
+ cmovsq %r11, %r14
+ movq %r14, 24(%rdi)
+ cmovsq -24(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 32(%rdi)
+ cmovsq -16(%rsp), %r10 # 8-byte Folded Reload
+ movq %r10, 40(%rdi)
+ cmovsq -8(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, 48(%rdi)
+ cmovsq %r8, %rax
+ movq %rax, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end121:
+ .size mcl_fp_addNF8L, .Lfunc_end121-mcl_fp_addNF8L
+
+ .globl mcl_fp_sub8L
+ .align 16, 0x90
+ .type mcl_fp_sub8L,@function
+mcl_fp_sub8L: # @mcl_fp_sub8L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r12
+ movq 56(%rsi), %r8
+ movq 48(%rdx), %r13
+ movq (%rsi), %rax
+ movq 8(%rsi), %r10
+ xorl %ebx, %ebx
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r10
+ movq 16(%rsi), %r11
+ sbbq 16(%rdx), %r11
+ movq 24(%rsi), %r15
+ sbbq 24(%rdx), %r15
+ movq 32(%rsi), %r14
+ sbbq 32(%rdx), %r14
+ movq 48(%rsi), %r9
+ movq 40(%rsi), %rsi
+ sbbq 40(%rdx), %rsi
+ movq %rax, (%rdi)
+ movq %r10, 8(%rdi)
+ movq %r11, 16(%rdi)
+ movq %r15, 24(%rdi)
+ movq %r14, 32(%rdi)
+ movq %rsi, 40(%rdi)
+ sbbq %r13, %r9
+ movq %r9, 48(%rdi)
+ sbbq %r12, %r8
+ movq %r8, 56(%rdi)
+ sbbq $0, %rbx
+ testb $1, %bl
+ je .LBB122_2
+# BB#1: # %carry
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ movq 8(%rcx), %rax
+ adcq %r10, %rax
+ movq %rax, 8(%rdi)
+ movq 16(%rcx), %rax
+ adcq %r11, %rax
+ movq %rax, 16(%rdi)
+ movq 24(%rcx), %rax
+ adcq %r15, %rax
+ movq %rax, 24(%rdi)
+ movq 32(%rcx), %rax
+ adcq %r14, %rax
+ movq %rax, 32(%rdi)
+ movq 40(%rcx), %rax
+ adcq %rsi, %rax
+ movq %rax, 40(%rdi)
+ movq 48(%rcx), %rax
+ adcq %r9, %rax
+ movq %rax, 48(%rdi)
+ movq 56(%rcx), %rax
+ adcq %r8, %rax
+ movq %rax, 56(%rdi)
+.LBB122_2: # %nocarry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end122:
+ .size mcl_fp_sub8L, .Lfunc_end122-mcl_fp_sub8L
+
+ .globl mcl_fp_subNF8L
+ .align 16, 0x90
+ .type mcl_fp_subNF8L,@function
+mcl_fp_subNF8L: # @mcl_fp_subNF8L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq %rdi, %r9
+ movq 56(%rsi), %r14
+ movq 48(%rsi), %rax
+ movq 40(%rsi), %rcx
+ movq 32(%rsi), %rdi
+ movq 24(%rsi), %r11
+ movq 16(%rsi), %r15
+ movq (%rsi), %r13
+ movq 8(%rsi), %r12
+ subq (%rdx), %r13
+ sbbq 8(%rdx), %r12
+ sbbq 16(%rdx), %r15
+ sbbq 24(%rdx), %r11
+ sbbq 32(%rdx), %rdi
+ movq %rdi, -24(%rsp) # 8-byte Spill
+ sbbq 40(%rdx), %rcx
+ movq %rcx, -16(%rsp) # 8-byte Spill
+ sbbq 48(%rdx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ sbbq 56(%rdx), %r14
+ movq %r14, %rsi
+ sarq $63, %rsi
+ movq 56(%r8), %r10
+ andq %rsi, %r10
+ movq 48(%r8), %rbx
+ andq %rsi, %rbx
+ movq 40(%r8), %rdi
+ andq %rsi, %rdi
+ movq 32(%r8), %rbp
+ andq %rsi, %rbp
+ movq 24(%r8), %rdx
+ andq %rsi, %rdx
+ movq 16(%r8), %rcx
+ andq %rsi, %rcx
+ movq 8(%r8), %rax
+ andq %rsi, %rax
+ andq (%r8), %rsi
+ addq %r13, %rsi
+ adcq %r12, %rax
+ movq %rsi, (%r9)
+ adcq %r15, %rcx
+ movq %rax, 8(%r9)
+ movq %rcx, 16(%r9)
+ adcq %r11, %rdx
+ movq %rdx, 24(%r9)
+ adcq -24(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 32(%r9)
+ adcq -16(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 40(%r9)
+ adcq -8(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, 48(%r9)
+ adcq %r14, %r10
+ movq %r10, 56(%r9)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end123:
+ .size mcl_fp_subNF8L, .Lfunc_end123-mcl_fp_subNF8L
+
+ .globl mcl_fpDbl_add8L
+ .align 16, 0x90
+ .type mcl_fpDbl_add8L,@function
+mcl_fpDbl_add8L: # @mcl_fpDbl_add8L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq 120(%rdx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq 112(%rdx), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq 104(%rdx), %rax
+ movq %rax, -32(%rsp) # 8-byte Spill
+ movq 96(%rdx), %r14
+ movq 24(%rsi), %r15
+ movq 32(%rsi), %r11
+ movq 16(%rdx), %r12
+ movq (%rdx), %rbx
+ movq 8(%rdx), %rax
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %rax
+ adcq 16(%rsi), %r12
+ adcq 24(%rdx), %r15
+ adcq 32(%rdx), %r11
+ movq 88(%rdx), %rbp
+ movq 80(%rdx), %r13
+ movq %rbx, (%rdi)
+ movq 72(%rdx), %r10
+ movq %rax, 8(%rdi)
+ movq 64(%rdx), %r9
+ movq %r12, 16(%rdi)
+ movq 40(%rdx), %r12
+ movq %r15, 24(%rdi)
+ movq 40(%rsi), %rbx
+ adcq %r12, %rbx
+ movq 56(%rdx), %r15
+ movq 48(%rdx), %r12
+ movq %r11, 32(%rdi)
+ movq 48(%rsi), %rdx
+ adcq %r12, %rdx
+ movq 120(%rsi), %r12
+ movq %rbx, 40(%rdi)
+ movq 56(%rsi), %rax
+ adcq %r15, %rax
+ movq 112(%rsi), %rcx
+ movq %rdx, 48(%rdi)
+ movq 64(%rsi), %rbx
+ adcq %r9, %rbx
+ movq 104(%rsi), %rdx
+ movq %rax, 56(%rdi)
+ movq 72(%rsi), %r9
+ adcq %r10, %r9
+ movq 80(%rsi), %r11
+ adcq %r13, %r11
+ movq 96(%rsi), %rax
+ movq 88(%rsi), %r15
+ adcq %rbp, %r15
+ adcq %r14, %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq %rdx, %rax
+ adcq -32(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -32(%rsp) # 8-byte Spill
+ adcq -24(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -24(%rsp) # 8-byte Spill
+ adcq -8(%rsp), %r12 # 8-byte Folded Reload
+ movq %r12, -8(%rsp) # 8-byte Spill
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ movq %rbx, %rsi
+ subq (%r8), %rsi
+ movq %r9, %rdx
+ sbbq 8(%r8), %rdx
+ movq %r11, %r10
+ sbbq 16(%r8), %r10
+ movq %r15, %r14
+ sbbq 24(%r8), %r14
+ movq -16(%rsp), %r13 # 8-byte Reload
+ sbbq 32(%r8), %r13
+ movq %rax, %r12
+ sbbq 40(%r8), %r12
+ movq %rcx, %rax
+ sbbq 48(%r8), %rax
+ movq -8(%rsp), %rcx # 8-byte Reload
+ sbbq 56(%r8), %rcx
+ sbbq $0, %rbp
+ andl $1, %ebp
+ cmovneq %rbx, %rsi
+ movq %rsi, 64(%rdi)
+ testb %bpl, %bpl
+ cmovneq %r9, %rdx
+ movq %rdx, 72(%rdi)
+ cmovneq %r11, %r10
+ movq %r10, 80(%rdi)
+ cmovneq %r15, %r14
+ movq %r14, 88(%rdi)
+ cmovneq -16(%rsp), %r13 # 8-byte Folded Reload
+ movq %r13, 96(%rdi)
+ cmovneq -32(%rsp), %r12 # 8-byte Folded Reload
+ movq %r12, 104(%rdi)
+ cmovneq -24(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 112(%rdi)
+ cmovneq -8(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 120(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end124:
+ .size mcl_fpDbl_add8L, .Lfunc_end124-mcl_fpDbl_add8L
+
+ .globl mcl_fpDbl_sub8L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub8L,@function
+mcl_fpDbl_sub8L: # @mcl_fpDbl_sub8L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r15
+ movq 120(%rdx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq 112(%rdx), %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq 104(%rdx), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq 16(%rsi), %r9
+ movq (%rsi), %r12
+ movq 8(%rsi), %r14
+ xorl %r8d, %r8d
+ subq (%rdx), %r12
+ sbbq 8(%rdx), %r14
+ sbbq 16(%rdx), %r9
+ movq 24(%rsi), %rbx
+ sbbq 24(%rdx), %rbx
+ movq 32(%rsi), %r13
+ sbbq 32(%rdx), %r13
+ movq 96(%rdx), %rbp
+ movq 88(%rdx), %r11
+ movq %r12, (%rdi)
+ movq 80(%rdx), %r12
+ movq %r14, 8(%rdi)
+ movq 72(%rdx), %r10
+ movq %r9, 16(%rdi)
+ movq 40(%rdx), %r9
+ movq %rbx, 24(%rdi)
+ movq 40(%rsi), %rbx
+ sbbq %r9, %rbx
+ movq 48(%rdx), %r9
+ movq %r13, 32(%rdi)
+ movq 48(%rsi), %r14
+ sbbq %r9, %r14
+ movq 64(%rdx), %r13
+ movq 56(%rdx), %r9
+ movq %rbx, 40(%rdi)
+ movq 56(%rsi), %rdx
+ sbbq %r9, %rdx
+ movq 120(%rsi), %rcx
+ movq %r14, 48(%rdi)
+ movq 64(%rsi), %rbx
+ sbbq %r13, %rbx
+ movq 112(%rsi), %rax
+ movq %rdx, 56(%rdi)
+ movq 72(%rsi), %r9
+ sbbq %r10, %r9
+ movq 80(%rsi), %r13
+ sbbq %r12, %r13
+ movq 88(%rsi), %r12
+ sbbq %r11, %r12
+ movq 104(%rsi), %rdx
+ movq 96(%rsi), %r14
+ sbbq %rbp, %r14
+ sbbq -24(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ sbbq -16(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -16(%rsp) # 8-byte Spill
+ sbbq -8(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -8(%rsp) # 8-byte Spill
+ movl $0, %ebp
+ sbbq $0, %rbp
+ andl $1, %ebp
+ movq (%r15), %r11
+ cmoveq %r8, %r11
+ testb %bpl, %bpl
+ movq 16(%r15), %rbp
+ cmoveq %r8, %rbp
+ movq 8(%r15), %rsi
+ cmoveq %r8, %rsi
+ movq 56(%r15), %r10
+ cmoveq %r8, %r10
+ movq 48(%r15), %rdx
+ cmoveq %r8, %rdx
+ movq 40(%r15), %rcx
+ cmoveq %r8, %rcx
+ movq 32(%r15), %rax
+ cmoveq %r8, %rax
+ cmovneq 24(%r15), %r8
+ addq %rbx, %r11
+ adcq %r9, %rsi
+ movq %r11, 64(%rdi)
+ adcq %r13, %rbp
+ movq %rsi, 72(%rdi)
+ movq %rbp, 80(%rdi)
+ adcq %r12, %r8
+ movq %r8, 88(%rdi)
+ adcq %r14, %rax
+ movq %rax, 96(%rdi)
+ adcq -24(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 104(%rdi)
+ adcq -16(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 112(%rdi)
+ adcq -8(%rsp), %r10 # 8-byte Folded Reload
+ movq %r10, 120(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end125:
+ .size mcl_fpDbl_sub8L, .Lfunc_end125-mcl_fpDbl_sub8L
+
+ .align 16, 0x90
+ .type .LmulPv576x64,@function
+.LmulPv576x64: # @mulPv576x64
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rbx
+ movq %rbx, %rax
+ mulq (%rsi)
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rax, (%rdi)
+ movq %rbx, %rax
+ mulq 64(%rsi)
+ movq %rdx, %r10
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq 56(%rsi)
+ movq %rdx, %r14
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq 48(%rsi)
+ movq %rdx, %r12
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq 40(%rsi)
+ movq %rdx, %rcx
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq 32(%rsi)
+ movq %rdx, %rbp
+ movq %rax, %r8
+ movq %rbx, %rax
+ mulq 24(%rsi)
+ movq %rdx, %r9
+ movq %rax, %r11
+ movq %rbx, %rax
+ mulq 16(%rsi)
+ movq %rdx, %r15
+ movq %rax, %r13
+ movq %rbx, %rax
+ mulq 8(%rsi)
+ addq -32(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 8(%rdi)
+ adcq %r13, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r11, %r15
+ movq %r15, 24(%rdi)
+ adcq %r8, %r9
+ movq %r9, 32(%rdi)
+ adcq -40(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 40(%rdi)
+ adcq -24(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 48(%rdi)
+ adcq -16(%rsp), %r12 # 8-byte Folded Reload
+ movq %r12, 56(%rdi)
+ adcq -8(%rsp), %r14 # 8-byte Folded Reload
+ movq %r14, 64(%rdi)
+ adcq $0, %r10
+ movq %r10, 72(%rdi)
+ movq %rdi, %rax
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end126:
+ .size .LmulPv576x64, .Lfunc_end126-.LmulPv576x64
+
+ .globl mcl_fp_mulUnitPre9L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre9L,@function
+mcl_fp_mulUnitPre9L: # @mcl_fp_mulUnitPre9L
+# BB#0:
+ pushq %r14
+ pushq %rbx
+ subq $88, %rsp
+ movq %rdi, %rbx
+ leaq 8(%rsp), %rdi
+ callq .LmulPv576x64
+ movq 80(%rsp), %r8
+ movq 72(%rsp), %r9
+ movq 64(%rsp), %r10
+ movq 56(%rsp), %r11
+ movq 48(%rsp), %r14
+ movq 40(%rsp), %rax
+ movq 32(%rsp), %rcx
+ movq 24(%rsp), %rdx
+ movq 8(%rsp), %rsi
+ movq 16(%rsp), %rdi
+ movq %rsi, (%rbx)
+ movq %rdi, 8(%rbx)
+ movq %rdx, 16(%rbx)
+ movq %rcx, 24(%rbx)
+ movq %rax, 32(%rbx)
+ movq %r14, 40(%rbx)
+ movq %r11, 48(%rbx)
+ movq %r10, 56(%rbx)
+ movq %r9, 64(%rbx)
+ movq %r8, 72(%rbx)
+ addq $88, %rsp
+ popq %rbx
+ popq %r14
+ retq
+.Lfunc_end127:
+ .size mcl_fp_mulUnitPre9L, .Lfunc_end127-mcl_fp_mulUnitPre9L
+
+ .globl mcl_fpDbl_mulPre9L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre9L,@function
+mcl_fpDbl_mulPre9L: # @mcl_fpDbl_mulPre9L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $808, %rsp # imm = 0x328
+ movq %rdx, %rax
+ movq %rax, 64(%rsp) # 8-byte Spill
+ movq %rsi, 72(%rsp) # 8-byte Spill
+ movq %rdi, %r12
+ movq %r12, 80(%rsp) # 8-byte Spill
+ movq (%rax), %rdx
+ movq %rax, %rbx
+ leaq 728(%rsp), %rdi
+ movq %rsi, %rbp
+ callq .LmulPv576x64
+ movq 800(%rsp), %r13
+ movq 792(%rsp), %rax
+ movq %rax, 56(%rsp) # 8-byte Spill
+ movq 784(%rsp), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ movq 776(%rsp), %rax
+ movq %rax, 40(%rsp) # 8-byte Spill
+ movq 768(%rsp), %rax
+ movq %rax, 32(%rsp) # 8-byte Spill
+ movq 760(%rsp), %rax
+ movq %rax, 24(%rsp) # 8-byte Spill
+ movq 752(%rsp), %rax
+ movq %rax, 16(%rsp) # 8-byte Spill
+ movq 744(%rsp), %rax
+ movq %rax, 8(%rsp) # 8-byte Spill
+ movq 728(%rsp), %rax
+ movq 736(%rsp), %r14
+ movq %rax, (%r12)
+ movq 8(%rbx), %rdx
+ leaq 648(%rsp), %rdi
+ movq %rbp, %rsi
+ callq .LmulPv576x64
+ movq 720(%rsp), %r8
+ movq 712(%rsp), %rcx
+ movq 704(%rsp), %rdx
+ movq 696(%rsp), %rsi
+ movq 688(%rsp), %rdi
+ movq 680(%rsp), %rbp
+ addq 648(%rsp), %r14
+ movq 672(%rsp), %rax
+ movq 656(%rsp), %rbx
+ movq 664(%rsp), %r15
+ movq %r14, 8(%r12)
+ adcq 8(%rsp), %rbx # 8-byte Folded Reload
+ adcq 16(%rsp), %r15 # 8-byte Folded Reload
+ adcq 24(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, %r14
+ adcq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 16(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 24(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 32(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 40(%rsp) # 8-byte Spill
+ adcq %r13, %rcx
+ movq %rcx, 48(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 56(%rsp) # 8-byte Spill
+ movq 64(%rsp), %r13 # 8-byte Reload
+ movq 16(%r13), %rdx
+ leaq 568(%rsp), %rdi
+ movq 72(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 640(%rsp), %r8
+ movq 632(%rsp), %r9
+ movq 624(%rsp), %r10
+ movq 616(%rsp), %rdi
+ movq 608(%rsp), %rbp
+ movq 600(%rsp), %rcx
+ addq 568(%rsp), %rbx
+ movq 592(%rsp), %rdx
+ movq 576(%rsp), %r12
+ movq 584(%rsp), %rsi
+ movq 80(%rsp), %rax # 8-byte Reload
+ movq %rbx, 16(%rax)
+ adcq %r15, %r12
+ adcq %r14, %rsi
+ movq %rsi, (%rsp) # 8-byte Spill
+ adcq 16(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 8(%rsp) # 8-byte Spill
+ adcq 24(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %r10 # 8-byte Folded Reload
+ movq %r10, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, 48(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 56(%rsp) # 8-byte Spill
+ movq 24(%r13), %rdx
+ leaq 488(%rsp), %rdi
+ movq 72(%rsp), %r15 # 8-byte Reload
+ movq %r15, %rsi
+ callq .LmulPv576x64
+ movq 560(%rsp), %r8
+ movq 552(%rsp), %rcx
+ movq 544(%rsp), %rdx
+ movq 536(%rsp), %rsi
+ movq 528(%rsp), %rdi
+ movq 520(%rsp), %rbp
+ addq 488(%rsp), %r12
+ movq 512(%rsp), %rax
+ movq 496(%rsp), %rbx
+ movq 504(%rsp), %r13
+ movq 80(%rsp), %r14 # 8-byte Reload
+ movq %r12, 24(%r14)
+ adcq (%rsp), %rbx # 8-byte Folded Reload
+ adcq 8(%rsp), %r13 # 8-byte Folded Reload
+ adcq 16(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 8(%rsp) # 8-byte Spill
+ adcq 24(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 48(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 56(%rsp) # 8-byte Spill
+ movq 64(%rsp), %r12 # 8-byte Reload
+ movq 32(%r12), %rdx
+ leaq 408(%rsp), %rdi
+ movq %r15, %rsi
+ callq .LmulPv576x64
+ movq 480(%rsp), %r8
+ movq 472(%rsp), %r9
+ movq 464(%rsp), %rdx
+ movq 456(%rsp), %rsi
+ movq 448(%rsp), %rdi
+ movq 440(%rsp), %rbp
+ addq 408(%rsp), %rbx
+ movq 432(%rsp), %rax
+ movq 416(%rsp), %r15
+ movq 424(%rsp), %rcx
+ movq %rbx, 32(%r14)
+ adcq %r13, %r15
+ adcq 8(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, (%rsp) # 8-byte Spill
+ adcq 16(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 8(%rsp) # 8-byte Spill
+ adcq 24(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, 48(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 56(%rsp) # 8-byte Spill
+ movq %r12, %r14
+ movq 40(%r14), %rdx
+ leaq 328(%rsp), %rdi
+ movq 72(%rsp), %r13 # 8-byte Reload
+ movq %r13, %rsi
+ callq .LmulPv576x64
+ movq 400(%rsp), %r8
+ movq 392(%rsp), %r9
+ movq 384(%rsp), %rsi
+ movq 376(%rsp), %rdi
+ movq 368(%rsp), %rbx
+ movq 360(%rsp), %rbp
+ addq 328(%rsp), %r15
+ movq 352(%rsp), %rcx
+ movq 336(%rsp), %r12
+ movq 344(%rsp), %rdx
+ movq 80(%rsp), %rax # 8-byte Reload
+ movq %r15, 40(%rax)
+ adcq (%rsp), %r12 # 8-byte Folded Reload
+ adcq 8(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, (%rsp) # 8-byte Spill
+ adcq 16(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 8(%rsp) # 8-byte Spill
+ adcq 24(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, 48(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 56(%rsp) # 8-byte Spill
+ movq 48(%r14), %rdx
+ leaq 248(%rsp), %rdi
+ movq %r13, %rsi
+ movq %r13, %r15
+ callq .LmulPv576x64
+ movq 320(%rsp), %r8
+ movq 312(%rsp), %r9
+ movq 304(%rsp), %rsi
+ movq 296(%rsp), %rdi
+ movq 288(%rsp), %rbx
+ movq 280(%rsp), %rbp
+ addq 248(%rsp), %r12
+ movq 272(%rsp), %rcx
+ movq 256(%rsp), %r13
+ movq 264(%rsp), %rdx
+ movq 80(%rsp), %rax # 8-byte Reload
+ movq %r12, 48(%rax)
+ adcq (%rsp), %r13 # 8-byte Folded Reload
+ adcq 8(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, (%rsp) # 8-byte Spill
+ adcq 16(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 8(%rsp) # 8-byte Spill
+ adcq 24(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, 48(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 56(%rsp) # 8-byte Spill
+ movq 56(%r14), %rdx
+ leaq 168(%rsp), %rdi
+ movq %r15, %rsi
+ callq .LmulPv576x64
+ movq 240(%rsp), %rcx
+ movq 232(%rsp), %rdx
+ movq 224(%rsp), %rsi
+ movq 216(%rsp), %rdi
+ movq 208(%rsp), %rbx
+ addq 168(%rsp), %r13
+ movq 200(%rsp), %r12
+ movq 192(%rsp), %rbp
+ movq 176(%rsp), %r14
+ movq 184(%rsp), %r15
+ movq 80(%rsp), %rax # 8-byte Reload
+ movq %r13, 56(%rax)
+ adcq (%rsp), %r14 # 8-byte Folded Reload
+ adcq 8(%rsp), %r15 # 8-byte Folded Reload
+ adcq 16(%rsp), %rbp # 8-byte Folded Reload
+ adcq 24(%rsp), %r12 # 8-byte Folded Reload
+ adcq 32(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, %r13
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 48(%rsp) # 8-byte Spill
+ adcq $0, %rcx
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ movq 64(%rsp), %rax # 8-byte Reload
+ movq 64(%rax), %rdx
+ leaq 88(%rsp), %rdi
+ movq 72(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 88(%rsp), %r14
+ adcq 96(%rsp), %r15
+ movq 160(%rsp), %r8
+ adcq 104(%rsp), %rbp
+ movq 152(%rsp), %r9
+ movq 144(%rsp), %rdx
+ movq 136(%rsp), %rsi
+ movq 128(%rsp), %rdi
+ movq 120(%rsp), %rbx
+ movq 112(%rsp), %rax
+ movq 80(%rsp), %rcx # 8-byte Reload
+ movq %r14, 64(%rcx)
+ movq %r15, 72(%rcx)
+ adcq %r12, %rax
+ movq %rbp, 80(%rcx)
+ movq %rax, 88(%rcx)
+ adcq %r13, %rbx
+ movq %rbx, 96(%rcx)
+ adcq 32(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 104(%rcx)
+ adcq 40(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 112(%rcx)
+ adcq 48(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 120(%rcx)
+ adcq 56(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, 128(%rcx)
+ adcq $0, %r8
+ movq %r8, 136(%rcx)
+ addq $808, %rsp # imm = 0x328
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end128:
+ .size mcl_fpDbl_mulPre9L, .Lfunc_end128-mcl_fpDbl_mulPre9L
+
+ .globl mcl_fpDbl_sqrPre9L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre9L,@function
+mcl_fpDbl_sqrPre9L: # @mcl_fpDbl_sqrPre9L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $808, %rsp # imm = 0x328
+ movq %rsi, %r15
+ movq %r15, 80(%rsp) # 8-byte Spill
+ movq %rdi, %r14
+ movq %r14, 72(%rsp) # 8-byte Spill
+ movq (%r15), %rdx
+ leaq 728(%rsp), %rdi
+ callq .LmulPv576x64
+ movq 800(%rsp), %rax
+ movq %rax, 64(%rsp) # 8-byte Spill
+ movq 792(%rsp), %rax
+ movq %rax, 56(%rsp) # 8-byte Spill
+ movq 784(%rsp), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ movq 776(%rsp), %rax
+ movq %rax, 40(%rsp) # 8-byte Spill
+ movq 768(%rsp), %rax
+ movq %rax, 32(%rsp) # 8-byte Spill
+ movq 760(%rsp), %rax
+ movq %rax, 24(%rsp) # 8-byte Spill
+ movq 752(%rsp), %rax
+ movq %rax, 16(%rsp) # 8-byte Spill
+ movq 744(%rsp), %rax
+ movq %rax, 8(%rsp) # 8-byte Spill
+ movq 728(%rsp), %rax
+ movq 736(%rsp), %r12
+ movq %rax, (%r14)
+ movq 8(%r15), %rdx
+ leaq 648(%rsp), %rdi
+ movq %r15, %rsi
+ callq .LmulPv576x64
+ movq 720(%rsp), %r8
+ movq 712(%rsp), %rcx
+ movq 704(%rsp), %rdx
+ movq 696(%rsp), %rsi
+ movq 688(%rsp), %rdi
+ movq 680(%rsp), %rbp
+ addq 648(%rsp), %r12
+ movq 672(%rsp), %rax
+ movq 656(%rsp), %rbx
+ movq 664(%rsp), %r13
+ movq %r12, 8(%r14)
+ adcq 8(%rsp), %rbx # 8-byte Folded Reload
+ adcq 16(%rsp), %r13 # 8-byte Folded Reload
+ adcq 24(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 48(%rsp) # 8-byte Spill
+ adcq 64(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 64(%rsp) # 8-byte Spill
+ movq 16(%r15), %rdx
+ leaq 568(%rsp), %rdi
+ movq %r15, %rsi
+ callq .LmulPv576x64
+ movq 640(%rsp), %r8
+ movq 632(%rsp), %rcx
+ movq 624(%rsp), %rdx
+ movq 616(%rsp), %rsi
+ movq 608(%rsp), %rdi
+ movq 600(%rsp), %rbp
+ addq 568(%rsp), %rbx
+ movq 592(%rsp), %rax
+ movq 576(%rsp), %r14
+ movq 584(%rsp), %r12
+ movq 72(%rsp), %r15 # 8-byte Reload
+ movq %rbx, 16(%r15)
+ adcq %r13, %r14
+ adcq 16(%rsp), %r12 # 8-byte Folded Reload
+ adcq 24(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 48(%rsp) # 8-byte Spill
+ adcq 64(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 64(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rsi # 8-byte Reload
+ movq 24(%rsi), %rdx
+ leaq 488(%rsp), %rdi
+ callq .LmulPv576x64
+ movq 560(%rsp), %r8
+ movq 552(%rsp), %rcx
+ movq 544(%rsp), %rdx
+ movq 536(%rsp), %rsi
+ movq 528(%rsp), %rdi
+ movq 520(%rsp), %rbp
+ addq 488(%rsp), %r14
+ movq 512(%rsp), %rax
+ movq 496(%rsp), %rbx
+ movq 504(%rsp), %r13
+ movq %r14, 24(%r15)
+ adcq %r12, %rbx
+ adcq 16(%rsp), %r13 # 8-byte Folded Reload
+ adcq 24(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 48(%rsp) # 8-byte Spill
+ adcq 64(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 64(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rsi # 8-byte Reload
+ movq 32(%rsi), %rdx
+ leaq 408(%rsp), %rdi
+ callq .LmulPv576x64
+ movq 480(%rsp), %r8
+ movq 472(%rsp), %rcx
+ movq 464(%rsp), %rdx
+ movq 456(%rsp), %rsi
+ movq 448(%rsp), %rdi
+ movq 440(%rsp), %rbp
+ addq 408(%rsp), %rbx
+ movq 432(%rsp), %rax
+ movq 416(%rsp), %r14
+ movq 424(%rsp), %r12
+ movq %rbx, 32(%r15)
+ adcq %r13, %r14
+ adcq 16(%rsp), %r12 # 8-byte Folded Reload
+ adcq 24(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 48(%rsp) # 8-byte Spill
+ adcq 64(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 64(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rsi # 8-byte Reload
+ movq 40(%rsi), %rdx
+ leaq 328(%rsp), %rdi
+ callq .LmulPv576x64
+ movq 400(%rsp), %r8
+ movq 392(%rsp), %rcx
+ movq 384(%rsp), %rdx
+ movq 376(%rsp), %rsi
+ movq 368(%rsp), %rdi
+ movq 360(%rsp), %rbp
+ addq 328(%rsp), %r14
+ movq 352(%rsp), %rax
+ movq 336(%rsp), %rbx
+ movq 344(%rsp), %r13
+ movq %r14, 40(%r15)
+ adcq %r12, %rbx
+ adcq 16(%rsp), %r13 # 8-byte Folded Reload
+ adcq 24(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 48(%rsp) # 8-byte Spill
+ adcq 64(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 64(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rsi # 8-byte Reload
+ movq 48(%rsi), %rdx
+ leaq 248(%rsp), %rdi
+ callq .LmulPv576x64
+ movq 320(%rsp), %r8
+ movq 312(%rsp), %rcx
+ movq 304(%rsp), %rdx
+ movq 296(%rsp), %rsi
+ movq 288(%rsp), %rdi
+ movq 280(%rsp), %rbp
+ addq 248(%rsp), %rbx
+ movq 272(%rsp), %rax
+ movq 256(%rsp), %r12
+ movq 264(%rsp), %r14
+ movq %rbx, 48(%r15)
+ adcq %r13, %r12
+ adcq 16(%rsp), %r14 # 8-byte Folded Reload
+ adcq 24(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 48(%rsp) # 8-byte Spill
+ adcq 64(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 64(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rsi # 8-byte Reload
+ movq 56(%rsi), %rdx
+ leaq 168(%rsp), %rdi
+ callq .LmulPv576x64
+ movq 240(%rsp), %r8
+ movq 232(%rsp), %rdx
+ movq 224(%rsp), %rsi
+ movq 216(%rsp), %rdi
+ movq 208(%rsp), %rbx
+ movq 200(%rsp), %rcx
+ addq 168(%rsp), %r12
+ movq 192(%rsp), %r15
+ movq 176(%rsp), %r13
+ movq 184(%rsp), %rbp
+ movq 72(%rsp), %rax # 8-byte Reload
+ movq %r12, 56(%rax)
+ adcq %r14, %r13
+ adcq 16(%rsp), %rbp # 8-byte Folded Reload
+ adcq 24(%rsp), %r15 # 8-byte Folded Reload
+ adcq 32(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, %r12
+ adcq 40(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, %r14
+ adcq 48(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 48(%rsp) # 8-byte Spill
+ adcq 64(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 56(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 64(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rsi # 8-byte Reload
+ movq 64(%rsi), %rdx
+ leaq 88(%rsp), %rdi
+ callq .LmulPv576x64
+ addq 88(%rsp), %r13
+ adcq 96(%rsp), %rbp
+ movq 160(%rsp), %r8
+ adcq 104(%rsp), %r15
+ movq 152(%rsp), %r9
+ movq 144(%rsp), %rdx
+ movq 136(%rsp), %rsi
+ movq 128(%rsp), %rdi
+ movq 120(%rsp), %rbx
+ movq 112(%rsp), %rax
+ movq 72(%rsp), %rcx # 8-byte Reload
+ movq %r13, 64(%rcx)
+ movq %rbp, 72(%rcx)
+ adcq %r12, %rax
+ movq %r15, 80(%rcx)
+ movq %rax, 88(%rcx)
+ adcq %r14, %rbx
+ movq %rbx, 96(%rcx)
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 104(%rcx)
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 112(%rcx)
+ adcq 56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 120(%rcx)
+ adcq 64(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, 128(%rcx)
+ adcq $0, %r8
+ movq %r8, 136(%rcx)
+ addq $808, %rsp # imm = 0x328
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end129:
+ .size mcl_fpDbl_sqrPre9L, .Lfunc_end129-mcl_fpDbl_sqrPre9L
+
+ .globl mcl_fp_mont9L
+ .align 16, 0x90
+ .type mcl_fp_mont9L,@function
+mcl_fp_mont9L: # @mcl_fp_mont9L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $1560, %rsp # imm = 0x618
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ movq %rdx, 32(%rsp) # 8-byte Spill
+ movq %rsi, 24(%rsp) # 8-byte Spill
+ movq %rdi, (%rsp) # 8-byte Spill
+ movq -8(%rcx), %rbx
+ movq %rbx, 16(%rsp) # 8-byte Spill
+ movq (%rdx), %rdx
+ leaq 1480(%rsp), %rdi
+ callq .LmulPv576x64
+ movq 1480(%rsp), %r14
+ movq 1488(%rsp), %r15
+ movq %r14, %rdx
+ imulq %rbx, %rdx
+ movq 1552(%rsp), %rax
+ movq %rax, 112(%rsp) # 8-byte Spill
+ movq 1544(%rsp), %rax
+ movq %rax, 104(%rsp) # 8-byte Spill
+ movq 1536(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ movq 1528(%rsp), %r12
+ movq 1520(%rsp), %r13
+ movq 1512(%rsp), %rbx
+ movq 1504(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq 1496(%rsp), %rbp
+ leaq 1400(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 1400(%rsp), %r14
+ adcq 1408(%rsp), %r15
+ adcq 1416(%rsp), %rbp
+ movq %rbp, 96(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rax # 8-byte Reload
+ adcq 1424(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ adcq 1432(%rsp), %rbx
+ movq %rbx, 40(%rsp) # 8-byte Spill
+ adcq 1440(%rsp), %r13
+ movq %r13, 64(%rsp) # 8-byte Spill
+ adcq 1448(%rsp), %r12
+ movq %r12, 48(%rsp) # 8-byte Spill
+ movq 88(%rsp), %rbx # 8-byte Reload
+ adcq 1456(%rsp), %rbx
+ movq 104(%rsp), %r14 # 8-byte Reload
+ adcq 1464(%rsp), %r14
+ movq 112(%rsp), %r13 # 8-byte Reload
+ adcq 1472(%rsp), %r13
+ sbbq %rbp, %rbp
+ movq 32(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rdx
+ leaq 1320(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ andl $1, %ebp
+ addq 1320(%rsp), %r15
+ movq 96(%rsp), %rax # 8-byte Reload
+ adcq 1328(%rsp), %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rax # 8-byte Reload
+ adcq 1336(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq 40(%rsp), %r12 # 8-byte Reload
+ adcq 1344(%rsp), %r12
+ movq 64(%rsp), %rax # 8-byte Reload
+ adcq 1352(%rsp), %rax
+ movq %rax, 64(%rsp) # 8-byte Spill
+ movq 48(%rsp), %rax # 8-byte Reload
+ adcq 1360(%rsp), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ adcq 1368(%rsp), %rbx
+ adcq 1376(%rsp), %r14
+ movq %r14, 104(%rsp) # 8-byte Spill
+ adcq 1384(%rsp), %r13
+ movq %r13, 112(%rsp) # 8-byte Spill
+ adcq 1392(%rsp), %rbp
+ sbbq %r14, %r14
+ movq %r15, %rdx
+ imulq 16(%rsp), %rdx # 8-byte Folded Reload
+ leaq 1240(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq %r14, %rax
+ andl $1, %eax
+ addq 1240(%rsp), %r15
+ movq 96(%rsp), %rcx # 8-byte Reload
+ adcq 1248(%rsp), %rcx
+ movq %rcx, 96(%rsp) # 8-byte Spill
+ movq 80(%rsp), %r14 # 8-byte Reload
+ adcq 1256(%rsp), %r14
+ adcq 1264(%rsp), %r12
+ movq %r12, 40(%rsp) # 8-byte Spill
+ movq 64(%rsp), %r12 # 8-byte Reload
+ adcq 1272(%rsp), %r12
+ movq 48(%rsp), %r13 # 8-byte Reload
+ adcq 1280(%rsp), %r13
+ adcq 1288(%rsp), %rbx
+ movq %rbx, 88(%rsp) # 8-byte Spill
+ movq 104(%rsp), %r15 # 8-byte Reload
+ adcq 1296(%rsp), %r15
+ movq 112(%rsp), %rbx # 8-byte Reload
+ adcq 1304(%rsp), %rbx
+ adcq 1312(%rsp), %rbp
+ adcq $0, %rax
+ movq %rax, 72(%rsp) # 8-byte Spill
+ movq 32(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rdx
+ leaq 1160(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 96(%rsp), %rax # 8-byte Reload
+ addq 1160(%rsp), %rax
+ adcq 1168(%rsp), %r14
+ movq %r14, 80(%rsp) # 8-byte Spill
+ movq 40(%rsp), %r14 # 8-byte Reload
+ adcq 1176(%rsp), %r14
+ adcq 1184(%rsp), %r12
+ movq %r12, 64(%rsp) # 8-byte Spill
+ movq %r13, %r12
+ adcq 1192(%rsp), %r12
+ movq 88(%rsp), %rcx # 8-byte Reload
+ adcq 1200(%rsp), %rcx
+ movq %rcx, 88(%rsp) # 8-byte Spill
+ adcq 1208(%rsp), %r15
+ movq %r15, %r13
+ adcq 1216(%rsp), %rbx
+ movq %rbx, 112(%rsp) # 8-byte Spill
+ adcq 1224(%rsp), %rbp
+ movq 72(%rsp), %rcx # 8-byte Reload
+ adcq 1232(%rsp), %rcx
+ movq %rcx, 72(%rsp) # 8-byte Spill
+ sbbq %r15, %r15
+ movq %rax, %rdx
+ movq %rax, %rbx
+ imulq 16(%rsp), %rdx # 8-byte Folded Reload
+ leaq 1080(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq %r15, %rax
+ andl $1, %eax
+ addq 1080(%rsp), %rbx
+ movq 80(%rsp), %rcx # 8-byte Reload
+ adcq 1088(%rsp), %rcx
+ movq %rcx, 80(%rsp) # 8-byte Spill
+ movq %r14, %r15
+ adcq 1096(%rsp), %r15
+ movq 64(%rsp), %r14 # 8-byte Reload
+ adcq 1104(%rsp), %r14
+ movq %r12, %rbx
+ adcq 1112(%rsp), %rbx
+ movq 88(%rsp), %rcx # 8-byte Reload
+ adcq 1120(%rsp), %rcx
+ movq %rcx, 88(%rsp) # 8-byte Spill
+ adcq 1128(%rsp), %r13
+ movq %r13, 104(%rsp) # 8-byte Spill
+ movq 112(%rsp), %r13 # 8-byte Reload
+ adcq 1136(%rsp), %r13
+ adcq 1144(%rsp), %rbp
+ movq 72(%rsp), %r12 # 8-byte Reload
+ adcq 1152(%rsp), %r12
+ adcq $0, %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ movq 32(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rdx
+ leaq 1000(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 80(%rsp), %rax # 8-byte Reload
+ addq 1000(%rsp), %rax
+ adcq 1008(%rsp), %r15
+ movq %r15, 40(%rsp) # 8-byte Spill
+ adcq 1016(%rsp), %r14
+ movq %r14, %r15
+ adcq 1024(%rsp), %rbx
+ movq %rbx, 48(%rsp) # 8-byte Spill
+ movq 88(%rsp), %r14 # 8-byte Reload
+ adcq 1032(%rsp), %r14
+ movq 104(%rsp), %rcx # 8-byte Reload
+ adcq 1040(%rsp), %rcx
+ movq %rcx, 104(%rsp) # 8-byte Spill
+ adcq 1048(%rsp), %r13
+ movq %r13, 112(%rsp) # 8-byte Spill
+ adcq 1056(%rsp), %rbp
+ adcq 1064(%rsp), %r12
+ movq 96(%rsp), %rcx # 8-byte Reload
+ adcq 1072(%rsp), %rcx
+ movq %rcx, 96(%rsp) # 8-byte Spill
+ sbbq %rbx, %rbx
+ movq %rax, %rdx
+ movq %rax, %r13
+ imulq 16(%rsp), %rdx # 8-byte Folded Reload
+ leaq 920(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ andl $1, %ebx
+ movq %rbx, %rax
+ addq 920(%rsp), %r13
+ movq 40(%rsp), %rcx # 8-byte Reload
+ adcq 928(%rsp), %rcx
+ movq %rcx, 40(%rsp) # 8-byte Spill
+ adcq 936(%rsp), %r15
+ movq %r15, 64(%rsp) # 8-byte Spill
+ movq 48(%rsp), %r15 # 8-byte Reload
+ adcq 944(%rsp), %r15
+ movq %r14, %r13
+ adcq 952(%rsp), %r13
+ movq 104(%rsp), %r14 # 8-byte Reload
+ adcq 960(%rsp), %r14
+ movq 112(%rsp), %rbx # 8-byte Reload
+ adcq 968(%rsp), %rbx
+ adcq 976(%rsp), %rbp
+ adcq 984(%rsp), %r12
+ movq 96(%rsp), %rcx # 8-byte Reload
+ adcq 992(%rsp), %rcx
+ movq %rcx, 96(%rsp) # 8-byte Spill
+ adcq $0, %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq 32(%rsp), %rax # 8-byte Reload
+ movq 32(%rax), %rdx
+ leaq 840(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 40(%rsp), %rax # 8-byte Reload
+ addq 840(%rsp), %rax
+ movq 64(%rsp), %rcx # 8-byte Reload
+ adcq 848(%rsp), %rcx
+ movq %rcx, 64(%rsp) # 8-byte Spill
+ adcq 856(%rsp), %r15
+ adcq 864(%rsp), %r13
+ movq %r13, 88(%rsp) # 8-byte Spill
+ adcq 872(%rsp), %r14
+ movq %r14, 104(%rsp) # 8-byte Spill
+ adcq 880(%rsp), %rbx
+ movq %rbx, 112(%rsp) # 8-byte Spill
+ adcq 888(%rsp), %rbp
+ adcq 896(%rsp), %r12
+ movq 96(%rsp), %r13 # 8-byte Reload
+ adcq 904(%rsp), %r13
+ movq 80(%rsp), %rcx # 8-byte Reload
+ adcq 912(%rsp), %rcx
+ movq %rcx, 80(%rsp) # 8-byte Spill
+ sbbq %rbx, %rbx
+ movq %rax, %rdx
+ movq %rax, %r14
+ imulq 16(%rsp), %rdx # 8-byte Folded Reload
+ leaq 760(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ andl $1, %ebx
+ movq %rbx, %rax
+ addq 760(%rsp), %r14
+ movq 64(%rsp), %rcx # 8-byte Reload
+ adcq 768(%rsp), %rcx
+ movq %rcx, 64(%rsp) # 8-byte Spill
+ adcq 776(%rsp), %r15
+ movq 88(%rsp), %r14 # 8-byte Reload
+ adcq 784(%rsp), %r14
+ movq 104(%rsp), %rcx # 8-byte Reload
+ adcq 792(%rsp), %rcx
+ movq %rcx, 104(%rsp) # 8-byte Spill
+ movq 112(%rsp), %rcx # 8-byte Reload
+ adcq 800(%rsp), %rcx
+ movq %rcx, 112(%rsp) # 8-byte Spill
+ adcq 808(%rsp), %rbp
+ movq %r12, %rbx
+ adcq 816(%rsp), %rbx
+ movq %r13, %r12
+ adcq 824(%rsp), %r12
+ movq 80(%rsp), %r13 # 8-byte Reload
+ adcq 832(%rsp), %r13
+ adcq $0, %rax
+ movq %rax, 40(%rsp) # 8-byte Spill
+ movq 32(%rsp), %rax # 8-byte Reload
+ movq 40(%rax), %rdx
+ leaq 680(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 64(%rsp), %rax # 8-byte Reload
+ addq 680(%rsp), %rax
+ adcq 688(%rsp), %r15
+ movq %r15, 48(%rsp) # 8-byte Spill
+ adcq 696(%rsp), %r14
+ movq %r14, 88(%rsp) # 8-byte Spill
+ movq 104(%rsp), %rcx # 8-byte Reload
+ adcq 704(%rsp), %rcx
+ movq %rcx, 104(%rsp) # 8-byte Spill
+ movq 112(%rsp), %r15 # 8-byte Reload
+ adcq 712(%rsp), %r15
+ adcq 720(%rsp), %rbp
+ adcq 728(%rsp), %rbx
+ movq %rbx, 72(%rsp) # 8-byte Spill
+ adcq 736(%rsp), %r12
+ movq %r12, 96(%rsp) # 8-byte Spill
+ adcq 744(%rsp), %r13
+ movq %r13, 80(%rsp) # 8-byte Spill
+ movq 40(%rsp), %r13 # 8-byte Reload
+ adcq 752(%rsp), %r13
+ sbbq %r14, %r14
+ movq %rax, %rdx
+ movq %rax, %rbx
+ imulq 16(%rsp), %rdx # 8-byte Folded Reload
+ leaq 600(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ andl $1, %r14d
+ addq 600(%rsp), %rbx
+ movq 48(%rsp), %rax # 8-byte Reload
+ adcq 608(%rsp), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ movq 88(%rsp), %rax # 8-byte Reload
+ adcq 616(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ movq 104(%rsp), %rbx # 8-byte Reload
+ adcq 624(%rsp), %rbx
+ adcq 632(%rsp), %r15
+ movq %r15, 112(%rsp) # 8-byte Spill
+ adcq 640(%rsp), %rbp
+ movq 72(%rsp), %r12 # 8-byte Reload
+ adcq 648(%rsp), %r12
+ movq 96(%rsp), %rax # 8-byte Reload
+ adcq 656(%rsp), %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ movq 80(%rsp), %r15 # 8-byte Reload
+ adcq 664(%rsp), %r15
+ adcq 672(%rsp), %r13
+ adcq $0, %r14
+ movq %r14, 64(%rsp) # 8-byte Spill
+ movq 32(%rsp), %rax # 8-byte Reload
+ movq 48(%rax), %rdx
+ leaq 520(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 48(%rsp), %rax # 8-byte Reload
+ addq 520(%rsp), %rax
+ movq 88(%rsp), %r14 # 8-byte Reload
+ adcq 528(%rsp), %r14
+ adcq 536(%rsp), %rbx
+ movq %rbx, 104(%rsp) # 8-byte Spill
+ movq 112(%rsp), %rcx # 8-byte Reload
+ adcq 544(%rsp), %rcx
+ movq %rcx, 112(%rsp) # 8-byte Spill
+ adcq 552(%rsp), %rbp
+ adcq 560(%rsp), %r12
+ movq %r12, 72(%rsp) # 8-byte Spill
+ movq 96(%rsp), %r12 # 8-byte Reload
+ adcq 568(%rsp), %r12
+ adcq 576(%rsp), %r15
+ movq %r15, 80(%rsp) # 8-byte Spill
+ adcq 584(%rsp), %r13
+ movq %r13, 40(%rsp) # 8-byte Spill
+ movq 64(%rsp), %r15 # 8-byte Reload
+ adcq 592(%rsp), %r15
+ sbbq %rbx, %rbx
+ movq %rax, %rdx
+ movq %rax, %r13
+ imulq 16(%rsp), %rdx # 8-byte Folded Reload
+ leaq 440(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ andl $1, %ebx
+ movq %rbx, %rax
+ addq 440(%rsp), %r13
+ adcq 448(%rsp), %r14
+ movq %r14, 88(%rsp) # 8-byte Spill
+ movq 104(%rsp), %r14 # 8-byte Reload
+ adcq 456(%rsp), %r14
+ movq 112(%rsp), %rbx # 8-byte Reload
+ adcq 464(%rsp), %rbx
+ adcq 472(%rsp), %rbp
+ movq %rbp, 8(%rsp) # 8-byte Spill
+ movq 72(%rsp), %rcx # 8-byte Reload
+ adcq 480(%rsp), %rcx
+ movq %rcx, 72(%rsp) # 8-byte Spill
+ adcq 488(%rsp), %r12
+ movq %r12, 96(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rbp # 8-byte Reload
+ adcq 496(%rsp), %rbp
+ movq 40(%rsp), %r12 # 8-byte Reload
+ adcq 504(%rsp), %r12
+ adcq 512(%rsp), %r15
+ movq %r15, %r13
+ adcq $0, %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ movq 32(%rsp), %rax # 8-byte Reload
+ movq 56(%rax), %rdx
+ leaq 360(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 88(%rsp), %rax # 8-byte Reload
+ addq 360(%rsp), %rax
+ adcq 368(%rsp), %r14
+ adcq 376(%rsp), %rbx
+ movq %rbx, 112(%rsp) # 8-byte Spill
+ movq 8(%rsp), %rcx # 8-byte Reload
+ adcq 384(%rsp), %rcx
+ movq %rcx, 8(%rsp) # 8-byte Spill
+ movq 72(%rsp), %rbx # 8-byte Reload
+ adcq 392(%rsp), %rbx
+ movq 96(%rsp), %r15 # 8-byte Reload
+ adcq 400(%rsp), %r15
+ adcq 408(%rsp), %rbp
+ movq %rbp, 80(%rsp) # 8-byte Spill
+ adcq 416(%rsp), %r12
+ movq %r12, %rbp
+ adcq 424(%rsp), %r13
+ movq %r13, 64(%rsp) # 8-byte Spill
+ movq 48(%rsp), %rcx # 8-byte Reload
+ adcq 432(%rsp), %rcx
+ movq %rcx, 48(%rsp) # 8-byte Spill
+ sbbq %r13, %r13
+ movq %rax, %rdx
+ movq %rax, %r12
+ imulq 16(%rsp), %rdx # 8-byte Folded Reload
+ leaq 280(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ andl $1, %r13d
+ addq 280(%rsp), %r12
+ adcq 288(%rsp), %r14
+ movq %r14, 104(%rsp) # 8-byte Spill
+ movq 112(%rsp), %rax # 8-byte Reload
+ adcq 296(%rsp), %rax
+ movq %rax, 112(%rsp) # 8-byte Spill
+ movq 8(%rsp), %r14 # 8-byte Reload
+ adcq 304(%rsp), %r14
+ adcq 312(%rsp), %rbx
+ movq %rbx, 72(%rsp) # 8-byte Spill
+ adcq 320(%rsp), %r15
+ movq %r15, 96(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rbx # 8-byte Reload
+ adcq 328(%rsp), %rbx
+ adcq 336(%rsp), %rbp
+ movq %rbp, 40(%rsp) # 8-byte Spill
+ movq 64(%rsp), %r12 # 8-byte Reload
+ adcq 344(%rsp), %r12
+ movq 48(%rsp), %rbp # 8-byte Reload
+ adcq 352(%rsp), %rbp
+ adcq $0, %r13
+ movq 32(%rsp), %rax # 8-byte Reload
+ movq 64(%rax), %rdx
+ leaq 200(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 104(%rsp), %rax # 8-byte Reload
+ addq 200(%rsp), %rax
+ movq 112(%rsp), %r15 # 8-byte Reload
+ adcq 208(%rsp), %r15
+ adcq 216(%rsp), %r14
+ movq %r14, 8(%rsp) # 8-byte Spill
+ movq 72(%rsp), %r14 # 8-byte Reload
+ adcq 224(%rsp), %r14
+ movq 96(%rsp), %rcx # 8-byte Reload
+ adcq 232(%rsp), %rcx
+ movq %rcx, 96(%rsp) # 8-byte Spill
+ adcq 240(%rsp), %rbx
+ movq %rbx, 80(%rsp) # 8-byte Spill
+ movq 40(%rsp), %rcx # 8-byte Reload
+ adcq 248(%rsp), %rcx
+ movq %rcx, 40(%rsp) # 8-byte Spill
+ adcq 256(%rsp), %r12
+ movq %r12, 64(%rsp) # 8-byte Spill
+ adcq 264(%rsp), %rbp
+ movq %rbp, 48(%rsp) # 8-byte Spill
+ adcq 272(%rsp), %r13
+ sbbq %rbx, %rbx
+ movq 16(%rsp), %rdx # 8-byte Reload
+ imulq %rax, %rdx
+ movq %rax, %r12
+ leaq 120(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ andl $1, %ebx
+ addq 120(%rsp), %r12
+ adcq 128(%rsp), %r15
+ movq 8(%rsp), %rbp # 8-byte Reload
+ adcq 136(%rsp), %rbp
+ movq %r14, %rcx
+ adcq 144(%rsp), %rcx
+ movq %rcx, 72(%rsp) # 8-byte Spill
+ movq 96(%rsp), %r8 # 8-byte Reload
+ adcq 152(%rsp), %r8
+ movq %r8, 96(%rsp) # 8-byte Spill
+ movq 80(%rsp), %r9 # 8-byte Reload
+ adcq 160(%rsp), %r9
+ movq %r9, 80(%rsp) # 8-byte Spill
+ movq 40(%rsp), %r10 # 8-byte Reload
+ adcq 168(%rsp), %r10
+ movq %r10, 40(%rsp) # 8-byte Spill
+ movq 64(%rsp), %rdi # 8-byte Reload
+ adcq 176(%rsp), %rdi
+ movq %rdi, 64(%rsp) # 8-byte Spill
+ movq 48(%rsp), %r14 # 8-byte Reload
+ adcq 184(%rsp), %r14
+ adcq 192(%rsp), %r13
+ adcq $0, %rbx
+ movq %r15, %rsi
+ movq %r15, %r12
+ movq 56(%rsp), %rdx # 8-byte Reload
+ subq (%rdx), %rsi
+ movq %rbp, %rax
+ movq %rbp, %r15
+ sbbq 8(%rdx), %rax
+ movq %rcx, %rbp
+ sbbq 16(%rdx), %rbp
+ movq %r8, %rcx
+ sbbq 24(%rdx), %rcx
+ movq %r9, %r8
+ sbbq 32(%rdx), %r8
+ movq %r10, %r11
+ sbbq 40(%rdx), %r11
+ movq %rdi, %r10
+ sbbq 48(%rdx), %r10
+ movq %r14, %rdi
+ sbbq 56(%rdx), %rdi
+ movq %r13, %r9
+ sbbq 64(%rdx), %r9
+ sbbq $0, %rbx
+ andl $1, %ebx
+ cmovneq %r13, %r9
+ testb %bl, %bl
+ cmovneq %r12, %rsi
+ movq (%rsp), %rbx # 8-byte Reload
+ movq %rsi, (%rbx)
+ cmovneq %r15, %rax
+ movq %rax, 8(%rbx)
+ cmovneq 72(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 16(%rbx)
+ cmovneq 96(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 24(%rbx)
+ cmovneq 80(%rsp), %r8 # 8-byte Folded Reload
+ movq %r8, 32(%rbx)
+ cmovneq 40(%rsp), %r11 # 8-byte Folded Reload
+ movq %r11, 40(%rbx)
+ cmovneq 64(%rsp), %r10 # 8-byte Folded Reload
+ movq %r10, 48(%rbx)
+ cmovneq %r14, %rdi
+ movq %rdi, 56(%rbx)
+ movq %r9, 64(%rbx)
+ addq $1560, %rsp # imm = 0x618
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end130:
+ .size mcl_fp_mont9L, .Lfunc_end130-mcl_fp_mont9L
+
+ .globl mcl_fp_montNF9L
+ .align 16, 0x90
+ .type mcl_fp_montNF9L,@function
+mcl_fp_montNF9L: # @mcl_fp_montNF9L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $1560, %rsp # imm = 0x618
+ movq %rcx, 64(%rsp) # 8-byte Spill
+ movq %rdx, 16(%rsp) # 8-byte Spill
+ movq %rsi, 24(%rsp) # 8-byte Spill
+ movq %rdi, (%rsp) # 8-byte Spill
+ movq -8(%rcx), %rbx
+ movq %rbx, 32(%rsp) # 8-byte Spill
+ movq (%rdx), %rdx
+ leaq 1480(%rsp), %rdi
+ callq .LmulPv576x64
+ movq 1480(%rsp), %r12
+ movq 1488(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ movq %r12, %rdx
+ imulq %rbx, %rdx
+ movq 1552(%rsp), %rax
+ movq %rax, 112(%rsp) # 8-byte Spill
+ movq 1544(%rsp), %r13
+ movq 1536(%rsp), %rax
+ movq %rax, 72(%rsp) # 8-byte Spill
+ movq 1528(%rsp), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ movq 1520(%rsp), %r14
+ movq 1512(%rsp), %r15
+ movq 1504(%rsp), %rbx
+ movq 1496(%rsp), %rbp
+ leaq 1400(%rsp), %rdi
+ movq 64(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 1400(%rsp), %r12
+ movq 88(%rsp), %rax # 8-byte Reload
+ adcq 1408(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ adcq 1416(%rsp), %rbp
+ movq %rbp, 8(%rsp) # 8-byte Spill
+ adcq 1424(%rsp), %rbx
+ movq %rbx, 104(%rsp) # 8-byte Spill
+ adcq 1432(%rsp), %r15
+ movq %r15, 56(%rsp) # 8-byte Spill
+ adcq 1440(%rsp), %r14
+ movq %r14, 40(%rsp) # 8-byte Spill
+ movq 48(%rsp), %rbx # 8-byte Reload
+ adcq 1448(%rsp), %rbx
+ movq 72(%rsp), %r12 # 8-byte Reload
+ adcq 1456(%rsp), %r12
+ adcq 1464(%rsp), %r13
+ movq %r13, 96(%rsp) # 8-byte Spill
+ movq 112(%rsp), %rbp # 8-byte Reload
+ adcq 1472(%rsp), %rbp
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rdx
+ leaq 1320(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 1392(%rsp), %rax
+ movq 88(%rsp), %rcx # 8-byte Reload
+ addq 1320(%rsp), %rcx
+ movq 8(%rsp), %r15 # 8-byte Reload
+ adcq 1328(%rsp), %r15
+ movq 104(%rsp), %r14 # 8-byte Reload
+ adcq 1336(%rsp), %r14
+ movq 56(%rsp), %rdx # 8-byte Reload
+ adcq 1344(%rsp), %rdx
+ movq %rdx, 56(%rsp) # 8-byte Spill
+ movq 40(%rsp), %r13 # 8-byte Reload
+ adcq 1352(%rsp), %r13
+ adcq 1360(%rsp), %rbx
+ movq %rbx, 48(%rsp) # 8-byte Spill
+ adcq 1368(%rsp), %r12
+ movq %r12, 72(%rsp) # 8-byte Spill
+ movq 96(%rsp), %rdx # 8-byte Reload
+ adcq 1376(%rsp), %rdx
+ movq %rdx, 96(%rsp) # 8-byte Spill
+ adcq 1384(%rsp), %rbp
+ movq %rbp, 112(%rsp) # 8-byte Spill
+ adcq $0, %rax
+ movq %rax, %rbp
+ movq %rcx, %rdx
+ movq %rcx, %rbx
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 1240(%rsp), %rdi
+ movq 64(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 1240(%rsp), %rbx
+ adcq 1248(%rsp), %r15
+ movq %r15, 8(%rsp) # 8-byte Spill
+ adcq 1256(%rsp), %r14
+ movq %r14, 104(%rsp) # 8-byte Spill
+ movq 56(%rsp), %r12 # 8-byte Reload
+ adcq 1264(%rsp), %r12
+ adcq 1272(%rsp), %r13
+ movq %r13, %r14
+ movq 48(%rsp), %r13 # 8-byte Reload
+ adcq 1280(%rsp), %r13
+ movq 72(%rsp), %rbx # 8-byte Reload
+ adcq 1288(%rsp), %rbx
+ movq 96(%rsp), %r15 # 8-byte Reload
+ adcq 1296(%rsp), %r15
+ movq 112(%rsp), %rax # 8-byte Reload
+ adcq 1304(%rsp), %rax
+ movq %rax, 112(%rsp) # 8-byte Spill
+ adcq 1312(%rsp), %rbp
+ movq %rbp, 80(%rsp) # 8-byte Spill
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rdx
+ leaq 1160(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 1232(%rsp), %rax
+ movq 8(%rsp), %rcx # 8-byte Reload
+ addq 1160(%rsp), %rcx
+ movq 104(%rsp), %rbp # 8-byte Reload
+ adcq 1168(%rsp), %rbp
+ adcq 1176(%rsp), %r12
+ movq %r12, 56(%rsp) # 8-byte Spill
+ adcq 1184(%rsp), %r14
+ adcq 1192(%rsp), %r13
+ movq %r13, %r12
+ adcq 1200(%rsp), %rbx
+ movq %rbx, 72(%rsp) # 8-byte Spill
+ adcq 1208(%rsp), %r15
+ movq %r15, 96(%rsp) # 8-byte Spill
+ movq 112(%rsp), %rbx # 8-byte Reload
+ adcq 1216(%rsp), %rbx
+ movq 80(%rsp), %rdx # 8-byte Reload
+ adcq 1224(%rsp), %rdx
+ movq %rdx, 80(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ adcq $0, %r15
+ movq %rcx, %rdx
+ movq %rcx, %r13
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 1080(%rsp), %rdi
+ movq 64(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 1080(%rsp), %r13
+ adcq 1088(%rsp), %rbp
+ movq %rbp, 104(%rsp) # 8-byte Spill
+ movq 56(%rsp), %r13 # 8-byte Reload
+ adcq 1096(%rsp), %r13
+ adcq 1104(%rsp), %r14
+ adcq 1112(%rsp), %r12
+ movq %r12, 48(%rsp) # 8-byte Spill
+ movq 72(%rsp), %r12 # 8-byte Reload
+ adcq 1120(%rsp), %r12
+ movq 96(%rsp), %rbp # 8-byte Reload
+ adcq 1128(%rsp), %rbp
+ adcq 1136(%rsp), %rbx
+ movq %rbx, 112(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rbx # 8-byte Reload
+ adcq 1144(%rsp), %rbx
+ adcq 1152(%rsp), %r15
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rdx
+ leaq 1000(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 1072(%rsp), %rax
+ movq 104(%rsp), %rcx # 8-byte Reload
+ addq 1000(%rsp), %rcx
+ adcq 1008(%rsp), %r13
+ movq %r13, 56(%rsp) # 8-byte Spill
+ adcq 1016(%rsp), %r14
+ movq %r14, 40(%rsp) # 8-byte Spill
+ movq 48(%rsp), %r14 # 8-byte Reload
+ adcq 1024(%rsp), %r14
+ adcq 1032(%rsp), %r12
+ adcq 1040(%rsp), %rbp
+ movq %rbp, 96(%rsp) # 8-byte Spill
+ movq 112(%rsp), %r13 # 8-byte Reload
+ adcq 1048(%rsp), %r13
+ adcq 1056(%rsp), %rbx
+ movq %rbx, 80(%rsp) # 8-byte Spill
+ adcq 1064(%rsp), %r15
+ movq %r15, 88(%rsp) # 8-byte Spill
+ adcq $0, %rax
+ movq %rax, 104(%rsp) # 8-byte Spill
+ movq %rcx, %rdx
+ movq %rcx, %rbx
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 920(%rsp), %rdi
+ movq 64(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 920(%rsp), %rbx
+ movq 56(%rsp), %rax # 8-byte Reload
+ adcq 928(%rsp), %rax
+ movq %rax, 56(%rsp) # 8-byte Spill
+ movq 40(%rsp), %rbp # 8-byte Reload
+ adcq 936(%rsp), %rbp
+ movq %r14, %rbx
+ adcq 944(%rsp), %rbx
+ adcq 952(%rsp), %r12
+ movq 96(%rsp), %rax # 8-byte Reload
+ adcq 960(%rsp), %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ adcq 968(%rsp), %r13
+ movq %r13, %r15
+ movq 80(%rsp), %r13 # 8-byte Reload
+ adcq 976(%rsp), %r13
+ movq 88(%rsp), %r14 # 8-byte Reload
+ adcq 984(%rsp), %r14
+ movq 104(%rsp), %rax # 8-byte Reload
+ adcq 992(%rsp), %rax
+ movq %rax, 104(%rsp) # 8-byte Spill
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 32(%rax), %rdx
+ leaq 840(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 912(%rsp), %rax
+ movq 56(%rsp), %rcx # 8-byte Reload
+ addq 840(%rsp), %rcx
+ adcq 848(%rsp), %rbp
+ movq %rbp, 40(%rsp) # 8-byte Spill
+ adcq 856(%rsp), %rbx
+ movq %rbx, 48(%rsp) # 8-byte Spill
+ adcq 864(%rsp), %r12
+ movq 96(%rsp), %rbp # 8-byte Reload
+ adcq 872(%rsp), %rbp
+ adcq 880(%rsp), %r15
+ movq %r15, 112(%rsp) # 8-byte Spill
+ adcq 888(%rsp), %r13
+ adcq 896(%rsp), %r14
+ movq %r14, 88(%rsp) # 8-byte Spill
+ movq 104(%rsp), %rdx # 8-byte Reload
+ adcq 904(%rsp), %rdx
+ movq %rdx, 104(%rsp) # 8-byte Spill
+ adcq $0, %rax
+ movq %rax, %r14
+ movq %rcx, %rdx
+ movq %rcx, %rbx
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 760(%rsp), %rdi
+ movq 64(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 760(%rsp), %rbx
+ movq 40(%rsp), %rax # 8-byte Reload
+ adcq 768(%rsp), %rax
+ movq %rax, 40(%rsp) # 8-byte Spill
+ movq 48(%rsp), %r15 # 8-byte Reload
+ adcq 776(%rsp), %r15
+ adcq 784(%rsp), %r12
+ movq %r12, 72(%rsp) # 8-byte Spill
+ movq %rbp, %rbx
+ adcq 792(%rsp), %rbx
+ movq 112(%rsp), %rbp # 8-byte Reload
+ adcq 800(%rsp), %rbp
+ adcq 808(%rsp), %r13
+ movq 88(%rsp), %rax # 8-byte Reload
+ adcq 816(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ movq 104(%rsp), %r12 # 8-byte Reload
+ adcq 824(%rsp), %r12
+ adcq 832(%rsp), %r14
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 40(%rax), %rdx
+ leaq 680(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 752(%rsp), %rcx
+ movq 40(%rsp), %rax # 8-byte Reload
+ addq 680(%rsp), %rax
+ adcq 688(%rsp), %r15
+ movq %r15, 48(%rsp) # 8-byte Spill
+ movq 72(%rsp), %rdx # 8-byte Reload
+ adcq 696(%rsp), %rdx
+ movq %rdx, 72(%rsp) # 8-byte Spill
+ adcq 704(%rsp), %rbx
+ movq %rbx, 96(%rsp) # 8-byte Spill
+ adcq 712(%rsp), %rbp
+ movq %rbp, 112(%rsp) # 8-byte Spill
+ adcq 720(%rsp), %r13
+ movq %r13, %r15
+ movq 88(%rsp), %rbx # 8-byte Reload
+ adcq 728(%rsp), %rbx
+ adcq 736(%rsp), %r12
+ movq %r12, 104(%rsp) # 8-byte Spill
+ adcq 744(%rsp), %r14
+ movq %r14, 40(%rsp) # 8-byte Spill
+ adcq $0, %rcx
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ movq %rax, %rdx
+ movq %rax, %r13
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 600(%rsp), %rdi
+ movq 64(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 600(%rsp), %r13
+ movq 48(%rsp), %r13 # 8-byte Reload
+ adcq 608(%rsp), %r13
+ movq 72(%rsp), %r12 # 8-byte Reload
+ adcq 616(%rsp), %r12
+ movq 96(%rsp), %rbp # 8-byte Reload
+ adcq 624(%rsp), %rbp
+ movq 112(%rsp), %rax # 8-byte Reload
+ adcq 632(%rsp), %rax
+ movq %rax, 112(%rsp) # 8-byte Spill
+ adcq 640(%rsp), %r15
+ movq %r15, 80(%rsp) # 8-byte Spill
+ adcq 648(%rsp), %rbx
+ movq %rbx, 88(%rsp) # 8-byte Spill
+ movq 104(%rsp), %r14 # 8-byte Reload
+ adcq 656(%rsp), %r14
+ movq 40(%rsp), %rbx # 8-byte Reload
+ adcq 664(%rsp), %rbx
+ movq 56(%rsp), %r15 # 8-byte Reload
+ adcq 672(%rsp), %r15
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 48(%rax), %rdx
+ leaq 520(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 592(%rsp), %rcx
+ movq %r13, %rax
+ addq 520(%rsp), %rax
+ adcq 528(%rsp), %r12
+ movq %r12, 72(%rsp) # 8-byte Spill
+ movq %rbp, %r12
+ adcq 536(%rsp), %r12
+ movq 112(%rsp), %rbp # 8-byte Reload
+ adcq 544(%rsp), %rbp
+ movq 80(%rsp), %rdx # 8-byte Reload
+ adcq 552(%rsp), %rdx
+ movq %rdx, 80(%rsp) # 8-byte Spill
+ movq 88(%rsp), %rdx # 8-byte Reload
+ adcq 560(%rsp), %rdx
+ movq %rdx, 88(%rsp) # 8-byte Spill
+ adcq 568(%rsp), %r14
+ movq %r14, 104(%rsp) # 8-byte Spill
+ adcq 576(%rsp), %rbx
+ movq %rbx, 40(%rsp) # 8-byte Spill
+ adcq 584(%rsp), %r15
+ movq %r15, 56(%rsp) # 8-byte Spill
+ adcq $0, %rcx
+ movq %rcx, %r13
+ movq %rax, %rdx
+ movq %rax, %r14
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 440(%rsp), %rdi
+ movq 64(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 440(%rsp), %r14
+ movq 72(%rsp), %rax # 8-byte Reload
+ adcq 448(%rsp), %rax
+ movq %rax, 72(%rsp) # 8-byte Spill
+ adcq 456(%rsp), %r12
+ adcq 464(%rsp), %rbp
+ movq %rbp, 112(%rsp) # 8-byte Spill
+ movq 80(%rsp), %r14 # 8-byte Reload
+ adcq 472(%rsp), %r14
+ movq 88(%rsp), %r15 # 8-byte Reload
+ adcq 480(%rsp), %r15
+ movq 104(%rsp), %rbp # 8-byte Reload
+ adcq 488(%rsp), %rbp
+ movq 40(%rsp), %rbx # 8-byte Reload
+ adcq 496(%rsp), %rbx
+ movq 56(%rsp), %rax # 8-byte Reload
+ adcq 504(%rsp), %rax
+ movq %rax, 56(%rsp) # 8-byte Spill
+ adcq 512(%rsp), %r13
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 56(%rax), %rdx
+ leaq 360(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 432(%rsp), %rcx
+ movq 72(%rsp), %rax # 8-byte Reload
+ addq 360(%rsp), %rax
+ adcq 368(%rsp), %r12
+ movq %r12, 96(%rsp) # 8-byte Spill
+ movq 112(%rsp), %rdx # 8-byte Reload
+ adcq 376(%rsp), %rdx
+ movq %rdx, 112(%rsp) # 8-byte Spill
+ adcq 384(%rsp), %r14
+ movq %r14, 80(%rsp) # 8-byte Spill
+ adcq 392(%rsp), %r15
+ movq %r15, 88(%rsp) # 8-byte Spill
+ adcq 400(%rsp), %rbp
+ movq %rbp, 104(%rsp) # 8-byte Spill
+ adcq 408(%rsp), %rbx
+ movq %rbx, 40(%rsp) # 8-byte Spill
+ movq 56(%rsp), %r14 # 8-byte Reload
+ adcq 416(%rsp), %r14
+ adcq 424(%rsp), %r13
+ movq %r13, %r15
+ adcq $0, %rcx
+ movq %rcx, 72(%rsp) # 8-byte Spill
+ movq %rax, %rdx
+ movq %rax, %r12
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 280(%rsp), %rdi
+ movq 64(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 280(%rsp), %r12
+ movq 96(%rsp), %rax # 8-byte Reload
+ adcq 288(%rsp), %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ movq 112(%rsp), %rbp # 8-byte Reload
+ adcq 296(%rsp), %rbp
+ movq 80(%rsp), %rax # 8-byte Reload
+ adcq 304(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq 88(%rsp), %r13 # 8-byte Reload
+ adcq 312(%rsp), %r13
+ movq 104(%rsp), %r12 # 8-byte Reload
+ adcq 320(%rsp), %r12
+ movq 40(%rsp), %rbx # 8-byte Reload
+ adcq 328(%rsp), %rbx
+ adcq 336(%rsp), %r14
+ movq %r14, 56(%rsp) # 8-byte Spill
+ adcq 344(%rsp), %r15
+ movq %r15, 48(%rsp) # 8-byte Spill
+ movq 72(%rsp), %r14 # 8-byte Reload
+ adcq 352(%rsp), %r14
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 64(%rax), %rdx
+ leaq 200(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 272(%rsp), %rcx
+ movq 96(%rsp), %rax # 8-byte Reload
+ addq 200(%rsp), %rax
+ adcq 208(%rsp), %rbp
+ movq %rbp, 112(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rbp # 8-byte Reload
+ adcq 216(%rsp), %rbp
+ adcq 224(%rsp), %r13
+ movq %r13, 88(%rsp) # 8-byte Spill
+ adcq 232(%rsp), %r12
+ movq %r12, 104(%rsp) # 8-byte Spill
+ adcq 240(%rsp), %rbx
+ movq %rbx, 40(%rsp) # 8-byte Spill
+ movq 56(%rsp), %r15 # 8-byte Reload
+ adcq 248(%rsp), %r15
+ movq 48(%rsp), %r12 # 8-byte Reload
+ adcq 256(%rsp), %r12
+ adcq 264(%rsp), %r14
+ adcq $0, %rcx
+ movq %rcx, 96(%rsp) # 8-byte Spill
+ movq 32(%rsp), %rdx # 8-byte Reload
+ imulq %rax, %rdx
+ movq %rax, %rbx
+ leaq 120(%rsp), %rdi
+ movq 64(%rsp), %r13 # 8-byte Reload
+ movq %r13, %rsi
+ callq .LmulPv576x64
+ addq 120(%rsp), %rbx
+ movq 112(%rsp), %rcx # 8-byte Reload
+ adcq 128(%rsp), %rcx
+ movq %rbp, %rdx
+ adcq 136(%rsp), %rdx
+ movq 88(%rsp), %rsi # 8-byte Reload
+ adcq 144(%rsp), %rsi
+ movq %rsi, 88(%rsp) # 8-byte Spill
+ movq 104(%rsp), %rdi # 8-byte Reload
+ adcq 152(%rsp), %rdi
+ movq %rdi, 104(%rsp) # 8-byte Spill
+ movq 40(%rsp), %rbx # 8-byte Reload
+ adcq 160(%rsp), %rbx
+ movq %rbx, 40(%rsp) # 8-byte Spill
+ movq %r15, %r8
+ adcq 168(%rsp), %r8
+ movq %r8, 56(%rsp) # 8-byte Spill
+ movq %r12, %r15
+ adcq 176(%rsp), %r15
+ adcq 184(%rsp), %r14
+ movq 96(%rsp), %r9 # 8-byte Reload
+ adcq 192(%rsp), %r9
+ movq %rcx, %rax
+ movq %rcx, %r11
+ movq %r13, %rbp
+ subq (%rbp), %rax
+ movq %rdx, %rcx
+ movq %rdx, %r12
+ sbbq 8(%rbp), %rcx
+ movq %rsi, %rdx
+ sbbq 16(%rbp), %rdx
+ movq %rdi, %rsi
+ sbbq 24(%rbp), %rsi
+ movq %rbx, %rdi
+ sbbq 32(%rbp), %rdi
+ movq %r8, %r10
+ sbbq 40(%rbp), %r10
+ movq %r15, %r13
+ sbbq 48(%rbp), %r13
+ movq %r14, %r8
+ sbbq 56(%rbp), %r8
+ movq %rbp, %rbx
+ movq %r9, %rbp
+ sbbq 64(%rbx), %rbp
+ movq %rbp, %rbx
+ sarq $63, %rbx
+ cmovsq %r11, %rax
+ movq (%rsp), %rbx # 8-byte Reload
+ movq %rax, (%rbx)
+ cmovsq %r12, %rcx
+ movq %rcx, 8(%rbx)
+ cmovsq 88(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 16(%rbx)
+ cmovsq 104(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 24(%rbx)
+ cmovsq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rbx)
+ cmovsq 56(%rsp), %r10 # 8-byte Folded Reload
+ movq %r10, 40(%rbx)
+ cmovsq %r15, %r13
+ movq %r13, 48(%rbx)
+ cmovsq %r14, %r8
+ movq %r8, 56(%rbx)
+ cmovsq %r9, %rbp
+ movq %rbp, 64(%rbx)
+ addq $1560, %rsp # imm = 0x618
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end131:
+ .size mcl_fp_montNF9L, .Lfunc_end131-mcl_fp_montNF9L
+
+ .globl mcl_fp_montRed9L
+ .align 16, 0x90
+ .type mcl_fp_montRed9L,@function
+mcl_fp_montRed9L: # @mcl_fp_montRed9L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $936, %rsp # imm = 0x3A8
+ movq %rdx, %rax
+ movq %rax, 128(%rsp) # 8-byte Spill
+ movq %rdi, 80(%rsp) # 8-byte Spill
+ movq -8(%rax), %rcx
+ movq %rcx, 120(%rsp) # 8-byte Spill
+ movq (%rsi), %r14
+ movq 8(%rsi), %rdx
+ movq %rdx, 192(%rsp) # 8-byte Spill
+ movq %r14, %rdx
+ imulq %rcx, %rdx
+ movq 136(%rsi), %rcx
+ movq %rcx, 112(%rsp) # 8-byte Spill
+ movq 128(%rsi), %rcx
+ movq %rcx, 152(%rsp) # 8-byte Spill
+ movq 120(%rsi), %rcx
+ movq %rcx, 104(%rsp) # 8-byte Spill
+ movq 112(%rsi), %rcx
+ movq %rcx, 144(%rsp) # 8-byte Spill
+ movq 104(%rsi), %rcx
+ movq %rcx, 184(%rsp) # 8-byte Spill
+ movq 96(%rsi), %rcx
+ movq %rcx, 208(%rsp) # 8-byte Spill
+ movq 88(%rsi), %rcx
+ movq %rcx, 200(%rsp) # 8-byte Spill
+ movq 80(%rsi), %rcx
+ movq %rcx, 160(%rsp) # 8-byte Spill
+ movq 72(%rsi), %r12
+ movq 64(%rsi), %rcx
+ movq %rcx, 176(%rsp) # 8-byte Spill
+ movq 56(%rsi), %rcx
+ movq %rcx, 168(%rsp) # 8-byte Spill
+ movq 48(%rsi), %rcx
+ movq %rcx, 136(%rsp) # 8-byte Spill
+ movq 40(%rsi), %rbp
+ movq 32(%rsi), %rbx
+ movq 24(%rsi), %r13
+ movq 16(%rsi), %r15
+ movq %rax, %rcx
+ movq (%rcx), %rax
+ movq %rax, 16(%rsp) # 8-byte Spill
+ movq 64(%rcx), %rax
+ movq %rax, 72(%rsp) # 8-byte Spill
+ movq 56(%rcx), %rax
+ movq %rax, 64(%rsp) # 8-byte Spill
+ movq 48(%rcx), %rax
+ movq %rax, 56(%rsp) # 8-byte Spill
+ movq 40(%rcx), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ movq 32(%rcx), %rax
+ movq %rax, 40(%rsp) # 8-byte Spill
+ movq 24(%rcx), %rax
+ movq %rax, 32(%rsp) # 8-byte Spill
+ movq 16(%rcx), %rax
+ movq %rax, 24(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rax
+ movq %rax, 8(%rsp) # 8-byte Spill
+ movq %rcx, %rsi
+ leaq 856(%rsp), %rdi
+ callq .LmulPv576x64
+ addq 856(%rsp), %r14
+ movq 192(%rsp), %rcx # 8-byte Reload
+ adcq 864(%rsp), %rcx
+ adcq 872(%rsp), %r15
+ adcq 880(%rsp), %r13
+ adcq 888(%rsp), %rbx
+ movq %rbx, 88(%rsp) # 8-byte Spill
+ adcq 896(%rsp), %rbp
+ movq %rbp, 96(%rsp) # 8-byte Spill
+ movq 136(%rsp), %rax # 8-byte Reload
+ adcq 904(%rsp), %rax
+ movq %rax, 136(%rsp) # 8-byte Spill
+ movq 168(%rsp), %rax # 8-byte Reload
+ adcq 912(%rsp), %rax
+ movq %rax, 168(%rsp) # 8-byte Spill
+ movq 176(%rsp), %rax # 8-byte Reload
+ adcq 920(%rsp), %rax
+ movq %rax, 176(%rsp) # 8-byte Spill
+ adcq 928(%rsp), %r12
+ movq %r12, 192(%rsp) # 8-byte Spill
+ movq 160(%rsp), %rbp # 8-byte Reload
+ adcq $0, %rbp
+ adcq $0, 200(%rsp) # 8-byte Folded Spill
+ adcq $0, 208(%rsp) # 8-byte Folded Spill
+ adcq $0, 184(%rsp) # 8-byte Folded Spill
+ adcq $0, 144(%rsp) # 8-byte Folded Spill
+ adcq $0, 104(%rsp) # 8-byte Folded Spill
+ adcq $0, 152(%rsp) # 8-byte Folded Spill
+ movq 112(%rsp), %r14 # 8-byte Reload
+ adcq $0, %r14
+ sbbq %r12, %r12
+ movq %rcx, %rdx
+ movq %rcx, %rbx
+ imulq 120(%rsp), %rdx # 8-byte Folded Reload
+ leaq 776(%rsp), %rdi
+ movq 128(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ andl $1, %r12d
+ addq 776(%rsp), %rbx
+ adcq 784(%rsp), %r15
+ adcq 792(%rsp), %r13
+ movq %r13, (%rsp) # 8-byte Spill
+ movq 88(%rsp), %rax # 8-byte Reload
+ adcq 800(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ movq 96(%rsp), %rax # 8-byte Reload
+ adcq 808(%rsp), %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ movq 136(%rsp), %rax # 8-byte Reload
+ adcq 816(%rsp), %rax
+ movq %rax, 136(%rsp) # 8-byte Spill
+ movq 168(%rsp), %rax # 8-byte Reload
+ adcq 824(%rsp), %rax
+ movq %rax, 168(%rsp) # 8-byte Spill
+ movq 176(%rsp), %rax # 8-byte Reload
+ adcq 832(%rsp), %rax
+ movq %rax, 176(%rsp) # 8-byte Spill
+ movq 192(%rsp), %rax # 8-byte Reload
+ adcq 840(%rsp), %rax
+ movq %rax, 192(%rsp) # 8-byte Spill
+ adcq 848(%rsp), %rbp
+ movq %rbp, 160(%rsp) # 8-byte Spill
+ movq 200(%rsp), %r13 # 8-byte Reload
+ adcq $0, %r13
+ adcq $0, 208(%rsp) # 8-byte Folded Spill
+ adcq $0, 184(%rsp) # 8-byte Folded Spill
+ adcq $0, 144(%rsp) # 8-byte Folded Spill
+ adcq $0, 104(%rsp) # 8-byte Folded Spill
+ movq 152(%rsp), %rbx # 8-byte Reload
+ adcq $0, %rbx
+ adcq $0, %r14
+ movq %r14, 112(%rsp) # 8-byte Spill
+ adcq $0, %r12
+ movq %r15, %rdx
+ imulq 120(%rsp), %rdx # 8-byte Folded Reload
+ leaq 696(%rsp), %rdi
+ movq 128(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 696(%rsp), %r15
+ movq (%rsp), %rcx # 8-byte Reload
+ adcq 704(%rsp), %rcx
+ movq 88(%rsp), %rax # 8-byte Reload
+ adcq 712(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ movq 96(%rsp), %rax # 8-byte Reload
+ adcq 720(%rsp), %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ movq 136(%rsp), %rbp # 8-byte Reload
+ adcq 728(%rsp), %rbp
+ movq 168(%rsp), %r14 # 8-byte Reload
+ adcq 736(%rsp), %r14
+ movq 176(%rsp), %r15 # 8-byte Reload
+ adcq 744(%rsp), %r15
+ movq 192(%rsp), %rax # 8-byte Reload
+ adcq 752(%rsp), %rax
+ movq %rax, 192(%rsp) # 8-byte Spill
+ movq 160(%rsp), %rax # 8-byte Reload
+ adcq 760(%rsp), %rax
+ movq %rax, 160(%rsp) # 8-byte Spill
+ adcq 768(%rsp), %r13
+ movq %r13, 200(%rsp) # 8-byte Spill
+ adcq $0, 208(%rsp) # 8-byte Folded Spill
+ movq 184(%rsp), %r13 # 8-byte Reload
+ adcq $0, %r13
+ adcq $0, 144(%rsp) # 8-byte Folded Spill
+ adcq $0, 104(%rsp) # 8-byte Folded Spill
+ adcq $0, %rbx
+ movq %rbx, 152(%rsp) # 8-byte Spill
+ adcq $0, 112(%rsp) # 8-byte Folded Spill
+ adcq $0, %r12
+ movq %rcx, %rbx
+ movq %rbx, %rdx
+ imulq 120(%rsp), %rdx # 8-byte Folded Reload
+ leaq 616(%rsp), %rdi
+ movq 128(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 616(%rsp), %rbx
+ movq 88(%rsp), %rax # 8-byte Reload
+ adcq 624(%rsp), %rax
+ movq 96(%rsp), %rcx # 8-byte Reload
+ adcq 632(%rsp), %rcx
+ movq %rcx, 96(%rsp) # 8-byte Spill
+ adcq 640(%rsp), %rbp
+ movq %rbp, 136(%rsp) # 8-byte Spill
+ adcq 648(%rsp), %r14
+ movq %r14, 168(%rsp) # 8-byte Spill
+ adcq 656(%rsp), %r15
+ movq 192(%rsp), %r14 # 8-byte Reload
+ adcq 664(%rsp), %r14
+ movq 160(%rsp), %rbp # 8-byte Reload
+ adcq 672(%rsp), %rbp
+ movq 200(%rsp), %rcx # 8-byte Reload
+ adcq 680(%rsp), %rcx
+ movq %rcx, 200(%rsp) # 8-byte Spill
+ movq 208(%rsp), %rcx # 8-byte Reload
+ adcq 688(%rsp), %rcx
+ movq %rcx, 208(%rsp) # 8-byte Spill
+ adcq $0, %r13
+ movq %r13, 184(%rsp) # 8-byte Spill
+ adcq $0, 144(%rsp) # 8-byte Folded Spill
+ adcq $0, 104(%rsp) # 8-byte Folded Spill
+ adcq $0, 152(%rsp) # 8-byte Folded Spill
+ adcq $0, 112(%rsp) # 8-byte Folded Spill
+ adcq $0, %r12
+ movq %rax, %rbx
+ movq %rbx, %rdx
+ imulq 120(%rsp), %rdx # 8-byte Folded Reload
+ leaq 536(%rsp), %rdi
+ movq 128(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 536(%rsp), %rbx
+ movq 96(%rsp), %rax # 8-byte Reload
+ adcq 544(%rsp), %rax
+ movq 136(%rsp), %rcx # 8-byte Reload
+ adcq 552(%rsp), %rcx
+ movq %rcx, 136(%rsp) # 8-byte Spill
+ movq 168(%rsp), %rcx # 8-byte Reload
+ adcq 560(%rsp), %rcx
+ movq %rcx, 168(%rsp) # 8-byte Spill
+ adcq 568(%rsp), %r15
+ movq %r15, 176(%rsp) # 8-byte Spill
+ adcq 576(%rsp), %r14
+ movq %r14, 192(%rsp) # 8-byte Spill
+ adcq 584(%rsp), %rbp
+ movq %rbp, 160(%rsp) # 8-byte Spill
+ movq 200(%rsp), %r13 # 8-byte Reload
+ adcq 592(%rsp), %r13
+ movq 208(%rsp), %r15 # 8-byte Reload
+ adcq 600(%rsp), %r15
+ movq 184(%rsp), %rbp # 8-byte Reload
+ adcq 608(%rsp), %rbp
+ movq 144(%rsp), %rbx # 8-byte Reload
+ adcq $0, %rbx
+ adcq $0, 104(%rsp) # 8-byte Folded Spill
+ adcq $0, 152(%rsp) # 8-byte Folded Spill
+ adcq $0, 112(%rsp) # 8-byte Folded Spill
+ adcq $0, %r12
+ movq %rax, %rdx
+ movq %rax, %r14
+ imulq 120(%rsp), %rdx # 8-byte Folded Reload
+ leaq 456(%rsp), %rdi
+ movq 128(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 456(%rsp), %r14
+ movq 136(%rsp), %rax # 8-byte Reload
+ adcq 464(%rsp), %rax
+ movq 168(%rsp), %rcx # 8-byte Reload
+ adcq 472(%rsp), %rcx
+ movq %rcx, 168(%rsp) # 8-byte Spill
+ movq 176(%rsp), %rcx # 8-byte Reload
+ adcq 480(%rsp), %rcx
+ movq %rcx, 176(%rsp) # 8-byte Spill
+ movq 192(%rsp), %rcx # 8-byte Reload
+ adcq 488(%rsp), %rcx
+ movq %rcx, 192(%rsp) # 8-byte Spill
+ movq 160(%rsp), %rcx # 8-byte Reload
+ adcq 496(%rsp), %rcx
+ movq %rcx, 160(%rsp) # 8-byte Spill
+ adcq 504(%rsp), %r13
+ movq %r13, 200(%rsp) # 8-byte Spill
+ adcq 512(%rsp), %r15
+ movq %r15, 208(%rsp) # 8-byte Spill
+ adcq 520(%rsp), %rbp
+ movq %rbp, 184(%rsp) # 8-byte Spill
+ adcq 528(%rsp), %rbx
+ movq %rbx, 144(%rsp) # 8-byte Spill
+ movq 104(%rsp), %r14 # 8-byte Reload
+ adcq $0, %r14
+ movq 152(%rsp), %r13 # 8-byte Reload
+ adcq $0, %r13
+ movq 112(%rsp), %rbx # 8-byte Reload
+ adcq $0, %rbx
+ adcq $0, %r12
+ movq %rax, %rdx
+ movq %rax, %r15
+ imulq 120(%rsp), %rdx # 8-byte Folded Reload
+ leaq 376(%rsp), %rdi
+ movq 128(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 376(%rsp), %r15
+ movq 168(%rsp), %rax # 8-byte Reload
+ adcq 384(%rsp), %rax
+ movq 176(%rsp), %rcx # 8-byte Reload
+ adcq 392(%rsp), %rcx
+ movq %rcx, 176(%rsp) # 8-byte Spill
+ movq 192(%rsp), %rcx # 8-byte Reload
+ adcq 400(%rsp), %rcx
+ movq %rcx, 192(%rsp) # 8-byte Spill
+ movq 160(%rsp), %rbp # 8-byte Reload
+ adcq 408(%rsp), %rbp
+ movq 200(%rsp), %rcx # 8-byte Reload
+ adcq 416(%rsp), %rcx
+ movq %rcx, 200(%rsp) # 8-byte Spill
+ movq 208(%rsp), %rcx # 8-byte Reload
+ adcq 424(%rsp), %rcx
+ movq %rcx, 208(%rsp) # 8-byte Spill
+ movq 184(%rsp), %rcx # 8-byte Reload
+ adcq 432(%rsp), %rcx
+ movq %rcx, 184(%rsp) # 8-byte Spill
+ movq 144(%rsp), %r15 # 8-byte Reload
+ adcq 440(%rsp), %r15
+ adcq 448(%rsp), %r14
+ movq %r14, 104(%rsp) # 8-byte Spill
+ adcq $0, %r13
+ movq %r13, %r14
+ adcq $0, %rbx
+ movq %rbx, 112(%rsp) # 8-byte Spill
+ adcq $0, %r12
+ movq %rax, %rbx
+ movq %rbx, %rdx
+ imulq 120(%rsp), %rdx # 8-byte Folded Reload
+ leaq 296(%rsp), %rdi
+ movq 128(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 296(%rsp), %rbx
+ movq 176(%rsp), %rax # 8-byte Reload
+ adcq 304(%rsp), %rax
+ movq 192(%rsp), %r13 # 8-byte Reload
+ adcq 312(%rsp), %r13
+ adcq 320(%rsp), %rbp
+ movq 200(%rsp), %rcx # 8-byte Reload
+ adcq 328(%rsp), %rcx
+ movq %rcx, 200(%rsp) # 8-byte Spill
+ movq 208(%rsp), %rcx # 8-byte Reload
+ adcq 336(%rsp), %rcx
+ movq %rcx, 208(%rsp) # 8-byte Spill
+ movq 184(%rsp), %rcx # 8-byte Reload
+ adcq 344(%rsp), %rcx
+ movq %rcx, 184(%rsp) # 8-byte Spill
+ adcq 352(%rsp), %r15
+ movq %r15, 144(%rsp) # 8-byte Spill
+ movq 104(%rsp), %r15 # 8-byte Reload
+ adcq 360(%rsp), %r15
+ adcq 368(%rsp), %r14
+ movq %r14, 152(%rsp) # 8-byte Spill
+ movq 112(%rsp), %r14 # 8-byte Reload
+ adcq $0, %r14
+ adcq $0, %r12
+ movq 120(%rsp), %rdx # 8-byte Reload
+ imulq %rax, %rdx
+ movq %rax, %rbx
+ leaq 216(%rsp), %rdi
+ movq 128(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 216(%rsp), %rbx
+ movq %r13, %rsi
+ adcq 224(%rsp), %rsi
+ movq %rsi, 192(%rsp) # 8-byte Spill
+ adcq 232(%rsp), %rbp
+ movq %rbp, 160(%rsp) # 8-byte Spill
+ movq 200(%rsp), %r9 # 8-byte Reload
+ adcq 240(%rsp), %r9
+ movq %r9, 200(%rsp) # 8-byte Spill
+ movq 208(%rsp), %r8 # 8-byte Reload
+ adcq 248(%rsp), %r8
+ movq %r8, 208(%rsp) # 8-byte Spill
+ movq 184(%rsp), %rbx # 8-byte Reload
+ adcq 256(%rsp), %rbx
+ movq 144(%rsp), %rax # 8-byte Reload
+ adcq 264(%rsp), %rax
+ movq %r15, %rcx
+ adcq 272(%rsp), %rcx
+ movq 152(%rsp), %rdx # 8-byte Reload
+ adcq 280(%rsp), %rdx
+ movq %rdx, 152(%rsp) # 8-byte Spill
+ adcq 288(%rsp), %r14
+ movq %r14, %r11
+ adcq $0, %r12
+ subq 16(%rsp), %rsi # 8-byte Folded Reload
+ movq %rbp, %rdi
+ sbbq 8(%rsp), %rdi # 8-byte Folded Reload
+ movq %r9, %rbp
+ sbbq 24(%rsp), %rbp # 8-byte Folded Reload
+ movq %r8, %r13
+ sbbq 32(%rsp), %r13 # 8-byte Folded Reload
+ movq %rbx, %r15
+ sbbq 40(%rsp), %r15 # 8-byte Folded Reload
+ movq %rax, %r14
+ sbbq 48(%rsp), %r14 # 8-byte Folded Reload
+ movq %rcx, %r10
+ sbbq 56(%rsp), %r10 # 8-byte Folded Reload
+ movq %rdx, %r8
+ sbbq 64(%rsp), %r8 # 8-byte Folded Reload
+ movq %r11, %r9
+ sbbq 72(%rsp), %r9 # 8-byte Folded Reload
+ sbbq $0, %r12
+ andl $1, %r12d
+ cmovneq %r11, %r9
+ testb %r12b, %r12b
+ cmovneq 192(%rsp), %rsi # 8-byte Folded Reload
+ movq 80(%rsp), %rdx # 8-byte Reload
+ movq %rsi, (%rdx)
+ cmovneq 160(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 8(%rdx)
+ cmovneq 200(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 16(%rdx)
+ cmovneq 208(%rsp), %r13 # 8-byte Folded Reload
+ movq %r13, 24(%rdx)
+ cmovneq %rbx, %r15
+ movq %r15, 32(%rdx)
+ cmovneq %rax, %r14
+ movq %r14, 40(%rdx)
+ cmovneq %rcx, %r10
+ movq %r10, 48(%rdx)
+ cmovneq 152(%rsp), %r8 # 8-byte Folded Reload
+ movq %r8, 56(%rdx)
+ movq %r9, 64(%rdx)
+ addq $936, %rsp # imm = 0x3A8
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end132:
+ .size mcl_fp_montRed9L, .Lfunc_end132-mcl_fp_montRed9L
+
+ .globl mcl_fp_addPre9L
+ .align 16, 0x90
+ .type mcl_fp_addPre9L,@function
+mcl_fp_addPre9L: # @mcl_fp_addPre9L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 64(%rdx), %r8
+ movq 64(%rsi), %r15
+ movq 56(%rsi), %r9
+ movq 48(%rsi), %r10
+ movq 40(%rsi), %r11
+ movq 24(%rsi), %r12
+ movq 32(%rsi), %r14
+ movq (%rdx), %rbx
+ movq 8(%rdx), %rcx
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %rcx
+ movq 16(%rdx), %rax
+ adcq 16(%rsi), %rax
+ adcq 24(%rdx), %r12
+ movq 56(%rdx), %r13
+ movq 48(%rdx), %rsi
+ movq 40(%rdx), %rbp
+ movq 32(%rdx), %rdx
+ movq %rbx, (%rdi)
+ movq %rcx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ movq %r12, 24(%rdi)
+ adcq %r14, %rdx
+ movq %rdx, 32(%rdi)
+ adcq %r11, %rbp
+ movq %rbp, 40(%rdi)
+ adcq %r10, %rsi
+ movq %rsi, 48(%rdi)
+ adcq %r9, %r13
+ movq %r13, 56(%rdi)
+ adcq %r8, %r15
+ movq %r15, 64(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end133:
+ .size mcl_fp_addPre9L, .Lfunc_end133-mcl_fp_addPre9L
+
+ .globl mcl_fp_subPre9L
+ .align 16, 0x90
+ .type mcl_fp_subPre9L,@function
+mcl_fp_subPre9L: # @mcl_fp_subPre9L
+# BB#0:
+ movq 32(%rdx), %r8
+ movq (%rsi), %rcx
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ movq %rcx, (%rdi)
+ movq 8(%rsi), %rcx
+ sbbq 8(%rdx), %rcx
+ movq %rcx, 8(%rdi)
+ movq 16(%rsi), %rcx
+ sbbq 16(%rdx), %rcx
+ movq %rcx, 16(%rdi)
+ movq 24(%rsi), %rcx
+ sbbq 24(%rdx), %rcx
+ movq %rcx, 24(%rdi)
+ movq 32(%rsi), %rcx
+ sbbq %r8, %rcx
+ movq 40(%rdx), %r8
+ movq %rcx, 32(%rdi)
+ movq 40(%rsi), %rcx
+ sbbq %r8, %rcx
+ movq 48(%rdx), %r8
+ movq %rcx, 40(%rdi)
+ movq 48(%rsi), %rcx
+ sbbq %r8, %rcx
+ movq 56(%rdx), %r8
+ movq %rcx, 48(%rdi)
+ movq 56(%rsi), %rcx
+ sbbq %r8, %rcx
+ movq %rcx, 56(%rdi)
+ movq 64(%rdx), %rcx
+ movq 64(%rsi), %rdx
+ sbbq %rcx, %rdx
+ movq %rdx, 64(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end134:
+ .size mcl_fp_subPre9L, .Lfunc_end134-mcl_fp_subPre9L
+
+ .globl mcl_fp_shr1_9L
+ .align 16, 0x90
+ .type mcl_fp_shr1_9L,@function
+mcl_fp_shr1_9L: # @mcl_fp_shr1_9L
+# BB#0:
+ pushq %rbx
+ movq 64(%rsi), %r8
+ movq 56(%rsi), %r9
+ movq 48(%rsi), %r10
+ movq 40(%rsi), %r11
+ movq 32(%rsi), %rcx
+ movq 24(%rsi), %rdx
+ movq 16(%rsi), %rax
+ movq (%rsi), %rbx
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rbx
+ movq %rbx, (%rdi)
+ shrdq $1, %rax, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rdx, %rax
+ movq %rax, 16(%rdi)
+ shrdq $1, %rcx, %rdx
+ movq %rdx, 24(%rdi)
+ shrdq $1, %r11, %rcx
+ movq %rcx, 32(%rdi)
+ shrdq $1, %r10, %r11
+ movq %r11, 40(%rdi)
+ shrdq $1, %r9, %r10
+ movq %r10, 48(%rdi)
+ shrdq $1, %r8, %r9
+ movq %r9, 56(%rdi)
+ shrq %r8
+ movq %r8, 64(%rdi)
+ popq %rbx
+ retq
+.Lfunc_end135:
+ .size mcl_fp_shr1_9L, .Lfunc_end135-mcl_fp_shr1_9L
+
+ .globl mcl_fp_add9L
+ .align 16, 0x90
+ .type mcl_fp_add9L,@function
+mcl_fp_add9L: # @mcl_fp_add9L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 64(%rdx), %r12
+ movq 64(%rsi), %r8
+ movq 56(%rsi), %r13
+ movq 48(%rsi), %r9
+ movq 40(%rsi), %r10
+ movq 24(%rsi), %r14
+ movq 32(%rsi), %r11
+ movq (%rdx), %rbx
+ movq 8(%rdx), %r15
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %r15
+ movq 16(%rdx), %rax
+ adcq 16(%rsi), %rax
+ adcq 24(%rdx), %r14
+ adcq 32(%rdx), %r11
+ adcq 40(%rdx), %r10
+ movq 56(%rdx), %rsi
+ adcq 48(%rdx), %r9
+ movq %rbx, (%rdi)
+ movq %r15, 8(%rdi)
+ movq %rax, 16(%rdi)
+ movq %r14, 24(%rdi)
+ movq %r11, 32(%rdi)
+ movq %r10, 40(%rdi)
+ movq %r9, 48(%rdi)
+ adcq %r13, %rsi
+ movq %rsi, 56(%rdi)
+ adcq %r12, %r8
+ movq %r8, 64(%rdi)
+ sbbq %rdx, %rdx
+ andl $1, %edx
+ subq (%rcx), %rbx
+ sbbq 8(%rcx), %r15
+ sbbq 16(%rcx), %rax
+ sbbq 24(%rcx), %r14
+ sbbq 32(%rcx), %r11
+ sbbq 40(%rcx), %r10
+ sbbq 48(%rcx), %r9
+ sbbq 56(%rcx), %rsi
+ sbbq 64(%rcx), %r8
+ sbbq $0, %rdx
+ testb $1, %dl
+ jne .LBB136_2
+# BB#1: # %nocarry
+ movq %rbx, (%rdi)
+ movq %r15, 8(%rdi)
+ movq %rax, 16(%rdi)
+ movq %r14, 24(%rdi)
+ movq %r11, 32(%rdi)
+ movq %r10, 40(%rdi)
+ movq %r9, 48(%rdi)
+ movq %rsi, 56(%rdi)
+ movq %r8, 64(%rdi)
+.LBB136_2: # %carry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end136:
+ .size mcl_fp_add9L, .Lfunc_end136-mcl_fp_add9L
+
+ .globl mcl_fp_addNF9L
+ .align 16, 0x90
+ .type mcl_fp_addNF9L,@function
+mcl_fp_addNF9L: # @mcl_fp_addNF9L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, %r8
+ movq 64(%rdx), %r10
+ movq 56(%rdx), %r11
+ movq 48(%rdx), %r9
+ movq 40(%rdx), %rax
+ movq 32(%rdx), %rdi
+ movq 24(%rdx), %rbp
+ movq 16(%rdx), %r15
+ movq (%rdx), %rbx
+ movq 8(%rdx), %r13
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %r13
+ adcq 16(%rsi), %r15
+ adcq 24(%rsi), %rbp
+ movq %rbp, -40(%rsp) # 8-byte Spill
+ adcq 32(%rsi), %rdi
+ movq %rdi, -16(%rsp) # 8-byte Spill
+ adcq 40(%rsi), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ adcq 48(%rsi), %r9
+ movq %r9, -32(%rsp) # 8-byte Spill
+ movq %r9, %rdi
+ adcq 56(%rsi), %r11
+ movq %r11, -24(%rsp) # 8-byte Spill
+ movq %r11, %rax
+ adcq 64(%rsi), %r10
+ movq %r10, %r9
+ movq %rbx, %rsi
+ subq (%rcx), %rsi
+ movq %r13, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r15, %r12
+ sbbq 16(%rcx), %r12
+ sbbq 24(%rcx), %rbp
+ movq -16(%rsp), %r14 # 8-byte Reload
+ sbbq 32(%rcx), %r14
+ movq -8(%rsp), %r11 # 8-byte Reload
+ sbbq 40(%rcx), %r11
+ movq %rdi, %r10
+ sbbq 48(%rcx), %r10
+ movq %rax, %rdi
+ sbbq 56(%rcx), %rdi
+ movq %r9, %rax
+ sbbq 64(%rcx), %rax
+ movq %rax, %rcx
+ sarq $63, %rcx
+ cmovsq %rbx, %rsi
+ movq %rsi, (%r8)
+ cmovsq %r13, %rdx
+ movq %rdx, 8(%r8)
+ cmovsq %r15, %r12
+ movq %r12, 16(%r8)
+ cmovsq -40(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 24(%r8)
+ cmovsq -16(%rsp), %r14 # 8-byte Folded Reload
+ movq %r14, 32(%r8)
+ cmovsq -8(%rsp), %r11 # 8-byte Folded Reload
+ movq %r11, 40(%r8)
+ cmovsq -32(%rsp), %r10 # 8-byte Folded Reload
+ movq %r10, 48(%r8)
+ cmovsq -24(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 56(%r8)
+ cmovsq %r9, %rax
+ movq %rax, 64(%r8)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end137:
+ .size mcl_fp_addNF9L, .Lfunc_end137-mcl_fp_addNF9L
+
+ .globl mcl_fp_sub9L
+ .align 16, 0x90
+ .type mcl_fp_sub9L,@function
+mcl_fp_sub9L: # @mcl_fp_sub9L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 64(%rdx), %r13
+ movq (%rsi), %rax
+ movq 8(%rsi), %r9
+ xorl %ebx, %ebx
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r9
+ movq 16(%rsi), %r10
+ sbbq 16(%rdx), %r10
+ movq 24(%rsi), %r11
+ sbbq 24(%rdx), %r11
+ movq 32(%rsi), %r12
+ sbbq 32(%rdx), %r12
+ movq 40(%rsi), %r14
+ sbbq 40(%rdx), %r14
+ movq 48(%rsi), %r15
+ sbbq 48(%rdx), %r15
+ movq 64(%rsi), %r8
+ movq 56(%rsi), %rsi
+ sbbq 56(%rdx), %rsi
+ movq %rax, (%rdi)
+ movq %r9, 8(%rdi)
+ movq %r10, 16(%rdi)
+ movq %r11, 24(%rdi)
+ movq %r12, 32(%rdi)
+ movq %r14, 40(%rdi)
+ movq %r15, 48(%rdi)
+ movq %rsi, 56(%rdi)
+ sbbq %r13, %r8
+ movq %r8, 64(%rdi)
+ sbbq $0, %rbx
+ testb $1, %bl
+ je .LBB138_2
+# BB#1: # %carry
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ movq 8(%rcx), %rax
+ adcq %r9, %rax
+ movq %rax, 8(%rdi)
+ movq 16(%rcx), %rax
+ adcq %r10, %rax
+ movq %rax, 16(%rdi)
+ movq 24(%rcx), %rax
+ adcq %r11, %rax
+ movq %rax, 24(%rdi)
+ movq 32(%rcx), %rax
+ adcq %r12, %rax
+ movq %rax, 32(%rdi)
+ movq 40(%rcx), %rax
+ adcq %r14, %rax
+ movq %rax, 40(%rdi)
+ movq 48(%rcx), %rax
+ adcq %r15, %rax
+ movq %rax, 48(%rdi)
+ movq 56(%rcx), %rax
+ adcq %rsi, %rax
+ movq %rax, 56(%rdi)
+ movq 64(%rcx), %rax
+ adcq %r8, %rax
+ movq %rax, 64(%rdi)
+.LBB138_2: # %nocarry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end138:
+ .size mcl_fp_sub9L, .Lfunc_end138-mcl_fp_sub9L
+
+ .globl mcl_fp_subNF9L
+ .align 16, 0x90
+ .type mcl_fp_subNF9L,@function
+mcl_fp_subNF9L: # @mcl_fp_subNF9L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq %rdi, %r11
+ movq 64(%rsi), %r14
+ movq 56(%rsi), %rax
+ movq 48(%rsi), %rcx
+ movq 40(%rsi), %rdi
+ movq 32(%rsi), %rbp
+ movq 24(%rsi), %rbx
+ movq 16(%rsi), %r15
+ movq (%rsi), %r12
+ movq 8(%rsi), %r13
+ subq (%rdx), %r12
+ sbbq 8(%rdx), %r13
+ sbbq 16(%rdx), %r15
+ sbbq 24(%rdx), %rbx
+ movq %rbx, -40(%rsp) # 8-byte Spill
+ sbbq 32(%rdx), %rbp
+ movq %rbp, -32(%rsp) # 8-byte Spill
+ sbbq 40(%rdx), %rdi
+ movq %rdi, -24(%rsp) # 8-byte Spill
+ sbbq 48(%rdx), %rcx
+ movq %rcx, -16(%rsp) # 8-byte Spill
+ sbbq 56(%rdx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ sbbq 64(%rdx), %r14
+ movq %r14, %rax
+ sarq $63, %rax
+ movq %rax, %rcx
+ shldq $1, %r14, %rcx
+ movq 24(%r8), %rbp
+ andq %rcx, %rbp
+ movq 8(%r8), %rdi
+ andq %rcx, %rdi
+ andq (%r8), %rcx
+ movq 64(%r8), %rbx
+ andq %rax, %rbx
+ movq 56(%r8), %r10
+ andq %rax, %r10
+ rolq %rax
+ movq 48(%r8), %r9
+ andq %rax, %r9
+ movq 40(%r8), %rsi
+ andq %rax, %rsi
+ movq 32(%r8), %rdx
+ andq %rax, %rdx
+ andq 16(%r8), %rax
+ addq %r12, %rcx
+ adcq %r13, %rdi
+ movq %rcx, (%r11)
+ adcq %r15, %rax
+ movq %rdi, 8(%r11)
+ adcq -40(%rsp), %rbp # 8-byte Folded Reload
+ movq %rax, 16(%r11)
+ movq %rbp, 24(%r11)
+ adcq -32(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 32(%r11)
+ adcq -24(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 40(%r11)
+ adcq -16(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, 48(%r11)
+ adcq -8(%rsp), %r10 # 8-byte Folded Reload
+ movq %r10, 56(%r11)
+ adcq %r14, %rbx
+ movq %rbx, 64(%r11)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end139:
+ .size mcl_fp_subNF9L, .Lfunc_end139-mcl_fp_subNF9L
+
+ .globl mcl_fpDbl_add9L
+ .align 16, 0x90
+ .type mcl_fpDbl_add9L,@function
+mcl_fpDbl_add9L: # @mcl_fpDbl_add9L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r15
+ movq 136(%rdx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq 128(%rdx), %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq 120(%rdx), %r10
+ movq 112(%rdx), %r11
+ movq 24(%rsi), %rcx
+ movq 32(%rsi), %r14
+ movq 16(%rdx), %rbp
+ movq (%rdx), %rax
+ movq 8(%rdx), %rbx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rbx
+ adcq 16(%rsi), %rbp
+ adcq 24(%rdx), %rcx
+ adcq 32(%rdx), %r14
+ movq 104(%rdx), %r9
+ movq 96(%rdx), %r13
+ movq %rax, (%rdi)
+ movq 88(%rdx), %r8
+ movq %rbx, 8(%rdi)
+ movq 80(%rdx), %r12
+ movq %rbp, 16(%rdi)
+ movq 40(%rdx), %rax
+ movq %rcx, 24(%rdi)
+ movq 40(%rsi), %rbp
+ adcq %rax, %rbp
+ movq 48(%rdx), %rcx
+ movq %r14, 32(%rdi)
+ movq 48(%rsi), %rax
+ adcq %rcx, %rax
+ movq 56(%rdx), %r14
+ movq %rbp, 40(%rdi)
+ movq 56(%rsi), %rbp
+ adcq %r14, %rbp
+ movq 72(%rdx), %rcx
+ movq 64(%rdx), %rdx
+ movq %rax, 48(%rdi)
+ movq 64(%rsi), %rax
+ adcq %rdx, %rax
+ movq 136(%rsi), %rbx
+ movq %rbp, 56(%rdi)
+ movq 72(%rsi), %rbp
+ adcq %rcx, %rbp
+ movq 128(%rsi), %rcx
+ movq %rax, 64(%rdi)
+ movq 80(%rsi), %rdx
+ adcq %r12, %rdx
+ movq 88(%rsi), %r12
+ adcq %r8, %r12
+ movq 96(%rsi), %r14
+ adcq %r13, %r14
+ movq %r14, -48(%rsp) # 8-byte Spill
+ movq 104(%rsi), %rax
+ adcq %r9, %rax
+ movq %rax, -32(%rsp) # 8-byte Spill
+ movq 120(%rsi), %rax
+ movq 112(%rsi), %rsi
+ adcq %r11, %rsi
+ movq %rsi, -24(%rsp) # 8-byte Spill
+ adcq %r10, %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ adcq -40(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -40(%rsp) # 8-byte Spill
+ adcq -8(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, -8(%rsp) # 8-byte Spill
+ sbbq %r9, %r9
+ andl $1, %r9d
+ movq %rbp, %r10
+ subq (%r15), %r10
+ movq %rdx, %r11
+ sbbq 8(%r15), %r11
+ movq %r12, %rbx
+ sbbq 16(%r15), %rbx
+ sbbq 24(%r15), %r14
+ movq -32(%rsp), %r13 # 8-byte Reload
+ sbbq 32(%r15), %r13
+ movq -24(%rsp), %rsi # 8-byte Reload
+ sbbq 40(%r15), %rsi
+ movq -16(%rsp), %rax # 8-byte Reload
+ sbbq 48(%r15), %rax
+ sbbq 56(%r15), %rcx
+ movq -8(%rsp), %r8 # 8-byte Reload
+ sbbq 64(%r15), %r8
+ sbbq $0, %r9
+ andl $1, %r9d
+ cmovneq %rbp, %r10
+ movq %r10, 72(%rdi)
+ testb %r9b, %r9b
+ cmovneq %rdx, %r11
+ movq %r11, 80(%rdi)
+ cmovneq %r12, %rbx
+ movq %rbx, 88(%rdi)
+ cmovneq -48(%rsp), %r14 # 8-byte Folded Reload
+ movq %r14, 96(%rdi)
+ cmovneq -32(%rsp), %r13 # 8-byte Folded Reload
+ movq %r13, 104(%rdi)
+ cmovneq -24(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 112(%rdi)
+ cmovneq -16(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 120(%rdi)
+ cmovneq -40(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 128(%rdi)
+ cmovneq -8(%rsp), %r8 # 8-byte Folded Reload
+ movq %r8, 136(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end140:
+ .size mcl_fpDbl_add9L, .Lfunc_end140-mcl_fpDbl_add9L
+
+ .globl mcl_fpDbl_sub9L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub9L,@function
+mcl_fpDbl_sub9L: # @mcl_fpDbl_sub9L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r14
+ movq 136(%rdx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq 128(%rdx), %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq 120(%rdx), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq 16(%rsi), %r11
+ movq (%rsi), %r12
+ movq 8(%rsi), %r13
+ xorl %r9d, %r9d
+ subq (%rdx), %r12
+ sbbq 8(%rdx), %r13
+ sbbq 16(%rdx), %r11
+ movq 24(%rsi), %rbx
+ sbbq 24(%rdx), %rbx
+ movq 32(%rsi), %rbp
+ sbbq 32(%rdx), %rbp
+ movq 112(%rdx), %r10
+ movq 104(%rdx), %rcx
+ movq %r12, (%rdi)
+ movq 96(%rdx), %rax
+ movq %r13, 8(%rdi)
+ movq 88(%rdx), %r13
+ movq %r11, 16(%rdi)
+ movq 40(%rdx), %r11
+ movq %rbx, 24(%rdi)
+ movq 40(%rsi), %rbx
+ sbbq %r11, %rbx
+ movq 48(%rdx), %r11
+ movq %rbp, 32(%rdi)
+ movq 48(%rsi), %rbp
+ sbbq %r11, %rbp
+ movq 56(%rdx), %r11
+ movq %rbx, 40(%rdi)
+ movq 56(%rsi), %rbx
+ sbbq %r11, %rbx
+ movq 64(%rdx), %r11
+ movq %rbp, 48(%rdi)
+ movq 64(%rsi), %rbp
+ sbbq %r11, %rbp
+ movq 80(%rdx), %r8
+ movq 72(%rdx), %r11
+ movq %rbx, 56(%rdi)
+ movq 72(%rsi), %r15
+ sbbq %r11, %r15
+ movq 136(%rsi), %rdx
+ movq %rbp, 64(%rdi)
+ movq 80(%rsi), %rbp
+ sbbq %r8, %rbp
+ movq 88(%rsi), %r12
+ sbbq %r13, %r12
+ movq 96(%rsi), %r13
+ sbbq %rax, %r13
+ movq 104(%rsi), %rax
+ sbbq %rcx, %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq 112(%rsi), %rax
+ sbbq %r10, %rax
+ movq %rax, -32(%rsp) # 8-byte Spill
+ movq 128(%rsi), %rax
+ movq 120(%rsi), %rcx
+ sbbq -24(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -24(%rsp) # 8-byte Spill
+ sbbq -16(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -16(%rsp) # 8-byte Spill
+ sbbq -8(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movl $0, %r8d
+ sbbq $0, %r8
+ andl $1, %r8d
+ movq (%r14), %r10
+ cmoveq %r9, %r10
+ testb %r8b, %r8b
+ movq 16(%r14), %r8
+ cmoveq %r9, %r8
+ movq 8(%r14), %rdx
+ cmoveq %r9, %rdx
+ movq 64(%r14), %rbx
+ cmoveq %r9, %rbx
+ movq 56(%r14), %r11
+ cmoveq %r9, %r11
+ movq 48(%r14), %rsi
+ cmoveq %r9, %rsi
+ movq 40(%r14), %rcx
+ cmoveq %r9, %rcx
+ movq 32(%r14), %rax
+ cmoveq %r9, %rax
+ cmovneq 24(%r14), %r9
+ addq %r15, %r10
+ adcq %rbp, %rdx
+ movq %r10, 72(%rdi)
+ adcq %r12, %r8
+ movq %rdx, 80(%rdi)
+ adcq %r13, %r9
+ movq %r8, 88(%rdi)
+ movq %r9, 96(%rdi)
+ adcq -40(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 104(%rdi)
+ adcq -32(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 112(%rdi)
+ adcq -24(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 120(%rdi)
+ adcq -16(%rsp), %r11 # 8-byte Folded Reload
+ movq %r11, 128(%rdi)
+ adcq -8(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, 136(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end141:
+ .size mcl_fpDbl_sub9L, .Lfunc_end141-mcl_fpDbl_sub9L
+
+
+ .section ".note.GNU-stack","",@progbits
diff --git a/vendor/github.com/tangerine-network/mcl/src/asm/x86-64mac.bmi2.s b/vendor/github.com/tangerine-network/mcl/src/asm/x86-64mac.bmi2.s
new file mode 100644
index 000000000..849c66649
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/src/asm/x86-64mac.bmi2.s
@@ -0,0 +1,13830 @@
+ .section __TEXT,__text,regular,pure_instructions
+ .macosx_version_min 10, 12
+ .globl _makeNIST_P192Lbmi2
+ .p2align 4, 0x90
+_makeNIST_P192Lbmi2: ## @makeNIST_P192Lbmi2
+## BB#0:
+ movq $-1, %rax
+ movq $-2, %rdx
+ movq $-1, %rcx
+ retq
+
+ .globl _mcl_fpDbl_mod_NIST_P192Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_mod_NIST_P192Lbmi2: ## @mcl_fpDbl_mod_NIST_P192Lbmi2
+## BB#0:
+ pushq %r14
+ pushq %rbx
+ movq 16(%rsi), %r10
+ movq 24(%rsi), %r8
+ movq 40(%rsi), %r9
+ movq 8(%rsi), %rax
+ addq %r9, %rax
+ adcq $0, %r10
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ movq 32(%rsi), %r11
+ movq (%rsi), %r14
+ addq %r8, %r14
+ adcq %r11, %rax
+ adcq %r9, %r10
+ adcq $0, %rcx
+ addq %r9, %r14
+ adcq %r8, %rax
+ adcq %r11, %r10
+ adcq $0, %rcx
+ addq %rcx, %r14
+ adcq %rax, %rcx
+ adcq $0, %r10
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %r14, %rsi
+ addq $1, %rsi
+ movq %rcx, %rdx
+ adcq $1, %rdx
+ movq %r10, %rbx
+ adcq $0, %rbx
+ adcq $-1, %rax
+ andl $1, %eax
+ cmovneq %r14, %rsi
+ movq %rsi, (%rdi)
+ testb %al, %al
+ cmovneq %rcx, %rdx
+ movq %rdx, 8(%rdi)
+ cmovneq %r10, %rbx
+ movq %rbx, 16(%rdi)
+ popq %rbx
+ popq %r14
+ retq
+
+ .globl _mcl_fp_sqr_NIST_P192Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_sqr_NIST_P192Lbmi2: ## @mcl_fp_sqr_NIST_P192Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq 16(%rsi), %r8
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ movq %r8, %rdx
+ mulxq %rsi, %r14, %rbx
+ movq %rbx, -16(%rsp) ## 8-byte Spill
+ movq %rsi, %rdx
+ mulxq %rsi, %r13, %r15
+ mulxq %rcx, %r12, %rsi
+ addq %rsi, %r13
+ adcq %r14, %r15
+ adcq $0, %rbx
+ movq %rcx, %rdx
+ mulxq %rcx, %r9, %rax
+ addq %r12, %rax
+ movq %r8, %rdx
+ mulxq %rcx, %rbp, %r11
+ adcq %rbp, %rsi
+ movq %r11, %r10
+ adcq $0, %r10
+ addq %r12, %rax
+ adcq %r13, %rsi
+ adcq %r15, %r10
+ adcq $0, %rbx
+ mulxq %r8, %rcx, %rdi
+ addq %r14, %r11
+ adcq -16(%rsp), %rcx ## 8-byte Folded Reload
+ adcq $0, %rdi
+ addq %rbp, %rsi
+ adcq %r10, %r11
+ adcq %rbx, %rcx
+ adcq $0, %rdi
+ addq %rdi, %rax
+ adcq $0, %rsi
+ sbbq %rdx, %rdx
+ andl $1, %edx
+ addq %r11, %r9
+ adcq %rcx, %rax
+ adcq %rdi, %rsi
+ adcq $0, %rdx
+ addq %rdi, %r9
+ adcq %r11, %rax
+ adcq %rcx, %rsi
+ adcq $0, %rdx
+ addq %rdx, %r9
+ adcq %rax, %rdx
+ adcq $0, %rsi
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %r9, %rcx
+ addq $1, %rcx
+ movq %rdx, %rdi
+ adcq $1, %rdi
+ movq %rsi, %rbp
+ adcq $0, %rbp
+ adcq $-1, %rax
+ andl $1, %eax
+ cmovneq %r9, %rcx
+ movq -8(%rsp), %rbx ## 8-byte Reload
+ movq %rcx, (%rbx)
+ testb %al, %al
+ cmovneq %rdx, %rdi
+ movq %rdi, 8(%rbx)
+ cmovneq %rsi, %rbp
+ movq %rbp, 16(%rbx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_mulNIST_P192Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_mulNIST_P192Lbmi2: ## @mcl_fp_mulNIST_P192Lbmi2
+## BB#0:
+ pushq %r14
+ pushq %rbx
+ subq $56, %rsp
+ movq %rdi, %r14
+ leaq 8(%rsp), %rdi
+ callq _mcl_fpDbl_mulPre3Lbmi2
+ movq 24(%rsp), %r9
+ movq 32(%rsp), %r8
+ movq 48(%rsp), %rdi
+ movq 16(%rsp), %rbx
+ addq %rdi, %rbx
+ adcq $0, %r9
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ movq 40(%rsp), %rsi
+ movq 8(%rsp), %rdx
+ addq %r8, %rdx
+ adcq %rsi, %rbx
+ adcq %rdi, %r9
+ adcq $0, %rcx
+ addq %rdi, %rdx
+ adcq %r8, %rbx
+ adcq %rsi, %r9
+ adcq $0, %rcx
+ addq %rcx, %rdx
+ adcq %rbx, %rcx
+ adcq $0, %r9
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq %rdx, %rdi
+ addq $1, %rdi
+ movq %rcx, %rbx
+ adcq $1, %rbx
+ movq %r9, %rax
+ adcq $0, %rax
+ adcq $-1, %rsi
+ andl $1, %esi
+ cmovneq %rdx, %rdi
+ movq %rdi, (%r14)
+ testb %sil, %sil
+ cmovneq %rcx, %rbx
+ movq %rbx, 8(%r14)
+ cmovneq %r9, %rax
+ movq %rax, 16(%r14)
+ addq $56, %rsp
+ popq %rbx
+ popq %r14
+ retq
+
+ .globl _mcl_fpDbl_mod_NIST_P521Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_mod_NIST_P521Lbmi2: ## @mcl_fpDbl_mod_NIST_P521Lbmi2
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 120(%rsi), %r9
+ movq 128(%rsi), %r14
+ movq %r14, %r8
+ shldq $55, %r9, %r8
+ movq 112(%rsi), %r10
+ shldq $55, %r10, %r9
+ movq 104(%rsi), %r11
+ shldq $55, %r11, %r10
+ movq 96(%rsi), %r15
+ shldq $55, %r15, %r11
+ movq 88(%rsi), %r12
+ shldq $55, %r12, %r15
+ movq 80(%rsi), %rcx
+ shldq $55, %rcx, %r12
+ movq 64(%rsi), %rbx
+ movq 72(%rsi), %rax
+ shldq $55, %rax, %rcx
+ shrq $9, %r14
+ shldq $55, %rbx, %rax
+ ## kill: %EBX<def> %EBX<kill> %RBX<kill> %RBX<def>
+ andl $511, %ebx ## imm = 0x1FF
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rcx
+ adcq 16(%rsi), %r12
+ adcq 24(%rsi), %r15
+ adcq 32(%rsi), %r11
+ adcq 40(%rsi), %r10
+ adcq 48(%rsi), %r9
+ adcq 56(%rsi), %r8
+ adcq %r14, %rbx
+ movl %ebx, %esi
+ shrl $9, %esi
+ andl $1, %esi
+ addq %rax, %rsi
+ adcq $0, %rcx
+ adcq $0, %r12
+ adcq $0, %r15
+ adcq $0, %r11
+ adcq $0, %r10
+ adcq $0, %r9
+ adcq $0, %r8
+ adcq $0, %rbx
+ movq %rsi, %rax
+ andq %r12, %rax
+ andq %r15, %rax
+ andq %r11, %rax
+ andq %r10, %rax
+ andq %r9, %rax
+ andq %r8, %rax
+ movq %rbx, %rdx
+ orq $-512, %rdx ## imm = 0xFE00
+ andq %rax, %rdx
+ andq %rcx, %rdx
+ cmpq $-1, %rdx
+ je LBB4_1
+## BB#3: ## %nonzero
+ movq %rsi, (%rdi)
+ movq %rcx, 8(%rdi)
+ movq %r12, 16(%rdi)
+ movq %r15, 24(%rdi)
+ movq %r11, 32(%rdi)
+ movq %r10, 40(%rdi)
+ movq %r9, 48(%rdi)
+ movq %r8, 56(%rdi)
+ andl $511, %ebx ## imm = 0x1FF
+ movq %rbx, 64(%rdi)
+ jmp LBB4_2
+LBB4_1: ## %zero
+ movq $0, 64(%rdi)
+ movq $0, 56(%rdi)
+ movq $0, 48(%rdi)
+ movq $0, 40(%rdi)
+ movq $0, 32(%rdi)
+ movq $0, 24(%rdi)
+ movq $0, 16(%rdi)
+ movq $0, 8(%rdi)
+ movq $0, (%rdi)
+LBB4_2: ## %zero
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+
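+## 1-limb (single 64-bit word) field primitives follow: mulUnitPre/fpDbl_mulPre/fpDbl_sqrPre
+## form double-width products, mont/montNF/montRed are Montgomery routines, addPre/subPre
+## return the carry/borrow in %rax, add/addNF/sub/subNF reduce modulo p, and shr1 is a
+## one-bit right shift.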
+ .globl _mcl_fp_mulUnitPre1Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_mulUnitPre1Lbmi2: ## @mcl_fp_mulUnitPre1Lbmi2
+## BB#0:
+ mulxq (%rsi), %rcx, %rax
+ movq %rcx, (%rdi)
+ movq %rax, 8(%rdi)
+ retq
+
+ .globl _mcl_fpDbl_mulPre1Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_mulPre1Lbmi2: ## @mcl_fpDbl_mulPre1Lbmi2
+## BB#0:
+ movq (%rdx), %rdx
+ mulxq (%rsi), %rcx, %rax
+ movq %rcx, (%rdi)
+ movq %rax, 8(%rdi)
+ retq
+
+ .globl _mcl_fpDbl_sqrPre1Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_sqrPre1Lbmi2: ## @mcl_fpDbl_sqrPre1Lbmi2
+## BB#0:
+ movq (%rsi), %rdx
+ mulxq %rdx, %rcx, %rax
+ movq %rcx, (%rdi)
+ movq %rax, 8(%rdi)
+ retq
+
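+## 1-limb Montgomery multiplication: the word product x*y is reduced using the
+## precomputed constant at p[-1] (-p^-1 mod 2^64), then the modulus is conditionally
+## subtracted once.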
+ .globl _mcl_fp_mont1Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_mont1Lbmi2: ## @mcl_fp_mont1Lbmi2
+## BB#0:
+ movq %rdx, %rax
+ movq (%rsi), %rdx
+ mulxq (%rax), %rsi, %r8
+ movq -8(%rcx), %rdx
+ imulq %rsi, %rdx
+ movq (%rcx), %rcx
+ mulxq %rcx, %rdx, %rax
+ addq %rsi, %rdx
+ adcq %r8, %rax
+ sbbq %rdx, %rdx
+ andl $1, %edx
+ movq %rax, %rsi
+ subq %rcx, %rsi
+ sbbq $0, %rdx
+ testb $1, %dl
+ cmovneq %rax, %rsi
+ movq %rsi, (%rdi)
+ retq
+
+ .globl _mcl_fp_montNF1Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_montNF1Lbmi2: ## @mcl_fp_montNF1Lbmi2
+## BB#0:
+ movq %rdx, %rax
+ movq (%rsi), %rdx
+ mulxq (%rax), %rsi, %r8
+ movq -8(%rcx), %rdx
+ imulq %rsi, %rdx
+ movq (%rcx), %rcx
+ mulxq %rcx, %rdx, %rax
+ addq %rsi, %rdx
+ adcq %r8, %rax
+ movq %rax, %rdx
+ subq %rcx, %rdx
+ cmovsq %rax, %rdx
+ movq %rdx, (%rdi)
+ retq
+
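+## 1-limb Montgomery reduction: folds the double-width input back using the constant
+## at p[-1], then conditionally subtracts the modulus.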
+ .globl _mcl_fp_montRed1Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_montRed1Lbmi2: ## @mcl_fp_montRed1Lbmi2
+## BB#0:
+ movq (%rsi), %rcx
+ movq -8(%rdx), %rax
+ imulq %rcx, %rax
+ movq (%rdx), %r8
+ movq %rax, %rdx
+ mulxq %r8, %rax, %rdx
+ addq %rcx, %rax
+ adcq 8(%rsi), %rdx
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rdx, %rcx
+ subq %r8, %rcx
+ sbbq $0, %rax
+ testb $1, %al
+ cmovneq %rdx, %rcx
+ movq %rcx, (%rdi)
+ retq
+
+ .globl _mcl_fp_addPre1Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_addPre1Lbmi2: ## @mcl_fp_addPre1Lbmi2
+## BB#0:
+ movq (%rdx), %rax
+ addq (%rsi), %rax
+ movq %rax, (%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+
+ .globl _mcl_fp_subPre1Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_subPre1Lbmi2: ## @mcl_fp_subPre1Lbmi2
+## BB#0:
+ movq (%rsi), %rcx
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ movq %rcx, (%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+
+ .globl _mcl_fp_shr1_1Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_shr1_1Lbmi2: ## @mcl_fp_shr1_1Lbmi2
+## BB#0:
+ movq (%rsi), %rax
+ shrq %rax
+ movq %rax, (%rdi)
+ retq
+
+ .globl _mcl_fp_add1Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_add1Lbmi2: ## @mcl_fp_add1Lbmi2
+## BB#0:
+ movq (%rdx), %rax
+ addq (%rsi), %rax
+ movq %rax, (%rdi)
+ sbbq %rdx, %rdx
+ andl $1, %edx
+ subq (%rcx), %rax
+ sbbq $0, %rdx
+ testb $1, %dl
+ jne LBB14_2
+## BB#1: ## %nocarry
+ movq %rax, (%rdi)
+LBB14_2: ## %carry
+ retq
+
+ .globl _mcl_fp_addNF1Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_addNF1Lbmi2: ## @mcl_fp_addNF1Lbmi2
+## BB#0:
+ movq (%rdx), %rax
+ addq (%rsi), %rax
+ movq %rax, %rdx
+ subq (%rcx), %rdx
+ cmovsq %rax, %rdx
+ movq %rdx, (%rdi)
+ retq
+
+ .globl _mcl_fp_sub1Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_sub1Lbmi2: ## @mcl_fp_sub1Lbmi2
+## BB#0:
+ movq (%rsi), %rax
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ movq %rax, (%rdi)
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne LBB16_2
+## BB#1: ## %nocarry
+ retq
+LBB16_2: ## %carry
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ retq
+
+ .globl _mcl_fp_subNF1Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_subNF1Lbmi2: ## @mcl_fp_subNF1Lbmi2
+## BB#0:
+ movq (%rsi), %rax
+ subq (%rdx), %rax
+ movq %rax, %rdx
+ sarq $63, %rdx
+ andq (%rcx), %rdx
+ addq %rax, %rdx
+ movq %rdx, (%rdi)
+ retq
+
+ .globl _mcl_fpDbl_add1Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_add1Lbmi2: ## @mcl_fpDbl_add1Lbmi2
+## BB#0:
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ movq %rax, (%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rdx, %rsi
+ subq (%rcx), %rsi
+ sbbq $0, %rax
+ testb $1, %al
+ cmovneq %rdx, %rsi
+ movq %rsi, 8(%rdi)
+ retq
+
+ .globl _mcl_fpDbl_sub1Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_sub1Lbmi2: ## @mcl_fpDbl_sub1Lbmi2
+## BB#0:
+ movq (%rsi), %rax
+ movq 8(%rsi), %r8
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r8
+ movq %rax, (%rdi)
+ movl $0, %eax
+ sbbq $0, %rax
+ testb $1, %al
+ cmovneq (%rcx), %rsi
+ addq %r8, %rsi
+ movq %rsi, 8(%rdi)
+ retq
+
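+## 2-limb (128-bit) variants of the same primitive set follow, in the same order
+## (mulUnitPre, fpDbl_mulPre, fpDbl_sqrPre, mont, montNF, montRed, addPre, subPre,
+## shr1, add, addNF, sub, subNF, fpDbl_add, fpDbl_sub).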
+ .globl _mcl_fp_mulUnitPre2Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_mulUnitPre2Lbmi2: ## @mcl_fp_mulUnitPre2Lbmi2
+## BB#0:
+ mulxq 8(%rsi), %rax, %rcx
+ mulxq (%rsi), %rdx, %rsi
+ movq %rdx, (%rdi)
+ addq %rax, %rsi
+ movq %rsi, 8(%rdi)
+ adcq $0, %rcx
+ movq %rcx, 16(%rdi)
+ retq
+
+ .globl _mcl_fpDbl_mulPre2Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_mulPre2Lbmi2: ## @mcl_fpDbl_mulPre2Lbmi2
+## BB#0:
+ movq %rdx, %r10
+ movq (%rsi), %r11
+ movq 8(%rsi), %r8
+ movq (%r10), %rsi
+ movq %r11, %rdx
+ mulxq %rsi, %rdx, %r9
+ movq %rdx, (%rdi)
+ movq %r8, %rdx
+ mulxq %rsi, %rsi, %rax
+ addq %r9, %rsi
+ adcq $0, %rax
+ movq 8(%r10), %rcx
+ movq %r11, %rdx
+ mulxq %rcx, %rdx, %r9
+ addq %rsi, %rdx
+ movq %rdx, 8(%rdi)
+ movq %r8, %rdx
+ mulxq %rcx, %rdx, %rcx
+ adcq %rax, %rdx
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %r9, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %rcx, %rax
+ movq %rax, 24(%rdi)
+ retq
+
+ .globl _mcl_fpDbl_sqrPre2Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_sqrPre2Lbmi2: ## @mcl_fpDbl_sqrPre2Lbmi2
+## BB#0:
+ movq (%rsi), %rax
+ movq 8(%rsi), %rcx
+ movq %rax, %rdx
+ mulxq %rax, %rdx, %rsi
+ movq %rdx, (%rdi)
+ movq %rcx, %rdx
+ mulxq %rax, %rdx, %r8
+ addq %rdx, %rsi
+ movq %r8, %rax
+ adcq $0, %rax
+ addq %rdx, %rsi
+ movq %rsi, 8(%rdi)
+ movq %rcx, %rdx
+ mulxq %rcx, %rdx, %rcx
+ adcq %rax, %rdx
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %r8, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %rcx, %rax
+ movq %rax, 24(%rdi)
+ retq
+
+ .globl _mcl_fp_mont2Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_mont2Lbmi2: ## @mcl_fp_mont2Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq (%rsi), %r8
+ movq 8(%rsi), %r9
+ movq (%rdx), %rax
+ movq 8(%rdx), %r11
+ movq %r9, %rdx
+ mulxq %rax, %r10, %r13
+ movq %r8, %rdx
+ mulxq %rax, %r14, %rsi
+ addq %r10, %rsi
+ adcq $0, %r13
+ movq -8(%rcx), %rbp
+ movq (%rcx), %r10
+ movq %r14, %rdx
+ imulq %rbp, %rdx
+ movq 8(%rcx), %r15
+ mulxq %r15, %r12, %rcx
+ mulxq %r10, %rdx, %rbx
+ addq %r12, %rbx
+ adcq $0, %rcx
+ addq %r14, %rdx
+ adcq %rsi, %rbx
+ adcq %r13, %rcx
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq %r11, %rdx
+ mulxq %r9, %r9, %r14
+ movq %r11, %rdx
+ mulxq %r8, %r8, %rax
+ addq %r9, %rax
+ adcq $0, %r14
+ addq %rbx, %r8
+ adcq %rcx, %rax
+ adcq %rsi, %r14
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ imulq %r8, %rbp
+ movq %rbp, %rdx
+ mulxq %r15, %rcx, %rbx
+ mulxq %r10, %rdx, %rbp
+ addq %rcx, %rbp
+ adcq $0, %rbx
+ addq %r8, %rdx
+ adcq %rax, %rbp
+ adcq %r14, %rbx
+ adcq $0, %rsi
+ movq %rbp, %rax
+ subq %r10, %rax
+ movq %rbx, %rcx
+ sbbq %r15, %rcx
+ sbbq $0, %rsi
+ andl $1, %esi
+ cmovneq %rbx, %rcx
+ testb %sil, %sil
+ cmovneq %rbp, %rax
+ movq %rax, (%rdi)
+ movq %rcx, 8(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montNF2Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_montNF2Lbmi2: ## @mcl_fp_montNF2Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq (%rsi), %r8
+ movq 8(%rsi), %r9
+ movq (%rdx), %rax
+ movq 8(%rdx), %r11
+ movq %r9, %rdx
+ mulxq %rax, %r10, %rsi
+ movq %r8, %rdx
+ mulxq %rax, %r15, %r13
+ addq %r10, %r13
+ adcq $0, %rsi
+ movq -8(%rcx), %rbp
+ movq (%rcx), %r10
+ movq %r15, %rdx
+ imulq %rbp, %rdx
+ movq 8(%rcx), %r14
+ mulxq %r10, %rcx, %r12
+ addq %r15, %rcx
+ mulxq %r14, %rbx, %rcx
+ adcq %r13, %rbx
+ adcq $0, %rsi
+ addq %r12, %rbx
+ adcq %rcx, %rsi
+ movq %r11, %rdx
+ mulxq %r9, %r9, %rcx
+ movq %r11, %rdx
+ mulxq %r8, %r8, %rax
+ addq %r9, %rax
+ adcq $0, %rcx
+ addq %rbx, %r8
+ adcq %rsi, %rax
+ adcq $0, %rcx
+ imulq %r8, %rbp
+ movq %rbp, %rdx
+ mulxq %r14, %rbx, %rsi
+ mulxq %r10, %rbp, %rdx
+ addq %r8, %rbp
+ adcq %rax, %rbx
+ adcq $0, %rcx
+ addq %rdx, %rbx
+ adcq %rsi, %rcx
+ movq %rbx, %rax
+ subq %r10, %rax
+ movq %rcx, %rdx
+ sbbq %r14, %rdx
+ cmovsq %rbx, %rax
+ movq %rax, (%rdi)
+ cmovsq %rcx, %rdx
+ movq %rdx, 8(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montRed2Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_montRed2Lbmi2: ## @mcl_fp_montRed2Lbmi2
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq -8(%rdx), %r15
+ movq (%rdx), %r8
+ movq (%rsi), %r10
+ movq %r10, %rcx
+ imulq %r15, %rcx
+ movq 8(%rdx), %r9
+ movq %rcx, %rdx
+ mulxq %r9, %r11, %r14
+ mulxq %r8, %rcx, %rax
+ addq %r11, %rax
+ adcq $0, %r14
+ movq 24(%rsi), %r11
+ addq %r10, %rcx
+ adcq 8(%rsi), %rax
+ adcq 16(%rsi), %r14
+ adcq $0, %r11
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ imulq %rax, %r15
+ movq %r15, %rdx
+ mulxq %r9, %r10, %rbx
+ mulxq %r8, %rsi, %rdx
+ addq %r10, %rdx
+ adcq $0, %rbx
+ addq %rax, %rsi
+ adcq %r14, %rdx
+ adcq %r11, %rbx
+ adcq $0, %rcx
+ movq %rdx, %rax
+ subq %r8, %rax
+ movq %rbx, %rsi
+ sbbq %r9, %rsi
+ sbbq $0, %rcx
+ andl $1, %ecx
+ cmovneq %rbx, %rsi
+ testb %cl, %cl
+ cmovneq %rdx, %rax
+ movq %rax, (%rdi)
+ movq %rsi, 8(%rdi)
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_addPre2Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_addPre2Lbmi2: ## @mcl_fp_addPre2Lbmi2
+## BB#0:
+ movq (%rdx), %rax
+ movq 8(%rdx), %rcx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rcx
+ movq %rax, (%rdi)
+ movq %rcx, 8(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+
+ .globl _mcl_fp_subPre2Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_subPre2Lbmi2: ## @mcl_fp_subPre2Lbmi2
+## BB#0:
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ sbbq 8(%rdx), %rsi
+ movq %rcx, (%rdi)
+ movq %rsi, 8(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+
+ .globl _mcl_fp_shr1_2Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_shr1_2Lbmi2: ## @mcl_fp_shr1_2Lbmi2
+## BB#0:
+ movq (%rsi), %rax
+ movq 8(%rsi), %rcx
+ shrdq $1, %rcx, %rax
+ movq %rax, (%rdi)
+ shrq %rcx
+ movq %rcx, 8(%rdi)
+ retq
+
+ .globl _mcl_fp_add2Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_add2Lbmi2: ## @mcl_fp_add2Lbmi2
+## BB#0:
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne LBB29_2
+## BB#1: ## %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+LBB29_2: ## %carry
+ retq
+
+ .globl _mcl_fp_addNF2Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_addNF2Lbmi2: ## @mcl_fp_addNF2Lbmi2
+## BB#0:
+ movq (%rdx), %rax
+ movq 8(%rdx), %r8
+ addq (%rsi), %rax
+ adcq 8(%rsi), %r8
+ movq %rax, %rsi
+ subq (%rcx), %rsi
+ movq %r8, %rdx
+ sbbq 8(%rcx), %rdx
+ testq %rdx, %rdx
+ cmovsq %rax, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r8, %rdx
+ movq %rdx, 8(%rdi)
+ retq
+
+ .globl _mcl_fp_sub2Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_sub2Lbmi2: ## @mcl_fp_sub2Lbmi2
+## BB#0:
+ movq (%rsi), %rax
+ movq 8(%rsi), %r8
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r8
+ movq %rax, (%rdi)
+ movq %r8, 8(%rdi)
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne LBB31_2
+## BB#1: ## %nocarry
+ retq
+LBB31_2: ## %carry
+ movq 8(%rcx), %rdx
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %r8, %rdx
+ movq %rdx, 8(%rdi)
+ retq
+
+ .globl _mcl_fp_subNF2Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_subNF2Lbmi2: ## @mcl_fp_subNF2Lbmi2
+## BB#0:
+ movq (%rsi), %r8
+ movq 8(%rsi), %rsi
+ subq (%rdx), %r8
+ sbbq 8(%rdx), %rsi
+ movq %rsi, %rdx
+ sarq $63, %rdx
+ movq 8(%rcx), %rax
+ andq %rdx, %rax
+ andq (%rcx), %rdx
+ addq %r8, %rdx
+ movq %rdx, (%rdi)
+ adcq %rsi, %rax
+ movq %rax, 8(%rdi)
+ retq
+
+ .globl _mcl_fpDbl_add2Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_add2Lbmi2: ## @mcl_fpDbl_add2Lbmi2
+## BB#0:
+ movq 24(%rdx), %r8
+ movq 24(%rsi), %r9
+ movq 16(%rdx), %r10
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r10
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ adcq %r8, %r9
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %r10, %rdx
+ subq (%rcx), %rdx
+ movq %r9, %rsi
+ sbbq 8(%rcx), %rsi
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %r10, %rdx
+ movq %rdx, 16(%rdi)
+ testb %al, %al
+ cmovneq %r9, %rsi
+ movq %rsi, 24(%rdi)
+ retq
+
+ .globl _mcl_fpDbl_sub2Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_sub2Lbmi2: ## @mcl_fpDbl_sub2Lbmi2
+## BB#0:
+ movq 24(%rdx), %r8
+ movq 24(%rsi), %r9
+ movq 16(%rsi), %r10
+ movq (%rsi), %r11
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %r11
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %r10
+ movq %r11, (%rdi)
+ movq %rsi, 8(%rdi)
+ sbbq %r8, %r9
+ movl $0, %edx
+ sbbq $0, %rdx
+ andl $1, %edx
+ movq (%rcx), %rsi
+ cmoveq %rax, %rsi
+ testb %dl, %dl
+ cmovneq 8(%rcx), %rax
+ addq %r10, %rsi
+ movq %rsi, 16(%rdi)
+ adcq %r9, %rax
+ movq %rax, 24(%rdi)
+ retq
+
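+## 3-limb (192-bit) variants of the same primitive set.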
+ .globl _mcl_fp_mulUnitPre3Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_mulUnitPre3Lbmi2: ## @mcl_fp_mulUnitPre3Lbmi2
+## BB#0:
+ mulxq 16(%rsi), %r8, %rcx
+ mulxq 8(%rsi), %r9, %rax
+ mulxq (%rsi), %rdx, %rsi
+ movq %rdx, (%rdi)
+ addq %r9, %rsi
+ movq %rsi, 8(%rdi)
+ adcq %r8, %rax
+ movq %rax, 16(%rdi)
+ adcq $0, %rcx
+ movq %rcx, 24(%rdi)
+ retq
+
+ .globl _mcl_fpDbl_mulPre3Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_mulPre3Lbmi2: ## @mcl_fpDbl_mulPre3Lbmi2
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq %rdx, %r9
+ movq (%rsi), %r10
+ movq 8(%rsi), %r8
+ movq (%r9), %rax
+ movq %r10, %rdx
+ mulxq %rax, %rdx, %r14
+ movq 16(%rsi), %r11
+ movq %rdx, (%rdi)
+ movq %r11, %rdx
+ mulxq %rax, %rsi, %rbx
+ movq %r8, %rdx
+ mulxq %rax, %rax, %rcx
+ addq %r14, %rax
+ adcq %rsi, %rcx
+ adcq $0, %rbx
+ movq 8(%r9), %rsi
+ movq %r10, %rdx
+ mulxq %rsi, %rdx, %r14
+ addq %rax, %rdx
+ movq %rdx, 8(%rdi)
+ movq %r11, %rdx
+ mulxq %rsi, %rax, %r15
+ movq %r8, %rdx
+ mulxq %rsi, %rsi, %rdx
+ adcq %rcx, %rsi
+ adcq %rbx, %rax
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %r14, %rsi
+ adcq %rdx, %rax
+ adcq %r15, %rcx
+ movq 16(%r9), %rbx
+ movq %r10, %rdx
+ mulxq %rbx, %rdx, %r9
+ addq %rsi, %rdx
+ movq %rdx, 16(%rdi)
+ movq %r11, %rdx
+ mulxq %rbx, %rsi, %r10
+ movq %r8, %rdx
+ mulxq %rbx, %rbx, %rdx
+ adcq %rax, %rbx
+ adcq %rcx, %rsi
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %r9, %rbx
+ movq %rbx, 24(%rdi)
+ adcq %rdx, %rsi
+ movq %rsi, 32(%rdi)
+ adcq %r10, %rax
+ movq %rax, 40(%rdi)
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fpDbl_sqrPre3Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_sqrPre3Lbmi2: ## @mcl_fpDbl_sqrPre3Lbmi2
+## BB#0:
+ pushq %r14
+ pushq %rbx
+ movq 16(%rsi), %r10
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ movq %rcx, %rdx
+ mulxq %rcx, %rdx, %rax
+ movq %rdx, (%rdi)
+ movq %r10, %rdx
+ mulxq %rcx, %r11, %r8
+ movq %rsi, %rdx
+ mulxq %rcx, %rdx, %r14
+ addq %rdx, %rax
+ movq %r14, %rbx
+ adcq %r11, %rbx
+ movq %r8, %rcx
+ adcq $0, %rcx
+ addq %rdx, %rax
+ movq %rax, 8(%rdi)
+ movq %r10, %rdx
+ mulxq %rsi, %rax, %r9
+ movq %rsi, %rdx
+ mulxq %rsi, %rsi, %rdx
+ adcq %rbx, %rsi
+ adcq %rax, %rcx
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ addq %r14, %rsi
+ adcq %rdx, %rcx
+ adcq %r9, %rbx
+ addq %r11, %rsi
+ movq %rsi, 16(%rdi)
+ movq %r10, %rdx
+ mulxq %r10, %rsi, %rdx
+ adcq %rax, %rcx
+ adcq %rbx, %rsi
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %r8, %rcx
+ movq %rcx, 24(%rdi)
+ adcq %r9, %rsi
+ movq %rsi, 32(%rdi)
+ adcq %rdx, %rax
+ movq %rax, 40(%rdi)
+ popq %rbx
+ popq %r14
+ retq
+
+ .globl _mcl_fp_mont3Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_mont3Lbmi2: ## @mcl_fp_mont3Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %r14
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq 16(%rsi), %r12
+ movq (%r14), %rax
+ movq %r14, -16(%rsp) ## 8-byte Spill
+ movq %r12, %rdx
+ movq %r12, -24(%rsp) ## 8-byte Spill
+ mulxq %rax, %r11, %rbp
+ movq (%rsi), %r15
+ movq 8(%rsi), %rdx
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ mulxq %rax, %rbx, %r8
+ movq %r15, %rdx
+ movq %r15, -32(%rsp) ## 8-byte Spill
+ mulxq %rax, %r9, %rdi
+ addq %rbx, %rdi
+ adcq %r11, %r8
+ adcq $0, %rbp
+ movq -8(%rcx), %r13
+ movq %r9, %rdx
+ imulq %r13, %rdx
+ movq 8(%rcx), %rax
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ mulxq %rax, %r11, %r10
+ movq (%rcx), %rax
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ mulxq %rax, %rsi, %rbx
+ addq %r11, %rbx
+ movq 16(%rcx), %rax
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ mulxq %rax, %rcx, %rax
+ adcq %r10, %rcx
+ adcq $0, %rax
+ addq %r9, %rsi
+ adcq %rdi, %rbx
+ movq 8(%r14), %rdx
+ adcq %r8, %rcx
+ adcq %rbp, %rax
+ sbbq %r9, %r9
+ andl $1, %r9d
+ mulxq %r12, %r11, %rdi
+ movq -48(%rsp), %r12 ## 8-byte Reload
+ mulxq %r12, %r10, %rsi
+ mulxq %r15, %r8, %rbp
+ addq %r10, %rbp
+ adcq %r11, %rsi
+ adcq $0, %rdi
+ addq %rbx, %r8
+ adcq %rcx, %rbp
+ adcq %rax, %rsi
+ adcq %r9, %rdi
+ sbbq %r11, %r11
+ andl $1, %r11d
+ movq %r8, %rdx
+ imulq %r13, %rdx
+ movq -40(%rsp), %r14 ## 8-byte Reload
+ mulxq %r14, %r9, %rcx
+ mulxq -56(%rsp), %r10, %rax ## 8-byte Folded Reload
+ mulxq -64(%rsp), %rdx, %rbx ## 8-byte Folded Reload
+ addq %r10, %rbx
+ adcq %r9, %rax
+ adcq $0, %rcx
+ addq %r8, %rdx
+ adcq %rbp, %rbx
+ adcq %rsi, %rax
+ adcq %rdi, %rcx
+ adcq $0, %r11
+ movq -16(%rsp), %rdx ## 8-byte Reload
+ movq 16(%rdx), %rdx
+ mulxq -24(%rsp), %r9, %rsi ## 8-byte Folded Reload
+ mulxq %r12, %r10, %r15
+ mulxq -32(%rsp), %r8, %rdi ## 8-byte Folded Reload
+ addq %r10, %rdi
+ adcq %r9, %r15
+ adcq $0, %rsi
+ addq %rbx, %r8
+ adcq %rax, %rdi
+ adcq %rcx, %r15
+ adcq %r11, %rsi
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ imulq %r8, %r13
+ movq %r13, %rdx
+ mulxq %r14, %r9, %rbp
+ movq %r14, %r12
+ movq -56(%rsp), %r14 ## 8-byte Reload
+ mulxq %r14, %r10, %rax
+ movq -64(%rsp), %rcx ## 8-byte Reload
+ mulxq %rcx, %r11, %rdx
+ addq %r10, %rdx
+ adcq %r9, %rax
+ adcq $0, %rbp
+ addq %r8, %r11
+ adcq %rdi, %rdx
+ adcq %r15, %rax
+ adcq %rsi, %rbp
+ adcq $0, %rbx
+ movq %rdx, %rsi
+ subq %rcx, %rsi
+ movq %rax, %rdi
+ sbbq %r14, %rdi
+ movq %rbp, %rcx
+ sbbq %r12, %rcx
+ sbbq $0, %rbx
+ andl $1, %ebx
+ cmovneq %rbp, %rcx
+ testb %bl, %bl
+ cmovneq %rdx, %rsi
+ movq -8(%rsp), %rdx ## 8-byte Reload
+ movq %rsi, (%rdx)
+ cmovneq %rax, %rdi
+ movq %rdi, 8(%rdx)
+ movq %rcx, 16(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montNF3Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_montNF3Lbmi2: ## @mcl_fp_montNF3Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq %rdx, %r10
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rdi
+ movq %rdi, -32(%rsp) ## 8-byte Spill
+ movq (%r10), %rax
+ movq %r10, -16(%rsp) ## 8-byte Spill
+ movq %rdi, %rdx
+ mulxq %rax, %rbx, %r14
+ movq %rcx, %rdx
+ movq %rcx, -24(%rsp) ## 8-byte Spill
+ mulxq %rax, %r15, %r12
+ movq 16(%rsi), %r11
+ addq %rbx, %r12
+ movq %r11, %rdx
+ mulxq %rax, %rsi, %rbx
+ adcq %r14, %rsi
+ adcq $0, %rbx
+ movq -8(%r8), %r9
+ movq (%r8), %r14
+ movq %r15, %rdx
+ imulq %r9, %rdx
+ mulxq %r14, %rbp, %r13
+ addq %r15, %rbp
+ movq 8(%r8), %r15
+ mulxq %r15, %rdi, %rbp
+ adcq %r12, %rdi
+ movq 16(%r8), %r12
+ mulxq %r12, %rax, %r8
+ adcq %rsi, %rax
+ adcq $0, %rbx
+ addq %r13, %rdi
+ movq 8(%r10), %rdx
+ adcq %rbp, %rax
+ adcq %r8, %rbx
+ movq -32(%rsp), %r10 ## 8-byte Reload
+ mulxq %r10, %rsi, %r8
+ mulxq %rcx, %r13, %rbp
+ addq %rsi, %rbp
+ mulxq %r11, %rcx, %rsi
+ adcq %r8, %rcx
+ adcq $0, %rsi
+ addq %rdi, %r13
+ adcq %rax, %rbp
+ adcq %rbx, %rcx
+ adcq $0, %rsi
+ movq %r13, %rdx
+ imulq %r9, %rdx
+ mulxq %r14, %rdi, %rbx
+ addq %r13, %rdi
+ mulxq %r15, %rax, %rdi
+ adcq %rbp, %rax
+ mulxq %r12, %rbp, %rdx
+ adcq %rcx, %rbp
+ adcq $0, %rsi
+ addq %rbx, %rax
+ adcq %rdi, %rbp
+ adcq %rdx, %rsi
+ movq -16(%rsp), %rcx ## 8-byte Reload
+ movq 16(%rcx), %rdx
+ mulxq %r10, %rbx, %r8
+ mulxq -24(%rsp), %r10, %rdi ## 8-byte Folded Reload
+ addq %rbx, %rdi
+ mulxq %r11, %rcx, %rbx
+ adcq %r8, %rcx
+ adcq $0, %rbx
+ addq %rax, %r10
+ adcq %rbp, %rdi
+ adcq %rsi, %rcx
+ adcq $0, %rbx
+ imulq %r10, %r9
+ movq %r9, %rdx
+ mulxq %r14, %rdx, %r8
+ addq %r10, %rdx
+ movq %r9, %rdx
+ mulxq %r12, %rbp, %rsi
+ mulxq %r15, %rax, %rdx
+ adcq %rdi, %rax
+ adcq %rcx, %rbp
+ adcq $0, %rbx
+ addq %r8, %rax
+ adcq %rdx, %rbp
+ adcq %rsi, %rbx
+ movq %rax, %rcx
+ subq %r14, %rcx
+ movq %rbp, %rdx
+ sbbq %r15, %rdx
+ movq %rbx, %rsi
+ sbbq %r12, %rsi
+ movq %rsi, %rdi
+ sarq $63, %rdi
+ cmovsq %rax, %rcx
+ movq -8(%rsp), %rax ## 8-byte Reload
+ movq %rcx, (%rax)
+ cmovsq %rbp, %rdx
+ movq %rdx, 8(%rax)
+ cmovsq %rbx, %rsi
+ movq %rsi, 16(%rax)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montRed3Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_montRed3Lbmi2: ## @mcl_fp_montRed3Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %r15
+ movq (%rcx), %r9
+ movq (%rsi), %rbx
+ movq %rbx, %rdx
+ imulq %r15, %rdx
+ movq 16(%rcx), %rax
+ mulxq %rax, %r14, %r11
+ movq %rax, %rbp
+ movq %rbp, -16(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %r10
+ mulxq %r10, %rax, %r13
+ mulxq %r9, %rdx, %rcx
+ addq %rax, %rcx
+ adcq %r14, %r13
+ adcq $0, %r11
+ movq 40(%rsi), %r14
+ movq 32(%rsi), %r12
+ addq %rbx, %rdx
+ adcq 8(%rsi), %rcx
+ adcq 16(%rsi), %r13
+ adcq 24(%rsi), %r11
+ adcq $0, %r12
+ adcq $0, %r14
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq %rcx, %rdx
+ imulq %r15, %rdx
+ mulxq %rbp, %rbp, %rdi
+ mulxq %r10, %r8, %rbx
+ mulxq %r9, %rdx, %rax
+ addq %r8, %rax
+ adcq %rbp, %rbx
+ adcq $0, %rdi
+ addq %rcx, %rdx
+ adcq %r13, %rax
+ adcq %r11, %rbx
+ adcq %r12, %rdi
+ adcq $0, %r14
+ adcq $0, %rsi
+ imulq %rax, %r15
+ movq %r15, %rdx
+ movq -16(%rsp), %r13 ## 8-byte Reload
+ mulxq %r13, %r8, %rcx
+ movq %r15, %rdx
+ mulxq %r10, %r11, %r12
+ mulxq %r9, %r15, %rdx
+ addq %r11, %rdx
+ adcq %r8, %r12
+ adcq $0, %rcx
+ addq %rax, %r15
+ adcq %rbx, %rdx
+ adcq %rdi, %r12
+ adcq %r14, %rcx
+ adcq $0, %rsi
+ movq %rdx, %rax
+ subq %r9, %rax
+ movq %r12, %rdi
+ sbbq %r10, %rdi
+ movq %rcx, %rbp
+ sbbq %r13, %rbp
+ sbbq $0, %rsi
+ andl $1, %esi
+ cmovneq %rcx, %rbp
+ testb %sil, %sil
+ cmovneq %rdx, %rax
+ movq -8(%rsp), %rcx ## 8-byte Reload
+ movq %rax, (%rcx)
+ cmovneq %r12, %rdi
+ movq %rdi, 8(%rcx)
+ movq %rbp, 16(%rcx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_addPre3Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_addPre3Lbmi2: ## @mcl_fp_addPre3Lbmi2
+## BB#0:
+ movq 16(%rdx), %rax
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rax
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+
+ .globl _mcl_fp_subPre3Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_subPre3Lbmi2: ## @mcl_fp_subPre3Lbmi2
+## BB#0:
+ movq 16(%rsi), %r8
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %r8
+ movq %rcx, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %r8, 16(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+
+ .globl _mcl_fp_shr1_3Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_shr1_3Lbmi2: ## @mcl_fp_shr1_3Lbmi2
+## BB#0:
+ movq 16(%rsi), %rax
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rdx
+ shrdq $1, %rdx, %rcx
+ movq %rcx, (%rdi)
+ shrdq $1, %rax, %rdx
+ movq %rdx, 8(%rdi)
+ shrq %rax
+ movq %rax, 16(%rdi)
+ retq
+
+ .globl _mcl_fp_add3Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_add3Lbmi2: ## @mcl_fp_add3Lbmi2
+## BB#0:
+ movq 16(%rdx), %r8
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r8
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r8, 16(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne LBB44_2
+## BB#1: ## %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r8, 16(%rdi)
+LBB44_2: ## %carry
+ retq
+
+ .globl _mcl_fp_addNF3Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_addNF3Lbmi2: ## @mcl_fp_addNF3Lbmi2
+## BB#0:
+ movq 16(%rdx), %r8
+ movq (%rdx), %r10
+ movq 8(%rdx), %r9
+ addq (%rsi), %r10
+ adcq 8(%rsi), %r9
+ adcq 16(%rsi), %r8
+ movq %r10, %rsi
+ subq (%rcx), %rsi
+ movq %r9, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r8, %rax
+ sbbq 16(%rcx), %rax
+ movq %rax, %rcx
+ sarq $63, %rcx
+ cmovsq %r10, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r9, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r8, %rax
+ movq %rax, 16(%rdi)
+ retq
+
+ .globl _mcl_fp_sub3Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_sub3Lbmi2: ## @mcl_fp_sub3Lbmi2
+## BB#0:
+ movq 16(%rsi), %r8
+ movq (%rsi), %rax
+ movq 8(%rsi), %r9
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r9
+ sbbq 16(%rdx), %r8
+ movq %rax, (%rdi)
+ movq %r9, 8(%rdi)
+ movq %r8, 16(%rdi)
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne LBB46_2
+## BB#1: ## %nocarry
+ retq
+LBB46_2: ## %carry
+ movq 8(%rcx), %rdx
+ movq 16(%rcx), %rsi
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %r9, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r8, %rsi
+ movq %rsi, 16(%rdi)
+ retq
+
+ .globl _mcl_fp_subNF3Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_subNF3Lbmi2: ## @mcl_fp_subNF3Lbmi2
+## BB#0:
+ movq 16(%rsi), %r10
+ movq (%rsi), %r8
+ movq 8(%rsi), %r9
+ subq (%rdx), %r8
+ sbbq 8(%rdx), %r9
+ sbbq 16(%rdx), %r10
+ movq %r10, %rdx
+ sarq $63, %rdx
+ movq %rdx, %rsi
+ shldq $1, %r10, %rsi
+ andq (%rcx), %rsi
+ movq 16(%rcx), %rax
+ andq %rdx, %rax
+ andq 8(%rcx), %rdx
+ addq %r8, %rsi
+ movq %rsi, (%rdi)
+ adcq %r9, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r10, %rax
+ movq %rax, 16(%rdi)
+ retq
+
+ .globl _mcl_fpDbl_add3Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_add3Lbmi2: ## @mcl_fpDbl_add3Lbmi2
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq 40(%rdx), %r10
+ movq 40(%rsi), %r8
+ movq 32(%rdx), %r11
+ movq 24(%rdx), %r14
+ movq 24(%rsi), %r15
+ movq 32(%rsi), %r9
+ movq 16(%rdx), %rbx
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rbx
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rbx, 16(%rdi)
+ adcq %r14, %r15
+ adcq %r11, %r9
+ adcq %r10, %r8
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %r15, %rdx
+ subq (%rcx), %rdx
+ movq %r9, %rsi
+ sbbq 8(%rcx), %rsi
+ movq %r8, %rbx
+ sbbq 16(%rcx), %rbx
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %r15, %rdx
+ movq %rdx, 24(%rdi)
+ testb %al, %al
+ cmovneq %r9, %rsi
+ movq %rsi, 32(%rdi)
+ cmovneq %r8, %rbx
+ movq %rbx, 40(%rdi)
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fpDbl_sub3Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_sub3Lbmi2: ## @mcl_fpDbl_sub3Lbmi2
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 40(%rdx), %r10
+ movq 40(%rsi), %r8
+ movq 32(%rsi), %r9
+ movq 24(%rsi), %r11
+ movq 16(%rsi), %r14
+ movq (%rsi), %rbx
+ movq 8(%rsi), %rax
+ xorl %esi, %esi
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %rax
+ movq 24(%rdx), %r15
+ movq 32(%rdx), %r12
+ sbbq 16(%rdx), %r14
+ movq %rbx, (%rdi)
+ movq %rax, 8(%rdi)
+ movq %r14, 16(%rdi)
+ sbbq %r15, %r11
+ sbbq %r12, %r9
+ sbbq %r10, %r8
+ movl $0, %eax
+ sbbq $0, %rax
+ andl $1, %eax
+ movq (%rcx), %rdx
+ cmoveq %rsi, %rdx
+ testb %al, %al
+ movq 16(%rcx), %rax
+ cmoveq %rsi, %rax
+ cmovneq 8(%rcx), %rsi
+ addq %r11, %rdx
+ movq %rdx, 24(%rdi)
+ adcq %r9, %rsi
+ movq %rsi, 32(%rdi)
+ adcq %r8, %rax
+ movq %rax, 40(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+
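+## 4-limb (256-bit) variants of the same primitive set.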
+ .globl _mcl_fp_mulUnitPre4Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_mulUnitPre4Lbmi2: ## @mcl_fp_mulUnitPre4Lbmi2
+## BB#0:
+ mulxq 24(%rsi), %r8, %r11
+ mulxq 16(%rsi), %r9, %rax
+ mulxq 8(%rsi), %r10, %rcx
+ mulxq (%rsi), %rdx, %rsi
+ movq %rdx, (%rdi)
+ addq %r10, %rsi
+ movq %rsi, 8(%rdi)
+ adcq %r9, %rcx
+ movq %rcx, 16(%rdi)
+ adcq %r8, %rax
+ movq %rax, 24(%rdi)
+ adcq $0, %r11
+ movq %r11, 32(%rdi)
+ retq
+
+ .globl _mcl_fpDbl_mulPre4Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_mulPre4Lbmi2: ## @mcl_fpDbl_mulPre4Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq (%rsi), %r14
+ movq 8(%rsi), %r10
+ movq (%rdx), %rcx
+ movq %rdx, %rbp
+ movq %r14, %rdx
+ mulxq %rcx, %rdx, %r15
+ movq 24(%rsi), %r11
+ movq 16(%rsi), %r9
+ movq %rdx, (%rdi)
+ movq %r10, %rdx
+ mulxq %rcx, %rbx, %r12
+ addq %r15, %rbx
+ movq %r9, %rdx
+ mulxq %rcx, %r13, %r15
+ adcq %r12, %r13
+ movq %r11, %rdx
+ mulxq %rcx, %rcx, %r12
+ adcq %r15, %rcx
+ adcq $0, %r12
+ movq 8(%rbp), %rax
+ movq %r14, %rdx
+ mulxq %rax, %r8, %rdx
+ movq %rdx, -8(%rsp) ## 8-byte Spill
+ addq %rbx, %r8
+ movq %r10, %rdx
+ mulxq %rax, %r15, %rdx
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ adcq %r13, %r15
+ movq %r9, %rdx
+ mulxq %rax, %rbx, %r13
+ adcq %rcx, %rbx
+ movq %r11, %rdx
+ mulxq %rax, %rcx, %rax
+ adcq %r12, %rcx
+ sbbq %r12, %r12
+ andl $1, %r12d
+ addq -8(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -16(%rsp), %rbx ## 8-byte Folded Reload
+ adcq %r13, %rcx
+ movq %r8, 8(%rdi)
+ adcq %rax, %r12
+ movq %rbp, %r13
+ movq 16(%r13), %rax
+ movq %r14, %rdx
+ mulxq %rax, %rdx, %r8
+ addq %r15, %rdx
+ movq %rdx, 16(%rdi)
+ movq %r10, %rdx
+ mulxq %rax, %rbp, %r10
+ adcq %rbx, %rbp
+ movq %r11, %rdx
+ mulxq %rax, %r14, %r11
+ movq %r9, %rdx
+ mulxq %rax, %r15, %rdx
+ adcq %rcx, %r15
+ adcq %r12, %r14
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %r8, %rbp
+ adcq %r10, %r15
+ adcq %rdx, %r14
+ adcq %r11, %rcx
+ movq 24(%r13), %rdx
+ mulxq 24(%rsi), %rbx, %r8
+ mulxq (%rsi), %rax, %r9
+ addq %rbp, %rax
+ movq %rax, 24(%rdi)
+ mulxq 16(%rsi), %rbp, %rax
+ mulxq 8(%rsi), %rsi, %rdx
+ adcq %r15, %rsi
+ adcq %r14, %rbp
+ adcq %rcx, %rbx
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %r9, %rsi
+ movq %rsi, 32(%rdi)
+ adcq %rdx, %rbp
+ movq %rbp, 40(%rdi)
+ adcq %rax, %rbx
+ movq %rbx, 48(%rdi)
+ adcq %r8, %rcx
+ movq %rcx, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_sqrPre4Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_sqrPre4Lbmi2: ## @mcl_fpDbl_sqrPre4Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 24(%rsi), %r8
+ movq 16(%rsi), %r9
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rax
+ movq %rcx, %rdx
+ mulxq %rcx, %rdx, %r11
+ movq %rdx, (%rdi)
+ movq %r9, %rdx
+ mulxq %rcx, %rbp, %r10
+ movq %rbp, -16(%rsp) ## 8-byte Spill
+ movq %r10, -8(%rsp) ## 8-byte Spill
+ movq %rax, %rdx
+ mulxq %rcx, %r12, %r15
+ addq %r12, %r11
+ movq %r15, %rbx
+ adcq %rbp, %rbx
+ movq %r8, %rdx
+ mulxq %rcx, %rcx, %r13
+ adcq %r10, %rcx
+ adcq $0, %r13
+ addq %r12, %r11
+ movq %rax, %rdx
+ mulxq %rax, %rbp, %r12
+ adcq %rbx, %rbp
+ movq %r8, %rdx
+ mulxq %rax, %r10, %rbx
+ movq %r9, %rdx
+ mulxq %rax, %r14, %rdx
+ adcq %r14, %rcx
+ adcq %r13, %r10
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %r15, %rbp
+ adcq %r12, %rcx
+ adcq %rdx, %r10
+ movq %rdx, %r12
+ adcq %rbx, %rax
+ movq %r11, 8(%rdi)
+ addq -16(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 16(%rdi)
+ movq %r8, %rdx
+ mulxq %r9, %r11, %r8
+ movq %r9, %rdx
+ mulxq %r9, %r15, %rdx
+ adcq %r14, %rcx
+ adcq %r10, %r15
+ adcq %rax, %r11
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq -8(%rsp), %rcx ## 8-byte Folded Reload
+ adcq %r12, %r15
+ adcq %rdx, %r11
+ adcq %r8, %rax
+ movq 24(%rsi), %rdx
+ mulxq 16(%rsi), %rbx, %r8
+ mulxq 8(%rsi), %rbp, %r9
+ mulxq (%rsi), %rsi, %r10
+ addq %rcx, %rsi
+ movq %rsi, 24(%rdi)
+ adcq %r15, %rbp
+ adcq %r11, %rbx
+ mulxq %rdx, %rdx, %rcx
+ adcq %rax, %rdx
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %r10, %rbp
+ movq %rbp, 32(%rdi)
+ adcq %r9, %rbx
+ movq %rbx, 40(%rdi)
+ adcq %r8, %rdx
+ movq %rdx, 48(%rdi)
+ adcq %rcx, %rax
+ movq %rax, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_mont4Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_mont4Lbmi2: ## @mcl_fp_mont4Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %r13
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq 24(%rsi), %rdi
+ movq %rdi, -32(%rsp) ## 8-byte Spill
+ movq (%r13), %rax
+ movq %r13, -16(%rsp) ## 8-byte Spill
+ movq %rdi, %rdx
+ mulxq %rax, %rdi, %r11
+ movq 16(%rsi), %rdx
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ mulxq %rax, %rbx, %r10
+ movq (%rsi), %rbp
+ movq %rbp, -48(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rdx
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ mulxq %rax, %rsi, %r12
+ movq %rbp, %rdx
+ mulxq %rax, %r14, %r8
+ addq %rsi, %r8
+ adcq %rbx, %r12
+ adcq %rdi, %r10
+ adcq $0, %r11
+ movq -8(%rcx), %rax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %r14, %rdx
+ imulq %rax, %rdx
+ movq 24(%rcx), %rax
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ mulxq %rax, %r15, %rax
+ movq 16(%rcx), %rsi
+ movq %rsi, -80(%rsp) ## 8-byte Spill
+ mulxq %rsi, %r9, %rsi
+ movq (%rcx), %rbp
+ movq %rbp, -24(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, -72(%rsp) ## 8-byte Spill
+ mulxq %rcx, %rdi, %rcx
+ mulxq %rbp, %rdx, %rbx
+ addq %rdi, %rbx
+ adcq %r9, %rcx
+ adcq %r15, %rsi
+ adcq $0, %rax
+ addq %r14, %rdx
+ adcq %r8, %rbx
+ adcq %r12, %rcx
+ adcq %r10, %rsi
+ adcq %r11, %rax
+ sbbq %rdi, %rdi
+ andl $1, %edi
+ movq 8(%r13), %rdx
+ mulxq -32(%rsp), %r12, %r10 ## 8-byte Folded Reload
+ mulxq -40(%rsp), %r15, %r11 ## 8-byte Folded Reload
+ mulxq -56(%rsp), %r14, %rbp ## 8-byte Folded Reload
+ mulxq -48(%rsp), %r8, %r9 ## 8-byte Folded Reload
+ addq %r14, %r9
+ adcq %r15, %rbp
+ adcq %r12, %r11
+ adcq $0, %r10
+ addq %rbx, %r8
+ adcq %rcx, %r9
+ adcq %rsi, %rbp
+ adcq %rax, %r11
+ adcq %rdi, %r10
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ movq %r8, %rdx
+ imulq -88(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -64(%rsp), %r14, %rcx ## 8-byte Folded Reload
+ mulxq -80(%rsp), %r15, %rsi ## 8-byte Folded Reload
+ mulxq -72(%rsp), %r12, %rax ## 8-byte Folded Reload
+ movq -24(%rsp), %r13 ## 8-byte Reload
+ mulxq %r13, %rdx, %rdi
+ addq %r12, %rdi
+ adcq %r15, %rax
+ adcq %r14, %rsi
+ adcq $0, %rcx
+ addq %r8, %rdx
+ adcq %r9, %rdi
+ adcq %rbp, %rax
+ adcq %r11, %rsi
+ adcq %r10, %rcx
+ adcq $0, %rbx
+ movq -16(%rsp), %rdx ## 8-byte Reload
+ movq 16(%rdx), %rdx
+ mulxq -32(%rsp), %r14, %r11 ## 8-byte Folded Reload
+ mulxq -40(%rsp), %r15, %rbp ## 8-byte Folded Reload
+ mulxq -56(%rsp), %r12, %r8 ## 8-byte Folded Reload
+ mulxq -48(%rsp), %r9, %r10 ## 8-byte Folded Reload
+ addq %r12, %r10
+ adcq %r15, %r8
+ adcq %r14, %rbp
+ adcq $0, %r11
+ addq %rdi, %r9
+ adcq %rax, %r10
+ adcq %rsi, %r8
+ adcq %rcx, %rbp
+ adcq %rbx, %r11
+ sbbq %rax, %rax
+ movq %r9, %rdx
+ imulq -88(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -72(%rsp), %rcx, %rsi ## 8-byte Folded Reload
+ mulxq %r13, %r14, %rdi
+ addq %rcx, %rdi
+ mulxq -80(%rsp), %rcx, %r15 ## 8-byte Folded Reload
+ adcq %rsi, %rcx
+ movq -64(%rsp), %r13 ## 8-byte Reload
+ mulxq %r13, %rbx, %rsi
+ adcq %r15, %rbx
+ adcq $0, %rsi
+ andl $1, %eax
+ addq %r9, %r14
+ adcq %r10, %rdi
+ adcq %r8, %rcx
+ adcq %rbp, %rbx
+ adcq %r11, %rsi
+ adcq $0, %rax
+ movq -16(%rsp), %rdx ## 8-byte Reload
+ movq 24(%rdx), %rdx
+ mulxq -32(%rsp), %r11, %r8 ## 8-byte Folded Reload
+ mulxq -40(%rsp), %r15, %r9 ## 8-byte Folded Reload
+ mulxq -56(%rsp), %r12, %r14 ## 8-byte Folded Reload
+ mulxq -48(%rsp), %r10, %rbp ## 8-byte Folded Reload
+ addq %r12, %rbp
+ adcq %r15, %r14
+ adcq %r11, %r9
+ adcq $0, %r8
+ addq %rdi, %r10
+ adcq %rcx, %rbp
+ adcq %rbx, %r14
+ adcq %rsi, %r9
+ adcq %rax, %r8
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq -88(%rsp), %rdx ## 8-byte Reload
+ imulq %r10, %rdx
+ mulxq %r13, %rcx, %rdi
+ movq %rcx, -88(%rsp) ## 8-byte Spill
+ mulxq -80(%rsp), %r15, %rsi ## 8-byte Folded Reload
+ movq -72(%rsp), %rbx ## 8-byte Reload
+ mulxq %rbx, %r12, %rcx
+ movq -24(%rsp), %r11 ## 8-byte Reload
+ mulxq %r11, %rdx, %r13
+ addq %r12, %r13
+ adcq %r15, %rcx
+ adcq -88(%rsp), %rsi ## 8-byte Folded Reload
+ adcq $0, %rdi
+ addq %r10, %rdx
+ adcq %rbp, %r13
+ adcq %r14, %rcx
+ adcq %r9, %rsi
+ adcq %r8, %rdi
+ adcq $0, %rax
+ movq %r13, %rdx
+ subq %r11, %rdx
+ movq %rcx, %rbp
+ sbbq %rbx, %rbp
+ movq %rsi, %r8
+ sbbq -80(%rsp), %r8 ## 8-byte Folded Reload
+ movq %rdi, %rbx
+ sbbq -64(%rsp), %rbx ## 8-byte Folded Reload
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %rdi, %rbx
+ testb %al, %al
+ cmovneq %r13, %rdx
+ movq -8(%rsp), %rax ## 8-byte Reload
+ movq %rdx, (%rax)
+ cmovneq %rcx, %rbp
+ movq %rbp, 8(%rax)
+ cmovneq %rsi, %r8
+ movq %r8, 16(%rax)
+ movq %rbx, 24(%rax)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montNF4Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_montNF4Lbmi2: ## @mcl_fp_montNF4Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq (%rsi), %rdi
+ movq %rdi, -56(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rbp
+ movq %rbp, -64(%rsp) ## 8-byte Spill
+ movq (%rdx), %rax
+ movq %rdx, %r15
+ movq %r15, -24(%rsp) ## 8-byte Spill
+ movq %rbp, %rdx
+ mulxq %rax, %rbp, %r9
+ movq %rdi, %rdx
+ mulxq %rax, %r12, %rbx
+ movq 16(%rsi), %rdx
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ addq %rbp, %rbx
+ mulxq %rax, %r14, %rbp
+ adcq %r9, %r14
+ movq 24(%rsi), %rdx
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ mulxq %rax, %r8, %rdi
+ adcq %rbp, %r8
+ adcq $0, %rdi
+ movq -8(%rcx), %r13
+ movq (%rcx), %rax
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ movq %r12, %rdx
+ imulq %r13, %rdx
+ mulxq %rax, %rax, %r11
+ addq %r12, %rax
+ movq 8(%rcx), %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ mulxq %rax, %rbp, %r10
+ adcq %rbx, %rbp
+ movq 16(%rcx), %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ mulxq %rax, %rsi, %rbx
+ adcq %r14, %rsi
+ movq 24(%rcx), %rax
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ mulxq %rax, %rcx, %rdx
+ adcq %r8, %rcx
+ adcq $0, %rdi
+ addq %r11, %rbp
+ adcq %r10, %rsi
+ adcq %rbx, %rcx
+ adcq %rdx, %rdi
+ movq 8(%r15), %rdx
+ movq -64(%rsp), %r12 ## 8-byte Reload
+ mulxq %r12, %rbx, %r9
+ movq -56(%rsp), %r15 ## 8-byte Reload
+ mulxq %r15, %r10, %r11
+ addq %rbx, %r11
+ mulxq -40(%rsp), %rax, %r8 ## 8-byte Folded Reload
+ adcq %r9, %rax
+ mulxq -80(%rsp), %r9, %rbx ## 8-byte Folded Reload
+ adcq %r8, %r9
+ adcq $0, %rbx
+ addq %rbp, %r10
+ adcq %rsi, %r11
+ adcq %rcx, %rax
+ adcq %rdi, %r9
+ adcq $0, %rbx
+ movq %r10, %rdx
+ imulq %r13, %rdx
+ movq -48(%rsp), %r14 ## 8-byte Reload
+ mulxq %r14, %rcx, %r8
+ addq %r10, %rcx
+ mulxq -16(%rsp), %r10, %rdi ## 8-byte Folded Reload
+ adcq %r11, %r10
+ mulxq -32(%rsp), %rcx, %rsi ## 8-byte Folded Reload
+ adcq %rax, %rcx
+ mulxq -72(%rsp), %rax, %rdx ## 8-byte Folded Reload
+ adcq %r9, %rax
+ adcq $0, %rbx
+ addq %r8, %r10
+ adcq %rdi, %rcx
+ adcq %rsi, %rax
+ adcq %rdx, %rbx
+ movq -24(%rsp), %rdx ## 8-byte Reload
+ movq 16(%rdx), %rdx
+ mulxq %r12, %rsi, %r8
+ mulxq %r15, %r11, %rbp
+ addq %rsi, %rbp
+ movq -40(%rsp), %r12 ## 8-byte Reload
+ mulxq %r12, %rdi, %r9
+ adcq %r8, %rdi
+ mulxq -80(%rsp), %r8, %rsi ## 8-byte Folded Reload
+ adcq %r9, %r8
+ adcq $0, %rsi
+ addq %r10, %r11
+ adcq %rcx, %rbp
+ adcq %rax, %rdi
+ adcq %rbx, %r8
+ adcq $0, %rsi
+ movq %r11, %rdx
+ imulq %r13, %rdx
+ mulxq %r14, %rax, %r10
+ addq %r11, %rax
+ movq -16(%rsp), %r14 ## 8-byte Reload
+ mulxq %r14, %r9, %rbx
+ adcq %rbp, %r9
+ movq -32(%rsp), %r15 ## 8-byte Reload
+ mulxq %r15, %rax, %rbp
+ adcq %rdi, %rax
+ mulxq -72(%rsp), %rcx, %rdx ## 8-byte Folded Reload
+ adcq %r8, %rcx
+ adcq $0, %rsi
+ addq %r10, %r9
+ adcq %rbx, %rax
+ adcq %rbp, %rcx
+ adcq %rdx, %rsi
+ movq -24(%rsp), %rdx ## 8-byte Reload
+ movq 24(%rdx), %rdx
+ mulxq -64(%rsp), %rbx, %r8 ## 8-byte Folded Reload
+ mulxq -56(%rsp), %r11, %rbp ## 8-byte Folded Reload
+ addq %rbx, %rbp
+ mulxq %r12, %rdi, %r10
+ adcq %r8, %rdi
+ mulxq -80(%rsp), %r8, %rbx ## 8-byte Folded Reload
+ adcq %r10, %r8
+ adcq $0, %rbx
+ addq %r9, %r11
+ adcq %rax, %rbp
+ adcq %rcx, %rdi
+ adcq %rsi, %r8
+ adcq $0, %rbx
+ imulq %r11, %r13
+ movq %r13, %rdx
+ movq -48(%rsp), %r12 ## 8-byte Reload
+ mulxq %r12, %rcx, %r9
+ addq %r11, %rcx
+ mulxq %r14, %r11, %r10
+ adcq %rbp, %r11
+ movq %r15, %rsi
+ mulxq %rsi, %rax, %rcx
+ adcq %rdi, %rax
+ movq -72(%rsp), %rbp ## 8-byte Reload
+ mulxq %rbp, %r15, %rdx
+ adcq %r8, %r15
+ adcq $0, %rbx
+ addq %r9, %r11
+ adcq %r10, %rax
+ adcq %rcx, %r15
+ adcq %rdx, %rbx
+ movq %r11, %rcx
+ subq %r12, %rcx
+ movq %rax, %rdx
+ sbbq %r14, %rdx
+ movq %r15, %rdi
+ sbbq %rsi, %rdi
+ movq %rbx, %rsi
+ sbbq %rbp, %rsi
+ cmovsq %r11, %rcx
+ movq -8(%rsp), %rbp ## 8-byte Reload
+ movq %rcx, (%rbp)
+ cmovsq %rax, %rdx
+ movq %rdx, 8(%rbp)
+ cmovsq %r15, %rdi
+ movq %rdi, 16(%rbp)
+ cmovsq %rbx, %rsi
+ movq %rsi, 24(%rbp)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montRed4Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_montRed4Lbmi2: ## @mcl_fp_montRed4Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %r13
+ movq (%rcx), %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ movq (%rsi), %r10
+ movq %r10, %rdx
+ imulq %r13, %rdx
+ movq 24(%rcx), %rdi
+ mulxq %rdi, %r9, %r15
+ movq %rdi, %r14
+ movq %r14, -40(%rsp) ## 8-byte Spill
+ movq 16(%rcx), %rdi
+ movq %rdi, -48(%rsp) ## 8-byte Spill
+ mulxq %rdi, %rdi, %rbx
+ movq 8(%rcx), %rcx
+ movq %rcx, -56(%rsp) ## 8-byte Spill
+ mulxq %rcx, %rcx, %r8
+ mulxq %rax, %rdx, %rbp
+ addq %rcx, %rbp
+ adcq %rdi, %r8
+ adcq %r9, %rbx
+ adcq $0, %r15
+ movq 56(%rsi), %r11
+ movq 48(%rsi), %rcx
+ addq %r10, %rdx
+ movq 40(%rsi), %r12
+ adcq 8(%rsi), %rbp
+ adcq 16(%rsi), %r8
+ adcq 24(%rsi), %rbx
+ adcq 32(%rsi), %r15
+ adcq $0, %r12
+ adcq $0, %rcx
+ movq %rcx, -64(%rsp) ## 8-byte Spill
+ adcq $0, %r11
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq %rbp, %rdx
+ imulq %r13, %rdx
+ mulxq %r14, %rax, %r9
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ mulxq -48(%rsp), %r14, %rdi ## 8-byte Folded Reload
+ mulxq -56(%rsp), %r10, %rcx ## 8-byte Folded Reload
+ mulxq -32(%rsp), %rdx, %rax ## 8-byte Folded Reload
+ addq %r10, %rax
+ adcq %r14, %rcx
+ adcq -72(%rsp), %rdi ## 8-byte Folded Reload
+ adcq $0, %r9
+ addq %rbp, %rdx
+ adcq %r8, %rax
+ adcq %rbx, %rcx
+ adcq %r15, %rdi
+ adcq %r12, %r9
+ adcq $0, -64(%rsp) ## 8-byte Folded Spill
+ adcq $0, %r11
+ movq %r11, -72(%rsp) ## 8-byte Spill
+ adcq $0, %rsi
+ movq %rax, %rdx
+ imulq %r13, %rdx
+ movq -40(%rsp), %r15 ## 8-byte Reload
+ mulxq %r15, %rbp, %r8
+ movq %rbp, -16(%rsp) ## 8-byte Spill
+ movq -48(%rsp), %r11 ## 8-byte Reload
+ mulxq %r11, %rbx, %r10
+ movq %rbx, -24(%rsp) ## 8-byte Spill
+ mulxq -56(%rsp), %r12, %rbp ## 8-byte Folded Reload
+ movq -32(%rsp), %r14 ## 8-byte Reload
+ mulxq %r14, %rdx, %rbx
+ addq %r12, %rbx
+ adcq -24(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -16(%rsp), %r10 ## 8-byte Folded Reload
+ adcq $0, %r8
+ addq %rax, %rdx
+ adcq %rcx, %rbx
+ adcq %rdi, %rbp
+ adcq %r9, %r10
+ adcq -64(%rsp), %r8 ## 8-byte Folded Reload
+ adcq $0, -72(%rsp) ## 8-byte Folded Spill
+ adcq $0, %rsi
+ imulq %rbx, %r13
+ movq %r13, %rdx
+ mulxq %r15, %rax, %rdi
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq %r13, %rdx
+ mulxq %r11, %r9, %rax
+ movq -56(%rsp), %r11 ## 8-byte Reload
+ mulxq %r11, %r12, %rcx
+ mulxq %r14, %r15, %r13
+ addq %r12, %r13
+ adcq %r9, %rcx
+ adcq -64(%rsp), %rax ## 8-byte Folded Reload
+ adcq $0, %rdi
+ addq %rbx, %r15
+ adcq %rbp, %r13
+ adcq %r10, %rcx
+ adcq %r8, %rax
+ adcq -72(%rsp), %rdi ## 8-byte Folded Reload
+ adcq $0, %rsi
+ movq %r13, %rdx
+ subq %r14, %rdx
+ movq %rcx, %rbp
+ sbbq %r11, %rbp
+ movq %rax, %r8
+ sbbq -48(%rsp), %r8 ## 8-byte Folded Reload
+ movq %rdi, %rbx
+ sbbq -40(%rsp), %rbx ## 8-byte Folded Reload
+ sbbq $0, %rsi
+ andl $1, %esi
+ cmovneq %rdi, %rbx
+ testb %sil, %sil
+ cmovneq %r13, %rdx
+ movq -8(%rsp), %rsi ## 8-byte Reload
+ movq %rdx, (%rsi)
+ cmovneq %rcx, %rbp
+ movq %rbp, 8(%rsi)
+ cmovneq %rax, %r8
+ movq %r8, 16(%rsi)
+ movq %rbx, 24(%rsi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_addPre4Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_addPre4Lbmi2: ## @mcl_fp_addPre4Lbmi2
+## BB#0:
+ movq 24(%rdx), %r8
+ movq 24(%rsi), %r9
+ movq 16(%rdx), %rax
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rax
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ adcq %r8, %r9
+ movq %r9, 24(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+
+ .globl _mcl_fp_subPre4Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_subPre4Lbmi2: ## @mcl_fp_subPre4Lbmi2
+## BB#0:
+ movq 24(%rdx), %r8
+ movq 24(%rsi), %r9
+ movq 16(%rsi), %r10
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %r10
+ movq %rcx, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %r10, 16(%rdi)
+ sbbq %r8, %r9
+ movq %r9, 24(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+
+ .globl _mcl_fp_shr1_4Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_shr1_4Lbmi2: ## @mcl_fp_shr1_4Lbmi2
+## BB#0:
+ movq 24(%rsi), %rax
+ movq 16(%rsi), %rcx
+ movq (%rsi), %rdx
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rdx
+ movq %rdx, (%rdi)
+ shrdq $1, %rcx, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rax, %rcx
+ movq %rcx, 16(%rdi)
+ shrq %rax
+ movq %rax, 24(%rdi)
+ retq
+
+ .globl _mcl_fp_add4Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_add4Lbmi2: ## @mcl_fp_add4Lbmi2
+## BB#0:
+ movq 24(%rdx), %r10
+ movq 24(%rsi), %r8
+ movq 16(%rdx), %r9
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r9
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r9, 16(%rdi)
+ adcq %r10, %r8
+ movq %r8, 24(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r9
+ sbbq 24(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne LBB59_2
+## BB#1: ## %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r9, 16(%rdi)
+ movq %r8, 24(%rdi)
+LBB59_2: ## %carry
+ retq
+
+ .globl _mcl_fp_addNF4Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_addNF4Lbmi2: ## @mcl_fp_addNF4Lbmi2
+## BB#0:
+ pushq %rbx
+ movq 24(%rdx), %r8
+ movq 16(%rdx), %r9
+ movq (%rdx), %r11
+ movq 8(%rdx), %r10
+ addq (%rsi), %r11
+ adcq 8(%rsi), %r10
+ adcq 16(%rsi), %r9
+ adcq 24(%rsi), %r8
+ movq %r11, %rsi
+ subq (%rcx), %rsi
+ movq %r10, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r9, %rax
+ sbbq 16(%rcx), %rax
+ movq %r8, %rbx
+ sbbq 24(%rcx), %rbx
+ testq %rbx, %rbx
+ cmovsq %r11, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r10, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r9, %rax
+ movq %rax, 16(%rdi)
+ cmovsq %r8, %rbx
+ movq %rbx, 24(%rdi)
+ popq %rbx
+ retq
+
+ .globl _mcl_fp_sub4Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_sub4Lbmi2: ## @mcl_fp_sub4Lbmi2
+## BB#0:
+ movq 24(%rdx), %r10
+ movq 24(%rsi), %r8
+ movq 16(%rsi), %r9
+ movq (%rsi), %rax
+ movq 8(%rsi), %r11
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r11
+ sbbq 16(%rdx), %r9
+ movq %rax, (%rdi)
+ movq %r11, 8(%rdi)
+ movq %r9, 16(%rdi)
+ sbbq %r10, %r8
+ movq %r8, 24(%rdi)
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne LBB61_2
+## BB#1: ## %nocarry
+ retq
+LBB61_2: ## %carry
+ movq 24(%rcx), %r10
+ movq 8(%rcx), %rsi
+ movq 16(%rcx), %rdx
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %r11, %rsi
+ movq %rsi, 8(%rdi)
+ adcq %r9, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r8, %r10
+ movq %r10, 24(%rdi)
+ retq
+
+ .globl _mcl_fp_subNF4Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_subNF4Lbmi2: ## @mcl_fp_subNF4Lbmi2
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movdqu (%rdx), %xmm0
+ movdqu 16(%rdx), %xmm1
+ pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1]
+ movd %xmm2, %r8
+ movdqu (%rsi), %xmm2
+ movdqu 16(%rsi), %xmm3
+ pshufd $78, %xmm3, %xmm4 ## xmm4 = xmm3[2,3,0,1]
+ movd %xmm4, %r15
+ movd %xmm1, %r9
+ movd %xmm3, %r11
+ pshufd $78, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,0,1]
+ movd %xmm1, %r10
+ pshufd $78, %xmm2, %xmm1 ## xmm1 = xmm2[2,3,0,1]
+ movd %xmm1, %r14
+ movd %xmm0, %rdx
+ movd %xmm2, %r12
+ subq %rdx, %r12
+ sbbq %r10, %r14
+ sbbq %r9, %r11
+ sbbq %r8, %r15
+ movq %r15, %rdx
+ sarq $63, %rdx
+ movq 24(%rcx), %rsi
+ andq %rdx, %rsi
+ movq 16(%rcx), %rax
+ andq %rdx, %rax
+ movq 8(%rcx), %rbx
+ andq %rdx, %rbx
+ andq (%rcx), %rdx
+ addq %r12, %rdx
+ movq %rdx, (%rdi)
+ adcq %r14, %rbx
+ movq %rbx, 8(%rdi)
+ adcq %r11, %rax
+ movq %rax, 16(%rdi)
+ adcq %r15, %rsi
+ movq %rsi, 24(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fpDbl_add4Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_add4Lbmi2: ## @mcl_fpDbl_add4Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r9
+ movq 56(%rsi), %r8
+ movq 48(%rdx), %r10
+ movq 48(%rsi), %r12
+ movq 40(%rdx), %r11
+ movq 32(%rdx), %r14
+ movq 24(%rdx), %r15
+ movq 16(%rdx), %rbx
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rbx
+ movq 40(%rsi), %r13
+ movq 24(%rsi), %rbp
+ movq 32(%rsi), %rsi
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rbx, 16(%rdi)
+ adcq %r15, %rbp
+ movq %rbp, 24(%rdi)
+ adcq %r14, %rsi
+ adcq %r11, %r13
+ adcq %r10, %r12
+ adcq %r9, %r8
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rsi, %rdx
+ subq (%rcx), %rdx
+ movq %r13, %rbp
+ sbbq 8(%rcx), %rbp
+ movq %r12, %rbx
+ sbbq 16(%rcx), %rbx
+ movq %r8, %r9
+ sbbq 24(%rcx), %r9
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %rsi, %rdx
+ movq %rdx, 32(%rdi)
+ testb %al, %al
+ cmovneq %r13, %rbp
+ movq %rbp, 40(%rdi)
+ cmovneq %r12, %rbx
+ movq %rbx, 48(%rdi)
+ cmovneq %r8, %r9
+ movq %r9, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_sub4Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_sub4Lbmi2: ## @mcl_fpDbl_sub4Lbmi2
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r9
+ movq 56(%rsi), %r8
+ movq 48(%rdx), %r10
+ movq 24(%rdx), %r11
+ movq (%rsi), %rbx
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ movq %rbx, (%rdi)
+ movq 8(%rsi), %rbx
+ sbbq 8(%rdx), %rbx
+ movq %rbx, 8(%rdi)
+ movq 16(%rsi), %rbx
+ sbbq 16(%rdx), %rbx
+ movq %rbx, 16(%rdi)
+ movq 24(%rsi), %rbx
+ sbbq %r11, %rbx
+ movq 40(%rdx), %r11
+ movq 32(%rdx), %rdx
+ movq %rbx, 24(%rdi)
+ movq 32(%rsi), %r12
+ sbbq %rdx, %r12
+ movq 48(%rsi), %r14
+ movq 40(%rsi), %r15
+ sbbq %r11, %r15
+ sbbq %r10, %r14
+ sbbq %r9, %r8
+ movl $0, %edx
+ sbbq $0, %rdx
+ andl $1, %edx
+ movq (%rcx), %rsi
+ cmoveq %rax, %rsi
+ testb %dl, %dl
+ movq 16(%rcx), %rdx
+ cmoveq %rax, %rdx
+ movq 24(%rcx), %rbx
+ cmoveq %rax, %rbx
+ cmovneq 8(%rcx), %rax
+ addq %r12, %rsi
+ movq %rsi, 32(%rdi)
+ adcq %r15, %rax
+ movq %rax, 40(%rdi)
+ adcq %r14, %rdx
+ movq %rdx, 48(%rdi)
+ adcq %r8, %rbx
+ movq %rbx, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+
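+## 5-limb (320-bit) variants of the same primitive set.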
+ .globl _mcl_fp_mulUnitPre5Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_mulUnitPre5Lbmi2: ## @mcl_fp_mulUnitPre5Lbmi2
+## BB#0:
+ pushq %r14
+ pushq %rbx
+ mulxq 32(%rsi), %r8, %r11
+ mulxq 24(%rsi), %r9, %rax
+ mulxq 16(%rsi), %r10, %rcx
+ mulxq 8(%rsi), %r14, %rbx
+ mulxq (%rsi), %rdx, %rsi
+ movq %rdx, (%rdi)
+ addq %r14, %rsi
+ movq %rsi, 8(%rdi)
+ adcq %r10, %rbx
+ movq %rbx, 16(%rdi)
+ adcq %r9, %rcx
+ movq %rcx, 24(%rdi)
+ adcq %r8, %rax
+ movq %rax, 32(%rdi)
+ adcq $0, %r11
+ movq %r11, 40(%rdi)
+ popq %rbx
+ popq %r14
+ retq
+
+ .globl _mcl_fpDbl_mulPre5Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_mulPre5Lbmi2: ## @mcl_fpDbl_mulPre5Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -24(%rsp) ## 8-byte Spill
+ movq %rdi, -40(%rsp) ## 8-byte Spill
+ movq (%rsi), %r11
+ movq 8(%rsi), %r10
+ movq (%rdx), %rcx
+ movq %r10, %rdx
+ mulxq %rcx, %rax, %r14
+ movq %r11, %rdx
+ mulxq %rcx, %rdx, %rbx
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq 24(%rsi), %rbp
+ movq %rbp, -48(%rsp) ## 8-byte Spill
+ movq 16(%rsi), %r15
+ addq %rax, %rbx
+ movq %r15, %rdx
+ mulxq %rcx, %rax, %r13
+ adcq %r14, %rax
+ movq %rbp, %rdx
+ mulxq %rcx, %r8, %r12
+ adcq %r13, %r8
+ movq 32(%rsi), %r14
+ movq %r14, %rdx
+ mulxq %rcx, %r9, %r13
+ adcq %r12, %r9
+ movq -56(%rsp), %rcx ## 8-byte Reload
+ movq %rcx, (%rdi)
+ adcq $0, %r13
+ movq -24(%rsp), %rdi ## 8-byte Reload
+ movq 8(%rdi), %rbp
+ movq %r11, %rdx
+ mulxq %rbp, %r12, %r11
+ addq %rbx, %r12
+ movq %r10, %rdx
+ mulxq %rbp, %rbx, %rcx
+ movq %rcx, -56(%rsp) ## 8-byte Spill
+ adcq %rax, %rbx
+ movq %r15, %rdx
+ mulxq %rbp, %rcx, %r10
+ adcq %r8, %rcx
+ movq -48(%rsp), %rdx ## 8-byte Reload
+ mulxq %rbp, %rax, %r8
+ adcq %r9, %rax
+ movq %r14, %rdx
+ mulxq %rbp, %r15, %rdx
+ adcq %r13, %r15
+ sbbq %r14, %r14
+ andl $1, %r14d
+ addq %r11, %rbx
+ movq -40(%rsp), %rbp ## 8-byte Reload
+ movq %r12, 8(%rbp)
+ adcq -56(%rsp), %rcx ## 8-byte Folded Reload
+ adcq %r10, %rax
+ adcq %r8, %r15
+ adcq %rdx, %r14
+ movq (%rsi), %rdx
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %r8
+ movq %r8, -48(%rsp) ## 8-byte Spill
+ movq 16(%rdi), %rbp
+ mulxq %rbp, %r12, %rdx
+ movq %rdx, -8(%rsp) ## 8-byte Spill
+ addq %rbx, %r12
+ movq %r8, %rdx
+ mulxq %rbp, %rbx, %rdx
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ adcq %rcx, %rbx
+ movq 16(%rsi), %r11
+ movq %r11, %rdx
+ mulxq %rbp, %rcx, %rdx
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ adcq %rax, %rcx
+ movq 24(%rsi), %r13
+ movq %r13, %rdx
+ mulxq %rbp, %r9, %r10
+ adcq %r15, %r9
+ movq 32(%rsi), %r15
+ movq %r15, %rdx
+ mulxq %rbp, %r8, %rdx
+ adcq %r14, %r8
+ sbbq %r14, %r14
+ andl $1, %r14d
+ addq -8(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -16(%rsp), %rcx ## 8-byte Folded Reload
+ adcq -32(%rsp), %r9 ## 8-byte Folded Reload
+ adcq %r10, %r8
+ adcq %rdx, %r14
+ movq -40(%rsp), %r10 ## 8-byte Reload
+ movq %r12, 16(%r10)
+ movq %rdi, %rbp
+ movq 24(%rbp), %rax
+ movq -56(%rsp), %rdx ## 8-byte Reload
+ mulxq %rax, %r12, %rdi
+ addq %rbx, %r12
+ movq -48(%rsp), %rdx ## 8-byte Reload
+ mulxq %rax, %rbx, %rdx
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ adcq %rcx, %rbx
+ movq %r11, %rdx
+ mulxq %rax, %rcx, %r11
+ adcq %r9, %rcx
+ movq %r13, %rdx
+ mulxq %rax, %r13, %r9
+ adcq %r8, %r13
+ movq %r15, %rdx
+ mulxq %rax, %r8, %rdx
+ adcq %r14, %r8
+ sbbq %r14, %r14
+ andl $1, %r14d
+ addq %rdi, %rbx
+ movq %r12, 24(%r10)
+ movq %r10, %rdi
+ adcq -48(%rsp), %rcx ## 8-byte Folded Reload
+ adcq %r11, %r13
+ adcq %r9, %r8
+ adcq %rdx, %r14
+ movq 32(%rbp), %rdx
+ mulxq 8(%rsi), %rax, %r9
+ mulxq (%rsi), %rbp, %r10
+ addq %rbx, %rbp
+ adcq %rcx, %rax
+ mulxq 16(%rsi), %rbx, %r11
+ adcq %r13, %rbx
+ movq %rbp, 32(%rdi)
+ mulxq 32(%rsi), %rcx, %r15
+ mulxq 24(%rsi), %rsi, %rdx
+ adcq %r8, %rsi
+ adcq %r14, %rcx
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ addq %r10, %rax
+ movq %rax, 40(%rdi)
+ adcq %r9, %rbx
+ movq %rbx, 48(%rdi)
+ adcq %r11, %rsi
+ movq %rsi, 56(%rdi)
+ adcq %rdx, %rcx
+ movq %rcx, 64(%rdi)
+ adcq %r15, %rbp
+ movq %rbp, 72(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_sqrPre5Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_sqrPre5Lbmi2: ## @mcl_fpDbl_sqrPre5Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 16(%rsi), %r11
+ movq (%rsi), %rax
+ movq 8(%rsi), %rcx
+ movq %r11, %rdx
+ mulxq %rax, %rbx, %r15
+ movq 32(%rsi), %r9
+ movq 24(%rsi), %r13
+ movq %rcx, %rdx
+ mulxq %rax, %r12, %rbp
+ movq %rbp, -16(%rsp) ## 8-byte Spill
+ movq %rax, %rdx
+ mulxq %rax, %rdx, %r14
+ movq %rdx, -24(%rsp) ## 8-byte Spill
+ addq %r12, %r14
+ adcq %rbp, %rbx
+ movq %r13, %rdx
+ mulxq %rax, %r8, %r10
+ adcq %r15, %r8
+ movq %r9, %rdx
+ movq %r9, -8(%rsp) ## 8-byte Spill
+ mulxq %rax, %rbp, %r15
+ adcq %r10, %rbp
+ movq -24(%rsp), %rax ## 8-byte Reload
+ movq %rax, (%rdi)
+ adcq $0, %r15
+ addq %r12, %r14
+ movq %rcx, %rdx
+ mulxq %rcx, %rax, %rdx
+ movq %rdx, -24(%rsp) ## 8-byte Spill
+ adcq %rbx, %rax
+ movq %r11, %rdx
+ mulxq %rcx, %rbx, %r10
+ adcq %r8, %rbx
+ movq %r13, %rdx
+ mulxq %rcx, %r13, %r8
+ adcq %rbp, %r13
+ movq %r9, %rdx
+ mulxq %rcx, %r12, %rcx
+ adcq %r15, %r12
+ sbbq %r15, %r15
+ andl $1, %r15d
+ addq -16(%rsp), %rax ## 8-byte Folded Reload
+ movq %r14, 8(%rdi)
+ adcq -24(%rsp), %rbx ## 8-byte Folded Reload
+ adcq %r10, %r13
+ adcq %r8, %r12
+ adcq %rcx, %r15
+ movq (%rsi), %r9
+ movq 8(%rsi), %r10
+ movq %r9, %rdx
+ mulxq %r11, %rbp, %rcx
+ movq %rcx, -16(%rsp) ## 8-byte Spill
+ addq %rax, %rbp
+ movq %r10, %rdx
+ mulxq %r11, %rax, %r8
+ adcq %rbx, %rax
+ movq %r11, %rdx
+ mulxq %r11, %r14, %rcx
+ movq %rcx, -24(%rsp) ## 8-byte Spill
+ adcq %r13, %r14
+ movq 24(%rsi), %rcx
+ movq %rcx, %rdx
+ mulxq %r11, %rbx, %r13
+ adcq %r12, %rbx
+ movq -8(%rsp), %rdx ## 8-byte Reload
+ mulxq %r11, %r12, %rdx
+ adcq %r15, %r12
+ sbbq %r15, %r15
+ andl $1, %r15d
+ addq -16(%rsp), %rax ## 8-byte Folded Reload
+ adcq %r8, %r14
+ movq %rbp, 16(%rdi)
+ adcq -24(%rsp), %rbx ## 8-byte Folded Reload
+ adcq %r13, %r12
+ adcq %rdx, %r15
+ movq %r10, %rdx
+ mulxq %rcx, %r10, %rdx
+ movq %rdx, -8(%rsp) ## 8-byte Spill
+ movq %r9, %rdx
+ mulxq %rcx, %r13, %rdx
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ addq %rax, %r13
+ movq 16(%rsi), %r8
+ movq 32(%rsi), %rax
+ adcq %r14, %r10
+ movq %r8, %rdx
+ mulxq %rcx, %r9, %r14
+ adcq %rbx, %r9
+ movq %rcx, %rdx
+ mulxq %rcx, %r11, %rbp
+ adcq %r12, %r11
+ movq %rax, %rdx
+ mulxq %rcx, %r12, %rdx
+ adcq %r15, %r12
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ addq -16(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r13, 24(%rdi)
+ adcq -8(%rsp), %r9 ## 8-byte Folded Reload
+ adcq %r14, %r11
+ adcq %rbp, %r12
+ adcq %rdx, %rbx
+ movq %rax, %rdx
+ mulxq 24(%rsi), %rbp, %r14
+ mulxq (%rsi), %rdx, %r15
+ addq %r10, %rdx
+ movq %rdx, 32(%rdi)
+ movq %rax, %rdx
+ mulxq 8(%rsi), %rsi, %r10
+ adcq %r9, %rsi
+ movq %r8, %rdx
+ mulxq %rax, %rcx, %r8
+ adcq %r11, %rcx
+ adcq %r12, %rbp
+ movq %rax, %rdx
+ mulxq %rax, %rdx, %rax
+ adcq %rbx, %rdx
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ addq %r15, %rsi
+ movq %rsi, 40(%rdi)
+ adcq %r10, %rcx
+ movq %rcx, 48(%rdi)
+ adcq %r8, %rbp
+ movq %rbp, 56(%rdi)
+ adcq %r14, %rdx
+ movq %rdx, 64(%rdi)
+ adcq %rax, %rbx
+ movq %rbx, 72(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
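+## mcl_fp_mont5Lbmi2: word-serial Montgomery multiplication for 5 x 64-bit limbs.
+## For each limb of the multiplier at (%rdx) it accumulates a partial product,
+## multiplies the low accumulator limb by the constant stored at -8 of the modulus
+## pointer (%rcx) (the usual -p^-1 mod 2^64 value) to form the reduction quotient,
+## adds that multiple of the modulus, and ends with one conditional subtraction.
+## The Montgomery product x*y*2^-320 mod p is written to (%rdi).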
+ .globl _mcl_fp_mont5Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_mont5Lbmi2: ## @mcl_fp_mont5Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq 32(%rsi), %rdi
+ movq %rdi, -104(%rsp) ## 8-byte Spill
+ movq (%rdx), %rax
+ movq %rdi, %rdx
+ mulxq %rax, %r10, %rbx
+ movq 24(%rsi), %rdx
+ movq %rdx, -24(%rsp) ## 8-byte Spill
+ mulxq %rax, %r12, %r14
+ movq 16(%rsi), %rdx
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ mulxq %rax, %r13, %r11
+ movq (%rsi), %rbp
+ movq %rbp, -40(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rdx
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ mulxq %rax, %rdi, %r9
+ movq %rbp, %rdx
+ mulxq %rax, %r15, %r8
+ addq %rdi, %r8
+ adcq %r13, %r9
+ adcq %r12, %r11
+ adcq %r10, %r14
+ adcq $0, %rbx
+ movq %rbx, -112(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ movq %r15, %rdx
+ imulq %rax, %rdx
+ movq 32(%rcx), %rax
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ mulxq %rax, %rax, %r12
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq 24(%rcx), %rax
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ mulxq %rax, %r13, %r10
+ movq 8(%rcx), %rax
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ mulxq %rax, %rdi, %rbp
+ movq (%rcx), %rax
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ mulxq %rax, %rsi, %rbx
+ addq %rdi, %rbx
+ movq 16(%rcx), %rax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ mulxq %rax, %rdi, %rcx
+ adcq %rbp, %rdi
+ adcq %r13, %rcx
+ adcq -120(%rsp), %r10 ## 8-byte Folded Reload
+ adcq $0, %r12
+ addq %r15, %rsi
+ adcq %r8, %rbx
+ adcq %r9, %rdi
+ adcq %r11, %rcx
+ adcq %r14, %r10
+ adcq -112(%rsp), %r12 ## 8-byte Folded Reload
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ movq -96(%rsp), %rax ## 8-byte Reload
+ movq 8(%rax), %rdx
+ mulxq -104(%rsp), %rax, %r14 ## 8-byte Folded Reload
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ mulxq -24(%rsp), %rax, %r15 ## 8-byte Folded Reload
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ mulxq -32(%rsp), %r13, %r9 ## 8-byte Folded Reload
+ mulxq -48(%rsp), %r8, %rsi ## 8-byte Folded Reload
+ mulxq -40(%rsp), %r11, %rax ## 8-byte Folded Reload
+ addq %r8, %rax
+ adcq %r13, %rsi
+ adcq -120(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -112(%rsp), %r15 ## 8-byte Folded Reload
+ adcq $0, %r14
+ addq %rbx, %r11
+ adcq %rdi, %rax
+ adcq %rcx, %rsi
+ adcq %r10, %r9
+ adcq %r12, %r15
+ adcq %rbp, %r14
+ sbbq %r12, %r12
+ andl $1, %r12d
+ movq %r11, %rdx
+ imulq -16(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -56(%rsp), %rcx, %r10 ## 8-byte Folded Reload
+ movq %rcx, -112(%rsp) ## 8-byte Spill
+ mulxq -64(%rsp), %rcx, %rdi ## 8-byte Folded Reload
+ movq %rcx, -120(%rsp) ## 8-byte Spill
+ mulxq -88(%rsp), %r13, %rcx ## 8-byte Folded Reload
+ mulxq -72(%rsp), %r8, %rbx ## 8-byte Folded Reload
+ mulxq -80(%rsp), %rdx, %rbp ## 8-byte Folded Reload
+ addq %r8, %rbp
+ adcq %r13, %rbx
+ adcq -120(%rsp), %rcx ## 8-byte Folded Reload
+ adcq -112(%rsp), %rdi ## 8-byte Folded Reload
+ adcq $0, %r10
+ addq %r11, %rdx
+ adcq %rax, %rbp
+ adcq %rsi, %rbx
+ adcq %r9, %rcx
+ adcq %r15, %rdi
+ adcq %r14, %r10
+ adcq $0, %r12
+ movq -96(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rdx
+ mulxq -104(%rsp), %rax, %r15 ## 8-byte Folded Reload
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ mulxq -24(%rsp), %rax, %r11 ## 8-byte Folded Reload
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ mulxq -32(%rsp), %r13, %r9 ## 8-byte Folded Reload
+ mulxq -48(%rsp), %rsi, %r8 ## 8-byte Folded Reload
+ mulxq -40(%rsp), %r14, %rax ## 8-byte Folded Reload
+ addq %rsi, %rax
+ adcq %r13, %r8
+ adcq -120(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -112(%rsp), %r11 ## 8-byte Folded Reload
+ adcq $0, %r15
+ addq %rbp, %r14
+ adcq %rbx, %rax
+ adcq %rcx, %r8
+ adcq %rdi, %r9
+ adcq %r10, %r11
+ adcq %r12, %r15
+ sbbq %r13, %r13
+ andl $1, %r13d
+ movq %r14, %rdx
+ imulq -16(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -56(%rsp), %rcx, %r12 ## 8-byte Folded Reload
+ movq %rcx, -112(%rsp) ## 8-byte Spill
+ mulxq -64(%rsp), %rcx, %r10 ## 8-byte Folded Reload
+ movq %rcx, -120(%rsp) ## 8-byte Spill
+ mulxq -88(%rsp), %rdi, %rsi ## 8-byte Folded Reload
+ mulxq -72(%rsp), %rcx, %rbx ## 8-byte Folded Reload
+ mulxq -80(%rsp), %rdx, %rbp ## 8-byte Folded Reload
+ addq %rcx, %rbp
+ adcq %rdi, %rbx
+ adcq -120(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -112(%rsp), %r10 ## 8-byte Folded Reload
+ adcq $0, %r12
+ addq %r14, %rdx
+ adcq %rax, %rbp
+ adcq %r8, %rbx
+ adcq %r9, %rsi
+ adcq %r11, %r10
+ adcq %r15, %r12
+ adcq $0, %r13
+ movq -96(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rdx
+ mulxq -104(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ movq %rcx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ mulxq -24(%rsp), %r11, %r14 ## 8-byte Folded Reload
+ mulxq -32(%rsp), %r8, %r9 ## 8-byte Folded Reload
+ mulxq -48(%rsp), %rax, %rdi ## 8-byte Folded Reload
+ mulxq -40(%rsp), %r15, %rcx ## 8-byte Folded Reload
+ addq %rax, %rcx
+ adcq %r8, %rdi
+ adcq %r11, %r9
+ adcq -120(%rsp), %r14 ## 8-byte Folded Reload
+ movq -112(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %rbp, %r15
+ adcq %rbx, %rcx
+ adcq %rsi, %rdi
+ adcq %r10, %r9
+ adcq %r12, %r14
+ adcq %r13, %rax
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ sbbq %r12, %r12
+ andl $1, %r12d
+ movq %r15, %rdx
+ imulq -16(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -56(%rsp), %rax, %rbp ## 8-byte Folded Reload
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ mulxq -64(%rsp), %r13, %r10 ## 8-byte Folded Reload
+ mulxq -88(%rsp), %rbx, %r8 ## 8-byte Folded Reload
+ mulxq -72(%rsp), %rsi, %r11 ## 8-byte Folded Reload
+ mulxq -80(%rsp), %rdx, %rax ## 8-byte Folded Reload
+ addq %rsi, %rax
+ adcq %rbx, %r11
+ adcq %r13, %r8
+ adcq -120(%rsp), %r10 ## 8-byte Folded Reload
+ adcq $0, %rbp
+ addq %r15, %rdx
+ adcq %rcx, %rax
+ adcq %rdi, %r11
+ adcq %r9, %r8
+ adcq %r14, %r10
+ adcq -112(%rsp), %rbp ## 8-byte Folded Reload
+ adcq $0, %r12
+ movq -96(%rsp), %rcx ## 8-byte Reload
+ movq 32(%rcx), %rdx
+ mulxq -104(%rsp), %rcx, %r14 ## 8-byte Folded Reload
+ movq %rcx, -96(%rsp) ## 8-byte Spill
+ mulxq -24(%rsp), %rcx, %rbx ## 8-byte Folded Reload
+ movq %rcx, -104(%rsp) ## 8-byte Spill
+ mulxq -32(%rsp), %rsi, %r15 ## 8-byte Folded Reload
+ mulxq -48(%rsp), %rcx, %r9 ## 8-byte Folded Reload
+ mulxq -40(%rsp), %r13, %rdi ## 8-byte Folded Reload
+ addq %rcx, %rdi
+ adcq %rsi, %r9
+ adcq -104(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -96(%rsp), %rbx ## 8-byte Folded Reload
+ adcq $0, %r14
+ addq %rax, %r13
+ adcq %r11, %rdi
+ adcq %r8, %r9
+ adcq %r10, %r15
+ adcq %rbp, %rbx
+ adcq %r12, %r14
+ sbbq %rax, %rax
+ movq -16(%rsp), %rdx ## 8-byte Reload
+ imulq %r13, %rdx
+ mulxq -80(%rsp), %r10, %rcx ## 8-byte Folded Reload
+ mulxq -72(%rsp), %r8, %rsi ## 8-byte Folded Reload
+ addq %rcx, %r8
+ mulxq -88(%rsp), %rbp, %r11 ## 8-byte Folded Reload
+ adcq %rsi, %rbp
+ mulxq -64(%rsp), %rcx, %r12 ## 8-byte Folded Reload
+ adcq %r11, %rcx
+ mulxq -56(%rsp), %rsi, %r11 ## 8-byte Folded Reload
+ adcq %r12, %rsi
+ adcq $0, %r11
+ andl $1, %eax
+ addq %r13, %r10
+ adcq %rdi, %r8
+ adcq %r9, %rbp
+ adcq %r15, %rcx
+ adcq %rbx, %rsi
+ adcq %r14, %r11
+ adcq $0, %rax
+ movq %r8, %rdi
+ subq -80(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rbp, %rbx
+ sbbq -72(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rcx, %r9
+ sbbq -88(%rsp), %r9 ## 8-byte Folded Reload
+ movq %rsi, %rdx
+ sbbq -64(%rsp), %rdx ## 8-byte Folded Reload
+ movq %r11, %r10
+ sbbq -56(%rsp), %r10 ## 8-byte Folded Reload
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %rsi, %rdx
+ testb %al, %al
+ cmovneq %r8, %rdi
+ movq -8(%rsp), %rax ## 8-byte Reload
+ movq %rdi, (%rax)
+ cmovneq %rbp, %rbx
+ movq %rbx, 8(%rax)
+ cmovneq %rcx, %r9
+ movq %r9, 16(%rax)
+ movq %rdx, 24(%rax)
+ cmovneq %r11, %r10
+ movq %r10, 32(%rax)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
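+## mcl_fp_montNF5Lbmi2: 5-limb Montgomery multiplication in the "NF" form: no
+## extra overflow word is carried between rounds, and the final correction
+## subtracts the modulus and picks the non-negative candidate by sign
+## (sarq/cmovsq) instead of by an explicit carry flag. Operands as in
+## mcl_fp_mont5Lbmi2.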
+ .globl _mcl_fp_montNF5Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_montNF5Lbmi2: ## @mcl_fp_montNF5Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq (%rsi), %r13
+ movq 8(%rsi), %rbp
+ movq %rbp, -104(%rsp) ## 8-byte Spill
+ movq (%rdx), %rax
+ movq %rbp, %rdx
+ mulxq %rax, %rbp, %r9
+ movq %r13, %rdx
+ movq %r13, -24(%rsp) ## 8-byte Spill
+ mulxq %rax, %r8, %r10
+ movq 16(%rsi), %rdx
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ addq %rbp, %r10
+ mulxq %rax, %rbp, %rbx
+ adcq %r9, %rbp
+ movq 24(%rsi), %rdx
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ mulxq %rax, %r15, %r9
+ adcq %rbx, %r15
+ movq 32(%rsi), %rdx
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ mulxq %rax, %rax, %r11
+ adcq %r9, %rax
+ adcq $0, %r11
+ movq -8(%rcx), %rsi
+ movq %rsi, -32(%rsp) ## 8-byte Spill
+ movq %r8, %rdx
+ imulq %rsi, %rdx
+ movq (%rcx), %rsi
+ movq %rsi, -48(%rsp) ## 8-byte Spill
+ mulxq %rsi, %rbx, %r14
+ addq %r8, %rbx
+ movq 8(%rcx), %rsi
+ movq %rsi, -40(%rsp) ## 8-byte Spill
+ mulxq %rsi, %rbx, %r12
+ adcq %r10, %rbx
+ movq 16(%rcx), %rsi
+ movq %rsi, -16(%rsp) ## 8-byte Spill
+ mulxq %rsi, %r10, %rdi
+ adcq %rbp, %r10
+ movq 24(%rcx), %rsi
+ movq %rsi, -88(%rsp) ## 8-byte Spill
+ mulxq %rsi, %r9, %rbp
+ adcq %r15, %r9
+ movq 32(%rcx), %rcx
+ movq %rcx, -56(%rsp) ## 8-byte Spill
+ mulxq %rcx, %r8, %rcx
+ adcq %rax, %r8
+ adcq $0, %r11
+ addq %r14, %rbx
+ adcq %r12, %r10
+ adcq %rdi, %r9
+ adcq %rbp, %r8
+ adcq %rcx, %r11
+ movq -96(%rsp), %rax ## 8-byte Reload
+ movq 8(%rax), %rdx
+ mulxq -104(%rsp), %rcx, %rsi ## 8-byte Folded Reload
+ mulxq %r13, %r14, %rax
+ addq %rcx, %rax
+ mulxq -64(%rsp), %rcx, %rdi ## 8-byte Folded Reload
+ adcq %rsi, %rcx
+ mulxq -72(%rsp), %rsi, %r15 ## 8-byte Folded Reload
+ adcq %rdi, %rsi
+ mulxq -80(%rsp), %rdi, %rbp ## 8-byte Folded Reload
+ adcq %r15, %rdi
+ adcq $0, %rbp
+ addq %rbx, %r14
+ adcq %r10, %rax
+ adcq %r9, %rcx
+ adcq %r8, %rsi
+ adcq %r11, %rdi
+ adcq $0, %rbp
+ movq %r14, %rdx
+ movq -32(%rsp), %r12 ## 8-byte Reload
+ imulq %r12, %rdx
+ mulxq -48(%rsp), %rbx, %r15 ## 8-byte Folded Reload
+ addq %r14, %rbx
+ movq -40(%rsp), %r13 ## 8-byte Reload
+ mulxq %r13, %r8, %rbx
+ adcq %rax, %r8
+ mulxq -16(%rsp), %r9, %rax ## 8-byte Folded Reload
+ adcq %rcx, %r9
+ mulxq -88(%rsp), %r10, %rcx ## 8-byte Folded Reload
+ adcq %rsi, %r10
+ mulxq -56(%rsp), %r11, %rdx ## 8-byte Folded Reload
+ adcq %rdi, %r11
+ adcq $0, %rbp
+ addq %r15, %r8
+ adcq %rbx, %r9
+ adcq %rax, %r10
+ adcq %rcx, %r11
+ adcq %rdx, %rbp
+ movq -96(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rdx
+ mulxq -104(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ mulxq -24(%rsp), %r14, %rsi ## 8-byte Folded Reload
+ addq %rcx, %rsi
+ mulxq -64(%rsp), %rbx, %rcx ## 8-byte Folded Reload
+ adcq %rax, %rbx
+ mulxq -72(%rsp), %rdi, %r15 ## 8-byte Folded Reload
+ adcq %rcx, %rdi
+ mulxq -80(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ adcq %r15, %rcx
+ adcq $0, %rax
+ addq %r8, %r14
+ adcq %r9, %rsi
+ adcq %r10, %rbx
+ adcq %r11, %rdi
+ adcq %rbp, %rcx
+ adcq $0, %rax
+ movq %r14, %rdx
+ imulq %r12, %rdx
+ movq -48(%rsp), %r12 ## 8-byte Reload
+ mulxq %r12, %rbp, %r15
+ addq %r14, %rbp
+ mulxq %r13, %r8, %rbp
+ adcq %rsi, %r8
+ movq -16(%rsp), %r13 ## 8-byte Reload
+ mulxq %r13, %r9, %rsi
+ adcq %rbx, %r9
+ mulxq -88(%rsp), %r10, %rbx ## 8-byte Folded Reload
+ adcq %rdi, %r10
+ mulxq -56(%rsp), %r11, %rdx ## 8-byte Folded Reload
+ adcq %rcx, %r11
+ adcq $0, %rax
+ addq %r15, %r8
+ adcq %rbp, %r9
+ adcq %rsi, %r10
+ adcq %rbx, %r11
+ adcq %rdx, %rax
+ movq -96(%rsp), %rcx ## 8-byte Reload
+ movq 24(%rcx), %rdx
+ mulxq -104(%rsp), %rdi, %rsi ## 8-byte Folded Reload
+ mulxq -24(%rsp), %r14, %rcx ## 8-byte Folded Reload
+ addq %rdi, %rcx
+ mulxq -64(%rsp), %rbx, %rdi ## 8-byte Folded Reload
+ adcq %rsi, %rbx
+ mulxq -72(%rsp), %rsi, %r15 ## 8-byte Folded Reload
+ adcq %rdi, %rsi
+ mulxq -80(%rsp), %rdi, %rbp ## 8-byte Folded Reload
+ adcq %r15, %rdi
+ adcq $0, %rbp
+ addq %r8, %r14
+ adcq %r9, %rcx
+ adcq %r10, %rbx
+ adcq %r11, %rsi
+ adcq %rax, %rdi
+ adcq $0, %rbp
+ movq %r14, %rdx
+ imulq -32(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq %r12, %rax, %r11
+ addq %r14, %rax
+ mulxq -40(%rsp), %r8, %r14 ## 8-byte Folded Reload
+ adcq %rcx, %r8
+ mulxq %r13, %r9, %rax
+ adcq %rbx, %r9
+ movq -88(%rsp), %r12 ## 8-byte Reload
+ mulxq %r12, %r10, %rbx
+ adcq %rsi, %r10
+ mulxq -56(%rsp), %rcx, %rdx ## 8-byte Folded Reload
+ adcq %rdi, %rcx
+ adcq $0, %rbp
+ addq %r11, %r8
+ adcq %r14, %r9
+ adcq %rax, %r10
+ adcq %rbx, %rcx
+ adcq %rdx, %rbp
+ movq -96(%rsp), %rax ## 8-byte Reload
+ movq 32(%rax), %rdx
+ mulxq -104(%rsp), %rdi, %rbx ## 8-byte Folded Reload
+ mulxq -24(%rsp), %r14, %rsi ## 8-byte Folded Reload
+ addq %rdi, %rsi
+ mulxq -64(%rsp), %rdi, %rax ## 8-byte Folded Reload
+ adcq %rbx, %rdi
+ mulxq -72(%rsp), %rbx, %r15 ## 8-byte Folded Reload
+ adcq %rax, %rbx
+ mulxq -80(%rsp), %r11, %rax ## 8-byte Folded Reload
+ adcq %r15, %r11
+ adcq $0, %rax
+ addq %r8, %r14
+ adcq %r9, %rsi
+ adcq %r10, %rdi
+ adcq %rcx, %rbx
+ adcq %rbp, %r11
+ adcq $0, %rax
+ movq -32(%rsp), %rdx ## 8-byte Reload
+ imulq %r14, %rdx
+ movq -48(%rsp), %r10 ## 8-byte Reload
+ mulxq %r10, %rcx, %rbp
+ movq %rbp, -96(%rsp) ## 8-byte Spill
+ addq %r14, %rcx
+ movq -40(%rsp), %r9 ## 8-byte Reload
+ mulxq %r9, %r14, %rcx
+ movq %rcx, -104(%rsp) ## 8-byte Spill
+ adcq %rsi, %r14
+ movq %r13, %r8
+ mulxq %r8, %r15, %r13
+ adcq %rdi, %r15
+ mulxq %r12, %rbp, %rcx
+ adcq %rbx, %rbp
+ movq -56(%rsp), %rbx ## 8-byte Reload
+ mulxq %rbx, %r12, %rdx
+ adcq %r11, %r12
+ adcq $0, %rax
+ addq -96(%rsp), %r14 ## 8-byte Folded Reload
+ adcq -104(%rsp), %r15 ## 8-byte Folded Reload
+ adcq %r13, %rbp
+ adcq %rcx, %r12
+ adcq %rdx, %rax
+ movq %r14, %rcx
+ subq %r10, %rcx
+ movq %r15, %rsi
+ sbbq %r9, %rsi
+ movq %rbp, %rdi
+ sbbq %r8, %rdi
+ movq %r12, %r8
+ sbbq -88(%rsp), %r8 ## 8-byte Folded Reload
+ movq %rax, %rdx
+ sbbq %rbx, %rdx
+ movq %rdx, %rbx
+ sarq $63, %rbx
+ cmovsq %r14, %rcx
+ movq -8(%rsp), %rbx ## 8-byte Reload
+ movq %rcx, (%rbx)
+ cmovsq %r15, %rsi
+ movq %rsi, 8(%rbx)
+ cmovsq %rbp, %rdi
+ movq %rdi, 16(%rbx)
+ cmovsq %r12, %r8
+ movq %r8, 24(%rbx)
+ cmovsq %rax, %rdx
+ movq %rdx, 32(%rbx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
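+## mcl_fp_montRed5Lbmi2: Montgomery reduction. Folds the 10-limb value at (%rsi)
+## down to 5 limbs modulo the modulus passed in %rdx, one limb per round using the
+## -p^-1 mod 2^64 constant held at -8 of the modulus pointer, then applies a final
+## conditional subtraction and stores the result at (%rdi).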
+ .globl _mcl_fp_montRed5Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_montRed5Lbmi2: ## @mcl_fp_montRed5Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %rax
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq (%rsi), %r15
+ movq %r15, %rdx
+ imulq %rax, %rdx
+ movq 32(%rcx), %rax
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ mulxq %rax, %r8, %r14
+ movq 24(%rcx), %r12
+ mulxq %r12, %r10, %r13
+ movq %r12, -56(%rsp) ## 8-byte Spill
+ movq 16(%rcx), %r9
+ mulxq %r9, %rdi, %rbp
+ movq %r9, -64(%rsp) ## 8-byte Spill
+ movq (%rcx), %rbx
+ movq %rbx, -40(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %rax
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ mulxq %rax, %rax, %r11
+ mulxq %rbx, %rdx, %rcx
+ addq %rax, %rcx
+ adcq %rdi, %r11
+ adcq %r10, %rbp
+ adcq %r8, %r13
+ adcq $0, %r14
+ addq %r15, %rdx
+ movq 72(%rsi), %rax
+ movq 64(%rsi), %rdx
+ adcq 8(%rsi), %rcx
+ adcq 16(%rsi), %r11
+ adcq 24(%rsi), %rbp
+ adcq 32(%rsi), %r13
+ adcq 40(%rsi), %r14
+ movq %r14, -112(%rsp) ## 8-byte Spill
+ movq 56(%rsi), %rdi
+ movq 48(%rsi), %rsi
+ adcq $0, %rsi
+ movq %rsi, -32(%rsp) ## 8-byte Spill
+ adcq $0, %rdi
+ movq %rdi, -88(%rsp) ## 8-byte Spill
+ adcq $0, %rdx
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ adcq $0, %rax
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq %rcx, %rdx
+ movq -104(%rsp), %r14 ## 8-byte Reload
+ imulq %r14, %rdx
+ mulxq -72(%rsp), %rax, %r15 ## 8-byte Folded Reload
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ mulxq %r12, %rax, %r10
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ mulxq %r9, %rbx, %r8
+ movq -80(%rsp), %r12 ## 8-byte Reload
+ mulxq %r12, %r9, %rdi
+ mulxq -40(%rsp), %rdx, %rax ## 8-byte Folded Reload
+ addq %r9, %rax
+ adcq %rbx, %rdi
+ adcq -24(%rsp), %r8 ## 8-byte Folded Reload
+ adcq -16(%rsp), %r10 ## 8-byte Folded Reload
+ adcq $0, %r15
+ addq %rcx, %rdx
+ adcq %r11, %rax
+ adcq %rbp, %rdi
+ adcq %r13, %r8
+ adcq -112(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -32(%rsp), %r15 ## 8-byte Folded Reload
+ adcq $0, -88(%rsp) ## 8-byte Folded Spill
+ adcq $0, -96(%rsp) ## 8-byte Folded Spill
+ adcq $0, -48(%rsp) ## 8-byte Folded Spill
+ adcq $0, %rsi
+ movq %rax, %rdx
+ imulq %r14, %rdx
+ mulxq -72(%rsp), %rcx, %r13 ## 8-byte Folded Reload
+ movq %rcx, -112(%rsp) ## 8-byte Spill
+ mulxq -56(%rsp), %rcx, %r14 ## 8-byte Folded Reload
+ movq %rcx, -32(%rsp) ## 8-byte Spill
+ mulxq -64(%rsp), %r11, %rbx ## 8-byte Folded Reload
+ mulxq %r12, %r9, %rbp
+ mulxq -40(%rsp), %rdx, %rcx ## 8-byte Folded Reload
+ addq %r9, %rcx
+ adcq %r11, %rbp
+ adcq -32(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -112(%rsp), %r14 ## 8-byte Folded Reload
+ adcq $0, %r13
+ addq %rax, %rdx
+ adcq %rdi, %rcx
+ adcq %r8, %rbp
+ adcq %r10, %rbx
+ adcq %r15, %r14
+ adcq -88(%rsp), %r13 ## 8-byte Folded Reload
+ adcq $0, -96(%rsp) ## 8-byte Folded Spill
+ adcq $0, -48(%rsp) ## 8-byte Folded Spill
+ adcq $0, %rsi
+ movq %rcx, %rdx
+ imulq -104(%rsp), %rdx ## 8-byte Folded Reload
+ movq -72(%rsp), %r9 ## 8-byte Reload
+ mulxq %r9, %rax, %r12
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ mulxq -56(%rsp), %rax, %r10 ## 8-byte Folded Reload
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ mulxq -64(%rsp), %r8, %r11 ## 8-byte Folded Reload
+ mulxq -80(%rsp), %rdi, %r15 ## 8-byte Folded Reload
+ mulxq -40(%rsp), %rdx, %rax ## 8-byte Folded Reload
+ addq %rdi, %rax
+ adcq %r8, %r15
+ adcq -112(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -88(%rsp), %r10 ## 8-byte Folded Reload
+ adcq $0, %r12
+ addq %rcx, %rdx
+ adcq %rbp, %rax
+ adcq %rbx, %r15
+ adcq %r14, %r11
+ adcq %r13, %r10
+ adcq -96(%rsp), %r12 ## 8-byte Folded Reload
+ adcq $0, -48(%rsp) ## 8-byte Folded Spill
+ adcq $0, %rsi
+ movq -104(%rsp), %rdx ## 8-byte Reload
+ imulq %rax, %rdx
+ mulxq %r9, %rdi, %rcx
+ movq %rdi, -96(%rsp) ## 8-byte Spill
+ mulxq -56(%rsp), %rbp, %rdi ## 8-byte Folded Reload
+ movq %rbp, -104(%rsp) ## 8-byte Spill
+ mulxq -64(%rsp), %r13, %rbp ## 8-byte Folded Reload
+ movq -40(%rsp), %r14 ## 8-byte Reload
+ mulxq %r14, %r8, %r9
+ mulxq -80(%rsp), %rbx, %rdx ## 8-byte Folded Reload
+ addq %r9, %rbx
+ adcq %r13, %rdx
+ adcq -104(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -96(%rsp), %rdi ## 8-byte Folded Reload
+ adcq $0, %rcx
+ addq %rax, %r8
+ adcq %r15, %rbx
+ adcq %r11, %rdx
+ adcq %r10, %rbp
+ adcq %r12, %rdi
+ adcq -48(%rsp), %rcx ## 8-byte Folded Reload
+ adcq $0, %rsi
+ movq %rbx, %rax
+ subq %r14, %rax
+ movq %rdx, %r8
+ sbbq -80(%rsp), %r8 ## 8-byte Folded Reload
+ movq %rbp, %r9
+ sbbq -64(%rsp), %r9 ## 8-byte Folded Reload
+ movq %rdi, %r10
+ sbbq -56(%rsp), %r10 ## 8-byte Folded Reload
+ movq %rcx, %r11
+ sbbq -72(%rsp), %r11 ## 8-byte Folded Reload
+ sbbq $0, %rsi
+ andl $1, %esi
+ cmovneq %rcx, %r11
+ testb %sil, %sil
+ cmovneq %rbx, %rax
+ movq -8(%rsp), %rcx ## 8-byte Reload
+ movq %rax, (%rcx)
+ cmovneq %rdx, %r8
+ movq %r8, 8(%rcx)
+ cmovneq %rbp, %r9
+ movq %r9, 16(%rcx)
+ cmovneq %rdi, %r10
+ movq %r10, 24(%rcx)
+ movq %r11, 32(%rcx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
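+## mcl_fp_addPre5Lbmi2: plain 5-limb addition (%rdi) = (%rsi) + (%rdx) with no
+## reduction; the carry out of the top limb is returned in %rax.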
+ .globl _mcl_fp_addPre5Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_addPre5Lbmi2: ## @mcl_fp_addPre5Lbmi2
+## BB#0:
+ movq 32(%rdx), %r8
+ movq 24(%rdx), %r9
+ movq 24(%rsi), %r11
+ movq 32(%rsi), %r10
+ movq 16(%rdx), %rcx
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rcx
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rcx, 16(%rdi)
+ adcq %r9, %r11
+ movq %r11, 24(%rdi)
+ adcq %r8, %r10
+ movq %r10, 32(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+
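+## mcl_fp_subPre5Lbmi2: plain 5-limb subtraction (%rdi) = (%rsi) - (%rdx) with no
+## reduction; the final borrow is returned in %rax.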
+ .globl _mcl_fp_subPre5Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_subPre5Lbmi2: ## @mcl_fp_subPre5Lbmi2
+## BB#0:
+ pushq %rbx
+ movq 32(%rsi), %r10
+ movq 24(%rdx), %r8
+ movq 32(%rdx), %r9
+ movq 24(%rsi), %r11
+ movq 16(%rsi), %rcx
+ movq (%rsi), %rbx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %rcx
+ movq %rbx, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %rcx, 16(%rdi)
+ sbbq %r8, %r11
+ movq %r11, 24(%rdi)
+ sbbq %r9, %r10
+ movq %r10, 32(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ popq %rbx
+ retq
+
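+## mcl_fp_shr1_5Lbmi2: right shift of the 5-limb value at (%rsi) by one bit
+## (halving), propagating bits across limbs with shrd, result stored at (%rdi).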
+ .globl _mcl_fp_shr1_5Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_shr1_5Lbmi2: ## @mcl_fp_shr1_5Lbmi2
+## BB#0:
+ movq 32(%rsi), %r8
+ movq 24(%rsi), %rcx
+ movq 16(%rsi), %rdx
+ movq (%rsi), %rax
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rax
+ movq %rax, (%rdi)
+ shrdq $1, %rdx, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rcx, %rdx
+ movq %rdx, 16(%rdi)
+ shrdq $1, %r8, %rcx
+ movq %rcx, 24(%rdi)
+ shrq %r8
+ movq %r8, 32(%rdi)
+ retq
+
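+## mcl_fp_add5Lbmi2: modular addition. Adds the 5-limb operands and stores the raw
+## sum, then subtracts the modulus at (%rcx); if that subtraction does not borrow
+## (the %nocarry path), the reduced sum overwrites the stored value.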
+ .globl _mcl_fp_add5Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_add5Lbmi2: ## @mcl_fp_add5Lbmi2
+## BB#0:
+ pushq %rbx
+ movq 32(%rdx), %r11
+ movq 24(%rdx), %rbx
+ movq 24(%rsi), %r9
+ movq 32(%rsi), %r8
+ movq 16(%rdx), %r10
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r10
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r10, 16(%rdi)
+ adcq %rbx, %r9
+ movq %r9, 24(%rdi)
+ adcq %r11, %r8
+ movq %r8, 32(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r10
+ sbbq 24(%rcx), %r9
+ sbbq 32(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne LBB74_2
+## BB#1: ## %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r10, 16(%rdi)
+ movq %r9, 24(%rdi)
+ movq %r8, 32(%rdi)
+LBB74_2: ## %carry
+ popq %rbx
+ retq
+
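+## mcl_fp_addNF5Lbmi2: branch-free modular addition: computes x + y and x + y - p,
+## then keeps the non-negative candidate with cmovs.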
+ .globl _mcl_fp_addNF5Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_addNF5Lbmi2: ## @mcl_fp_addNF5Lbmi2
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq 32(%rdx), %r8
+ movq 24(%rdx), %r9
+ movq 16(%rdx), %r10
+ movq (%rdx), %r14
+ movq 8(%rdx), %r11
+ addq (%rsi), %r14
+ adcq 8(%rsi), %r11
+ adcq 16(%rsi), %r10
+ adcq 24(%rsi), %r9
+ adcq 32(%rsi), %r8
+ movq %r14, %rsi
+ subq (%rcx), %rsi
+ movq %r11, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r10, %rbx
+ sbbq 16(%rcx), %rbx
+ movq %r9, %r15
+ sbbq 24(%rcx), %r15
+ movq %r8, %rax
+ sbbq 32(%rcx), %rax
+ movq %rax, %rcx
+ sarq $63, %rcx
+ cmovsq %r14, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r11, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r10, %rbx
+ movq %rbx, 16(%rdi)
+ cmovsq %r9, %r15
+ movq %r15, 24(%rdi)
+ cmovsq %r8, %rax
+ movq %rax, 32(%rdi)
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+
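+## mcl_fp_sub5Lbmi2: modular subtraction. Computes (%rsi) - (%rdx) and stores the
+## raw difference; if the subtraction borrowed, the %carry branch adds the modulus
+## at (%rcx) back in.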
+ .globl _mcl_fp_sub5Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_sub5Lbmi2: ## @mcl_fp_sub5Lbmi2
+## BB#0:
+ pushq %r14
+ pushq %rbx
+ movq 32(%rsi), %r8
+ movq 24(%rdx), %r11
+ movq 32(%rdx), %r14
+ movq 24(%rsi), %r9
+ movq 16(%rsi), %r10
+ movq (%rsi), %rax
+ movq 8(%rsi), %rsi
+ xorl %ebx, %ebx
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %r10
+ movq %rax, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %r10, 16(%rdi)
+ sbbq %r11, %r9
+ movq %r9, 24(%rdi)
+ sbbq %r14, %r8
+ movq %r8, 32(%rdi)
+ sbbq $0, %rbx
+ testb $1, %bl
+ je LBB76_2
+## BB#1: ## %carry
+ movq 32(%rcx), %r11
+ movq 24(%rcx), %r14
+ movq 8(%rcx), %rdx
+ movq 16(%rcx), %rbx
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %rsi, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r10, %rbx
+ movq %rbx, 16(%rdi)
+ adcq %r9, %r14
+ movq %r14, 24(%rdi)
+ adcq %r8, %r11
+ movq %r11, 32(%rdi)
+LBB76_2: ## %nocarry
+ popq %rbx
+ popq %r14
+ retq
+
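+## mcl_fp_subNF5Lbmi2: branchless modular subtraction. Loads the operands with SSE
+## moves, computes x - y, derives an all-ones/all-zero mask from the sign of the
+## result, and adds the modulus masked by that sign so p is added back only when
+## the difference went negative.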
+ .globl _mcl_fp_subNF5Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_subNF5Lbmi2: ## @mcl_fp_subNF5Lbmi2
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 32(%rsi), %r12
+ movdqu (%rdx), %xmm0
+ movdqu 16(%rdx), %xmm1
+ pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1]
+ movd %xmm2, %r9
+ movdqu (%rsi), %xmm2
+ movdqu 16(%rsi), %xmm3
+ pshufd $78, %xmm3, %xmm4 ## xmm4 = xmm3[2,3,0,1]
+ movd %xmm4, %r8
+ movd %xmm1, %r10
+ movd %xmm3, %r14
+ pshufd $78, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,0,1]
+ movd %xmm1, %r11
+ pshufd $78, %xmm2, %xmm1 ## xmm1 = xmm2[2,3,0,1]
+ movd %xmm1, %r15
+ movd %xmm0, %rsi
+ movd %xmm2, %r13
+ subq %rsi, %r13
+ sbbq %r11, %r15
+ sbbq %r10, %r14
+ sbbq %r9, %r8
+ sbbq 32(%rdx), %r12
+ movq %r12, %rdx
+ sarq $63, %rdx
+ movq %rdx, %rsi
+ shldq $1, %r12, %rsi
+ movq 8(%rcx), %rax
+ andq %rsi, %rax
+ andq (%rcx), %rsi
+ movq 32(%rcx), %r9
+ andq %rdx, %r9
+ rorxq $63, %rdx, %rbx
+ andq 24(%rcx), %rdx
+ andq 16(%rcx), %rbx
+ addq %r13, %rsi
+ movq %rsi, (%rdi)
+ adcq %r15, %rax
+ movq %rax, 8(%rdi)
+ adcq %r14, %rbx
+ movq %rbx, 16(%rdi)
+ adcq %r8, %rdx
+ movq %rdx, 24(%rdi)
+ adcq %r12, %r9
+ movq %r9, 32(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+
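+## mcl_fpDbl_add5Lbmi2: adds two 10-limb double-width values. The low five limbs
+## are stored as-is; the high five limbs are reduced by a conditional subtraction
+## of the modulus at (%rcx) before being stored.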
+ .globl _mcl_fpDbl_add5Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_add5Lbmi2: ## @mcl_fpDbl_add5Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 72(%rdx), %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq 64(%rdx), %r11
+ movq 56(%rdx), %r14
+ movq 48(%rdx), %r15
+ movq 24(%rsi), %rbp
+ movq 32(%rsi), %r13
+ movq 16(%rdx), %r12
+ movq (%rdx), %rbx
+ movq 8(%rdx), %rax
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %rax
+ adcq 16(%rsi), %r12
+ adcq 24(%rdx), %rbp
+ adcq 32(%rdx), %r13
+ movq 40(%rdx), %r9
+ movq %rbx, (%rdi)
+ movq 72(%rsi), %r8
+ movq %rax, 8(%rdi)
+ movq 64(%rsi), %r10
+ movq %r12, 16(%rdi)
+ movq 56(%rsi), %r12
+ movq %rbp, 24(%rdi)
+ movq 48(%rsi), %rbp
+ movq 40(%rsi), %rbx
+ movq %r13, 32(%rdi)
+ adcq %r9, %rbx
+ adcq %r15, %rbp
+ adcq %r14, %r12
+ adcq %r11, %r10
+ adcq -8(%rsp), %r8 ## 8-byte Folded Reload
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq %rbx, %rax
+ subq (%rcx), %rax
+ movq %rbp, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r12, %r9
+ sbbq 16(%rcx), %r9
+ movq %r10, %r11
+ sbbq 24(%rcx), %r11
+ movq %r8, %r14
+ sbbq 32(%rcx), %r14
+ sbbq $0, %rsi
+ andl $1, %esi
+ cmovneq %rbx, %rax
+ movq %rax, 40(%rdi)
+ testb %sil, %sil
+ cmovneq %rbp, %rdx
+ movq %rdx, 48(%rdi)
+ cmovneq %r12, %r9
+ movq %r9, 56(%rdi)
+ cmovneq %r10, %r11
+ movq %r11, 64(%rdi)
+ cmovneq %r8, %r14
+ movq %r14, 72(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
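+## mcl_fpDbl_sub5Lbmi2: subtracts two 10-limb double-width values. The low five
+## limbs are stored directly; when the high half borrows, the modulus at (%rcx)
+## (selected limb-by-limb with cmove) is added back into the high five limbs.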
+ .globl _mcl_fpDbl_sub5Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_sub5Lbmi2: ## @mcl_fpDbl_sub5Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 72(%rdx), %r9
+ movq 64(%rdx), %r10
+ movq 56(%rdx), %r14
+ movq 16(%rsi), %r8
+ movq (%rsi), %r15
+ movq 8(%rsi), %r11
+ xorl %eax, %eax
+ subq (%rdx), %r15
+ sbbq 8(%rdx), %r11
+ sbbq 16(%rdx), %r8
+ movq 24(%rsi), %r12
+ sbbq 24(%rdx), %r12
+ movq %r15, (%rdi)
+ movq 32(%rsi), %rbx
+ sbbq 32(%rdx), %rbx
+ movq %r11, 8(%rdi)
+ movq 48(%rdx), %r15
+ movq 40(%rdx), %rdx
+ movq %r8, 16(%rdi)
+ movq 72(%rsi), %r8
+ movq %r12, 24(%rdi)
+ movq 64(%rsi), %r11
+ movq %rbx, 32(%rdi)
+ movq 40(%rsi), %rbp
+ sbbq %rdx, %rbp
+ movq 56(%rsi), %r12
+ movq 48(%rsi), %r13
+ sbbq %r15, %r13
+ sbbq %r14, %r12
+ sbbq %r10, %r11
+ sbbq %r9, %r8
+ movl $0, %edx
+ sbbq $0, %rdx
+ andl $1, %edx
+ movq (%rcx), %rsi
+ cmoveq %rax, %rsi
+ testb %dl, %dl
+ movq 16(%rcx), %rdx
+ cmoveq %rax, %rdx
+ movq 8(%rcx), %rbx
+ cmoveq %rax, %rbx
+ movq 32(%rcx), %r9
+ cmoveq %rax, %r9
+ cmovneq 24(%rcx), %rax
+ addq %rbp, %rsi
+ movq %rsi, 40(%rdi)
+ adcq %r13, %rbx
+ movq %rbx, 48(%rdi)
+ adcq %r12, %rdx
+ movq %rdx, 56(%rdi)
+ adcq %r11, %rax
+ movq %rax, 64(%rdi)
+ adcq %r8, %r9
+ movq %r9, 72(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
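+## mcl_fp_mulUnitPre6Lbmi2: multiplies the 6-limb value at (%rsi) by the single
+## 64-bit word passed in %rdx (used implicitly by mulx) and stores the 7-limb
+## product at (%rdi).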
+ .globl _mcl_fp_mulUnitPre6Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_mulUnitPre6Lbmi2: ## @mcl_fp_mulUnitPre6Lbmi2
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ mulxq 40(%rsi), %r8, %r11
+ mulxq 32(%rsi), %r9, %r12
+ mulxq 24(%rsi), %r10, %rcx
+ mulxq 16(%rsi), %r14, %rbx
+ mulxq 8(%rsi), %r15, %rax
+ mulxq (%rsi), %rdx, %rsi
+ movq %rdx, (%rdi)
+ addq %r15, %rsi
+ movq %rsi, 8(%rdi)
+ adcq %r14, %rax
+ movq %rax, 16(%rdi)
+ adcq %r10, %rbx
+ movq %rbx, 24(%rdi)
+ adcq %r9, %rcx
+ movq %rcx, 32(%rdi)
+ adcq %r8, %r12
+ movq %r12, 40(%rdi)
+ adcq $0, %r11
+ movq %r11, 48(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+
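+## mcl_fpDbl_mulPre6Lbmi2: schoolbook product of two 6-limb operands giving the
+## full 12-limb result at (%rdi); one mulx pass per multiplier limb, no reduction.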
+ .globl _mcl_fpDbl_mulPre6Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_mulPre6Lbmi2: ## @mcl_fpDbl_mulPre6Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %r11
+ movq %rdi, -48(%rsp) ## 8-byte Spill
+ movq (%rsi), %r15
+ movq 8(%rsi), %rcx
+ movq %rcx, -80(%rsp) ## 8-byte Spill
+ movq (%r11), %rax
+ movq %r11, -56(%rsp) ## 8-byte Spill
+ movq %rcx, %rdx
+ mulxq %rax, %rcx, %r14
+ movq %r15, %rdx
+ mulxq %rax, %rdx, %rbp
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq 24(%rsi), %rbx
+ movq %rbx, -88(%rsp) ## 8-byte Spill
+ movq 16(%rsi), %rdx
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ addq %rcx, %rbp
+ mulxq %rax, %rcx, %r12
+ adcq %r14, %rcx
+ movq %rbx, %rdx
+ mulxq %rax, %rbx, %r14
+ adcq %r12, %rbx
+ movq 32(%rsi), %r12
+ movq %r12, %rdx
+ mulxq %rax, %r8, %r13
+ adcq %r14, %r8
+ movq 40(%rsi), %r14
+ movq %r14, %rdx
+ mulxq %rax, %r9, %r10
+ adcq %r13, %r9
+ movq -72(%rsp), %rax ## 8-byte Reload
+ movq %rax, (%rdi)
+ adcq $0, %r10
+ movq 8(%r11), %rdi
+ movq %r15, %rdx
+ mulxq %rdi, %r13, %rax
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ addq %rbp, %r13
+ movq -80(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %rbp, %rax
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ adcq %rcx, %rbp
+ movq -64(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %rax, %r11
+ adcq %rbx, %rax
+ movq -88(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %rbx, %rcx
+ movq %rcx, -88(%rsp) ## 8-byte Spill
+ adcq %r8, %rbx
+ movq %r12, %rdx
+ mulxq %rdi, %rcx, %r8
+ adcq %r9, %rcx
+ movq %r14, %rdx
+ mulxq %rdi, %r12, %rdx
+ adcq %r10, %r12
+ sbbq %r15, %r15
+ andl $1, %r15d
+ addq -72(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -80(%rsp), %rax ## 8-byte Folded Reload
+ adcq %r11, %rbx
+ movq -48(%rsp), %rdi ## 8-byte Reload
+ movq %r13, 8(%rdi)
+ adcq -88(%rsp), %rcx ## 8-byte Folded Reload
+ adcq %r8, %r12
+ adcq %rdx, %r15
+ movq (%rsi), %rdx
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %r8
+ movq %r8, -80(%rsp) ## 8-byte Spill
+ movq -56(%rsp), %r14 ## 8-byte Reload
+ movq 16(%r14), %rdi
+ mulxq %rdi, %r13, %rdx
+ movq %rdx, -8(%rsp) ## 8-byte Spill
+ addq %rbp, %r13
+ movq %r8, %rdx
+ mulxq %rdi, %r8, %rdx
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ adcq %rax, %r8
+ movq 16(%rsi), %rdx
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ mulxq %rdi, %r11, %rax
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ adcq %rbx, %r11
+ movq 24(%rsi), %rdx
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ mulxq %rdi, %rax, %rbx
+ adcq %rcx, %rax
+ movq 32(%rsi), %rdx
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ mulxq %rdi, %r10, %rcx
+ adcq %r12, %r10
+ movq 40(%rsi), %rdx
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ mulxq %rdi, %r9, %rdx
+ adcq %r15, %r9
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ addq -8(%rsp), %r8 ## 8-byte Folded Reload
+ adcq -16(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -24(%rsp), %rax ## 8-byte Folded Reload
+ adcq %rbx, %r10
+ adcq %rcx, %r9
+ adcq %rdx, %rbp
+ movq -48(%rsp), %rcx ## 8-byte Reload
+ movq %r13, 16(%rcx)
+ movq 24(%r14), %rdi
+ movq -88(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %r12, %rcx
+ movq %rcx, -88(%rsp) ## 8-byte Spill
+ addq %r8, %r12
+ movq -80(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %rbx, %rcx
+ movq %rcx, -80(%rsp) ## 8-byte Spill
+ adcq %r11, %rbx
+ movq -64(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %rcx, %r11
+ adcq %rax, %rcx
+ movq -72(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %r14, %rax
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ adcq %r10, %r14
+ movq -32(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %r8, %rax
+ adcq %r9, %r8
+ movq -40(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %r13, %rdx
+ adcq %rbp, %r13
+ sbbq %r15, %r15
+ andl $1, %r15d
+ addq -88(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -80(%rsp), %rcx ## 8-byte Folded Reload
+ adcq %r11, %r14
+ movq -48(%rsp), %rdi ## 8-byte Reload
+ movq %r12, 24(%rdi)
+ adcq -64(%rsp), %r8 ## 8-byte Folded Reload
+ adcq %rax, %r13
+ adcq %rdx, %r15
+ movq (%rsi), %rdx
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rbp
+ movq %rbp, -80(%rsp) ## 8-byte Spill
+ movq -56(%rsp), %rax ## 8-byte Reload
+ movq 32(%rax), %rdi
+ mulxq %rdi, %r12, %rax
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ addq %rbx, %r12
+ movq %rbp, %rdx
+ mulxq %rdi, %rbx, %rax
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ adcq %rcx, %rbx
+ movq 16(%rsi), %r11
+ movq %r11, %rdx
+ mulxq %rdi, %rax, %rcx
+ movq %rcx, -32(%rsp) ## 8-byte Spill
+ adcq %r14, %rax
+ movq 24(%rsi), %r14
+ movq %r14, %rdx
+ mulxq %rdi, %rbp, %rcx
+ movq %rcx, -40(%rsp) ## 8-byte Spill
+ adcq %r8, %rbp
+ movq 32(%rsi), %r8
+ movq %r8, %rdx
+ mulxq %rdi, %rcx, %r10
+ adcq %r13, %rcx
+ movq 40(%rsi), %r13
+ movq %r13, %rdx
+ mulxq %rdi, %r9, %rdx
+ adcq %r15, %r9
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ addq -64(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -72(%rsp), %rax ## 8-byte Folded Reload
+ adcq -32(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -40(%rsp), %rcx ## 8-byte Folded Reload
+ adcq %r10, %r9
+ adcq %rdx, %rsi
+ movq -48(%rsp), %r10 ## 8-byte Reload
+ movq %r12, 32(%r10)
+ movq -56(%rsp), %rdx ## 8-byte Reload
+ movq 40(%rdx), %rdi
+ movq -88(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %r15, %rdx
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ addq %rbx, %r15
+ movq -80(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %rbx, %r12
+ adcq %rax, %rbx
+ movq %r11, %rdx
+ mulxq %rdi, %rax, %r11
+ adcq %rbp, %rax
+ movq %r14, %rdx
+ mulxq %rdi, %rbp, %r14
+ adcq %rcx, %rbp
+ movq %r8, %rdx
+ mulxq %rdi, %rcx, %r8
+ adcq %r9, %rcx
+ movq %r13, %rdx
+ mulxq %rdi, %rdi, %r9
+ adcq %rsi, %rdi
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ addq -56(%rsp), %rbx ## 8-byte Folded Reload
+ movq %r15, 40(%r10)
+ movq %rbx, 48(%r10)
+ adcq %r12, %rax
+ movq %rax, 56(%r10)
+ adcq %r11, %rbp
+ movq %rbp, 64(%r10)
+ adcq %r14, %rcx
+ movq %rcx, 72(%r10)
+ adcq %r8, %rdi
+ movq %rdi, 80(%r10)
+ adcq %r9, %rsi
+ movq %rsi, 88(%r10)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
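+## mcl_fpDbl_sqrPre6Lbmi2: full 12-limb square of the 6-limb value at (%rsi),
+## stored at (%rdi) without reduction.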
+ .globl _mcl_fpDbl_sqrPre6Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_sqrPre6Lbmi2: ## @mcl_fpDbl_sqrPre6Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, %r9
+ movq 16(%rsi), %rdx
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rax
+ mulxq %rcx, %r10, %r8
+ movq 24(%rsi), %rbp
+ movq %rbp, -48(%rsp) ## 8-byte Spill
+ movq %rax, %rdx
+ mulxq %rcx, %r11, %rbx
+ movq %rbx, -40(%rsp) ## 8-byte Spill
+ movq %rcx, %rdx
+ mulxq %rcx, %rdx, %r14
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ addq %r11, %r14
+ adcq %rbx, %r10
+ movq %rbp, %rdx
+ mulxq %rcx, %r15, %rbp
+ adcq %r8, %r15
+ movq 32(%rsi), %rbx
+ movq %rbx, %rdx
+ mulxq %rcx, %r8, %r13
+ adcq %rbp, %r8
+ movq 40(%rsi), %rdi
+ movq %rdi, %rdx
+ mulxq %rcx, %rcx, %r12
+ adcq %r13, %rcx
+ movq %r9, -24(%rsp) ## 8-byte Spill
+ movq -56(%rsp), %rdx ## 8-byte Reload
+ movq %rdx, (%r9)
+ adcq $0, %r12
+ addq %r11, %r14
+ movq %rax, %rdx
+ mulxq %rax, %rbp, %rdx
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ adcq %r10, %rbp
+ movq -64(%rsp), %rdx ## 8-byte Reload
+ mulxq %rax, %r13, %r10
+ adcq %r15, %r13
+ movq -48(%rsp), %rdx ## 8-byte Reload
+ mulxq %rax, %r15, %rdx
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ adcq %r8, %r15
+ movq %rbx, %rdx
+ mulxq %rax, %rbx, %r8
+ adcq %rcx, %rbx
+ movq %rdi, %rdx
+ mulxq %rax, %r11, %rax
+ adcq %r12, %r11
+ sbbq %r12, %r12
+ andl $1, %r12d
+ addq -40(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -56(%rsp), %r13 ## 8-byte Folded Reload
+ movq %r14, 8(%r9)
+ adcq %r10, %r15
+ adcq -64(%rsp), %rbx ## 8-byte Folded Reload
+ adcq %r8, %r11
+ adcq %rax, %r12
+ movq (%rsi), %rdx
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rdi
+ movq %rdi, -64(%rsp) ## 8-byte Spill
+ movq 16(%rsi), %rcx
+ mulxq %rcx, %rax, %rdx
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ addq %rbp, %rax
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ movq %rdi, %rdx
+ mulxq %rcx, %rbp, %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ adcq %r13, %rbp
+ movq %rcx, %rdx
+ mulxq %rcx, %r13, %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ adcq %r15, %r13
+ movq 24(%rsi), %rax
+ movq %rax, %rdx
+ mulxq %rcx, %r8, %rdi
+ movq %rdi, -56(%rsp) ## 8-byte Spill
+ adcq %r8, %rbx
+ movq 32(%rsi), %r10
+ movq %r10, %rdx
+ mulxq %rcx, %r14, %r15
+ adcq %r11, %r14
+ movq 40(%rsi), %r11
+ movq %r11, %rdx
+ mulxq %rcx, %r9, %rdx
+ adcq %r12, %r9
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq -32(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -8(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -16(%rsp), %rbx ## 8-byte Folded Reload
+ adcq %rdi, %r14
+ adcq %r15, %r9
+ adcq %rdx, %rcx
+ movq -48(%rsp), %rdx ## 8-byte Reload
+ mulxq %rax, %rdi, %rdx
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ addq %rbp, %rdi
+ movq -64(%rsp), %rdx ## 8-byte Reload
+ mulxq %rax, %r15, %rbp
+ adcq %r13, %r15
+ adcq %r8, %rbx
+ movq %rax, %rdx
+ mulxq %rax, %r8, %rdx
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ adcq %r14, %r8
+ movq %r10, %rdx
+ mulxq %rax, %r12, %r10
+ adcq %r9, %r12
+ movq %r11, %rdx
+ mulxq %rax, %r13, %rax
+ adcq %rcx, %r13
+ sbbq %r9, %r9
+ andl $1, %r9d
+ addq -48(%rsp), %r15 ## 8-byte Folded Reload
+ adcq %rbp, %rbx
+ movq -24(%rsp), %rdx ## 8-byte Reload
+ movq -40(%rsp), %rbp ## 8-byte Reload
+ movq %rbp, 16(%rdx)
+ movq %rdi, 24(%rdx)
+ adcq -56(%rsp), %r8 ## 8-byte Folded Reload
+ adcq -64(%rsp), %r12 ## 8-byte Folded Reload
+ adcq %r10, %r13
+ adcq %rax, %r9
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rdi
+ movq %rdi, -64(%rsp) ## 8-byte Spill
+ movq 32(%rsi), %rax
+ movq %rcx, %rdx
+ mulxq %rax, %rdx, %rbp
+ movq %rbp, -56(%rsp) ## 8-byte Spill
+ addq %r15, %rdx
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ movq %rdi, %rdx
+ mulxq %rax, %r15, %rdx
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ adcq %rbx, %r15
+ movq 16(%rsi), %r10
+ movq %r10, %rdx
+ mulxq %rax, %r14, %rbx
+ adcq %r8, %r14
+ movq 24(%rsi), %r8
+ movq %r8, %rdx
+ mulxq %rax, %rbp, %rdi
+ adcq %r12, %rbp
+ movq %rax, %rdx
+ mulxq %rax, %r11, %r12
+ adcq %r13, %r11
+ movq 40(%rsi), %rsi
+ movq %rsi, %rdx
+ mulxq %rax, %r13, %rdx
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ adcq %r13, %r9
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq -56(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -32(%rsp), %r14 ## 8-byte Folded Reload
+ adcq %rbx, %rbp
+ adcq %rdi, %r11
+ adcq %r12, %r9
+ adcq %rdx, %rax
+ movq %rcx, %rdx
+ mulxq %rsi, %r12, %rcx
+ addq %r15, %r12
+ movq -64(%rsp), %rdx ## 8-byte Reload
+ mulxq %rsi, %rdi, %r15
+ adcq %r14, %rdi
+ movq %r10, %rdx
+ mulxq %rsi, %rbx, %r10
+ adcq %rbp, %rbx
+ movq %r8, %rdx
+ mulxq %rsi, %rbp, %r8
+ adcq %r11, %rbp
+ adcq %r13, %r9
+ movq %rsi, %rdx
+ mulxq %rsi, %rsi, %r11
+ adcq %rax, %rsi
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %rcx, %rdi
+ movq -24(%rsp), %rdx ## 8-byte Reload
+ movq -40(%rsp), %rcx ## 8-byte Reload
+ movq %rcx, 32(%rdx)
+ movq %r12, 40(%rdx)
+ movq %rdi, 48(%rdx)
+ adcq %r15, %rbx
+ movq %rbx, 56(%rdx)
+ adcq %r10, %rbp
+ movq %rbp, 64(%rdx)
+ adcq %r8, %r9
+ movq %r9, 72(%rdx)
+ adcq -48(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 80(%rdx)
+ adcq %r11, %rax
+ movq %rax, 88(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
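+## mcl_fp_mont6Lbmi2: 6-limb (384-bit) Montgomery multiplication with the same
+## interleaved multiply/reduce structure as mcl_fp_mont5Lbmi2, one more limb per
+## round and a small stack frame for the extra spills.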
+ .globl _mcl_fp_mont6Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_mont6Lbmi2: ## @mcl_fp_mont6Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $32, %rsp
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rdi, 24(%rsp) ## 8-byte Spill
+ movq 40(%rsi), %rdi
+ movq %rdi, -96(%rsp) ## 8-byte Spill
+ movq (%rdx), %rax
+ movq %rdi, %rdx
+ mulxq %rax, %r11, %rbx
+ movq 32(%rsi), %rdx
+ movq %rdx, (%rsp) ## 8-byte Spill
+ mulxq %rax, %r14, %r12
+ movq 24(%rsi), %rdx
+ movq %rdx, -8(%rsp) ## 8-byte Spill
+ mulxq %rax, %r15, %r13
+ movq 16(%rsi), %rdx
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ mulxq %rax, %r8, %r10
+ movq (%rsi), %rbp
+ movq %rbp, -24(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rdx
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ mulxq %rax, %rdi, %r9
+ movq %rbp, %rdx
+ mulxq %rax, %rdx, %rbp
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ addq %rdi, %rbp
+ adcq %r8, %r9
+ adcq %r15, %r10
+ adcq %r14, %r13
+ adcq %r11, %r12
+ adcq $0, %rbx
+ movq %rbx, -120(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ imulq %rax, %rdx
+ movq 40(%rcx), %rax
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ mulxq %rax, %rax, %r15
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq 16(%rcx), %rax
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ mulxq %rax, %r8, %rax
+ movq 8(%rcx), %rsi
+ movq %rsi, -56(%rsp) ## 8-byte Spill
+ mulxq %rsi, %rbx, %r11
+ movq (%rcx), %rsi
+ movq %rsi, -64(%rsp) ## 8-byte Spill
+ mulxq %rsi, %rsi, %r14
+ addq %rbx, %r14
+ adcq %r8, %r11
+ movq 24(%rcx), %rdi
+ movq %rdi, -72(%rsp) ## 8-byte Spill
+ mulxq %rdi, %rdi, %r8
+ adcq %rax, %rdi
+ movq 32(%rcx), %rax
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ mulxq %rax, %rbx, %rax
+ adcq %r8, %rbx
+ adcq -112(%rsp), %rax ## 8-byte Folded Reload
+ adcq $0, %r15
+ addq -128(%rsp), %rsi ## 8-byte Folded Reload
+ adcq %rbp, %r14
+ adcq %r9, %r11
+ adcq %r10, %rdi
+ adcq %r13, %rbx
+ adcq %r12, %rax
+ adcq -120(%rsp), %r15 ## 8-byte Folded Reload
+ sbbq %r10, %r10
+ andl $1, %r10d
+ movq -88(%rsp), %rcx ## 8-byte Reload
+ movq 8(%rcx), %rdx
+ mulxq -96(%rsp), %rsi, %rcx ## 8-byte Folded Reload
+ movq %rsi, -112(%rsp) ## 8-byte Spill
+ movq %rcx, -120(%rsp) ## 8-byte Spill
+ mulxq (%rsp), %rcx, %r13 ## 8-byte Folded Reload
+ movq %rcx, -104(%rsp) ## 8-byte Spill
+ mulxq -8(%rsp), %r12, %rcx ## 8-byte Folded Reload
+ movq %rcx, -128(%rsp) ## 8-byte Spill
+ mulxq -32(%rsp), %rbp, %rcx ## 8-byte Folded Reload
+ mulxq -24(%rsp), %rsi, %r9 ## 8-byte Folded Reload
+ addq %rbp, %r9
+ mulxq -16(%rsp), %rbp, %r8 ## 8-byte Folded Reload
+ adcq %rcx, %rbp
+ adcq %r12, %r8
+ movq -128(%rsp), %rdx ## 8-byte Reload
+ adcq -104(%rsp), %rdx ## 8-byte Folded Reload
+ adcq -112(%rsp), %r13 ## 8-byte Folded Reload
+ movq -120(%rsp), %rcx ## 8-byte Reload
+ adcq $0, %rcx
+ addq %r14, %rsi
+ adcq %r11, %r9
+ adcq %rdi, %rbp
+ adcq %rbx, %r8
+ adcq %rax, %rdx
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ adcq %r15, %r13
+ adcq %r10, %rcx
+ movq %rcx, -120(%rsp) ## 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %rsi, %rbx
+ movq %rbx, %rdx
+ imulq 8(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -40(%rsp), %rax, %r12 ## 8-byte Folded Reload
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ mulxq -80(%rsp), %r14, %r11 ## 8-byte Folded Reload
+ mulxq -56(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ mulxq -64(%rsp), %rdi, %rsi ## 8-byte Folded Reload
+ addq %rcx, %rsi
+ mulxq -48(%rsp), %rcx, %r10 ## 8-byte Folded Reload
+ adcq %rax, %rcx
+ mulxq -72(%rsp), %rax, %r15 ## 8-byte Folded Reload
+ adcq %r10, %rax
+ adcq %r14, %r15
+ adcq -104(%rsp), %r11 ## 8-byte Folded Reload
+ adcq $0, %r12
+ addq %rbx, %rdi
+ adcq %r9, %rsi
+ adcq %rbp, %rcx
+ adcq %r8, %rax
+ adcq -128(%rsp), %r15 ## 8-byte Folded Reload
+ adcq %r13, %r11
+ adcq -120(%rsp), %r12 ## 8-byte Folded Reload
+ movq -112(%rsp), %r10 ## 8-byte Reload
+ adcq $0, %r10
+ movq -88(%rsp), %rdx ## 8-byte Reload
+ movq 16(%rdx), %rdx
+ mulxq -96(%rsp), %rbp, %rdi ## 8-byte Folded Reload
+ movq %rbp, -112(%rsp) ## 8-byte Spill
+ movq %rdi, -120(%rsp) ## 8-byte Spill
+ mulxq (%rsp), %rdi, %rbp ## 8-byte Folded Reload
+ movq %rdi, -104(%rsp) ## 8-byte Spill
+ mulxq -8(%rsp), %rdi, %r13 ## 8-byte Folded Reload
+ movq %rdi, 16(%rsp) ## 8-byte Spill
+ mulxq -32(%rsp), %rdi, %r14 ## 8-byte Folded Reload
+ mulxq -24(%rsp), %rbx, %r9 ## 8-byte Folded Reload
+ movq %rbx, -128(%rsp) ## 8-byte Spill
+ addq %rdi, %r9
+ mulxq -16(%rsp), %rbx, %r8 ## 8-byte Folded Reload
+ adcq %r14, %rbx
+ adcq 16(%rsp), %r8 ## 8-byte Folded Reload
+ adcq -104(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -112(%rsp), %rbp ## 8-byte Folded Reload
+ movq -120(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ movq -128(%rsp), %rdi ## 8-byte Reload
+ addq %rsi, %rdi
+ movq %rdi, -128(%rsp) ## 8-byte Spill
+ adcq %rcx, %r9
+ adcq %rax, %rbx
+ adcq %r15, %r8
+ adcq %r11, %r13
+ adcq %r12, %rbp
+ adcq %r10, %rdx
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %rdi, %rdx
+ imulq 8(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -40(%rsp), %rax, %r11 ## 8-byte Folded Reload
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ mulxq -80(%rsp), %r15, %r12 ## 8-byte Folded Reload
+ mulxq -56(%rsp), %rax, %rcx ## 8-byte Folded Reload
+ mulxq -64(%rsp), %rdi, %r14 ## 8-byte Folded Reload
+ addq %rax, %r14
+ mulxq -48(%rsp), %rax, %r10 ## 8-byte Folded Reload
+ adcq %rcx, %rax
+ mulxq -72(%rsp), %rsi, %rcx ## 8-byte Folded Reload
+ adcq %r10, %rsi
+ adcq %r15, %rcx
+ adcq -104(%rsp), %r12 ## 8-byte Folded Reload
+ adcq $0, %r11
+ addq -128(%rsp), %rdi ## 8-byte Folded Reload
+ adcq %r9, %r14
+ adcq %rbx, %rax
+ adcq %r8, %rsi
+ adcq %r13, %rcx
+ adcq %rbp, %r12
+ adcq -120(%rsp), %r11 ## 8-byte Folded Reload
+ adcq $0, -112(%rsp) ## 8-byte Folded Spill
+ movq -88(%rsp), %rdx ## 8-byte Reload
+ movq 24(%rdx), %rdx
+ mulxq -96(%rsp), %rbp, %rdi ## 8-byte Folded Reload
+ movq %rbp, -128(%rsp) ## 8-byte Spill
+ movq %rdi, -120(%rsp) ## 8-byte Spill
+ mulxq (%rsp), %rdi, %r15 ## 8-byte Folded Reload
+ movq %rdi, -104(%rsp) ## 8-byte Spill
+ mulxq -8(%rsp), %r10, %rbp ## 8-byte Folded Reload
+ mulxq -32(%rsp), %rbx, %r9 ## 8-byte Folded Reload
+ mulxq -24(%rsp), %r13, %rdi ## 8-byte Folded Reload
+ addq %rbx, %rdi
+ mulxq -16(%rsp), %rbx, %r8 ## 8-byte Folded Reload
+ adcq %r9, %rbx
+ adcq %r10, %r8
+ adcq -104(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -128(%rsp), %r15 ## 8-byte Folded Reload
+ movq -120(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %r14, %r13
+ adcq %rax, %rdi
+ adcq %rsi, %rbx
+ adcq %rcx, %r8
+ adcq %r12, %rbp
+ adcq %r11, %r15
+ adcq -112(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %r13, %rdx
+ imulq 8(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -40(%rsp), %rax, %r10 ## 8-byte Folded Reload
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ mulxq -80(%rsp), %rax, %r12 ## 8-byte Folded Reload
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ mulxq -56(%rsp), %rax, %r11 ## 8-byte Folded Reload
+ mulxq -64(%rsp), %rcx, %rsi ## 8-byte Folded Reload
+ addq %rax, %rsi
+ mulxq -48(%rsp), %r14, %r9 ## 8-byte Folded Reload
+ adcq %r11, %r14
+ mulxq -72(%rsp), %rax, %r11 ## 8-byte Folded Reload
+ adcq %r9, %rax
+ adcq -104(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -112(%rsp), %r12 ## 8-byte Folded Reload
+ adcq $0, %r10
+ addq %r13, %rcx
+ adcq %rdi, %rsi
+ adcq %rbx, %r14
+ adcq %r8, %rax
+ adcq %rbp, %r11
+ adcq %r15, %r12
+ adcq -120(%rsp), %r10 ## 8-byte Folded Reload
+ movq -128(%rsp), %r15 ## 8-byte Reload
+ adcq $0, %r15
+ movq -88(%rsp), %rcx ## 8-byte Reload
+ movq 32(%rcx), %rdx
+ mulxq -96(%rsp), %rdi, %rcx ## 8-byte Folded Reload
+ movq %rdi, -112(%rsp) ## 8-byte Spill
+ movq %rcx, -120(%rsp) ## 8-byte Spill
+ mulxq (%rsp), %rdi, %rcx ## 8-byte Folded Reload
+ movq %rdi, 16(%rsp) ## 8-byte Spill
+ movq %rcx, -128(%rsp) ## 8-byte Spill
+ mulxq -8(%rsp), %r13, %rbp ## 8-byte Folded Reload
+ mulxq -32(%rsp), %rdi, %rcx ## 8-byte Folded Reload
+ mulxq -24(%rsp), %rbx, %r8 ## 8-byte Folded Reload
+ movq %rbx, -104(%rsp) ## 8-byte Spill
+ addq %rdi, %r8
+ mulxq -16(%rsp), %rbx, %r9 ## 8-byte Folded Reload
+ adcq %rcx, %rbx
+ adcq %r13, %r9
+ adcq 16(%rsp), %rbp ## 8-byte Folded Reload
+ movq -128(%rsp), %rdx ## 8-byte Reload
+ adcq -112(%rsp), %rdx ## 8-byte Folded Reload
+ movq -120(%rsp), %rcx ## 8-byte Reload
+ adcq $0, %rcx
+ movq -104(%rsp), %rdi ## 8-byte Reload
+ addq %rsi, %rdi
+ movq %rdi, -104(%rsp) ## 8-byte Spill
+ adcq %r14, %r8
+ adcq %rax, %rbx
+ adcq %r11, %r9
+ adcq %r12, %rbp
+ adcq %r10, %rdx
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ adcq %r15, %rcx
+ movq %rcx, -120(%rsp) ## 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, %r13
+ movq %rdi, %rdx
+ imulq 8(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -40(%rsp), %r14, %rax ## 8-byte Folded Reload
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ mulxq -80(%rsp), %r12, %r15 ## 8-byte Folded Reload
+ mulxq -56(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ mulxq -64(%rsp), %rdi, %rsi ## 8-byte Folded Reload
+ addq %rcx, %rsi
+ mulxq -48(%rsp), %r11, %r10 ## 8-byte Folded Reload
+ adcq %rax, %r11
+ mulxq -72(%rsp), %rax, %rcx ## 8-byte Folded Reload
+ adcq %r10, %rax
+ adcq %r12, %rcx
+ adcq %r14, %r15
+ movq -112(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq -104(%rsp), %rdi ## 8-byte Folded Reload
+ adcq %r8, %rsi
+ adcq %rbx, %r11
+ adcq %r9, %rax
+ adcq %rbp, %rcx
+ adcq -128(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -120(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ adcq $0, %r13
+ movq %r13, -120(%rsp) ## 8-byte Spill
+ movq -88(%rsp), %rdx ## 8-byte Reload
+ movq 40(%rdx), %rdx
+ mulxq -96(%rsp), %rbp, %rdi ## 8-byte Folded Reload
+ movq %rbp, -128(%rsp) ## 8-byte Spill
+ movq %rdi, -88(%rsp) ## 8-byte Spill
+ mulxq (%rsp), %rbx, %rdi ## 8-byte Folded Reload
+ movq %rdi, -96(%rsp) ## 8-byte Spill
+ mulxq -8(%rsp), %r10, %rbp ## 8-byte Folded Reload
+ mulxq -16(%rsp), %r8, %r12 ## 8-byte Folded Reload
+ mulxq -32(%rsp), %rdi, %r14 ## 8-byte Folded Reload
+ mulxq -24(%rsp), %r13, %r9 ## 8-byte Folded Reload
+ addq %rdi, %r9
+ adcq %r8, %r14
+ adcq %r10, %r12
+ adcq %rbx, %rbp
+ movq -96(%rsp), %rdi ## 8-byte Reload
+ adcq -128(%rsp), %rdi ## 8-byte Folded Reload
+ movq -88(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %rsi, %r13
+ adcq %r11, %r9
+ adcq %rax, %r14
+ adcq %rcx, %r12
+ adcq %r15, %rbp
+ adcq -112(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, -96(%rsp) ## 8-byte Spill
+ adcq -120(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ sbbq %rcx, %rcx
+ movq 8(%rsp), %rdx ## 8-byte Reload
+ imulq %r13, %rdx
+ mulxq -64(%rsp), %r8, %rax ## 8-byte Folded Reload
+ mulxq -56(%rsp), %r10, %rdi ## 8-byte Folded Reload
+ addq %rax, %r10
+ mulxq -48(%rsp), %rsi, %rax ## 8-byte Folded Reload
+ adcq %rdi, %rsi
+ mulxq -72(%rsp), %rbx, %r11 ## 8-byte Folded Reload
+ adcq %rax, %rbx
+ mulxq -80(%rsp), %rdi, %r15 ## 8-byte Folded Reload
+ adcq %r11, %rdi
+ mulxq -40(%rsp), %rax, %r11 ## 8-byte Folded Reload
+ adcq %r15, %rax
+ adcq $0, %r11
+ andl $1, %ecx
+ addq %r13, %r8
+ adcq %r9, %r10
+ adcq %r14, %rsi
+ adcq %r12, %rbx
+ adcq %rbp, %rdi
+ adcq -96(%rsp), %rax ## 8-byte Folded Reload
+ adcq -88(%rsp), %r11 ## 8-byte Folded Reload
+ adcq $0, %rcx
+ movq %r10, %rbp
+ subq -64(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rsi, %rdx
+ sbbq -56(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rbx, %r8
+ sbbq -48(%rsp), %r8 ## 8-byte Folded Reload
+ movq %rdi, %r9
+ sbbq -72(%rsp), %r9 ## 8-byte Folded Reload
+ movq %rax, %r14
+ sbbq -80(%rsp), %r14 ## 8-byte Folded Reload
+ movq %r11, %r15
+ sbbq -40(%rsp), %r15 ## 8-byte Folded Reload
+ sbbq $0, %rcx
+ andl $1, %ecx
+ cmovneq %rdi, %r9
+ testb %cl, %cl
+ cmovneq %r10, %rbp
+ movq 24(%rsp), %rcx ## 8-byte Reload
+ movq %rbp, (%rcx)
+ cmovneq %rsi, %rdx
+ movq %rdx, 8(%rcx)
+ cmovneq %rbx, %r8
+ movq %r8, 16(%rcx)
+ movq %r9, 24(%rcx)
+ cmovneq %rax, %r14
+ movq %r14, 32(%rcx)
+ cmovneq %r11, %r15
+ movq %r15, 40(%rcx)
+ addq $32, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
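+## mcl_fp_montNF6Lbmi2: 6-limb Montgomery multiplication in the "NF" form,
+## mirroring mcl_fp_montNF5Lbmi2 (no extra overflow word, sign-selected final
+## correction).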
+ .globl _mcl_fp_montNF6Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_montNF6Lbmi2: ## @mcl_fp_montNF6Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq (%rsi), %rax
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rdi
+ movq %rdi, -128(%rsp) ## 8-byte Spill
+ movq (%rdx), %rbp
+ movq %rdi, %rdx
+ mulxq %rbp, %rdi, %rbx
+ movq %rax, %rdx
+ mulxq %rbp, %r9, %r14
+ movq 16(%rsi), %rdx
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ addq %rdi, %r14
+ mulxq %rbp, %rdi, %r8
+ adcq %rbx, %rdi
+ movq 24(%rsi), %rdx
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ mulxq %rbp, %rbx, %r10
+ adcq %r8, %rbx
+ movq 32(%rsi), %rdx
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ mulxq %rbp, %r8, %r11
+ adcq %r10, %r8
+ movq 40(%rsi), %rdx
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ mulxq %rbp, %rsi, %r15
+ adcq %r11, %rsi
+ adcq $0, %r15
+ movq -8(%rcx), %rax
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %r9, %rdx
+ imulq %rax, %rdx
+ movq (%rcx), %rax
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ mulxq %rax, %rbp, %rax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ addq %r9, %rbp
+ movq 8(%rcx), %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ mulxq %rax, %r12, %r9
+ adcq %r14, %r12
+ movq 16(%rcx), %rax
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ mulxq %rax, %r14, %rax
+ adcq %rdi, %r14
+ movq 24(%rcx), %rdi
+ movq %rdi, -32(%rsp) ## 8-byte Spill
+ mulxq %rdi, %r13, %rdi
+ adcq %rbx, %r13
+ movq 32(%rcx), %rbp
+ movq %rbp, -40(%rsp) ## 8-byte Spill
+ mulxq %rbp, %r11, %rbx
+ adcq %r8, %r11
+ movq 40(%rcx), %rcx
+ movq %rcx, -48(%rsp) ## 8-byte Spill
+ mulxq %rcx, %r10, %rcx
+ adcq %rsi, %r10
+ adcq $0, %r15
+ addq -88(%rsp), %r12 ## 8-byte Folded Reload
+ adcq %r9, %r14
+ adcq %rax, %r13
+ adcq %rdi, %r11
+ adcq %rbx, %r10
+ adcq %rcx, %r15
+ movq -120(%rsp), %rax ## 8-byte Reload
+ movq 8(%rax), %rdx
+ mulxq -128(%rsp), %rcx, %rsi ## 8-byte Folded Reload
+ mulxq -112(%rsp), %rbx, %rax ## 8-byte Folded Reload
+ addq %rcx, %rax
+ mulxq -56(%rsp), %rcx, %rdi ## 8-byte Folded Reload
+ adcq %rsi, %rcx
+ mulxq -64(%rsp), %rsi, %r8 ## 8-byte Folded Reload
+ adcq %rdi, %rsi
+ mulxq -72(%rsp), %rdi, %rbp ## 8-byte Folded Reload
+ movq %rbp, -88(%rsp) ## 8-byte Spill
+ adcq %r8, %rdi
+ mulxq -80(%rsp), %r8, %r9 ## 8-byte Folded Reload
+ adcq -88(%rsp), %r8 ## 8-byte Folded Reload
+ adcq $0, %r9
+ addq %r12, %rbx
+ adcq %r14, %rax
+ adcq %r13, %rcx
+ adcq %r11, %rsi
+ adcq %r10, %rdi
+ adcq %r15, %r8
+ adcq $0, %r9
+ movq %rbx, %rdx
+ imulq -104(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -96(%rsp), %rbp, %r13 ## 8-byte Folded Reload
+ addq %rbx, %rbp
+ mulxq -16(%rsp), %r11, %rbx ## 8-byte Folded Reload
+ adcq %rax, %r11
+ mulxq -24(%rsp), %r14, %rax ## 8-byte Folded Reload
+ adcq %rcx, %r14
+ mulxq -32(%rsp), %r10, %rcx ## 8-byte Folded Reload
+ adcq %rsi, %r10
+ mulxq -40(%rsp), %r15, %rsi ## 8-byte Folded Reload
+ adcq %rdi, %r15
+ mulxq -48(%rsp), %r12, %rdx ## 8-byte Folded Reload
+ adcq %r8, %r12
+ adcq $0, %r9
+ addq %r13, %r11
+ adcq %rbx, %r14
+ adcq %rax, %r10
+ adcq %rcx, %r15
+ adcq %rsi, %r12
+ adcq %rdx, %r9
+ movq -120(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rdx
+ mulxq -128(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ mulxq -112(%rsp), %r13, %rdi ## 8-byte Folded Reload
+ addq %rcx, %rdi
+ mulxq -56(%rsp), %rbx, %rcx ## 8-byte Folded Reload
+ adcq %rax, %rbx
+ mulxq -64(%rsp), %rsi, %rbp ## 8-byte Folded Reload
+ adcq %rcx, %rsi
+ mulxq -72(%rsp), %rax, %rcx ## 8-byte Folded Reload
+ movq %rcx, -88(%rsp) ## 8-byte Spill
+ adcq %rbp, %rax
+ mulxq -80(%rsp), %r8, %rcx ## 8-byte Folded Reload
+ adcq -88(%rsp), %r8 ## 8-byte Folded Reload
+ adcq $0, %rcx
+ addq %r11, %r13
+ adcq %r14, %rdi
+ adcq %r10, %rbx
+ adcq %r15, %rsi
+ adcq %r12, %rax
+ adcq %r9, %r8
+ adcq $0, %rcx
+ movq %r13, %rdx
+ imulq -104(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -96(%rsp), %rbp, %r12 ## 8-byte Folded Reload
+ addq %r13, %rbp
+ mulxq -16(%rsp), %r11, %rbp ## 8-byte Folded Reload
+ adcq %rdi, %r11
+ mulxq -24(%rsp), %r9, %rdi ## 8-byte Folded Reload
+ adcq %rbx, %r9
+ mulxq -32(%rsp), %r10, %rbx ## 8-byte Folded Reload
+ adcq %rsi, %r10
+ mulxq -40(%rsp), %r14, %rsi ## 8-byte Folded Reload
+ adcq %rax, %r14
+ mulxq -48(%rsp), %r15, %rax ## 8-byte Folded Reload
+ adcq %r8, %r15
+ adcq $0, %rcx
+ addq %r12, %r11
+ adcq %rbp, %r9
+ adcq %rdi, %r10
+ adcq %rbx, %r14
+ adcq %rsi, %r15
+ adcq %rax, %rcx
+ movq -120(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rdx
+ mulxq -128(%rsp), %rsi, %rax ## 8-byte Folded Reload
+ mulxq -112(%rsp), %r13, %rbx ## 8-byte Folded Reload
+ addq %rsi, %rbx
+ mulxq -56(%rsp), %rdi, %rbp ## 8-byte Folded Reload
+ adcq %rax, %rdi
+ mulxq -64(%rsp), %rsi, %r8 ## 8-byte Folded Reload
+ adcq %rbp, %rsi
+ mulxq -72(%rsp), %rax, %rbp ## 8-byte Folded Reload
+ adcq %r8, %rax
+ mulxq -80(%rsp), %r8, %r12 ## 8-byte Folded Reload
+ adcq %rbp, %r8
+ adcq $0, %r12
+ addq %r11, %r13
+ adcq %r9, %rbx
+ adcq %r10, %rdi
+ adcq %r14, %rsi
+ adcq %r15, %rax
+ adcq %rcx, %r8
+ adcq $0, %r12
+ movq %r13, %rdx
+ imulq -104(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -96(%rsp), %rbp, %rcx ## 8-byte Folded Reload
+ addq %r13, %rbp
+ mulxq -16(%rsp), %r11, %rbp ## 8-byte Folded Reload
+ adcq %rbx, %r11
+ mulxq -24(%rsp), %r9, %rbx ## 8-byte Folded Reload
+ adcq %rdi, %r9
+ mulxq -32(%rsp), %r10, %rdi ## 8-byte Folded Reload
+ adcq %rsi, %r10
+ mulxq -40(%rsp), %r14, %rsi ## 8-byte Folded Reload
+ adcq %rax, %r14
+ mulxq -48(%rsp), %r15, %rax ## 8-byte Folded Reload
+ adcq %r8, %r15
+ adcq $0, %r12
+ addq %rcx, %r11
+ adcq %rbp, %r9
+ adcq %rbx, %r10
+ adcq %rdi, %r14
+ adcq %rsi, %r15
+ adcq %rax, %r12
+ movq -120(%rsp), %rax ## 8-byte Reload
+ movq 32(%rax), %rdx
+ mulxq -128(%rsp), %rsi, %rcx ## 8-byte Folded Reload
+ mulxq -112(%rsp), %r13, %rax ## 8-byte Folded Reload
+ addq %rsi, %rax
+ mulxq -56(%rsp), %rbx, %rsi ## 8-byte Folded Reload
+ adcq %rcx, %rbx
+ mulxq -64(%rsp), %rdi, %rcx ## 8-byte Folded Reload
+ adcq %rsi, %rdi
+ mulxq -72(%rsp), %rsi, %rbp ## 8-byte Folded Reload
+ adcq %rcx, %rsi
+ mulxq -80(%rsp), %r8, %rcx ## 8-byte Folded Reload
+ adcq %rbp, %r8
+ adcq $0, %rcx
+ addq %r11, %r13
+ adcq %r9, %rax
+ adcq %r10, %rbx
+ adcq %r14, %rdi
+ adcq %r15, %rsi
+ adcq %r12, %r8
+ adcq $0, %rcx
+ movq %r13, %rdx
+ imulq -104(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -96(%rsp), %rbp, %r9 ## 8-byte Folded Reload
+ addq %r13, %rbp
+ mulxq -16(%rsp), %r13, %rbp ## 8-byte Folded Reload
+ adcq %rax, %r13
+ mulxq -24(%rsp), %r11, %rax ## 8-byte Folded Reload
+ adcq %rbx, %r11
+ mulxq -32(%rsp), %r10, %rbx ## 8-byte Folded Reload
+ adcq %rdi, %r10
+ mulxq -40(%rsp), %r14, %rdi ## 8-byte Folded Reload
+ adcq %rsi, %r14
+ mulxq -48(%rsp), %rsi, %rdx ## 8-byte Folded Reload
+ adcq %r8, %rsi
+ adcq $0, %rcx
+ addq %r9, %r13
+ adcq %rbp, %r11
+ adcq %rax, %r10
+ adcq %rbx, %r14
+ adcq %rdi, %rsi
+ adcq %rdx, %rcx
+ movq -120(%rsp), %rax ## 8-byte Reload
+ movq 40(%rax), %rdx
+ mulxq -128(%rsp), %rdi, %rax ## 8-byte Folded Reload
+ mulxq -112(%rsp), %r8, %rbx ## 8-byte Folded Reload
+ addq %rdi, %rbx
+ mulxq -56(%rsp), %rdi, %rbp ## 8-byte Folded Reload
+ adcq %rax, %rdi
+ mulxq -64(%rsp), %r15, %rax ## 8-byte Folded Reload
+ adcq %rbp, %r15
+ mulxq -72(%rsp), %r12, %rbp ## 8-byte Folded Reload
+ adcq %rax, %r12
+ mulxq -80(%rsp), %r9, %rax ## 8-byte Folded Reload
+ adcq %rbp, %r9
+ adcq $0, %rax
+ addq %r13, %r8
+ adcq %r11, %rbx
+ adcq %r10, %rdi
+ adcq %r14, %r15
+ adcq %rsi, %r12
+ adcq %rcx, %r9
+ adcq $0, %rax
+ movq -104(%rsp), %rdx ## 8-byte Reload
+ imulq %r8, %rdx
+ mulxq -96(%rsp), %rcx, %rsi ## 8-byte Folded Reload
+ movq %rsi, -104(%rsp) ## 8-byte Spill
+ addq %r8, %rcx
+ movq -16(%rsp), %r11 ## 8-byte Reload
+ mulxq %r11, %r8, %rcx
+ movq %rcx, -112(%rsp) ## 8-byte Spill
+ adcq %rbx, %r8
+ movq -24(%rsp), %r10 ## 8-byte Reload
+ mulxq %r10, %rsi, %rcx
+ movq %rcx, -120(%rsp) ## 8-byte Spill
+ adcq %rdi, %rsi
+ movq -32(%rsp), %r13 ## 8-byte Reload
+ mulxq %r13, %rdi, %rcx
+ movq %rcx, -128(%rsp) ## 8-byte Spill
+ adcq %r15, %rdi
+ movq -40(%rsp), %rcx ## 8-byte Reload
+ mulxq %rcx, %r15, %rbx
+ adcq %r12, %r15
+ movq -48(%rsp), %r14 ## 8-byte Reload
+ mulxq %r14, %r12, %rbp
+ adcq %r9, %r12
+ adcq $0, %rax
+ addq -104(%rsp), %r8 ## 8-byte Folded Reload
+ adcq -112(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -120(%rsp), %rdi ## 8-byte Folded Reload
+ adcq -128(%rsp), %r15 ## 8-byte Folded Reload
+ adcq %rbx, %r12
+ adcq %rbp, %rax
+ movq %r8, %rbp
+ subq -96(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rsi, %rbx
+ sbbq %r11, %rbx
+ movq %rdi, %r11
+ sbbq %r10, %r11
+ movq %r15, %r10
+ sbbq %r13, %r10
+ movq %r12, %r9
+ sbbq %rcx, %r9
+ movq %rax, %rcx
+ sbbq %r14, %rcx
+ movq %rcx, %rdx
+ sarq $63, %rdx
+ cmovsq %r8, %rbp
+ movq -8(%rsp), %rdx ## 8-byte Reload
+ movq %rbp, (%rdx)
+ cmovsq %rsi, %rbx
+ movq %rbx, 8(%rdx)
+ cmovsq %rdi, %r11
+ movq %r11, 16(%rdx)
+ cmovsq %r15, %r10
+ movq %r10, 24(%rdx)
+ cmovsq %r12, %r9
+ movq %r9, 32(%rdx)
+ cmovsq %rax, %rcx
+ movq %rcx, 40(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
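+## Montgomery reduction for a 6-limb (384-bit) modulus, BMI2/mulx path:
+## folds the 12-limb input at (%rsi) down to 6 limbs one pass per limb,
+## using the reduction constant stored 8 bytes below the modulus pointer,
+## then conditionally subtracts the modulus from the result.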
+ .globl _mcl_fp_montRed6Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_montRed6Lbmi2: ## @mcl_fp_montRed6Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ pushq %rax
+ movq %rdx, %rcx
+ movq %rdi, (%rsp) ## 8-byte Spill
+ movq -8(%rcx), %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq (%rsi), %r9
+ movq %r9, %rdx
+ imulq %rax, %rdx
+ movq 40(%rcx), %rax
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ mulxq %rax, %r12, %r13
+ movq 32(%rcx), %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ mulxq %rax, %r10, %r8
+ movq 24(%rcx), %rax
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ mulxq %rax, %r14, %r15
+ movq 16(%rcx), %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ mulxq %rax, %rbp, %r11
+ movq (%rcx), %rdi
+ movq %rdi, -40(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %rax
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ mulxq %rax, %rax, %rbx
+ mulxq %rdi, %rdx, %rcx
+ addq %rax, %rcx
+ adcq %rbp, %rbx
+ adcq %r14, %r11
+ adcq %r10, %r15
+ adcq %r12, %r8
+ adcq $0, %r13
+ addq %r9, %rdx
+ adcq 8(%rsi), %rcx
+ adcq 16(%rsi), %rbx
+ adcq 24(%rsi), %r11
+ adcq 32(%rsi), %r15
+ adcq 40(%rsi), %r8
+ movq %r8, -112(%rsp) ## 8-byte Spill
+ adcq 48(%rsi), %r13
+ movq %r13, -104(%rsp) ## 8-byte Spill
+ movq 88(%rsi), %r8
+ movq 80(%rsi), %rdx
+ movq 72(%rsi), %rdi
+ movq 64(%rsi), %rax
+ movq 56(%rsi), %r14
+ adcq $0, %r14
+ adcq $0, %rax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ adcq $0, %rdi
+ movq %rdi, -96(%rsp) ## 8-byte Spill
+ adcq $0, %rdx
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, -24(%rsp) ## 8-byte Spill
+ sbbq %r12, %r12
+ andl $1, %r12d
+ movq %rcx, %rdx
+ imulq -8(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -72(%rsp), %rsi, %rax ## 8-byte Folded Reload
+ movq %rsi, -120(%rsp) ## 8-byte Spill
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ mulxq -16(%rsp), %rax, %r13 ## 8-byte Folded Reload
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ mulxq -48(%rsp), %rbp, %r10 ## 8-byte Folded Reload
+ mulxq -32(%rsp), %r9, %r8 ## 8-byte Folded Reload
+ mulxq -64(%rsp), %rsi, %rdi ## 8-byte Folded Reload
+ mulxq -40(%rsp), %rdx, %rax ## 8-byte Folded Reload
+ addq %rsi, %rax
+ adcq %r9, %rdi
+ adcq %rbp, %r8
+ adcq -56(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -120(%rsp), %r13 ## 8-byte Folded Reload
+ movq -128(%rsp), %rsi ## 8-byte Reload
+ adcq $0, %rsi
+ addq %rcx, %rdx
+ adcq %rbx, %rax
+ adcq %r11, %rdi
+ adcq %r15, %r8
+ adcq -112(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -104(%rsp), %r13 ## 8-byte Folded Reload
+ adcq %r14, %rsi
+ movq %rsi, -128(%rsp) ## 8-byte Spill
+ adcq $0, -88(%rsp) ## 8-byte Folded Spill
+ adcq $0, -96(%rsp) ## 8-byte Folded Spill
+ adcq $0, -80(%rsp) ## 8-byte Folded Spill
+ adcq $0, -24(%rsp) ## 8-byte Folded Spill
+ adcq $0, %r12
+ movq %rax, %rdx
+ imulq -8(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -72(%rsp), %rsi, %rcx ## 8-byte Folded Reload
+ movq %rsi, -112(%rsp) ## 8-byte Spill
+ movq %rcx, -104(%rsp) ## 8-byte Spill
+ movq -16(%rsp), %rbx ## 8-byte Reload
+ mulxq %rbx, %rcx, %r14
+ movq %rcx, -120(%rsp) ## 8-byte Spill
+ mulxq -48(%rsp), %rcx, %r15 ## 8-byte Folded Reload
+ movq %rcx, -56(%rsp) ## 8-byte Spill
+ mulxq -32(%rsp), %r11, %rbp ## 8-byte Folded Reload
+ mulxq -64(%rsp), %rsi, %r9 ## 8-byte Folded Reload
+ mulxq -40(%rsp), %rdx, %rcx ## 8-byte Folded Reload
+ addq %rsi, %rcx
+ adcq %r11, %r9
+ adcq -56(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -120(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -112(%rsp), %r14 ## 8-byte Folded Reload
+ movq -104(%rsp), %rsi ## 8-byte Reload
+ adcq $0, %rsi
+ addq %rax, %rdx
+ adcq %rdi, %rcx
+ adcq %r8, %r9
+ adcq %r10, %rbp
+ adcq %r13, %r15
+ adcq -128(%rsp), %r14 ## 8-byte Folded Reload
+ adcq -88(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, -104(%rsp) ## 8-byte Spill
+ adcq $0, -96(%rsp) ## 8-byte Folded Spill
+ adcq $0, -80(%rsp) ## 8-byte Folded Spill
+ adcq $0, -24(%rsp) ## 8-byte Folded Spill
+ adcq $0, %r12
+ movq %rcx, %rdx
+ movq -8(%rsp), %r13 ## 8-byte Reload
+ imulq %r13, %rdx
+ mulxq -72(%rsp), %rsi, %rax ## 8-byte Folded Reload
+ movq %rsi, -112(%rsp) ## 8-byte Spill
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ mulxq %rbx, %rsi, %rax
+ movq %rsi, -120(%rsp) ## 8-byte Spill
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq -48(%rsp), %r11 ## 8-byte Reload
+ mulxq %r11, %rax, %rbx
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ mulxq -32(%rsp), %r10, %r8 ## 8-byte Folded Reload
+ mulxq -64(%rsp), %rsi, %rdi ## 8-byte Folded Reload
+ mulxq -40(%rsp), %rdx, %rax ## 8-byte Folded Reload
+ addq %rsi, %rax
+ adcq %r10, %rdi
+ adcq -56(%rsp), %r8 ## 8-byte Folded Reload
+ adcq -120(%rsp), %rbx ## 8-byte Folded Reload
+ movq -88(%rsp), %r10 ## 8-byte Reload
+ adcq -112(%rsp), %r10 ## 8-byte Folded Reload
+ movq -128(%rsp), %rsi ## 8-byte Reload
+ adcq $0, %rsi
+ addq %rcx, %rdx
+ adcq %r9, %rax
+ adcq %rbp, %rdi
+ adcq %r15, %r8
+ adcq %r14, %rbx
+ adcq -104(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r10, -88(%rsp) ## 8-byte Spill
+ adcq -96(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, -128(%rsp) ## 8-byte Spill
+ adcq $0, -80(%rsp) ## 8-byte Folded Spill
+ adcq $0, -24(%rsp) ## 8-byte Folded Spill
+ adcq $0, %r12
+ movq %rax, %rdx
+ imulq %r13, %rdx
+ mulxq -72(%rsp), %rsi, %rcx ## 8-byte Folded Reload
+ movq %rsi, -104(%rsp) ## 8-byte Spill
+ movq %rcx, -96(%rsp) ## 8-byte Spill
+ mulxq -16(%rsp), %rsi, %rcx ## 8-byte Folded Reload
+ movq %rsi, -112(%rsp) ## 8-byte Spill
+ mulxq %r11, %rsi, %r13
+ movq %rsi, -120(%rsp) ## 8-byte Spill
+ movq -32(%rsp), %r10 ## 8-byte Reload
+ mulxq %r10, %r15, %r14
+ mulxq -64(%rsp), %rsi, %r9 ## 8-byte Folded Reload
+ movq -40(%rsp), %r11 ## 8-byte Reload
+ mulxq %r11, %rdx, %rbp
+ addq %rsi, %rbp
+ adcq %r15, %r9
+ adcq -120(%rsp), %r14 ## 8-byte Folded Reload
+ adcq -112(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -104(%rsp), %rcx ## 8-byte Folded Reload
+ movq -96(%rsp), %rsi ## 8-byte Reload
+ adcq $0, %rsi
+ addq %rax, %rdx
+ adcq %rdi, %rbp
+ adcq %r8, %r9
+ adcq %rbx, %r14
+ adcq -88(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -128(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -128(%rsp) ## 8-byte Spill
+ adcq -80(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, -96(%rsp) ## 8-byte Spill
+ adcq $0, -24(%rsp) ## 8-byte Folded Spill
+ adcq $0, %r12
+ movq -8(%rsp), %rdx ## 8-byte Reload
+ imulq %rbp, %rdx
+ mulxq -72(%rsp), %rax, %rsi ## 8-byte Folded Reload
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ mulxq %r10, %rax, %r15
+ mulxq %r11, %r10, %rdi
+ mulxq -64(%rsp), %rbx, %r8 ## 8-byte Folded Reload
+ addq %rdi, %rbx
+ adcq %rax, %r8
+ mulxq -48(%rsp), %rax, %rdi ## 8-byte Folded Reload
+ adcq %r15, %rax
+ movq -16(%rsp), %r15 ## 8-byte Reload
+ mulxq %r15, %rdx, %r11
+ adcq %rdi, %rdx
+ adcq -80(%rsp), %r11 ## 8-byte Folded Reload
+ adcq $0, %rsi
+ addq %rbp, %r10
+ adcq %r9, %rbx
+ adcq %r14, %r8
+ adcq %r13, %rax
+ adcq -128(%rsp), %rdx ## 8-byte Folded Reload
+ adcq -96(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -24(%rsp), %rsi ## 8-byte Folded Reload
+ adcq $0, %r12
+ movq %rbx, %rcx
+ subq -40(%rsp), %rcx ## 8-byte Folded Reload
+ movq %r8, %rdi
+ sbbq -64(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rax, %rbp
+ sbbq -32(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rdx, %r9
+ sbbq -48(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r11, %r10
+ sbbq %r15, %r10
+ movq %rsi, %r15
+ sbbq -72(%rsp), %r15 ## 8-byte Folded Reload
+ sbbq $0, %r12
+ andl $1, %r12d
+ cmovneq %rsi, %r15
+ testb %r12b, %r12b
+ cmovneq %rbx, %rcx
+ movq (%rsp), %rsi ## 8-byte Reload
+ movq %rcx, (%rsi)
+ cmovneq %r8, %rdi
+ movq %rdi, 8(%rsi)
+ cmovneq %rax, %rbp
+ movq %rbp, 16(%rsi)
+ cmovneq %rdx, %r9
+ movq %r9, 24(%rsi)
+ cmovneq %r11, %r10
+ movq %r10, 32(%rsi)
+ movq %r15, 40(%rsi)
+ addq $8, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
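+## 6-limb addition without modular reduction: (%rdi) = (%rsi) + (%rdx);
+## the carry out is returned in %rax.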
+ .globl _mcl_fp_addPre6Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_addPre6Lbmi2: ## @mcl_fp_addPre6Lbmi2
+## BB#0:
+ pushq %r14
+ pushq %rbx
+ movq 40(%rdx), %r8
+ movq 40(%rsi), %r11
+ movq 32(%rdx), %r9
+ movq 24(%rdx), %r10
+ movq 24(%rsi), %rax
+ movq 32(%rsi), %r14
+ movq 16(%rdx), %rbx
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rbx
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rbx, 16(%rdi)
+ adcq %r10, %rax
+ movq %rax, 24(%rdi)
+ adcq %r9, %r14
+ movq %r14, 32(%rdi)
+ adcq %r8, %r11
+ movq %r11, 40(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r14
+ retq
+
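+## 6-limb subtraction without modular reduction: (%rdi) = (%rsi) - (%rdx);
+## the borrow out is returned in %rax.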
+ .globl _mcl_fp_subPre6Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_subPre6Lbmi2: ## @mcl_fp_subPre6Lbmi2
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq 40(%rdx), %r8
+ movq 40(%rsi), %r9
+ movq 32(%rsi), %r10
+ movq 24(%rsi), %r11
+ movq 16(%rsi), %rcx
+ movq (%rsi), %rbx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %rsi
+ movq 24(%rdx), %r14
+ movq 32(%rdx), %r15
+ sbbq 16(%rdx), %rcx
+ movq %rbx, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %rcx, 16(%rdi)
+ sbbq %r14, %r11
+ movq %r11, 24(%rdi)
+ sbbq %r15, %r10
+ movq %r10, 32(%rdi)
+ sbbq %r8, %r9
+ movq %r9, 40(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+
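+## Logical right shift by one bit of a 6-limb value: (%rdi) = (%rsi) >> 1.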
+ .globl _mcl_fp_shr1_6Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_shr1_6Lbmi2: ## @mcl_fp_shr1_6Lbmi2
+## BB#0:
+ movq 40(%rsi), %r8
+ movq 32(%rsi), %r9
+ movq 24(%rsi), %rdx
+ movq 16(%rsi), %rax
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rcx
+ movq %rcx, (%rdi)
+ shrdq $1, %rax, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rdx, %rax
+ movq %rax, 16(%rdi)
+ shrdq $1, %r9, %rdx
+ movq %rdx, 24(%rdi)
+ shrdq $1, %r8, %r9
+ movq %r9, 32(%rdi)
+ shrq %r8
+ movq %r8, 40(%rdi)
+ retq
+
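+## Modular addition: stores the raw sum (%rsi) + (%rdx), then performs a
+## trial subtraction of the modulus at (%rcx) and keeps the reduced value
+## when that subtraction does not borrow.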
+ .globl _mcl_fp_add6Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_add6Lbmi2: ## @mcl_fp_add6Lbmi2
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq 40(%rdx), %r14
+ movq 40(%rsi), %r8
+ movq 32(%rdx), %r15
+ movq 24(%rdx), %rbx
+ movq 24(%rsi), %r10
+ movq 32(%rsi), %r9
+ movq 16(%rdx), %r11
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r11
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r11, 16(%rdi)
+ adcq %rbx, %r10
+ movq %r10, 24(%rdi)
+ adcq %r15, %r9
+ movq %r9, 32(%rdi)
+ adcq %r14, %r8
+ movq %r8, 40(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r11
+ sbbq 24(%rcx), %r10
+ sbbq 32(%rcx), %r9
+ sbbq 40(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne LBB89_2
+## BB#1: ## %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r11, 16(%rdi)
+ movq %r10, 24(%rdi)
+ movq %r9, 32(%rdi)
+ movq %r8, 40(%rdi)
+LBB89_2: ## %carry
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+
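+## Modular addition, 'NF' variant: adds, performs a trial subtraction of the
+## modulus at (%rcx), and selects the reduced or unreduced sum with cmovs on
+## the sign of the trial result instead of tracking a carry word.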
+ .globl _mcl_fp_addNF6Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_addNF6Lbmi2: ## @mcl_fp_addNF6Lbmi2
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 40(%rdx), %r8
+ movq 32(%rdx), %r9
+ movq 24(%rdx), %r10
+ movq 16(%rdx), %r11
+ movq (%rdx), %r15
+ movq 8(%rdx), %r14
+ addq (%rsi), %r15
+ adcq 8(%rsi), %r14
+ adcq 16(%rsi), %r11
+ adcq 24(%rsi), %r10
+ adcq 32(%rsi), %r9
+ adcq 40(%rsi), %r8
+ movq %r15, %rsi
+ subq (%rcx), %rsi
+ movq %r14, %rbx
+ sbbq 8(%rcx), %rbx
+ movq %r11, %rdx
+ sbbq 16(%rcx), %rdx
+ movq %r10, %r13
+ sbbq 24(%rcx), %r13
+ movq %r9, %r12
+ sbbq 32(%rcx), %r12
+ movq %r8, %rax
+ sbbq 40(%rcx), %rax
+ movq %rax, %rcx
+ sarq $63, %rcx
+ cmovsq %r15, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r14, %rbx
+ movq %rbx, 8(%rdi)
+ cmovsq %r11, %rdx
+ movq %rdx, 16(%rdi)
+ cmovsq %r10, %r13
+ movq %r13, 24(%rdi)
+ cmovsq %r9, %r12
+ movq %r12, 32(%rdi)
+ cmovsq %r8, %rax
+ movq %rax, 40(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+
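+## Modular subtraction: stores the raw difference (%rsi) - (%rdx), and adds
+## the modulus at (%rcx) back in when the subtraction borrows.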
+ .globl _mcl_fp_sub6Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_sub6Lbmi2: ## @mcl_fp_sub6Lbmi2
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 40(%rdx), %r14
+ movq 40(%rsi), %r8
+ movq 32(%rsi), %r9
+ movq 24(%rsi), %r10
+ movq 16(%rsi), %r11
+ movq (%rsi), %rax
+ movq 8(%rsi), %rsi
+ xorl %ebx, %ebx
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %rsi
+ movq 24(%rdx), %r15
+ movq 32(%rdx), %r12
+ sbbq 16(%rdx), %r11
+ movq %rax, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %r11, 16(%rdi)
+ sbbq %r15, %r10
+ movq %r10, 24(%rdi)
+ sbbq %r12, %r9
+ movq %r9, 32(%rdi)
+ sbbq %r14, %r8
+ movq %r8, 40(%rdi)
+ sbbq $0, %rbx
+ testb $1, %bl
+ je LBB91_2
+## BB#1: ## %carry
+ movq 40(%rcx), %r14
+ movq 32(%rcx), %r15
+ movq 24(%rcx), %r12
+ movq 8(%rcx), %rbx
+ movq 16(%rcx), %rdx
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %rsi, %rbx
+ movq %rbx, 8(%rdi)
+ adcq %r11, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r10, %r12
+ movq %r12, 24(%rdi)
+ adcq %r9, %r15
+ movq %r15, 32(%rdi)
+ adcq %r8, %r14
+ movq %r14, 40(%rdi)
+LBB91_2: ## %nocarry
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+
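+## Modular subtraction, 'NF' variant: subtracts, builds a mask from the sign
+## of the result, and adds the modulus at (%rcx) back in under that mask,
+## avoiding any branch.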
+ .globl _mcl_fp_subNF6Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_subNF6Lbmi2: ## @mcl_fp_subNF6Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movdqu (%rdx), %xmm0
+ movdqu 16(%rdx), %xmm1
+ movdqu 32(%rdx), %xmm2
+ pshufd $78, %xmm2, %xmm3 ## xmm3 = xmm2[2,3,0,1]
+ movd %xmm3, %r10
+ movdqu (%rsi), %xmm3
+ movdqu 16(%rsi), %xmm4
+ movdqu 32(%rsi), %xmm5
+ pshufd $78, %xmm5, %xmm6 ## xmm6 = xmm5[2,3,0,1]
+ movd %xmm6, %rax
+ movd %xmm2, %r11
+ movd %xmm5, %r8
+ pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1]
+ movd %xmm2, %r14
+ pshufd $78, %xmm4, %xmm2 ## xmm2 = xmm4[2,3,0,1]
+ movd %xmm2, %r9
+ movd %xmm1, %r15
+ movd %xmm4, %r12
+ pshufd $78, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,0,1]
+ movd %xmm1, %r13
+ pshufd $78, %xmm3, %xmm1 ## xmm1 = xmm3[2,3,0,1]
+ movd %xmm1, %rbp
+ movd %xmm0, %rdx
+ movd %xmm3, %rbx
+ subq %rdx, %rbx
+ sbbq %r13, %rbp
+ sbbq %r15, %r12
+ sbbq %r14, %r9
+ sbbq %r11, %r8
+ sbbq %r10, %rax
+ movq %rax, %rdx
+ sarq $63, %rdx
+ movq %rdx, %rsi
+ shldq $1, %rax, %rsi
+ andq (%rcx), %rsi
+ movq 40(%rcx), %r10
+ andq %rdx, %r10
+ movq 32(%rcx), %r11
+ andq %rdx, %r11
+ movq 24(%rcx), %r14
+ andq %rdx, %r14
+ rorxq $63, %rdx, %r15
+ andq 16(%rcx), %rdx
+ andq 8(%rcx), %r15
+ addq %rbx, %rsi
+ movq %rsi, (%rdi)
+ adcq %rbp, %r15
+ movq %r15, 8(%rdi)
+ adcq %r12, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r9, %r14
+ movq %r14, 24(%rdi)
+ adcq %r8, %r11
+ movq %r11, 32(%rdi)
+ adcq %rax, %r10
+ movq %r10, 40(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
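+## Double-width (12-limb) addition: the low 6 limbs are stored as-is; the
+## high 6 limbs are reduced by a conditional subtraction of the modulus at
+## (%rcx).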
+ .globl _mcl_fpDbl_add6Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_add6Lbmi2: ## @mcl_fpDbl_add6Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 88(%rdx), %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq 80(%rdx), %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ movq 72(%rdx), %r14
+ movq 64(%rdx), %r15
+ movq 24(%rsi), %rbp
+ movq 32(%rsi), %r13
+ movq 16(%rdx), %r12
+ movq (%rdx), %rbx
+ movq 8(%rdx), %rax
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %rax
+ adcq 16(%rsi), %r12
+ adcq 24(%rdx), %rbp
+ adcq 32(%rdx), %r13
+ movq 56(%rdx), %r11
+ movq 48(%rdx), %r9
+ movq 40(%rdx), %rdx
+ movq %rbx, (%rdi)
+ movq 88(%rsi), %r8
+ movq %rax, 8(%rdi)
+ movq 80(%rsi), %r10
+ movq %r12, 16(%rdi)
+ movq 72(%rsi), %r12
+ movq %rbp, 24(%rdi)
+ movq 40(%rsi), %rax
+ adcq %rdx, %rax
+ movq 64(%rsi), %rdx
+ movq %r13, 32(%rdi)
+ movq 56(%rsi), %r13
+ movq 48(%rsi), %rbp
+ adcq %r9, %rbp
+ movq %rax, 40(%rdi)
+ adcq %r11, %r13
+ adcq %r15, %rdx
+ adcq %r14, %r12
+ adcq -16(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -8(%rsp), %r8 ## 8-byte Folded Reload
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rbp, %rsi
+ subq (%rcx), %rsi
+ movq %r13, %rbx
+ sbbq 8(%rcx), %rbx
+ movq %rdx, %r9
+ sbbq 16(%rcx), %r9
+ movq %r12, %r11
+ sbbq 24(%rcx), %r11
+ movq %r10, %r14
+ sbbq 32(%rcx), %r14
+ movq %r8, %r15
+ sbbq 40(%rcx), %r15
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %rbp, %rsi
+ movq %rsi, 48(%rdi)
+ testb %al, %al
+ cmovneq %r13, %rbx
+ movq %rbx, 56(%rdi)
+ cmovneq %rdx, %r9
+ movq %r9, 64(%rdi)
+ cmovneq %r12, %r11
+ movq %r11, 72(%rdi)
+ cmovneq %r10, %r14
+ movq %r14, 80(%rdi)
+ cmovneq %r8, %r15
+ movq %r15, 88(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
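+## Double-width (12-limb) subtraction: the low 6 limbs are stored as-is; the
+## high 6 limbs have the modulus at (%rcx) added back (limbs selected with
+## cmove against zero) when the overall subtraction borrows.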
+ .globl _mcl_fpDbl_sub6Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_sub6Lbmi2: ## @mcl_fpDbl_sub6Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 88(%rdx), %r9
+ movq 80(%rdx), %r10
+ movq 72(%rdx), %r14
+ movq 16(%rsi), %r8
+ movq (%rsi), %r15
+ movq 8(%rsi), %r11
+ xorl %eax, %eax
+ subq (%rdx), %r15
+ sbbq 8(%rdx), %r11
+ sbbq 16(%rdx), %r8
+ movq 24(%rsi), %rbx
+ sbbq 24(%rdx), %rbx
+ movq 32(%rsi), %r12
+ sbbq 32(%rdx), %r12
+ movq 64(%rdx), %r13
+ movq %r15, (%rdi)
+ movq 56(%rdx), %rbp
+ movq %r11, 8(%rdi)
+ movq 48(%rdx), %r15
+ movq 40(%rdx), %rdx
+ movq %r8, 16(%rdi)
+ movq 88(%rsi), %r8
+ movq %rbx, 24(%rdi)
+ movq 40(%rsi), %rbx
+ sbbq %rdx, %rbx
+ movq 80(%rsi), %r11
+ movq %r12, 32(%rdi)
+ movq 48(%rsi), %rdx
+ sbbq %r15, %rdx
+ movq 72(%rsi), %r15
+ movq %rbx, 40(%rdi)
+ movq 64(%rsi), %r12
+ movq 56(%rsi), %rsi
+ sbbq %rbp, %rsi
+ sbbq %r13, %r12
+ sbbq %r14, %r15
+ sbbq %r10, %r11
+ sbbq %r9, %r8
+ movl $0, %ebp
+ sbbq $0, %rbp
+ andl $1, %ebp
+ movq (%rcx), %r14
+ cmoveq %rax, %r14
+ testb %bpl, %bpl
+ movq 16(%rcx), %r9
+ cmoveq %rax, %r9
+ movq 8(%rcx), %rbp
+ cmoveq %rax, %rbp
+ movq 40(%rcx), %r10
+ cmoveq %rax, %r10
+ movq 32(%rcx), %rbx
+ cmoveq %rax, %rbx
+ cmovneq 24(%rcx), %rax
+ addq %rdx, %r14
+ movq %r14, 48(%rdi)
+ adcq %rsi, %rbp
+ movq %rbp, 56(%rdi)
+ adcq %r12, %r9
+ movq %r9, 64(%rdi)
+ adcq %r15, %rax
+ movq %rax, 72(%rdi)
+ adcq %r11, %rbx
+ movq %rbx, 80(%rdi)
+ adcq %r8, %r10
+ movq %r10, 88(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
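+## Multiplies the 7-limb operand at (%rsi) by the single 64-bit word in %rdx
+## (the implicit mulx multiplier) and stores the 8-limb product at (%rdi).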
+ .globl _mcl_fp_mulUnitPre7Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_mulUnitPre7Lbmi2: ## @mcl_fp_mulUnitPre7Lbmi2
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ mulxq 48(%rsi), %r8, %r11
+ mulxq 40(%rsi), %r9, %r13
+ mulxq 32(%rsi), %r10, %rcx
+ mulxq 8(%rsi), %r12, %r14
+ mulxq (%rsi), %r15, %rbx
+ addq %r12, %rbx
+ mulxq 24(%rsi), %r12, %rax
+ mulxq 16(%rsi), %rdx, %rsi
+ movq %r15, (%rdi)
+ movq %rbx, 8(%rdi)
+ adcq %r14, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r12, %rsi
+ movq %rsi, 24(%rdi)
+ adcq %r10, %rax
+ movq %rax, 32(%rdi)
+ adcq %r9, %rcx
+ movq %rcx, 40(%rdi)
+ adcq %r8, %r13
+ movq %r13, 48(%rdi)
+ adcq $0, %r11
+ movq %r11, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+
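+## Schoolbook 7x7-limb multiplication: forms the full 14-limb product of
+## (%rsi) and (%rdx) at (%rdi), with no modular reduction.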
+ .globl _mcl_fpDbl_mulPre7Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_mulPre7Lbmi2: ## @mcl_fpDbl_mulPre7Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %r14
+ movq %rsi, %r8
+ movq %rdi, %r13
+ movq %r13, -48(%rsp) ## 8-byte Spill
+ movq (%r8), %rcx
+ movq %rcx, -72(%rsp) ## 8-byte Spill
+ movq 8(%r8), %rax
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq (%r14), %rsi
+ movq %r14, -64(%rsp) ## 8-byte Spill
+ movq %rax, %rdx
+ mulxq %rsi, %rbp, %rax
+ movq %rcx, %rdx
+ mulxq %rsi, %rdx, %rcx
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq 24(%r8), %rdi
+ movq %rdi, -88(%rsp) ## 8-byte Spill
+ movq 16(%r8), %rdx
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ addq %rbp, %rcx
+ mulxq %rsi, %rbx, %rbp
+ adcq %rax, %rbx
+ movq %rdi, %rdx
+ mulxq %rsi, %r12, %rax
+ adcq %rbp, %r12
+ movq 32(%r8), %rdx
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ mulxq %rsi, %r9, %rbp
+ adcq %rax, %r9
+ movq 40(%r8), %rdi
+ movq %rdi, %rdx
+ mulxq %rsi, %r10, %rax
+ adcq %rbp, %r10
+ movq 48(%r8), %r15
+ movq %r15, %rdx
+ mulxq %rsi, %rsi, %r11
+ adcq %rax, %rsi
+ movq -56(%rsp), %rax ## 8-byte Reload
+ movq %rax, (%r13)
+ adcq $0, %r11
+ movq 8(%r14), %r13
+ movq -72(%rsp), %rdx ## 8-byte Reload
+ mulxq %r13, %r14, %rax
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ addq %rcx, %r14
+ movq -104(%rsp), %rdx ## 8-byte Reload
+ mulxq %r13, %rcx, %rax
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ adcq %rbx, %rcx
+ movq -96(%rsp), %rdx ## 8-byte Reload
+ mulxq %r13, %rbx, %rax
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ adcq %r12, %rbx
+ movq -88(%rsp), %rdx ## 8-byte Reload
+ mulxq %r13, %rbp, %rax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ adcq %r9, %rbp
+ movq -80(%rsp), %rdx ## 8-byte Reload
+ mulxq %r13, %rax, %r9
+ adcq %r10, %rax
+ movq %rdi, %rdx
+ mulxq %r13, %r10, %rdi
+ adcq %rsi, %r10
+ movq %r15, %rdx
+ mulxq %r13, %r13, %rdx
+ adcq %r11, %r13
+ sbbq %r12, %r12
+ andl $1, %r12d
+ addq -72(%rsp), %rcx ## 8-byte Folded Reload
+ adcq -104(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -96(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -88(%rsp), %rax ## 8-byte Folded Reload
+ adcq %r9, %r10
+ movq -48(%rsp), %rsi ## 8-byte Reload
+ movq %r14, 8(%rsi)
+ adcq %rdi, %r13
+ adcq %rdx, %r12
+ movq (%r8), %rsi
+ movq %rsi, -88(%rsp) ## 8-byte Spill
+ movq 8(%r8), %r11
+ movq %r11, -104(%rsp) ## 8-byte Spill
+ movq -64(%rsp), %rdx ## 8-byte Reload
+ movq 16(%rdx), %rdi
+ movq %rsi, %rdx
+ mulxq %rdi, %r9, %rdx
+ movq %rdx, -8(%rsp) ## 8-byte Spill
+ addq %rcx, %r9
+ movq %r11, %rdx
+ mulxq %rdi, %r14, %rcx
+ movq %rcx, -16(%rsp) ## 8-byte Spill
+ adcq %rbx, %r14
+ movq 16(%r8), %rdx
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ mulxq %rdi, %rsi, %rcx
+ movq %rcx, -24(%rsp) ## 8-byte Spill
+ adcq %rbp, %rsi
+ movq 24(%r8), %rdx
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ mulxq %rdi, %rbp, %rcx
+ movq %rcx, -32(%rsp) ## 8-byte Spill
+ adcq %rax, %rbp
+ movq 32(%r8), %rdx
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ mulxq %rdi, %r11, %rax
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ adcq %r10, %r11
+ movq 40(%r8), %rdx
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ mulxq %rdi, %r15, %rax
+ adcq %r13, %r15
+ movq 48(%r8), %r13
+ movq %r13, %rdx
+ mulxq %rdi, %rcx, %rdx
+ adcq %r12, %rcx
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ addq -8(%rsp), %r14 ## 8-byte Folded Reload
+ adcq -16(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -24(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -32(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -40(%rsp), %r15 ## 8-byte Folded Reload
+ adcq %rax, %rcx
+ adcq %rdx, %rbx
+ movq -48(%rsp), %rax ## 8-byte Reload
+ movq %r9, 16(%rax)
+ movq -64(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rdi
+ movq -88(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %r9, %rax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ addq %r14, %r9
+ movq -104(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %rax, %rdx
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ adcq %rsi, %rax
+ movq -96(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %r14, %rdx
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ adcq %rbp, %r14
+ movq -80(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %r10, %rdx
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ adcq %r11, %r10
+ movq -72(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %rbp, %rsi
+ adcq %r15, %rbp
+ movq -56(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %r11, %r15
+ adcq %rcx, %r11
+ movq %r13, %rdx
+ mulxq %rdi, %r13, %rcx
+ adcq %rbx, %r13
+ sbbq %r12, %r12
+ andl $1, %r12d
+ addq -88(%rsp), %rax ## 8-byte Folded Reload
+ adcq -104(%rsp), %r14 ## 8-byte Folded Reload
+ adcq -96(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -80(%rsp), %rbp ## 8-byte Folded Reload
+ adcq %rsi, %r11
+ movq -48(%rsp), %rdi ## 8-byte Reload
+ movq %r9, 24(%rdi)
+ adcq %r15, %r13
+ adcq %rcx, %r12
+ movq (%r8), %rdx
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq 8(%r8), %rbx
+ movq %rbx, -104(%rsp) ## 8-byte Spill
+ movq -64(%rsp), %rcx ## 8-byte Reload
+ movq 32(%rcx), %rcx
+ mulxq %rcx, %rsi, %rdx
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ addq %rax, %rsi
+ movq %rbx, %rdx
+ mulxq %rcx, %r9, %rax
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ adcq %r14, %r9
+ movq 16(%r8), %rdx
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ mulxq %rcx, %rax, %rdx
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ adcq %r10, %rax
+ movq 24(%r8), %rdx
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ mulxq %rcx, %r15, %rdx
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ adcq %rbp, %r15
+ movq 32(%r8), %rdx
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ mulxq %rcx, %r10, %rbp
+ adcq %r11, %r10
+ movq 40(%r8), %rdx
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ mulxq %rcx, %r11, %rbx
+ adcq %r13, %r11
+ movq 48(%r8), %rdx
+ movq %rdx, -8(%rsp) ## 8-byte Spill
+ mulxq %rcx, %r14, %rcx
+ adcq %r12, %r14
+ sbbq %r12, %r12
+ andl $1, %r12d
+ addq -16(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -24(%rsp), %rax ## 8-byte Folded Reload
+ adcq -32(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -40(%rsp), %r10 ## 8-byte Folded Reload
+ adcq %rbp, %r11
+ adcq %rbx, %r14
+ adcq %rcx, %r12
+ movq %rsi, 32(%rdi)
+ movq -64(%rsp), %rsi ## 8-byte Reload
+ movq 40(%rsi), %rdi
+ movq -88(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %r13, %rcx
+ movq %rcx, -88(%rsp) ## 8-byte Spill
+ addq %r9, %r13
+ movq -104(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %rcx, %rdx
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ adcq %rax, %rcx
+ movq -96(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %rax, %rdx
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ adcq %r15, %rax
+ movq -80(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %rbx, %rdx
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ adcq %r10, %rbx
+ movq -72(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %rbp, %r15
+ adcq %r11, %rbp
+ movq -56(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %r9, %r11
+ adcq %r14, %r9
+ movq -8(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %r10, %rdx
+ adcq %r12, %r10
+ sbbq %rdi, %rdi
+ andl $1, %edi
+ addq -88(%rsp), %rcx ## 8-byte Folded Reload
+ adcq -104(%rsp), %rax ## 8-byte Folded Reload
+ adcq -96(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -80(%rsp), %rbp ## 8-byte Folded Reload
+ adcq %r15, %r9
+ movq -48(%rsp), %r14 ## 8-byte Reload
+ movq %r13, 40(%r14)
+ adcq %r11, %r10
+ adcq %rdx, %rdi
+ movq 48(%rsi), %rdx
+ mulxq (%r8), %r11, %rsi
+ movq %rsi, -64(%rsp) ## 8-byte Spill
+ addq %rcx, %r11
+ mulxq 8(%r8), %rsi, %r15
+ adcq %rax, %rsi
+ mulxq 16(%r8), %rcx, %rax
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ adcq %rbx, %rcx
+ mulxq 24(%r8), %rbx, %r12
+ adcq %rbp, %rbx
+ mulxq 32(%r8), %rbp, %r13
+ adcq %r9, %rbp
+ mulxq 40(%r8), %rax, %r9
+ adcq %r10, %rax
+ mulxq 48(%r8), %rdx, %r8
+ adcq %rdi, %rdx
+ sbbq %r10, %r10
+ andl $1, %r10d
+ addq -64(%rsp), %rsi ## 8-byte Folded Reload
+ adcq %r15, %rcx
+ movq %r11, 48(%r14)
+ movq %rsi, 56(%r14)
+ movq %rcx, 64(%r14)
+ adcq -104(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, 72(%r14)
+ adcq %r12, %rbp
+ movq %rbp, 80(%r14)
+ adcq %r13, %rax
+ movq %rax, 88(%r14)
+ adcq %r9, %rdx
+ movq %rdx, 96(%r14)
+ adcq %r8, %r10
+ movq %r10, 104(%r14)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
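+## 7-limb squaring: forms the full 14-limb square of (%rsi) at (%rdi), with
+## no modular reduction.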
+ .globl _mcl_fpDbl_sqrPre7Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_sqrPre7Lbmi2: ## @mcl_fpDbl_sqrPre7Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, -40(%rsp) ## 8-byte Spill
+ movq 16(%rsi), %rdx
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rax
+ mulxq %rcx, %r8, %r10
+ movq 24(%rsi), %rbx
+ movq %rbx, -96(%rsp) ## 8-byte Spill
+ movq %rax, %rdx
+ mulxq %rcx, %r12, %rbp
+ movq %rbp, -48(%rsp) ## 8-byte Spill
+ movq %rcx, %rdx
+ mulxq %rcx, %rdx, %rdi
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ addq %r12, %rdi
+ adcq %rbp, %r8
+ movq %rbx, %rdx
+ mulxq %rcx, %rbp, %r9
+ adcq %r10, %rbp
+ movq 32(%rsi), %rdx
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ mulxq %rcx, %r11, %r14
+ adcq %r9, %r11
+ movq 40(%rsi), %rdx
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ mulxq %rcx, %r10, %r15
+ adcq %r14, %r10
+ movq 48(%rsi), %r14
+ movq %r14, %rdx
+ mulxq %rcx, %rcx, %r13
+ adcq %r15, %rcx
+ movq -40(%rsp), %rdx ## 8-byte Reload
+ movq -80(%rsp), %rbx ## 8-byte Reload
+ movq %rbx, (%rdx)
+ adcq $0, %r13
+ addq %r12, %rdi
+ movq %rax, %rdx
+ mulxq %rax, %r12, %rdx
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ adcq %r8, %r12
+ movq -88(%rsp), %rdx ## 8-byte Reload
+ mulxq %rax, %r8, %rdx
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ adcq %rbp, %r8
+ movq -96(%rsp), %rdx ## 8-byte Reload
+ mulxq %rax, %r9, %rbp
+ adcq %r11, %r9
+ movq -72(%rsp), %rdx ## 8-byte Reload
+ mulxq %rax, %r15, %rdx
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ adcq %r10, %r15
+ movq -56(%rsp), %rdx ## 8-byte Reload
+ mulxq %rax, %r11, %rbx
+ adcq %rcx, %r11
+ movq %r14, %rdx
+ mulxq %rax, %r14, %rax
+ adcq %r13, %r14
+ sbbq %r13, %r13
+ andl $1, %r13d
+ addq -48(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -80(%rsp), %r8 ## 8-byte Folded Reload
+ adcq -88(%rsp), %r9 ## 8-byte Folded Reload
+ adcq %rbp, %r15
+ movq -40(%rsp), %rcx ## 8-byte Reload
+ movq %rdi, 8(%rcx)
+ adcq -96(%rsp), %r11 ## 8-byte Folded Reload
+ adcq %rbx, %r14
+ adcq %rax, %r13
+ movq (%rsi), %rdx
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rcx
+ movq %rcx, -88(%rsp) ## 8-byte Spill
+ movq 16(%rsi), %rbx
+ mulxq %rbx, %rax, %rdx
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ addq %r12, %rax
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ movq %rcx, %rdx
+ mulxq %rbx, %r10, %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ adcq %r8, %r10
+ movq %rbx, %rdx
+ mulxq %rbx, %r12, %rax
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ adcq %r9, %r12
+ movq 24(%rsi), %rax
+ movq %rax, %rdx
+ mulxq %rbx, %r8, %rdi
+ movq %rdi, -56(%rsp) ## 8-byte Spill
+ adcq %r8, %r15
+ movq 32(%rsi), %rdx
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ mulxq %rbx, %rcx, %rdx
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ adcq %r11, %rcx
+ movq 40(%rsi), %rdx
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ mulxq %rbx, %rbp, %r11
+ adcq %r14, %rbp
+ movq 48(%rsi), %r14
+ movq %r14, %rdx
+ mulxq %rbx, %r9, %rdx
+ adcq %r13, %r9
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ addq -64(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -16(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -24(%rsp), %r15 ## 8-byte Folded Reload
+ adcq %rdi, %rcx
+ adcq -32(%rsp), %rbp ## 8-byte Folded Reload
+ adcq %r11, %r9
+ adcq %rdx, %rbx
+ movq -96(%rsp), %rdx ## 8-byte Reload
+ mulxq %rax, %rdi, %rdx
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ addq %r10, %rdi
+ movq -88(%rsp), %rdx ## 8-byte Reload
+ mulxq %rax, %r11, %rdx
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ adcq %r12, %r11
+ adcq %r8, %r15
+ movq %rax, %rdx
+ mulxq %rax, %r8, %rdx
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ adcq %rcx, %r8
+ movq -72(%rsp), %rdx ## 8-byte Reload
+ mulxq %rax, %r13, %rcx
+ movq %rcx, -72(%rsp) ## 8-byte Spill
+ adcq %rbp, %r13
+ movq -80(%rsp), %rdx ## 8-byte Reload
+ mulxq %rax, %r12, %rbp
+ adcq %r9, %r12
+ movq %r14, %rdx
+ mulxq %rax, %rcx, %rax
+ adcq %rbx, %rcx
+ sbbq %r10, %r10
+ andl $1, %r10d
+ addq -96(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -88(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -56(%rsp), %r8 ## 8-byte Folded Reload
+ adcq -64(%rsp), %r13 ## 8-byte Folded Reload
+ movq -40(%rsp), %rdx ## 8-byte Reload
+ movq -48(%rsp), %rbx ## 8-byte Reload
+ movq %rbx, 16(%rdx)
+ movq %rdi, 24(%rdx)
+ adcq -72(%rsp), %r12 ## 8-byte Folded Reload
+ adcq %rbp, %rcx
+ adcq %rax, %r10
+ movq (%rsi), %rdx
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rdi
+ movq %rdi, -88(%rsp) ## 8-byte Spill
+ movq 32(%rsi), %rbx
+ mulxq %rbx, %rax, %rdx
+ movq %rdx, -24(%rsp) ## 8-byte Spill
+ addq %r11, %rax
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ movq %rdi, %rdx
+ mulxq %rbx, %r9, %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ adcq %r15, %r9
+ movq 16(%rsi), %rdx
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ mulxq %rbx, %r15, %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ adcq %r8, %r15
+ movq 24(%rsi), %rdx
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ mulxq %rbx, %r8, %rbp
+ adcq %r13, %r8
+ movq %rbx, %rdx
+ mulxq %rbx, %r13, %r14
+ adcq %r12, %r13
+ movq 40(%rsi), %rax
+ movq %rax, %rdx
+ mulxq %rbx, %rdx, %rdi
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ movq %rdi, -56(%rsp) ## 8-byte Spill
+ adcq %rdx, %rcx
+ movq 48(%rsi), %rdx
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ mulxq %rbx, %r11, %rdx
+ adcq %r10, %r11
+ sbbq %r12, %r12
+ andl $1, %r12d
+ addq -24(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -32(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -8(%rsp), %r8 ## 8-byte Folded Reload
+ adcq %rbp, %r13
+ adcq %r14, %rcx
+ adcq %rdi, %r11
+ adcq %rdx, %r12
+ movq -96(%rsp), %rdx ## 8-byte Reload
+ mulxq %rax, %r14, %rdi
+ addq %r9, %r14
+ movq -88(%rsp), %rdx ## 8-byte Reload
+ mulxq %rax, %rbx, %rdx
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ adcq %r15, %rbx
+ movq -72(%rsp), %rdx ## 8-byte Reload
+ mulxq %rax, %rbp, %rdx
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ adcq %r8, %rbp
+ movq -80(%rsp), %rdx ## 8-byte Reload
+ mulxq %rax, %r10, %r15
+ adcq %r13, %r10
+ adcq -16(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rax, %rdx
+ mulxq %rax, %r9, %r13
+ adcq %r11, %r9
+ movq -64(%rsp), %rdx ## 8-byte Reload
+ mulxq %rax, %rax, %r11
+ adcq %r12, %rax
+ sbbq %r8, %r8
+ andl $1, %r8d
+ addq %rdi, %rbx
+ adcq -88(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -96(%rsp), %r10 ## 8-byte Folded Reload
+ adcq %r15, %rcx
+ movq -40(%rsp), %rdi ## 8-byte Reload
+ movq -48(%rsp), %rdx ## 8-byte Reload
+ movq %rdx, 32(%rdi)
+ movq %r14, 40(%rdi)
+ adcq -56(%rsp), %r9 ## 8-byte Folded Reload
+ adcq %r13, %rax
+ adcq %r11, %r8
+ movq 48(%rsi), %rdx
+ mulxq (%rsi), %r12, %r11
+ addq %rbx, %r12
+ mulxq 8(%rsi), %rbx, %r14
+ adcq %rbp, %rbx
+ mulxq 16(%rsi), %rbp, %r15
+ adcq %r10, %rbp
+ mulxq 24(%rsi), %rdi, %r10
+ adcq %rcx, %rdi
+ mulxq 32(%rsi), %rcx, %r13
+ adcq %r9, %rcx
+ mulxq 40(%rsi), %rsi, %r9
+ adcq %rax, %rsi
+ mulxq %rdx, %rdx, %rax
+ adcq %r8, %rdx
+ sbbq %r8, %r8
+ andl $1, %r8d
+ addq %r11, %rbx
+ adcq %r14, %rbp
+ movq -40(%rsp), %r11 ## 8-byte Reload
+ movq %r12, 48(%r11)
+ movq %rbx, 56(%r11)
+ movq %rbp, 64(%r11)
+ adcq %r15, %rdi
+ movq %rdi, 72(%r11)
+ adcq %r10, %rcx
+ movq %rcx, 80(%r11)
+ adcq %r13, %rsi
+ movq %rsi, 88(%r11)
+ adcq %r9, %rdx
+ movq %rdx, 96(%r11)
+ adcq %rax, %r8
+ movq %r8, 104(%r11)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
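+## Montgomery multiplication for a 7-limb modulus: each limb of the
+## multiplier is folded in and immediately reduced using the constant stored
+## 8 bytes below the modulus pointer; a final conditional subtraction brings
+## the result below the modulus.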
+ .globl _mcl_fp_mont7Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_mont7Lbmi2: ## @mcl_fp_mont7Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $56, %rsp
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq %rdi, 48(%rsp) ## 8-byte Spill
+ movq 48(%rsi), %rdi
+ movq %rdi, -64(%rsp) ## 8-byte Spill
+ movq (%rdx), %rax
+ movq %rdi, %rdx
+ mulxq %rax, %rdx, %r13
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ movq 40(%rsi), %rdx
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ mulxq %rax, %rdx, %r8
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ movq 32(%rsi), %rdx
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ mulxq %rax, %r10, %rdi
+ movq 24(%rsi), %rdx
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ mulxq %rax, %r14, %rbp
+ movq 16(%rsi), %rdx
+ movq %rdx, 32(%rsp) ## 8-byte Spill
+ mulxq %rax, %r12, %r15
+ movq (%rsi), %rbx
+ movq %rbx, 24(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rdx
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ mulxq %rax, %rsi, %r11
+ movq %rbx, %rdx
+ mulxq %rax, %rdx, %r9
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ addq %rsi, %r9
+ adcq %r12, %r11
+ adcq %r14, %r15
+ adcq %r10, %rbp
+ movq %rbp, -112(%rsp) ## 8-byte Spill
+ adcq -48(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, -104(%rsp) ## 8-byte Spill
+ adcq -40(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, -128(%rsp) ## 8-byte Spill
+ adcq $0, %r13
+ movq %r13, -120(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ imulq %rax, %rdx
+ movq 32(%rcx), %rax
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ mulxq %rax, %rbx, %r13
+ movq 16(%rcx), %rsi
+ movq %rsi, -48(%rsp) ## 8-byte Spill
+ mulxq %rsi, %r14, %rbp
+ movq 8(%rcx), %rsi
+ movq %rsi, 8(%rsp) ## 8-byte Spill
+ mulxq %rsi, %rsi, %rax
+ movq (%rcx), %rdi
+ movq %rdi, (%rsp) ## 8-byte Spill
+ mulxq %rdi, %r8, %r12
+ addq %rsi, %r12
+ adcq %r14, %rax
+ movq %rax, %rdi
+ movq 24(%rcx), %rsi
+ movq %rsi, -8(%rsp) ## 8-byte Spill
+ mulxq %rsi, %r10, %r14
+ adcq %rbp, %r10
+ adcq %rbx, %r14
+ movq 40(%rcx), %rsi
+ movq %rsi, -16(%rsp) ## 8-byte Spill
+ mulxq %rsi, %rbp, %rsi
+ adcq %r13, %rbp
+ movq 48(%rcx), %rax
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ mulxq %rax, %rax, %rbx
+ adcq %rsi, %rax
+ adcq $0, %rbx
+ addq -96(%rsp), %r8 ## 8-byte Folded Reload
+ adcq %r9, %r12
+ adcq %r11, %rdi
+ movq %rdi, -96(%rsp) ## 8-byte Spill
+ adcq %r15, %r10
+ adcq -112(%rsp), %r14 ## 8-byte Folded Reload
+ adcq -104(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -128(%rsp), %rax ## 8-byte Folded Reload
+ adcq -120(%rsp), %rbx ## 8-byte Folded Reload
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq -56(%rsp), %rcx ## 8-byte Reload
+ movq 8(%rcx), %rdx
+ mulxq -64(%rsp), %rdi, %rcx ## 8-byte Folded Reload
+ movq %rdi, -104(%rsp) ## 8-byte Spill
+ movq %rcx, -120(%rsp) ## 8-byte Spill
+ mulxq -72(%rsp), %rdi, %rcx ## 8-byte Folded Reload
+ movq %rdi, -88(%rsp) ## 8-byte Spill
+ movq %rcx, -128(%rsp) ## 8-byte Spill
+ mulxq 16(%rsp), %r9, %r8 ## 8-byte Folded Reload
+ mulxq 24(%rsp), %rdi, %r11 ## 8-byte Folded Reload
+ movq %rdi, -112(%rsp) ## 8-byte Spill
+ addq %r9, %r11
+ mulxq 32(%rsp), %rcx, %r9 ## 8-byte Folded Reload
+ adcq %r8, %rcx
+ movq %rcx, %rdi
+ mulxq -32(%rsp), %r13, %rcx ## 8-byte Folded Reload
+ adcq %r9, %r13
+ mulxq -80(%rsp), %r8, %r15 ## 8-byte Folded Reload
+ adcq %rcx, %r8
+ adcq -88(%rsp), %r15 ## 8-byte Folded Reload
+ movq -128(%rsp), %rdx ## 8-byte Reload
+ adcq -104(%rsp), %rdx ## 8-byte Folded Reload
+ movq -120(%rsp), %rcx ## 8-byte Reload
+ adcq $0, %rcx
+ movq -112(%rsp), %r9 ## 8-byte Reload
+ addq %r12, %r9
+ movq %r9, -112(%rsp) ## 8-byte Spill
+ movq %r11, %r12
+ adcq -96(%rsp), %r12 ## 8-byte Folded Reload
+ adcq %r10, %rdi
+ movq %rdi, -88(%rsp) ## 8-byte Spill
+ adcq %r14, %r13
+ adcq %rbp, %r8
+ adcq %rax, %r15
+ adcq %rbx, %rdx
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ adcq %rsi, %rcx
+ movq %rcx, -120(%rsp) ## 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %r9, %rdx
+ imulq 40(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -24(%rsp), %r10, %rax ## 8-byte Folded Reload
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ mulxq -8(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ mulxq 8(%rsp), %rdi, %rbx ## 8-byte Folded Reload
+ mulxq (%rsp), %r14, %r9 ## 8-byte Folded Reload
+ addq %rdi, %r9
+ mulxq -48(%rsp), %rbp, %r11 ## 8-byte Folded Reload
+ adcq %rbx, %rbp
+ adcq %rcx, %r11
+ mulxq -40(%rsp), %rbx, %rsi ## 8-byte Folded Reload
+ adcq %rax, %rbx
+ mulxq -16(%rsp), %rax, %rcx ## 8-byte Folded Reload
+ adcq %rsi, %rax
+ adcq %r10, %rcx
+ movq -104(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq -112(%rsp), %r14 ## 8-byte Folded Reload
+ adcq %r12, %r9
+ adcq -88(%rsp), %rbp ## 8-byte Folded Reload
+ adcq %r13, %r11
+ adcq %r8, %rbx
+ adcq %r15, %rax
+ adcq -128(%rsp), %rcx ## 8-byte Folded Reload
+ adcq -120(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ adcq $0, -96(%rsp) ## 8-byte Folded Spill
+ movq -56(%rsp), %rdx ## 8-byte Reload
+ movq 16(%rdx), %rdx
+ mulxq -64(%rsp), %rdi, %rsi ## 8-byte Folded Reload
+ movq %rdi, -112(%rsp) ## 8-byte Spill
+ movq %rsi, -120(%rsp) ## 8-byte Spill
+ mulxq -72(%rsp), %rdi, %rsi ## 8-byte Folded Reload
+ movq %rdi, -88(%rsp) ## 8-byte Spill
+ movq %rsi, -128(%rsp) ## 8-byte Spill
+ mulxq 32(%rsp), %rdi, %r10 ## 8-byte Folded Reload
+ mulxq 16(%rsp), %rsi, %r13 ## 8-byte Folded Reload
+ mulxq 24(%rsp), %r8, %r15 ## 8-byte Folded Reload
+ addq %rsi, %r15
+ adcq %rdi, %r13
+ mulxq -32(%rsp), %r12, %rsi ## 8-byte Folded Reload
+ adcq %r10, %r12
+ mulxq -80(%rsp), %r10, %r14 ## 8-byte Folded Reload
+ adcq %rsi, %r10
+ adcq -88(%rsp), %r14 ## 8-byte Folded Reload
+ movq -128(%rsp), %rsi ## 8-byte Reload
+ adcq -112(%rsp), %rsi ## 8-byte Folded Reload
+ movq -120(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %r9, %r8
+ movq %r8, -112(%rsp) ## 8-byte Spill
+ adcq %rbp, %r15
+ adcq %r11, %r13
+ adcq %rbx, %r12
+ adcq %rax, %r10
+ adcq %rcx, %r14
+ adcq -104(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, -128(%rsp) ## 8-byte Spill
+ adcq -96(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, %rbx
+ movq %r8, %rdx
+ imulq 40(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -24(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ movq %rcx, -96(%rsp) ## 8-byte Spill
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ mulxq -8(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ mulxq 8(%rsp), %rbp, %rsi ## 8-byte Folded Reload
+ mulxq (%rsp), %r11, %r8 ## 8-byte Folded Reload
+ addq %rbp, %r8
+ mulxq -48(%rsp), %rbp, %r9 ## 8-byte Folded Reload
+ adcq %rsi, %rbp
+ adcq %rcx, %r9
+ mulxq -40(%rsp), %rsi, %rdi ## 8-byte Folded Reload
+ adcq %rax, %rsi
+ mulxq -16(%rsp), %rax, %rcx ## 8-byte Folded Reload
+ adcq %rdi, %rax
+ adcq -96(%rsp), %rcx ## 8-byte Folded Reload
+ movq -104(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq -112(%rsp), %r11 ## 8-byte Folded Reload
+ adcq %r15, %r8
+ adcq %r13, %rbp
+ adcq %r12, %r9
+ adcq %r10, %rsi
+ adcq %r14, %rax
+ adcq -128(%rsp), %rcx ## 8-byte Folded Reload
+ adcq -120(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ adcq $0, %rbx
+ movq %rbx, -128(%rsp) ## 8-byte Spill
+ movq -56(%rsp), %rdx ## 8-byte Reload
+ movq 24(%rdx), %rdx
+ mulxq -64(%rsp), %rbx, %rdi ## 8-byte Folded Reload
+ movq %rbx, -96(%rsp) ## 8-byte Spill
+ movq %rdi, -120(%rsp) ## 8-byte Spill
+ mulxq -72(%rsp), %rdi, %r13 ## 8-byte Folded Reload
+ movq %rdi, -88(%rsp) ## 8-byte Spill
+ mulxq 32(%rsp), %r10, %r11 ## 8-byte Folded Reload
+ mulxq 16(%rsp), %rdi, %r15 ## 8-byte Folded Reload
+ mulxq 24(%rsp), %rbx, %r12 ## 8-byte Folded Reload
+ movq %rbx, -112(%rsp) ## 8-byte Spill
+ addq %rdi, %r12
+ adcq %r10, %r15
+ mulxq -32(%rsp), %rbx, %rdi ## 8-byte Folded Reload
+ adcq %r11, %rbx
+ mulxq -80(%rsp), %r10, %r14 ## 8-byte Folded Reload
+ adcq %rdi, %r10
+ adcq -88(%rsp), %r14 ## 8-byte Folded Reload
+ adcq -96(%rsp), %r13 ## 8-byte Folded Reload
+ movq -120(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ movq -112(%rsp), %rdi ## 8-byte Reload
+ addq %r8, %rdi
+ movq %rdi, -112(%rsp) ## 8-byte Spill
+ adcq %rbp, %r12
+ adcq %r9, %r15
+ adcq %rsi, %rbx
+ adcq %rax, %r10
+ adcq %rcx, %r14
+ adcq -104(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -128(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rdi, %rdx
+ imulq 40(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -24(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ movq %rcx, -96(%rsp) ## 8-byte Spill
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ mulxq -8(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ mulxq 8(%rsp), %rbp, %rsi ## 8-byte Folded Reload
+ mulxq (%rsp), %r11, %r8 ## 8-byte Folded Reload
+ addq %rbp, %r8
+ mulxq -48(%rsp), %rbp, %r9 ## 8-byte Folded Reload
+ adcq %rsi, %rbp
+ adcq %rcx, %r9
+ mulxq -40(%rsp), %rsi, %rdi ## 8-byte Folded Reload
+ adcq %rax, %rsi
+ mulxq -16(%rsp), %rax, %rcx ## 8-byte Folded Reload
+ adcq %rdi, %rax
+ adcq -96(%rsp), %rcx ## 8-byte Folded Reload
+ movq -128(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq -112(%rsp), %r11 ## 8-byte Folded Reload
+ adcq %r12, %r8
+ adcq %r15, %rbp
+ adcq %rbx, %r9
+ adcq %r10, %rsi
+ adcq %r14, %rax
+ adcq %r13, %rcx
+ adcq -120(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ adcq $0, -104(%rsp) ## 8-byte Folded Spill
+ movq -56(%rsp), %rdx ## 8-byte Reload
+ movq 32(%rdx), %rdx
+ mulxq -64(%rsp), %rbx, %rdi ## 8-byte Folded Reload
+ movq %rbx, -112(%rsp) ## 8-byte Spill
+ movq %rdi, -120(%rsp) ## 8-byte Spill
+ mulxq -72(%rsp), %rdi, %r11 ## 8-byte Folded Reload
+ movq %rdi, -96(%rsp) ## 8-byte Spill
+ mulxq 32(%rsp), %r10, %r13 ## 8-byte Folded Reload
+ mulxq 16(%rsp), %rdi, %r15 ## 8-byte Folded Reload
+ mulxq 24(%rsp), %rbx, %r12 ## 8-byte Folded Reload
+ addq %rdi, %r12
+ adcq %r10, %r15
+ mulxq -32(%rsp), %r10, %rdi ## 8-byte Folded Reload
+ adcq %r13, %r10
+ mulxq -80(%rsp), %r13, %r14 ## 8-byte Folded Reload
+ adcq %rdi, %r13
+ adcq -96(%rsp), %r14 ## 8-byte Folded Reload
+ adcq -112(%rsp), %r11 ## 8-byte Folded Reload
+ movq -120(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %r8, %rbx
+ movq %rbx, -96(%rsp) ## 8-byte Spill
+ adcq %rbp, %r12
+ adcq %r9, %r15
+ adcq %rsi, %r10
+ adcq %rax, %r13
+ adcq %rcx, %r14
+ adcq -128(%rsp), %r11 ## 8-byte Folded Reload
+ movq %r11, -88(%rsp) ## 8-byte Spill
+ adcq -104(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %rbx, %rdx
+ imulq 40(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -24(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ movq %rcx, -128(%rsp) ## 8-byte Spill
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ mulxq -8(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ mulxq 8(%rsp), %rbp, %rsi ## 8-byte Folded Reload
+ mulxq (%rsp), %r9, %r11 ## 8-byte Folded Reload
+ addq %rbp, %r11
+ mulxq -48(%rsp), %rbp, %r8 ## 8-byte Folded Reload
+ adcq %rsi, %rbp
+ adcq %rcx, %r8
+ mulxq -40(%rsp), %rsi, %rdi ## 8-byte Folded Reload
+ adcq %rax, %rsi
+ mulxq -16(%rsp), %rax, %rcx ## 8-byte Folded Reload
+ adcq %rdi, %rax
+ adcq -128(%rsp), %rcx ## 8-byte Folded Reload
+ movq -104(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq -96(%rsp), %r9 ## 8-byte Folded Reload
+ adcq %r12, %r11
+ adcq %r15, %rbp
+ adcq %r10, %r8
+ adcq %r13, %rsi
+ adcq %r14, %rax
+ adcq -88(%rsp), %rcx ## 8-byte Folded Reload
+ adcq -120(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ adcq $0, -112(%rsp) ## 8-byte Folded Spill
+ movq -56(%rsp), %rdx ## 8-byte Reload
+ movq 40(%rdx), %rdx
+ mulxq -64(%rsp), %rbx, %rdi ## 8-byte Folded Reload
+ movq %rbx, -96(%rsp) ## 8-byte Spill
+ movq %rdi, -120(%rsp) ## 8-byte Spill
+ mulxq -72(%rsp), %rbx, %rdi ## 8-byte Folded Reload
+ movq %rbx, -88(%rsp) ## 8-byte Spill
+ movq %rdi, -128(%rsp) ## 8-byte Spill
+ mulxq 32(%rsp), %rbx, %r10 ## 8-byte Folded Reload
+ mulxq 16(%rsp), %rdi, %r13 ## 8-byte Folded Reload
+ mulxq 24(%rsp), %r9, %r12 ## 8-byte Folded Reload
+ addq %rdi, %r12
+ adcq %rbx, %r13
+ mulxq -32(%rsp), %r15, %rdi ## 8-byte Folded Reload
+ adcq %r10, %r15
+ mulxq -80(%rsp), %r10, %r14 ## 8-byte Folded Reload
+ adcq %rdi, %r10
+ adcq -88(%rsp), %r14 ## 8-byte Folded Reload
+ movq -128(%rsp), %rdi ## 8-byte Reload
+ adcq -96(%rsp), %rdi ## 8-byte Folded Reload
+ movq -120(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %r11, %r9
+ movq %r9, -96(%rsp) ## 8-byte Spill
+ adcq %rbp, %r12
+ adcq %r8, %r13
+ adcq %rsi, %r15
+ adcq %rax, %r10
+ adcq %rcx, %r14
+ adcq -104(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, -128(%rsp) ## 8-byte Spill
+ adcq -112(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %r9, %rdx
+ imulq 40(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -24(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ movq %rcx, -88(%rsp) ## 8-byte Spill
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ mulxq -8(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ mulxq 8(%rsp), %rdi, %rsi ## 8-byte Folded Reload
+ mulxq (%rsp), %r11, %rbx ## 8-byte Folded Reload
+ addq %rdi, %rbx
+ mulxq -48(%rsp), %r8, %r9 ## 8-byte Folded Reload
+ adcq %rsi, %r8
+ adcq %rcx, %r9
+ mulxq -40(%rsp), %rdi, %rbp ## 8-byte Folded Reload
+ adcq %rax, %rdi
+ mulxq -16(%rsp), %rcx, %rsi ## 8-byte Folded Reload
+ adcq %rbp, %rcx
+ adcq -88(%rsp), %rsi ## 8-byte Folded Reload
+ movq -104(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq -96(%rsp), %r11 ## 8-byte Folded Reload
+ adcq %r12, %rbx
+ adcq %r13, %r8
+ adcq %r15, %r9
+ adcq %r10, %rdi
+ adcq %r14, %rcx
+ adcq -128(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -120(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq -112(%rsp), %r12 ## 8-byte Reload
+ adcq $0, %r12
+ movq -56(%rsp), %rax ## 8-byte Reload
+ movq 48(%rax), %rdx
+ mulxq -64(%rsp), %rbp, %rax ## 8-byte Folded Reload
+ movq %rbp, -120(%rsp) ## 8-byte Spill
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ mulxq -72(%rsp), %rbp, %rax ## 8-byte Folded Reload
+ movq %rbp, -128(%rsp) ## 8-byte Spill
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ mulxq -80(%rsp), %rbp, %rax ## 8-byte Folded Reload
+ movq %rbp, -112(%rsp) ## 8-byte Spill
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ mulxq -32(%rsp), %r13, %rbp ## 8-byte Folded Reload
+ mulxq 32(%rsp), %r14, %r15 ## 8-byte Folded Reload
+ mulxq 16(%rsp), %rax, %r11 ## 8-byte Folded Reload
+ mulxq 24(%rsp), %rdx, %r10 ## 8-byte Folded Reload
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ addq %rax, %r10
+ adcq %r14, %r11
+ adcq %r13, %r15
+ adcq -112(%rsp), %rbp ## 8-byte Folded Reload
+ movq -72(%rsp), %r14 ## 8-byte Reload
+ adcq -128(%rsp), %r14 ## 8-byte Folded Reload
+ movq -64(%rsp), %rdx ## 8-byte Reload
+ adcq -120(%rsp), %rdx ## 8-byte Folded Reload
+ movq -56(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ movq -80(%rsp), %r13 ## 8-byte Reload
+ addq %rbx, %r13
+ movq %r13, -80(%rsp) ## 8-byte Spill
+ adcq %r8, %r10
+ adcq %r9, %r11
+ adcq %rdi, %r15
+ adcq %rcx, %rbp
+ movq %rbp, -32(%rsp) ## 8-byte Spill
+ adcq %rsi, %r14
+ movq %r14, -72(%rsp) ## 8-byte Spill
+ adcq -104(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ adcq %r12, %rax
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ sbbq %rdi, %rdi
+ movq 40(%rsp), %rdx ## 8-byte Reload
+ imulq %r13, %rdx
+ mulxq -8(%rsp), %rbp, %rsi ## 8-byte Folded Reload
+ mulxq 8(%rsp), %rcx, %rbx ## 8-byte Folded Reload
+ mulxq (%rsp), %r13, %rax ## 8-byte Folded Reload
+ addq %rcx, %rax
+ mulxq -48(%rsp), %rcx, %r9 ## 8-byte Folded Reload
+ adcq %rbx, %rcx
+ adcq %rbp, %r9
+ mulxq -40(%rsp), %rbp, %rbx ## 8-byte Folded Reload
+ adcq %rsi, %rbp
+ mulxq -16(%rsp), %rsi, %r14 ## 8-byte Folded Reload
+ adcq %rbx, %rsi
+ mulxq -24(%rsp), %rdx, %rbx ## 8-byte Folded Reload
+ adcq %r14, %rdx
+ adcq $0, %rbx
+ andl $1, %edi
+ addq -80(%rsp), %r13 ## 8-byte Folded Reload
+ adcq %r10, %rax
+ adcq %r11, %rcx
+ adcq %r15, %r9
+ adcq -32(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -72(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -64(%rsp), %rdx ## 8-byte Folded Reload
+ adcq -56(%rsp), %rbx ## 8-byte Folded Reload
+ adcq $0, %rdi
+ movq %rax, %r8
+ subq (%rsp), %r8 ## 8-byte Folded Reload
+ movq %rcx, %r10
+ sbbq 8(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r9, %r11
+ sbbq -48(%rsp), %r11 ## 8-byte Folded Reload
+ movq %rbp, %r14
+ sbbq -8(%rsp), %r14 ## 8-byte Folded Reload
+ movq %rsi, %r15
+ sbbq -40(%rsp), %r15 ## 8-byte Folded Reload
+ movq %rdx, %r12
+ sbbq -16(%rsp), %r12 ## 8-byte Folded Reload
+ movq %rbx, %r13
+ sbbq -24(%rsp), %r13 ## 8-byte Folded Reload
+ sbbq $0, %rdi
+ andl $1, %edi
+ cmovneq %rbx, %r13
+ testb %dil, %dil
+ cmovneq %rax, %r8
+ movq 48(%rsp), %rax ## 8-byte Reload
+ movq %r8, (%rax)
+ cmovneq %rcx, %r10
+ movq %r10, 8(%rax)
+ cmovneq %r9, %r11
+ movq %r11, 16(%rax)
+ cmovneq %rbp, %r14
+ movq %r14, 24(%rax)
+ cmovneq %rsi, %r15
+ movq %r15, 32(%rax)
+ cmovneq %rdx, %r12
+ movq %r12, 40(%rax)
+ movq %r13, 48(%rax)
+ addq $56, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
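+## Montgomery multiplication for a 7-limb modulus, 'NF' variant: the same
+## interleaved multiply/reduce structure as mcl_fp_mont7Lbmi2, but without
+## the explicit carry-save word used there.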
+ .globl _mcl_fp_montNF7Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_montNF7Lbmi2: ## @mcl_fp_montNF7Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $40, %rsp
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rdi, 32(%rsp) ## 8-byte Spill
+ movq (%rsi), %rax
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rdi
+ movq %rdi, -96(%rsp) ## 8-byte Spill
+ movq (%rdx), %rbp
+ movq %rdi, %rdx
+ mulxq %rbp, %rdi, %rbx
+ movq %rax, %rdx
+ mulxq %rbp, %r8, %r14
+ movq 16(%rsi), %rdx
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ addq %rdi, %r14
+ mulxq %rbp, %r15, %rax
+ adcq %rbx, %r15
+ movq 24(%rsi), %rdx
+ movq %rdx, -24(%rsp) ## 8-byte Spill
+ mulxq %rbp, %rbx, %rdi
+ adcq %rax, %rbx
+ movq 32(%rsi), %rdx
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ mulxq %rbp, %r11, %rax
+ adcq %rdi, %r11
+ movq 40(%rsi), %rdx
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ mulxq %rbp, %r9, %rdi
+ adcq %rax, %r9
+ movq 48(%rsi), %rdx
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ mulxq %rbp, %r10, %rbp
+ adcq %rdi, %r10
+ adcq $0, %rbp
+ movq -8(%rcx), %rax
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %r8, %rdx
+ imulq %rax, %rdx
+ movq (%rcx), %rax
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ mulxq %rax, %rax, %rsi
+ movq %rsi, -128(%rsp) ## 8-byte Spill
+ addq %r8, %rax
+ movq 8(%rcx), %rax
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ mulxq %rax, %r8, %rsi
+ movq %rsi, -120(%rsp) ## 8-byte Spill
+ adcq %r14, %r8
+ movq 16(%rcx), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ mulxq %rax, %rsi, %r13
+ adcq %r15, %rsi
+ movq 24(%rcx), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ mulxq %rax, %r12, %rax
+ adcq %rbx, %r12
+ movq 32(%rcx), %rdi
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ mulxq %rdi, %r15, %rbx
+ adcq %r11, %r15
+ movq 40(%rcx), %rdi
+ movq %rdi, -16(%rsp) ## 8-byte Spill
+ mulxq %rdi, %r14, %rdi
+ adcq %r9, %r14
+ movq 48(%rcx), %rcx
+ movq %rcx, -56(%rsp) ## 8-byte Spill
+ mulxq %rcx, %r11, %rcx
+ adcq %r10, %r11
+ adcq $0, %rbp
+ addq -128(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, -128(%rsp) ## 8-byte Spill
+ adcq -120(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, -120(%rsp) ## 8-byte Spill
+ adcq %r13, %r12
+ adcq %rax, %r15
+ adcq %rbx, %r14
+ adcq %rdi, %r11
+ adcq %rcx, %rbp
+ movq -88(%rsp), %rax ## 8-byte Reload
+ movq 8(%rax), %rdx
+ mulxq -96(%rsp), %rcx, %rsi ## 8-byte Folded Reload
+ mulxq -112(%rsp), %r13, %rax ## 8-byte Folded Reload
+ addq %rcx, %rax
+ mulxq -104(%rsp), %rcx, %rdi ## 8-byte Folded Reload
+ adcq %rsi, %rcx
+ mulxq -24(%rsp), %rsi, %r8 ## 8-byte Folded Reload
+ adcq %rdi, %rsi
+ mulxq -32(%rsp), %rdi, %r9 ## 8-byte Folded Reload
+ adcq %r8, %rdi
+ mulxq -40(%rsp), %r8, %rbx ## 8-byte Folded Reload
+ adcq %r9, %r8
+ mulxq -48(%rsp), %r9, %r10 ## 8-byte Folded Reload
+ adcq %rbx, %r9
+ adcq $0, %r10
+ addq -128(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -120(%rsp), %rax ## 8-byte Folded Reload
+ adcq %r12, %rcx
+ adcq %r15, %rsi
+ adcq %r14, %rdi
+ adcq %r11, %r8
+ adcq %rbp, %r9
+ adcq $0, %r10
+ movq %r13, %rdx
+ imulq -80(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -64(%rsp), %rbp, %rbx ## 8-byte Folded Reload
+ movq %rbx, -128(%rsp) ## 8-byte Spill
+ addq %r13, %rbp
+ mulxq -72(%rsp), %rbp, %r14 ## 8-byte Folded Reload
+ adcq %rax, %rbp
+ mulxq 8(%rsp), %rax, %r11 ## 8-byte Folded Reload
+ adcq %rcx, %rax
+ mulxq (%rsp), %r12, %rcx ## 8-byte Folded Reload
+ adcq %rsi, %r12
+ mulxq -8(%rsp), %r15, %rbx ## 8-byte Folded Reload
+ adcq %rdi, %r15
+ mulxq -16(%rsp), %r13, %rdi ## 8-byte Folded Reload
+ adcq %r8, %r13
+ mulxq -56(%rsp), %rsi, %rdx ## 8-byte Folded Reload
+ adcq %r9, %rsi
+ adcq $0, %r10
+ addq -128(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, -128(%rsp) ## 8-byte Spill
+ adcq %r14, %rax
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ adcq %r11, %r12
+ adcq %rcx, %r15
+ adcq %rbx, %r13
+ adcq %rdi, %rsi
+ adcq %rdx, %r10
+ movq -88(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rdx
+ mulxq -96(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ mulxq -112(%rsp), %r14, %rdi ## 8-byte Folded Reload
+ addq %rcx, %rdi
+ mulxq -104(%rsp), %rbp, %rcx ## 8-byte Folded Reload
+ adcq %rax, %rbp
+ mulxq -24(%rsp), %rbx, %r8 ## 8-byte Folded Reload
+ adcq %rcx, %rbx
+ mulxq -32(%rsp), %rax, %r9 ## 8-byte Folded Reload
+ adcq %r8, %rax
+ mulxq -40(%rsp), %r8, %rcx ## 8-byte Folded Reload
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ adcq %r9, %r8
+ mulxq -48(%rsp), %r9, %r11 ## 8-byte Folded Reload
+ adcq 16(%rsp), %r9 ## 8-byte Folded Reload
+ adcq $0, %r11
+ addq -128(%rsp), %r14 ## 8-byte Folded Reload
+ adcq -120(%rsp), %rdi ## 8-byte Folded Reload
+ adcq %r12, %rbp
+ adcq %r15, %rbx
+ adcq %r13, %rax
+ adcq %rsi, %r8
+ adcq %r10, %r9
+ adcq $0, %r11
+ movq %r14, %rdx
+ imulq -80(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -64(%rsp), %rsi, %rcx ## 8-byte Folded Reload
+ movq %rcx, -128(%rsp) ## 8-byte Spill
+ addq %r14, %rsi
+ mulxq -72(%rsp), %rsi, %r13 ## 8-byte Folded Reload
+ adcq %rdi, %rsi
+ mulxq 8(%rsp), %rdi, %r15 ## 8-byte Folded Reload
+ adcq %rbp, %rdi
+ mulxq (%rsp), %rcx, %rbp ## 8-byte Folded Reload
+ adcq %rbx, %rcx
+ mulxq -8(%rsp), %r14, %rbx ## 8-byte Folded Reload
+ adcq %rax, %r14
+ mulxq -16(%rsp), %r12, %rax ## 8-byte Folded Reload
+ adcq %r8, %r12
+ mulxq -56(%rsp), %r10, %rdx ## 8-byte Folded Reload
+ adcq %r9, %r10
+ adcq $0, %r11
+ addq -128(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, -128(%rsp) ## 8-byte Spill
+ adcq %r13, %rdi
+ movq %rdi, -120(%rsp) ## 8-byte Spill
+ adcq %r15, %rcx
+ adcq %rbp, %r14
+ adcq %rbx, %r12
+ adcq %rax, %r10
+ adcq %rdx, %r11
+ movq -88(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rdx
+ mulxq -96(%rsp), %rsi, %rax ## 8-byte Folded Reload
+ mulxq -112(%rsp), %r15, %rbp ## 8-byte Folded Reload
+ addq %rsi, %rbp
+ mulxq -104(%rsp), %rbx, %rdi ## 8-byte Folded Reload
+ adcq %rax, %rbx
+ mulxq -24(%rsp), %rsi, %rax ## 8-byte Folded Reload
+ adcq %rdi, %rsi
+ mulxq -32(%rsp), %rdi, %r9 ## 8-byte Folded Reload
+ adcq %rax, %rdi
+ mulxq -40(%rsp), %r8, %rax ## 8-byte Folded Reload
+ adcq %r9, %r8
+ mulxq -48(%rsp), %r9, %r13 ## 8-byte Folded Reload
+ adcq %rax, %r9
+ adcq $0, %r13
+ addq -128(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -120(%rsp), %rbp ## 8-byte Folded Reload
+ adcq %rcx, %rbx
+ adcq %r14, %rsi
+ adcq %r12, %rdi
+ adcq %r10, %r8
+ adcq %r11, %r9
+ adcq $0, %r13
+ movq %r15, %rdx
+ imulq -80(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -64(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ addq %r15, %rcx
+ mulxq -72(%rsp), %rcx, %r11 ## 8-byte Folded Reload
+ adcq %rbp, %rcx
+ mulxq 8(%rsp), %rbp, %r10 ## 8-byte Folded Reload
+ adcq %rbx, %rbp
+ mulxq (%rsp), %rax, %rbx ## 8-byte Folded Reload
+ adcq %rsi, %rax
+ mulxq -8(%rsp), %r14, %rsi ## 8-byte Folded Reload
+ adcq %rdi, %r14
+ mulxq -16(%rsp), %r15, %rdi ## 8-byte Folded Reload
+ adcq %r8, %r15
+ mulxq -56(%rsp), %r12, %rdx ## 8-byte Folded Reload
+ adcq %r9, %r12
+ adcq $0, %r13
+ addq -128(%rsp), %rcx ## 8-byte Folded Reload
+ adcq %r11, %rbp
+ movq %rbp, -128(%rsp) ## 8-byte Spill
+ adcq %r10, %rax
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ adcq %rbx, %r14
+ adcq %rsi, %r15
+ adcq %rdi, %r12
+ adcq %rdx, %r13
+ movq -88(%rsp), %rax ## 8-byte Reload
+ movq 32(%rax), %rdx
+ mulxq -96(%rsp), %rsi, %rdi ## 8-byte Folded Reload
+ mulxq -112(%rsp), %r11, %r8 ## 8-byte Folded Reload
+ addq %rsi, %r8
+ mulxq -104(%rsp), %rbx, %rsi ## 8-byte Folded Reload
+ adcq %rdi, %rbx
+ mulxq -24(%rsp), %rbp, %rdi ## 8-byte Folded Reload
+ adcq %rsi, %rbp
+ mulxq -32(%rsp), %rsi, %r9 ## 8-byte Folded Reload
+ adcq %rdi, %rsi
+ mulxq -40(%rsp), %rdi, %rax ## 8-byte Folded Reload
+ adcq %r9, %rdi
+ mulxq -48(%rsp), %r9, %r10 ## 8-byte Folded Reload
+ adcq %rax, %r9
+ adcq $0, %r10
+ addq %rcx, %r11
+ adcq -128(%rsp), %r8 ## 8-byte Folded Reload
+ adcq -120(%rsp), %rbx ## 8-byte Folded Reload
+ adcq %r14, %rbp
+ adcq %r15, %rsi
+ adcq %r12, %rdi
+ adcq %r13, %r9
+ adcq $0, %r10
+ movq %r11, %rdx
+ imulq -80(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -64(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ addq %r11, %rcx
+ mulxq -72(%rsp), %rcx, %r13 ## 8-byte Folded Reload
+ adcq %r8, %rcx
+ mulxq 8(%rsp), %rax, %r8 ## 8-byte Folded Reload
+ adcq %rbx, %rax
+ mulxq (%rsp), %rbx, %r11 ## 8-byte Folded Reload
+ adcq %rbp, %rbx
+ mulxq -8(%rsp), %r14, %rbp ## 8-byte Folded Reload
+ adcq %rsi, %r14
+ mulxq -16(%rsp), %r15, %rsi ## 8-byte Folded Reload
+ adcq %rdi, %r15
+ mulxq -56(%rsp), %r12, %rdx ## 8-byte Folded Reload
+ adcq %r9, %r12
+ adcq $0, %r10
+ addq -128(%rsp), %rcx ## 8-byte Folded Reload
+ adcq %r13, %rax
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ adcq %r8, %rbx
+ movq %rbx, -120(%rsp) ## 8-byte Spill
+ adcq %r11, %r14
+ adcq %rbp, %r15
+ adcq %rsi, %r12
+ adcq %rdx, %r10
+ movq -88(%rsp), %rax ## 8-byte Reload
+ movq 40(%rax), %rdx
+ mulxq -96(%rsp), %rsi, %rax ## 8-byte Folded Reload
+ mulxq -112(%rsp), %r11, %rbp ## 8-byte Folded Reload
+ addq %rsi, %rbp
+ mulxq -104(%rsp), %rbx, %rdi ## 8-byte Folded Reload
+ adcq %rax, %rbx
+ mulxq -24(%rsp), %rsi, %rax ## 8-byte Folded Reload
+ adcq %rdi, %rsi
+ mulxq -32(%rsp), %rdi, %r9 ## 8-byte Folded Reload
+ adcq %rax, %rdi
+ mulxq -40(%rsp), %r8, %rax ## 8-byte Folded Reload
+ adcq %r9, %r8
+ mulxq -48(%rsp), %r9, %r13 ## 8-byte Folded Reload
+ adcq %rax, %r9
+ adcq $0, %r13
+ addq %rcx, %r11
+ adcq -128(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -120(%rsp), %rbx ## 8-byte Folded Reload
+ adcq %r14, %rsi
+ adcq %r15, %rdi
+ adcq %r12, %r8
+ adcq %r10, %r9
+ adcq $0, %r13
+ movq %r11, %rdx
+ imulq -80(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -64(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ addq %r11, %rcx
+ mulxq -72(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ adcq %rbp, %rcx
+ mulxq 8(%rsp), %rax, %rbp ## 8-byte Folded Reload
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ adcq %rbx, %rax
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ mulxq (%rsp), %r14, %rbp ## 8-byte Folded Reload
+ adcq %rsi, %r14
+ mulxq -8(%rsp), %r11, %r12 ## 8-byte Folded Reload
+ adcq %rdi, %r11
+ mulxq -16(%rsp), %r10, %rbx ## 8-byte Folded Reload
+ adcq %r8, %r10
+ mulxq -56(%rsp), %rdi, %rax ## 8-byte Folded Reload
+ adcq %r9, %rdi
+ adcq $0, %r13
+ addq -120(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -120(%rsp) ## 8-byte Spill
+ movq -128(%rsp), %rcx ## 8-byte Reload
+ adcq 16(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -128(%rsp) ## 8-byte Spill
+ adcq 24(%rsp), %r14 ## 8-byte Folded Reload
+ adcq %rbp, %r11
+ adcq %r12, %r10
+ adcq %rbx, %rdi
+ adcq %rax, %r13
+ movq -88(%rsp), %rax ## 8-byte Reload
+ movq 48(%rax), %rdx
+ mulxq -96(%rsp), %rbp, %r9 ## 8-byte Folded Reload
+ mulxq -112(%rsp), %r8, %rax ## 8-byte Folded Reload
+ addq %rbp, %rax
+ mulxq -104(%rsp), %rbx, %rcx ## 8-byte Folded Reload
+ adcq %r9, %rbx
+ mulxq -24(%rsp), %rbp, %r9 ## 8-byte Folded Reload
+ adcq %rcx, %rbp
+ mulxq -32(%rsp), %rcx, %r12 ## 8-byte Folded Reload
+ adcq %r9, %rcx
+ mulxq -40(%rsp), %r15, %rsi ## 8-byte Folded Reload
+ movq %rsi, -112(%rsp) ## 8-byte Spill
+ adcq %r12, %r15
+ mulxq -48(%rsp), %r12, %r9 ## 8-byte Folded Reload
+ adcq -112(%rsp), %r12 ## 8-byte Folded Reload
+ adcq $0, %r9
+ addq -120(%rsp), %r8 ## 8-byte Folded Reload
+ adcq -128(%rsp), %rax ## 8-byte Folded Reload
+ adcq %r14, %rbx
+ adcq %r11, %rbp
+ adcq %r10, %rcx
+ adcq %rdi, %r15
+ adcq %r13, %r12
+ adcq $0, %r9
+ movq -80(%rsp), %rdx ## 8-byte Reload
+ imulq %r8, %rdx
+ mulxq -64(%rsp), %rdi, %rsi ## 8-byte Folded Reload
+ movq %rsi, -80(%rsp) ## 8-byte Spill
+ addq %r8, %rdi
+ mulxq -72(%rsp), %r8, %rsi ## 8-byte Folded Reload
+ movq %rsi, -112(%rsp) ## 8-byte Spill
+ adcq %rax, %r8
+ movq 8(%rsp), %r11 ## 8-byte Reload
+ mulxq %r11, %rsi, %rax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ adcq %rbx, %rsi
+ movq (%rsp), %r14 ## 8-byte Reload
+ mulxq %r14, %rdi, %rax
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ adcq %rbp, %rdi
+ movq -8(%rsp), %rbp ## 8-byte Reload
+ mulxq %rbp, %rax, %rbx
+ movq %rbx, -104(%rsp) ## 8-byte Spill
+ adcq %rcx, %rax
+ movq -16(%rsp), %rbx ## 8-byte Reload
+ mulxq %rbx, %rcx, %r13
+ adcq %r15, %rcx
+ mulxq -56(%rsp), %rdx, %r15 ## 8-byte Folded Reload
+ adcq %r12, %rdx
+ adcq $0, %r9
+ addq -80(%rsp), %r8 ## 8-byte Folded Reload
+ adcq -112(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -88(%rsp), %rdi ## 8-byte Folded Reload
+ adcq -96(%rsp), %rax ## 8-byte Folded Reload
+ adcq -104(%rsp), %rcx ## 8-byte Folded Reload
+ adcq %r13, %rdx
+ adcq %r15, %r9
+ movq %r8, %r13
+ subq -64(%rsp), %r13 ## 8-byte Folded Reload
+ movq %rsi, %r12
+ sbbq -72(%rsp), %r12 ## 8-byte Folded Reload
+ movq %rdi, %r10
+ sbbq %r11, %r10
+ movq %rax, %r11
+ sbbq %r14, %r11
+ movq %rcx, %r14
+ sbbq %rbp, %r14
+ movq %rdx, %r15
+ sbbq %rbx, %r15
+ movq %r9, %rbp
+ sbbq -56(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, %rbx
+ sarq $63, %rbx
+ cmovsq %r8, %r13
+ movq 32(%rsp), %rbx ## 8-byte Reload
+ movq %r13, (%rbx)
+ cmovsq %rsi, %r12
+ movq %r12, 8(%rbx)
+ cmovsq %rdi, %r10
+ movq %r10, 16(%rbx)
+ cmovsq %rax, %r11
+ movq %r11, 24(%rbx)
+ cmovsq %rcx, %r14
+ movq %r14, 32(%rbx)
+ cmovsq %rdx, %r15
+ movq %r15, 40(%rbx)
+ cmovsq %r9, %rbp
+ movq %rbp, 48(%rbx)
+ addq $40, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montRed7Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_montRed7Lbmi2: ## @mcl_fp_montRed7Lbmi2
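+## Montgomery reduction: folds the 14-limb input at (%rsi) down to a 7-limb
+## result at (%rdi) modulo the 7-limb modulus at (%rdx), using the constant at
+## -8(%rdx) for each reduction step and a final conditional subtraction.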
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $56, %rsp
+ movq %rdx, %rcx
+ movq %rdi, 48(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %rax
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq (%rsi), %r13
+ movq %r13, %rdx
+ imulq %rax, %rdx
+ movq 48(%rcx), %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ mulxq %rax, %rdi, %rax
+ movq %rdi, -64(%rsp) ## 8-byte Spill
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq 40(%rcx), %rax
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ mulxq %rax, %r10, %rax
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq 32(%rcx), %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ mulxq %rax, %r14, %r8
+ movq 24(%rcx), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ mulxq %rax, %r12, %r15
+ movq 16(%rcx), %rax
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ mulxq %rax, %rbp, %rbx
+ movq (%rcx), %rdi
+ movq %rdi, -48(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ mulxq %rax, %rax, %r11
+ mulxq %rdi, %rdx, %r9
+ addq %rax, %r9
+ adcq %rbp, %r11
+ adcq %r12, %rbx
+ adcq %r14, %r15
+ adcq %r10, %r8
+ movq -128(%rsp), %rcx ## 8-byte Reload
+ adcq -64(%rsp), %rcx ## 8-byte Folded Reload
+ movq -120(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %r13, %rdx
+ adcq 8(%rsi), %r9
+ adcq 16(%rsi), %r11
+ adcq 24(%rsi), %rbx
+ adcq 32(%rsi), %r15
+ adcq 40(%rsi), %r8
+ movq %r8, -112(%rsp) ## 8-byte Spill
+ adcq 48(%rsi), %rcx
+ movq %rcx, -128(%rsp) ## 8-byte Spill
+ adcq 56(%rsi), %rax
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq 104(%rsi), %r8
+ movq 96(%rsi), %rdx
+ movq 88(%rsi), %rdi
+ movq 80(%rsi), %rbp
+ movq 72(%rsi), %rax
+ movq 64(%rsi), %rcx
+ adcq $0, %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ adcq $0, %rax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ adcq $0, %rbp
+ movq %rbp, -56(%rsp) ## 8-byte Spill
+ adcq $0, %rdi
+ movq %rdi, -80(%rsp) ## 8-byte Spill
+ adcq $0, %rdx
+ movq %rdx, 24(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, -64(%rsp) ## 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq %r9, %rdx
+ imulq -72(%rsp), %rdx ## 8-byte Folded Reload
+ movq -16(%rsp), %r13 ## 8-byte Reload
+ mulxq %r13, %rcx, %rax
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ mulxq -24(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ movq %rcx, 40(%rsp) ## 8-byte Spill
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ mulxq -32(%rsp), %r14, %r12 ## 8-byte Folded Reload
+ mulxq 16(%rsp), %r8, %rax ## 8-byte Folded Reload
+ mulxq -40(%rsp), %rsi, %r10 ## 8-byte Folded Reload
+ mulxq -8(%rsp), %rcx, %rdi ## 8-byte Folded Reload
+ mulxq -48(%rsp), %rdx, %rbp ## 8-byte Folded Reload
+ addq %rcx, %rbp
+ adcq %rsi, %rdi
+ adcq %r8, %r10
+ adcq %r14, %rax
+ movq %rax, %rcx
+ adcq 40(%rsp), %r12 ## 8-byte Folded Reload
+ movq -104(%rsp), %rsi ## 8-byte Reload
+ adcq 32(%rsp), %rsi ## 8-byte Folded Reload
+ movq -96(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %r9, %rdx
+ adcq %r11, %rbp
+ adcq %rbx, %rdi
+ adcq %r15, %r10
+ adcq -112(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -112(%rsp) ## 8-byte Spill
+ adcq -128(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -120(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, -104(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ adcq $0, -88(%rsp) ## 8-byte Folded Spill
+ adcq $0, -56(%rsp) ## 8-byte Folded Spill
+ adcq $0, -80(%rsp) ## 8-byte Folded Spill
+ adcq $0, 24(%rsp) ## 8-byte Folded Spill
+ adcq $0, -64(%rsp) ## 8-byte Folded Spill
+ adcq $0, (%rsp) ## 8-byte Folded Spill
+ movq %rbp, %rdx
+ imulq -72(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq %r13, %rcx, %rax
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq -24(%rsp), %r15 ## 8-byte Reload
+ mulxq %r15, %rcx, %rax
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ mulxq -32(%rsp), %r11, %r13 ## 8-byte Folded Reload
+ mulxq 16(%rsp), %r9, %r14 ## 8-byte Folded Reload
+ mulxq -40(%rsp), %rsi, %r8 ## 8-byte Folded Reload
+ mulxq -8(%rsp), %rax, %rbx ## 8-byte Folded Reload
+ mulxq -48(%rsp), %rdx, %rcx ## 8-byte Folded Reload
+ addq %rax, %rcx
+ adcq %rsi, %rbx
+ adcq %r9, %r8
+ adcq %r11, %r14
+ adcq 32(%rsp), %r13 ## 8-byte Folded Reload
+ movq -128(%rsp), %rsi ## 8-byte Reload
+ adcq 8(%rsp), %rsi ## 8-byte Folded Reload
+ movq -120(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %rbp, %rdx
+ adcq %rdi, %rcx
+ adcq %r10, %rbx
+ adcq -112(%rsp), %r8 ## 8-byte Folded Reload
+ adcq %r12, %r14
+ adcq -104(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -96(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, -128(%rsp) ## 8-byte Spill
+ adcq -88(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ adcq $0, -56(%rsp) ## 8-byte Folded Spill
+ adcq $0, -80(%rsp) ## 8-byte Folded Spill
+ adcq $0, 24(%rsp) ## 8-byte Folded Spill
+ adcq $0, -64(%rsp) ## 8-byte Folded Spill
+ adcq $0, (%rsp) ## 8-byte Folded Spill
+ movq %rcx, %rdx
+ imulq -72(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -16(%rsp), %rsi, %rax ## 8-byte Folded Reload
+ movq %rsi, -88(%rsp) ## 8-byte Spill
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ mulxq %r15, %rsi, %rax
+ movq %rsi, -112(%rsp) ## 8-byte Spill
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq -32(%rsp), %r15 ## 8-byte Reload
+ mulxq %r15, %rax, %r12
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ mulxq 16(%rsp), %r9, %rbp ## 8-byte Folded Reload
+ mulxq -40(%rsp), %rdi, %r10 ## 8-byte Folded Reload
+ mulxq -8(%rsp), %rsi, %r11 ## 8-byte Folded Reload
+ mulxq -48(%rsp), %rdx, %rax ## 8-byte Folded Reload
+ addq %rsi, %rax
+ adcq %rdi, %r11
+ adcq %r9, %r10
+ adcq 8(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -112(%rsp), %r12 ## 8-byte Folded Reload
+ movq -104(%rsp), %rdi ## 8-byte Reload
+ adcq -88(%rsp), %rdi ## 8-byte Folded Reload
+ movq -96(%rsp), %rsi ## 8-byte Reload
+ adcq $0, %rsi
+ addq %rcx, %rdx
+ adcq %rbx, %rax
+ adcq %r8, %r11
+ adcq %r14, %r10
+ adcq %r13, %rbp
+ adcq -128(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -120(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, -104(%rsp) ## 8-byte Spill
+ adcq -56(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, -96(%rsp) ## 8-byte Spill
+ adcq $0, -80(%rsp) ## 8-byte Folded Spill
+ adcq $0, 24(%rsp) ## 8-byte Folded Spill
+ adcq $0, -64(%rsp) ## 8-byte Folded Spill
+ adcq $0, (%rsp) ## 8-byte Folded Spill
+ movq %rax, %rdx
+ imulq -72(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -16(%rsp), %rsi, %rcx ## 8-byte Folded Reload
+ movq %rsi, -128(%rsp) ## 8-byte Spill
+ movq %rcx, -56(%rsp) ## 8-byte Spill
+ mulxq -24(%rsp), %rsi, %rcx ## 8-byte Folded Reload
+ movq %rsi, -88(%rsp) ## 8-byte Spill
+ movq %rcx, -120(%rsp) ## 8-byte Spill
+ mulxq %r15, %rcx, %r13
+ movq %rcx, -112(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %r15 ## 8-byte Reload
+ mulxq %r15, %r9, %r14
+ mulxq -40(%rsp), %rdi, %rbx ## 8-byte Folded Reload
+ mulxq -8(%rsp), %rsi, %r8 ## 8-byte Folded Reload
+ mulxq -48(%rsp), %rdx, %rcx ## 8-byte Folded Reload
+ addq %rsi, %rcx
+ adcq %rdi, %r8
+ adcq %r9, %rbx
+ adcq -112(%rsp), %r14 ## 8-byte Folded Reload
+ adcq -88(%rsp), %r13 ## 8-byte Folded Reload
+ movq -120(%rsp), %rdi ## 8-byte Reload
+ adcq -128(%rsp), %rdi ## 8-byte Folded Reload
+ movq -56(%rsp), %rsi ## 8-byte Reload
+ adcq $0, %rsi
+ addq %rax, %rdx
+ adcq %r11, %rcx
+ adcq %r10, %r8
+ adcq %rbp, %rbx
+ adcq %r12, %r14
+ adcq -104(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -96(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, -120(%rsp) ## 8-byte Spill
+ adcq -80(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, -56(%rsp) ## 8-byte Spill
+ adcq $0, 24(%rsp) ## 8-byte Folded Spill
+ adcq $0, -64(%rsp) ## 8-byte Folded Spill
+ adcq $0, (%rsp) ## 8-byte Folded Spill
+ movq %rcx, %rdx
+ imulq -72(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -16(%rsp), %rsi, %rax ## 8-byte Folded Reload
+ movq %rsi, -96(%rsp) ## 8-byte Spill
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ mulxq -24(%rsp), %rsi, %rax ## 8-byte Folded Reload
+ movq %rsi, -104(%rsp) ## 8-byte Spill
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ mulxq -32(%rsp), %rax, %r12 ## 8-byte Folded Reload
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %r15, %r11
+ mulxq %r11, %rax, %r15
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ mulxq -40(%rsp), %rdi, %rbp ## 8-byte Folded Reload
+ movq -8(%rsp), %r9 ## 8-byte Reload
+ mulxq %r9, %rax, %r10
+ mulxq -48(%rsp), %rdx, %rsi ## 8-byte Folded Reload
+ addq %rax, %rsi
+ adcq %rdi, %r10
+ adcq -112(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -88(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -104(%rsp), %r12 ## 8-byte Folded Reload
+ movq -128(%rsp), %rdi ## 8-byte Reload
+ adcq -96(%rsp), %rdi ## 8-byte Folded Reload
+ movq -80(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %rcx, %rdx
+ adcq %r8, %rsi
+ adcq %rbx, %r10
+ adcq %r14, %rbp
+ adcq %r13, %r15
+ adcq -120(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -56(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, -128(%rsp) ## 8-byte Spill
+ adcq 24(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ adcq $0, -64(%rsp) ## 8-byte Folded Spill
+ adcq $0, (%rsp) ## 8-byte Folded Spill
+ movq -72(%rsp), %rdx ## 8-byte Reload
+ imulq %rsi, %rdx
+ mulxq %r11, %rcx, %rax
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ mulxq %r9, %rbx, %rdi
+ mulxq -48(%rsp), %r11, %r14 ## 8-byte Folded Reload
+ addq %rbx, %r14
+ mulxq -40(%rsp), %rbx, %r13 ## 8-byte Folded Reload
+ adcq %rdi, %rbx
+ adcq %rcx, %r13
+ mulxq -32(%rsp), %r8, %rdi ## 8-byte Folded Reload
+ adcq -72(%rsp), %r8 ## 8-byte Folded Reload
+ mulxq -24(%rsp), %rcx, %r9 ## 8-byte Folded Reload
+ adcq %rdi, %rcx
+ mulxq -16(%rsp), %rdx, %rdi ## 8-byte Folded Reload
+ adcq %r9, %rdx
+ adcq $0, %rdi
+ addq %rsi, %r11
+ adcq %r10, %r14
+ adcq %rbp, %rbx
+ adcq %r15, %r13
+ adcq %r12, %r8
+ adcq -128(%rsp), %rcx ## 8-byte Folded Reload
+ adcq -80(%rsp), %rdx ## 8-byte Folded Reload
+ adcq -64(%rsp), %rdi ## 8-byte Folded Reload
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ movq %r14, %rsi
+ subq -48(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rbx, %rbp
+ sbbq -8(%rsp), %rbp ## 8-byte Folded Reload
+ movq %r13, %r9
+ sbbq -40(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r8, %r10
+ sbbq 16(%rsp), %r10 ## 8-byte Folded Reload
+ movq %rcx, %r11
+ sbbq -32(%rsp), %r11 ## 8-byte Folded Reload
+ movq %rdx, %r15
+ sbbq -24(%rsp), %r15 ## 8-byte Folded Reload
+ movq %rdi, %r12
+ sbbq -16(%rsp), %r12 ## 8-byte Folded Reload
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %rdi, %r12
+ testb %al, %al
+ cmovneq %r14, %rsi
+ movq 48(%rsp), %rdi ## 8-byte Reload
+ movq %rsi, (%rdi)
+ cmovneq %rbx, %rbp
+ movq %rbp, 8(%rdi)
+ cmovneq %r13, %r9
+ movq %r9, 16(%rdi)
+ cmovneq %r8, %r10
+ movq %r10, 24(%rdi)
+ cmovneq %rcx, %r11
+ movq %r11, 32(%rdi)
+ cmovneq %rdx, %r15
+ movq %r15, 40(%rdi)
+ movq %r12, 48(%rdi)
+ addq $56, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_addPre7Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_addPre7Lbmi2: ## @mcl_fp_addPre7Lbmi2
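+## 7-limb add with no modular reduction: (%rdi) = (%rsi) + (%rdx); the carry out
+## is returned in %rax.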
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 48(%rdx), %r8
+ movq 48(%rsi), %r14
+ movq 40(%rdx), %r9
+ movq 40(%rsi), %r15
+ movq 32(%rdx), %r10
+ movq 24(%rdx), %r11
+ movq 16(%rdx), %r12
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ movq 24(%rsi), %rax
+ movq 32(%rsi), %rbx
+ adcq 16(%rsi), %r12
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r12, 16(%rdi)
+ adcq %r11, %rax
+ movq %rax, 24(%rdi)
+ adcq %r10, %rbx
+ movq %rbx, 32(%rdi)
+ adcq %r9, %r15
+ movq %r15, 40(%rdi)
+ adcq %r8, %r14
+ movq %r14, 48(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_subPre7Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_subPre7Lbmi2: ## @mcl_fp_subPre7Lbmi2
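+## 7-limb subtract with no modular reduction: (%rdi) = (%rsi) - (%rdx); the
+## borrow out is returned in %rax.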
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 48(%rdx), %r8
+ movq 48(%rsi), %r10
+ movq 40(%rdx), %r9
+ movq 40(%rsi), %r15
+ movq 24(%rdx), %r11
+ movq 32(%rdx), %r14
+ movq (%rsi), %rbx
+ movq 8(%rsi), %r12
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %r12
+ movq 16(%rsi), %rcx
+ sbbq 16(%rdx), %rcx
+ movq 32(%rsi), %rdx
+ movq 24(%rsi), %rsi
+ movq %rbx, (%rdi)
+ movq %r12, 8(%rdi)
+ movq %rcx, 16(%rdi)
+ sbbq %r11, %rsi
+ movq %rsi, 24(%rdi)
+ sbbq %r14, %rdx
+ movq %rdx, 32(%rdi)
+ sbbq %r9, %r15
+ movq %r15, 40(%rdi)
+ sbbq %r8, %r10
+ movq %r10, 48(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_shr1_7Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_shr1_7Lbmi2: ## @mcl_fp_shr1_7Lbmi2
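+## Logical right shift of a 7-limb value by one bit: (%rdi) = (%rsi) >> 1,
+## implemented with shrdq across adjacent limbs.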
+## BB#0:
+ movq 48(%rsi), %r8
+ movq 40(%rsi), %r9
+ movq 32(%rsi), %r10
+ movq 24(%rsi), %rax
+ movq 16(%rsi), %rcx
+ movq (%rsi), %rdx
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rdx
+ movq %rdx, (%rdi)
+ shrdq $1, %rcx, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rax, %rcx
+ movq %rcx, 16(%rdi)
+ shrdq $1, %r10, %rax
+ movq %rax, 24(%rdi)
+ shrdq $1, %r9, %r10
+ movq %r10, 32(%rdi)
+ shrdq $1, %r8, %r9
+ movq %r9, 40(%rdi)
+ shrq %r8
+ movq %r8, 48(%rdi)
+ retq
+
+ .globl _mcl_fp_add7Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_add7Lbmi2: ## @mcl_fp_add7Lbmi2
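+## Modular add: (%rdi) = (%rsi) + (%rdx) mod (%rcx). The raw sum is stored
+## first, then overwritten with sum - p when the trial subtraction does not
+## borrow (the %nocarry path).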
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 48(%rdx), %r14
+ movq 48(%rsi), %r8
+ movq 40(%rdx), %r15
+ movq 40(%rsi), %r9
+ movq 32(%rdx), %r12
+ movq 24(%rdx), %r13
+ movq 16(%rdx), %r10
+ movq (%rdx), %r11
+ movq 8(%rdx), %rdx
+ addq (%rsi), %r11
+ adcq 8(%rsi), %rdx
+ movq 24(%rsi), %rax
+ movq 32(%rsi), %rbx
+ adcq 16(%rsi), %r10
+ movq %r11, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r10, 16(%rdi)
+ adcq %r13, %rax
+ movq %rax, 24(%rdi)
+ adcq %r12, %rbx
+ movq %rbx, 32(%rdi)
+ adcq %r15, %r9
+ movq %r9, 40(%rdi)
+ adcq %r14, %r8
+ movq %r8, 48(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %r11
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r10
+ sbbq 24(%rcx), %rax
+ sbbq 32(%rcx), %rbx
+ sbbq 40(%rcx), %r9
+ sbbq 48(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne LBB104_2
+## BB#1: ## %nocarry
+ movq %r11, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r10, 16(%rdi)
+ movq %rax, 24(%rdi)
+ movq %rbx, 32(%rdi)
+ movq %r9, 40(%rdi)
+ movq %r8, 48(%rdi)
+LBB104_2: ## %carry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_addNF7Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_addNF7Lbmi2: ## @mcl_fp_addNF7Lbmi2
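+## Modular add, NF variant: computes the full sum, then selects between the sum
+## and sum - p with cmovs based on the sign of the trial subtraction.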
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 48(%rdx), %r9
+ movq 40(%rdx), %rbp
+ movq 32(%rdx), %r10
+ movq 24(%rdx), %r11
+ movq 16(%rdx), %r14
+ movq (%rdx), %r12
+ movq 8(%rdx), %r15
+ addq (%rsi), %r12
+ adcq 8(%rsi), %r15
+ adcq 16(%rsi), %r14
+ adcq 24(%rsi), %r11
+ adcq 32(%rsi), %r10
+ adcq 40(%rsi), %rbp
+ movq %rbp, -8(%rsp) ## 8-byte Spill
+ adcq 48(%rsi), %r9
+ movq %r12, %rsi
+ subq (%rcx), %rsi
+ movq %r15, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r14, %rax
+ sbbq 16(%rcx), %rax
+ movq %r11, %rbx
+ sbbq 24(%rcx), %rbx
+ movq %r10, %r13
+ sbbq 32(%rcx), %r13
+ sbbq 40(%rcx), %rbp
+ movq %r9, %r8
+ sbbq 48(%rcx), %r8
+ movq %r8, %rcx
+ sarq $63, %rcx
+ cmovsq %r12, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r15, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r14, %rax
+ movq %rax, 16(%rdi)
+ cmovsq %r11, %rbx
+ movq %rbx, 24(%rdi)
+ cmovsq %r10, %r13
+ movq %r13, 32(%rdi)
+ cmovsq -8(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 40(%rdi)
+ cmovsq %r9, %r8
+ movq %r8, 48(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_sub7Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_sub7Lbmi2: ## @mcl_fp_sub7Lbmi2
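+## Modular subtract: (%rdi) = (%rsi) - (%rdx) mod (%rcx). The raw difference is
+## stored first; if the subtraction borrowed, p is added back in the %carry path.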
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 48(%rdx), %r14
+ movq 48(%rsi), %r8
+ movq 40(%rdx), %r15
+ movq 40(%rsi), %r9
+ movq 32(%rdx), %r12
+ movq (%rsi), %rax
+ movq 8(%rsi), %r11
+ xorl %ebx, %ebx
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r11
+ movq 16(%rsi), %r13
+ sbbq 16(%rdx), %r13
+ movq 32(%rsi), %r10
+ movq 24(%rsi), %rsi
+ sbbq 24(%rdx), %rsi
+ movq %rax, (%rdi)
+ movq %r11, 8(%rdi)
+ movq %r13, 16(%rdi)
+ movq %rsi, 24(%rdi)
+ sbbq %r12, %r10
+ movq %r10, 32(%rdi)
+ sbbq %r15, %r9
+ movq %r9, 40(%rdi)
+ sbbq %r14, %r8
+ movq %r8, 48(%rdi)
+ sbbq $0, %rbx
+ testb $1, %bl
+ je LBB106_2
+## BB#1: ## %carry
+ movq 48(%rcx), %r14
+ movq 40(%rcx), %r15
+ movq 32(%rcx), %r12
+ movq 24(%rcx), %rbx
+ movq 8(%rcx), %rdx
+ movq 16(%rcx), %rbp
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %r11, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r13, %rbp
+ movq %rbp, 16(%rdi)
+ adcq %rsi, %rbx
+ movq %rbx, 24(%rdi)
+ adcq %r10, %r12
+ movq %r12, 32(%rdi)
+ adcq %r9, %r15
+ movq %r15, 40(%rdi)
+ adcq %r8, %r14
+ movq %r14, 48(%rdi)
+LBB106_2: ## %nocarry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_subNF7Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_subNF7Lbmi2: ## @mcl_fp_subNF7Lbmi2
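+## Modular subtract, NF variant: operands are loaded with SSE moves, the
+## difference is taken, and a mask built from the sign of the top limb decides
+## whether the modulus at (%rcx) is added back.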
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq 48(%rsi), %r11
+ movdqu (%rdx), %xmm0
+ movdqu 16(%rdx), %xmm1
+ movdqu 32(%rdx), %xmm2
+ pshufd $78, %xmm2, %xmm3 ## xmm3 = xmm2[2,3,0,1]
+ movd %xmm3, %r14
+ movdqu (%rsi), %xmm3
+ movdqu 16(%rsi), %xmm4
+ movdqu 32(%rsi), %xmm5
+ pshufd $78, %xmm5, %xmm6 ## xmm6 = xmm5[2,3,0,1]
+ movd %xmm6, %rcx
+ movd %xmm2, %r15
+ movd %xmm5, %r9
+ pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1]
+ movd %xmm2, %r12
+ pshufd $78, %xmm4, %xmm2 ## xmm2 = xmm4[2,3,0,1]
+ movd %xmm2, %r10
+ movd %xmm1, %r13
+ pshufd $78, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,0,1]
+ movd %xmm1, %rax
+ pshufd $78, %xmm3, %xmm1 ## xmm1 = xmm3[2,3,0,1]
+ movd %xmm0, %rbx
+ movd %xmm3, %rsi
+ subq %rbx, %rsi
+ movd %xmm1, %rbx
+ sbbq %rax, %rbx
+ movd %xmm4, %rbp
+ sbbq %r13, %rbp
+ sbbq %r12, %r10
+ sbbq %r15, %r9
+ sbbq %r14, %rcx
+ movq %rcx, -8(%rsp) ## 8-byte Spill
+ sbbq 48(%rdx), %r11
+ movq %r11, %rax
+ sarq $63, %rax
+ movq %rax, %rdx
+ shldq $1, %r11, %rdx
+ andq (%r8), %rdx
+ movq 48(%r8), %r14
+ andq %rax, %r14
+ movq 40(%r8), %r15
+ andq %rax, %r15
+ movq 32(%r8), %r12
+ andq %rax, %r12
+ movq 24(%r8), %r13
+ andq %rax, %r13
+ movq 16(%r8), %rcx
+ andq %rax, %rcx
+ andq 8(%r8), %rax
+ addq %rsi, %rdx
+ adcq %rbx, %rax
+ movq %rdx, (%rdi)
+ movq %rax, 8(%rdi)
+ adcq %rbp, %rcx
+ movq %rcx, 16(%rdi)
+ adcq %r10, %r13
+ movq %r13, 24(%rdi)
+ adcq %r9, %r12
+ movq %r12, 32(%rdi)
+ adcq -8(%rsp), %r15 ## 8-byte Folded Reload
+ movq %r15, 40(%rdi)
+ adcq %r11, %r14
+ movq %r14, 48(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_add7Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_add7Lbmi2: ## @mcl_fpDbl_add7Lbmi2
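+## Double-width add: adds two 14-limb values; the low 7 limbs are stored as-is
+## and the high 7 limbs are conditionally reduced modulo the modulus at (%rcx).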
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq 104(%rdx), %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq 96(%rdx), %rax
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ movq 88(%rdx), %r11
+ movq 80(%rdx), %r14
+ movq 24(%rsi), %r15
+ movq 32(%rsi), %r12
+ movq 16(%rdx), %r9
+ movq (%rdx), %rax
+ movq 8(%rdx), %rbx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rbx
+ adcq 16(%rsi), %r9
+ adcq 24(%rdx), %r15
+ adcq 32(%rdx), %r12
+ movq 72(%rdx), %r13
+ movq 64(%rdx), %rbp
+ movq %rax, (%rdi)
+ movq 56(%rdx), %r10
+ movq %rbx, 8(%rdi)
+ movq 48(%rdx), %rcx
+ movq 40(%rdx), %rdx
+ movq %r9, 16(%rdi)
+ movq 104(%rsi), %r9
+ movq %r15, 24(%rdi)
+ movq 40(%rsi), %rbx
+ adcq %rdx, %rbx
+ movq 96(%rsi), %r15
+ movq %r12, 32(%rdi)
+ movq 48(%rsi), %rdx
+ adcq %rcx, %rdx
+ movq 88(%rsi), %rax
+ movq %rbx, 40(%rdi)
+ movq 56(%rsi), %rcx
+ adcq %r10, %rcx
+ movq 80(%rsi), %r12
+ movq %rdx, 48(%rdi)
+ movq 72(%rsi), %rdx
+ movq 64(%rsi), %rsi
+ adcq %rbp, %rsi
+ adcq %r13, %rdx
+ adcq %r14, %r12
+ adcq %r11, %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ adcq -24(%rsp), %r15 ## 8-byte Folded Reload
+ movq %r15, -24(%rsp) ## 8-byte Spill
+ adcq -8(%rsp), %r9 ## 8-byte Folded Reload
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ movq %rcx, %rbx
+ subq (%r8), %rbx
+ movq %rsi, %r10
+ sbbq 8(%r8), %r10
+ movq %rdx, %r11
+ sbbq 16(%r8), %r11
+ movq %r12, %r14
+ sbbq 24(%r8), %r14
+ movq -16(%rsp), %r13 ## 8-byte Reload
+ sbbq 32(%r8), %r13
+ sbbq 40(%r8), %r15
+ movq %r9, %rax
+ sbbq 48(%r8), %rax
+ sbbq $0, %rbp
+ andl $1, %ebp
+ cmovneq %rcx, %rbx
+ movq %rbx, 56(%rdi)
+ testb %bpl, %bpl
+ cmovneq %rsi, %r10
+ movq %r10, 64(%rdi)
+ cmovneq %rdx, %r11
+ movq %r11, 72(%rdi)
+ cmovneq %r12, %r14
+ movq %r14, 80(%rdi)
+ cmovneq -16(%rsp), %r13 ## 8-byte Folded Reload
+ movq %r13, 88(%rdi)
+ cmovneq -24(%rsp), %r15 ## 8-byte Folded Reload
+ movq %r15, 96(%rdi)
+ cmovneq %r9, %rax
+ movq %rax, 104(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_sub7Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_sub7Lbmi2: ## @mcl_fpDbl_sub7Lbmi2
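+## Double-width subtract: subtracts two 14-limb values; the low 7 limbs are
+## stored as-is and, on borrow, the modulus at (%rcx) is added back into the
+## high half.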
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq 104(%rdx), %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq 96(%rdx), %r10
+ movq 88(%rdx), %r14
+ movq 16(%rsi), %rax
+ movq (%rsi), %r15
+ movq 8(%rsi), %r11
+ xorl %ecx, %ecx
+ subq (%rdx), %r15
+ sbbq 8(%rdx), %r11
+ sbbq 16(%rdx), %rax
+ movq 24(%rsi), %rbx
+ sbbq 24(%rdx), %rbx
+ movq 32(%rsi), %r12
+ sbbq 32(%rdx), %r12
+ movq 80(%rdx), %r13
+ movq 72(%rdx), %rbp
+ movq %r15, (%rdi)
+ movq 64(%rdx), %r9
+ movq %r11, 8(%rdi)
+ movq 56(%rdx), %r15
+ movq %rax, 16(%rdi)
+ movq 48(%rdx), %r11
+ movq 40(%rdx), %rdx
+ movq %rbx, 24(%rdi)
+ movq 40(%rsi), %rbx
+ sbbq %rdx, %rbx
+ movq 104(%rsi), %rax
+ movq %r12, 32(%rdi)
+ movq 48(%rsi), %r12
+ sbbq %r11, %r12
+ movq 96(%rsi), %r11
+ movq %rbx, 40(%rdi)
+ movq 56(%rsi), %rdx
+ sbbq %r15, %rdx
+ movq 88(%rsi), %r15
+ movq %r12, 48(%rdi)
+ movq 64(%rsi), %rbx
+ sbbq %r9, %rbx
+ movq 80(%rsi), %r12
+ movq 72(%rsi), %r9
+ sbbq %rbp, %r9
+ sbbq %r13, %r12
+ sbbq %r14, %r15
+ sbbq %r10, %r11
+ sbbq -8(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movl $0, %ebp
+ sbbq $0, %rbp
+ andl $1, %ebp
+ movq (%r8), %r10
+ cmoveq %rcx, %r10
+ testb %bpl, %bpl
+ movq 16(%r8), %rbp
+ cmoveq %rcx, %rbp
+ movq 8(%r8), %rsi
+ cmoveq %rcx, %rsi
+ movq 48(%r8), %r14
+ cmoveq %rcx, %r14
+ movq 40(%r8), %r13
+ cmoveq %rcx, %r13
+ movq 32(%r8), %rax
+ cmoveq %rcx, %rax
+ cmovneq 24(%r8), %rcx
+ addq %rdx, %r10
+ adcq %rbx, %rsi
+ movq %r10, 56(%rdi)
+ movq %rsi, 64(%rdi)
+ adcq %r9, %rbp
+ movq %rbp, 72(%rdi)
+ adcq %r12, %rcx
+ movq %rcx, 80(%rdi)
+ adcq %r15, %rax
+ movq %rax, 88(%rdi)
+ adcq %r11, %r13
+ movq %r13, 96(%rdi)
+ adcq -8(%rsp), %r14 ## 8-byte Folded Reload
+ movq %r14, 104(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .p2align 4, 0x90
+l_mulPv512x64: ## @mulPv512x64
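+## Local helper: multiplies the 8-limb (512-bit) value at (%rsi) by the 64-bit
+## scalar in %rdx with a mulx/adc chain, writing the 9-limb product to (%rdi).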
+## BB#0:
+ mulxq (%rsi), %rcx, %rax
+ movq %rcx, (%rdi)
+ mulxq 8(%rsi), %rcx, %r8
+ addq %rax, %rcx
+ movq %rcx, 8(%rdi)
+ mulxq 16(%rsi), %rcx, %r9
+ adcq %r8, %rcx
+ movq %rcx, 16(%rdi)
+ mulxq 24(%rsi), %rax, %rcx
+ adcq %r9, %rax
+ movq %rax, 24(%rdi)
+ mulxq 32(%rsi), %rax, %r8
+ adcq %rcx, %rax
+ movq %rax, 32(%rdi)
+ mulxq 40(%rsi), %rcx, %r9
+ adcq %r8, %rcx
+ movq %rcx, 40(%rdi)
+ mulxq 48(%rsi), %rax, %rcx
+ adcq %r9, %rax
+ movq %rax, 48(%rdi)
+ mulxq 56(%rsi), %rax, %rdx
+ adcq %rcx, %rax
+ movq %rax, 56(%rdi)
+ adcq $0, %rdx
+ movq %rdx, 64(%rdi)
+ movq %rdi, %rax
+ retq
+
+ .globl _mcl_fp_mulUnitPre8Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_mulUnitPre8Lbmi2: ## @mcl_fp_mulUnitPre8Lbmi2
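+## 8-limb-by-word multiply: calls l_mulPv512x64 on a stack buffer and copies the
+## 9-limb product to (%rdi).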
+## BB#0:
+ pushq %rbx
+ subq $80, %rsp
+ movq %rdi, %rbx
+ leaq 8(%rsp), %rdi
+ callq l_mulPv512x64
+ movq 72(%rsp), %r8
+ movq 64(%rsp), %r9
+ movq 56(%rsp), %r10
+ movq 48(%rsp), %r11
+ movq 40(%rsp), %rdi
+ movq 32(%rsp), %rax
+ movq 24(%rsp), %rcx
+ movq 8(%rsp), %rdx
+ movq 16(%rsp), %rsi
+ movq %rdx, (%rbx)
+ movq %rsi, 8(%rbx)
+ movq %rcx, 16(%rbx)
+ movq %rax, 24(%rbx)
+ movq %rdi, 32(%rbx)
+ movq %r11, 40(%rbx)
+ movq %r10, 48(%rbx)
+ movq %r9, 56(%rbx)
+ movq %r8, 64(%rbx)
+ addq $80, %rsp
+ popq %rbx
+ retq
+
+ .globl _mcl_fpDbl_mulPre8Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_mulPre8Lbmi2: ## @mcl_fpDbl_mulPre8Lbmi2
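+## Full 8x8-limb multiplication producing a 16-limb product, built as one level
+## of Karatsuba-style combination over three calls to _mcl_fpDbl_mulPre4Lbmi2 on
+## the 4-limb halves.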
+## BB#0:
+ pushq %rbp
+ movq %rsp, %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $200, %rsp
+ movq %rdx, %r15
+ movq %rsi, %rbx
+ movq %rdi, %r14
+ callq _mcl_fpDbl_mulPre4Lbmi2
+ leaq 64(%r14), %rdi
+ leaq 32(%rbx), %rsi
+ leaq 32(%r15), %rdx
+ callq _mcl_fpDbl_mulPre4Lbmi2
+ movq 56(%rbx), %r10
+ movq 48(%rbx), %rdx
+ movq (%rbx), %rsi
+ movq 8(%rbx), %rdi
+ addq 32(%rbx), %rsi
+ adcq 40(%rbx), %rdi
+ adcq 16(%rbx), %rdx
+ adcq 24(%rbx), %r10
+ pushfq
+ popq %r8
+ xorl %r9d, %r9d
+ movq 56(%r15), %rcx
+ movq 48(%r15), %r13
+ movq (%r15), %r12
+ movq 8(%r15), %rbx
+ addq 32(%r15), %r12
+ adcq 40(%r15), %rbx
+ adcq 16(%r15), %r13
+ adcq 24(%r15), %rcx
+ movl $0, %eax
+ cmovbq %r10, %rax
+ movq %rax, -88(%rbp) ## 8-byte Spill
+ movl $0, %eax
+ cmovbq %rdx, %rax
+ movq %rax, -80(%rbp) ## 8-byte Spill
+ movl $0, %eax
+ cmovbq %rdi, %rax
+ movq %rax, -72(%rbp) ## 8-byte Spill
+ movl $0, %eax
+ cmovbq %rsi, %rax
+ movq %rax, -64(%rbp) ## 8-byte Spill
+ sbbq %r15, %r15
+ movq %rsi, -168(%rbp)
+ movq %rdi, -160(%rbp)
+ movq %rdx, -152(%rbp)
+ movq %r10, -144(%rbp)
+ movq %r12, -136(%rbp)
+ movq %rbx, -128(%rbp)
+ movq %r13, -120(%rbp)
+ movq %rcx, -112(%rbp)
+ pushq %r8
+ popfq
+ cmovaeq %r9, %rcx
+ movq %rcx, -48(%rbp) ## 8-byte Spill
+ cmovaeq %r9, %r13
+ cmovaeq %r9, %rbx
+ cmovaeq %r9, %r12
+ sbbq %rax, %rax
+ movq %rax, -56(%rbp) ## 8-byte Spill
+ leaq -232(%rbp), %rdi
+ leaq -168(%rbp), %rsi
+ leaq -136(%rbp), %rdx
+ callq _mcl_fpDbl_mulPre4Lbmi2
+ addq -64(%rbp), %r12 ## 8-byte Folded Reload
+ adcq -72(%rbp), %rbx ## 8-byte Folded Reload
+ adcq -80(%rbp), %r13 ## 8-byte Folded Reload
+ movq -48(%rbp), %r10 ## 8-byte Reload
+ adcq -88(%rbp), %r10 ## 8-byte Folded Reload
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq -56(%rbp), %rdx ## 8-byte Reload
+ andl %edx, %r15d
+ andl $1, %r15d
+ addq -200(%rbp), %r12
+ adcq -192(%rbp), %rbx
+ adcq -184(%rbp), %r13
+ adcq -176(%rbp), %r10
+ adcq %rax, %r15
+ movq -208(%rbp), %rax
+ movq -216(%rbp), %rcx
+ movq -232(%rbp), %rsi
+ movq -224(%rbp), %rdx
+ subq (%r14), %rsi
+ sbbq 8(%r14), %rdx
+ sbbq 16(%r14), %rcx
+ sbbq 24(%r14), %rax
+ movq 32(%r14), %rdi
+ movq %rdi, -80(%rbp) ## 8-byte Spill
+ movq 40(%r14), %r8
+ movq %r8, -88(%rbp) ## 8-byte Spill
+ sbbq %rdi, %r12
+ sbbq %r8, %rbx
+ movq 48(%r14), %rdi
+ movq %rdi, -72(%rbp) ## 8-byte Spill
+ sbbq %rdi, %r13
+ movq 56(%r14), %rdi
+ movq %rdi, -64(%rbp) ## 8-byte Spill
+ sbbq %rdi, %r10
+ sbbq $0, %r15
+ movq 64(%r14), %r11
+ subq %r11, %rsi
+ movq 72(%r14), %rdi
+ movq %rdi, -56(%rbp) ## 8-byte Spill
+ sbbq %rdi, %rdx
+ movq 80(%r14), %rdi
+ movq %rdi, -48(%rbp) ## 8-byte Spill
+ sbbq %rdi, %rcx
+ movq 88(%r14), %rdi
+ movq %rdi, -104(%rbp) ## 8-byte Spill
+ sbbq %rdi, %rax
+ movq 96(%r14), %rdi
+ movq %rdi, -96(%rbp) ## 8-byte Spill
+ sbbq %rdi, %r12
+ movq 104(%r14), %rdi
+ sbbq %rdi, %rbx
+ movq 112(%r14), %r8
+ sbbq %r8, %r13
+ movq 120(%r14), %r9
+ sbbq %r9, %r10
+ sbbq $0, %r15
+ addq -80(%rbp), %rsi ## 8-byte Folded Reload
+ adcq -88(%rbp), %rdx ## 8-byte Folded Reload
+ movq %rsi, 32(%r14)
+ adcq -72(%rbp), %rcx ## 8-byte Folded Reload
+ movq %rdx, 40(%r14)
+ adcq -64(%rbp), %rax ## 8-byte Folded Reload
+ movq %rcx, 48(%r14)
+ adcq %r11, %r12
+ movq %rax, 56(%r14)
+ movq %r12, 64(%r14)
+ adcq -56(%rbp), %rbx ## 8-byte Folded Reload
+ movq %rbx, 72(%r14)
+ adcq -48(%rbp), %r13 ## 8-byte Folded Reload
+ movq %r13, 80(%r14)
+ adcq -104(%rbp), %r10 ## 8-byte Folded Reload
+ movq %r10, 88(%r14)
+ adcq -96(%rbp), %r15 ## 8-byte Folded Reload
+ movq %r15, 96(%r14)
+ adcq $0, %rdi
+ movq %rdi, 104(%r14)
+ adcq $0, %r8
+ movq %r8, 112(%r14)
+ adcq $0, %r9
+ movq %r9, 120(%r14)
+ addq $200, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_sqrPre8Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_sqrPre8Lbmi2: ## @mcl_fpDbl_sqrPre8Lbmi2
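+## Full 8-limb squaring producing a 16-limb result, using the same one-level
+## Karatsuba-style structure as mulPre8 but with the single input supplying both
+## halves.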
+## BB#0:
+ pushq %rbp
+ movq %rsp, %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $200, %rsp
+ movq %rsi, %rbx
+ movq %rdi, %r14
+ movq %rbx, %rdx
+ callq _mcl_fpDbl_mulPre4Lbmi2
+ leaq 64(%r14), %rdi
+ leaq 32(%rbx), %rsi
+ movq %rsi, %rdx
+ callq _mcl_fpDbl_mulPre4Lbmi2
+ movq 56(%rbx), %r15
+ movq 48(%rbx), %rax
+ movq (%rbx), %rcx
+ movq 8(%rbx), %rdx
+ addq 32(%rbx), %rcx
+ adcq 40(%rbx), %rdx
+ adcq 16(%rbx), %rax
+ adcq 24(%rbx), %r15
+ pushfq
+ popq %r8
+ pushfq
+ popq %r9
+ pushfq
+ popq %r10
+ pushfq
+ popq %rdi
+ pushfq
+ popq %rbx
+ sbbq %rsi, %rsi
+ movq %rsi, -56(%rbp) ## 8-byte Spill
+ leaq (%rcx,%rcx), %rsi
+ xorl %r11d, %r11d
+ pushq %rbx
+ popfq
+ cmovaeq %r11, %rsi
+ movq %rsi, -48(%rbp) ## 8-byte Spill
+ movq %rdx, %r13
+ shldq $1, %rcx, %r13
+ pushq %rdi
+ popfq
+ cmovaeq %r11, %r13
+ movq %rax, %r12
+ shldq $1, %rdx, %r12
+ pushq %r10
+ popfq
+ cmovaeq %r11, %r12
+ movq %r15, %rbx
+ movq %rcx, -168(%rbp)
+ movq %rdx, -160(%rbp)
+ movq %rax, -152(%rbp)
+ movq %r15, -144(%rbp)
+ movq %rcx, -136(%rbp)
+ movq %rdx, -128(%rbp)
+ movq %rax, -120(%rbp)
+ movq %r15, -112(%rbp)
+ shldq $1, %rax, %r15
+ pushq %r9
+ popfq
+ cmovaeq %r11, %r15
+ shrq $63, %rbx
+ pushq %r8
+ popfq
+ cmovaeq %r11, %rbx
+ leaq -232(%rbp), %rdi
+ leaq -168(%rbp), %rsi
+ leaq -136(%rbp), %rdx
+ callq _mcl_fpDbl_mulPre4Lbmi2
+ movq -56(%rbp), %rax ## 8-byte Reload
+ andl $1, %eax
+ movq -48(%rbp), %r10 ## 8-byte Reload
+ addq -200(%rbp), %r10
+ adcq -192(%rbp), %r13
+ adcq -184(%rbp), %r12
+ adcq -176(%rbp), %r15
+ adcq %rbx, %rax
+ movq %rax, %rbx
+ movq -208(%rbp), %rax
+ movq -216(%rbp), %rcx
+ movq -232(%rbp), %rsi
+ movq -224(%rbp), %rdx
+ subq (%r14), %rsi
+ sbbq 8(%r14), %rdx
+ sbbq 16(%r14), %rcx
+ sbbq 24(%r14), %rax
+ movq 32(%r14), %r9
+ movq %r9, -56(%rbp) ## 8-byte Spill
+ movq 40(%r14), %r8
+ movq %r8, -48(%rbp) ## 8-byte Spill
+ sbbq %r9, %r10
+ sbbq %r8, %r13
+ movq 48(%r14), %rdi
+ movq %rdi, -104(%rbp) ## 8-byte Spill
+ sbbq %rdi, %r12
+ movq 56(%r14), %rdi
+ movq %rdi, -96(%rbp) ## 8-byte Spill
+ sbbq %rdi, %r15
+ sbbq $0, %rbx
+ movq 64(%r14), %r11
+ subq %r11, %rsi
+ movq 72(%r14), %rdi
+ movq %rdi, -88(%rbp) ## 8-byte Spill
+ sbbq %rdi, %rdx
+ movq 80(%r14), %rdi
+ movq %rdi, -80(%rbp) ## 8-byte Spill
+ sbbq %rdi, %rcx
+ movq 88(%r14), %rdi
+ movq %rdi, -72(%rbp) ## 8-byte Spill
+ sbbq %rdi, %rax
+ movq 96(%r14), %rdi
+ movq %rdi, -64(%rbp) ## 8-byte Spill
+ sbbq %rdi, %r10
+ movq 104(%r14), %rdi
+ sbbq %rdi, %r13
+ movq 112(%r14), %r8
+ sbbq %r8, %r12
+ movq 120(%r14), %r9
+ sbbq %r9, %r15
+ sbbq $0, %rbx
+ addq -56(%rbp), %rsi ## 8-byte Folded Reload
+ adcq -48(%rbp), %rdx ## 8-byte Folded Reload
+ movq %rsi, 32(%r14)
+ adcq -104(%rbp), %rcx ## 8-byte Folded Reload
+ movq %rdx, 40(%r14)
+ adcq -96(%rbp), %rax ## 8-byte Folded Reload
+ movq %rcx, 48(%r14)
+ adcq %r11, %r10
+ movq %rax, 56(%r14)
+ movq %r10, 64(%r14)
+ adcq -88(%rbp), %r13 ## 8-byte Folded Reload
+ movq %r13, 72(%r14)
+ adcq -80(%rbp), %r12 ## 8-byte Folded Reload
+ movq %r12, 80(%r14)
+ adcq -72(%rbp), %r15 ## 8-byte Folded Reload
+ movq %r15, 88(%r14)
+ movq %rbx, %rax
+ adcq -64(%rbp), %rax ## 8-byte Folded Reload
+ movq %rax, 96(%r14)
+ adcq $0, %rdi
+ movq %rdi, 104(%r14)
+ adcq $0, %r8
+ movq %r8, 112(%r14)
+ adcq $0, %r9
+ movq %r9, 120(%r14)
+ addq $200, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_mont8Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_mont8Lbmi2: ## @mcl_fp_mont8Lbmi2
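+## 8-limb (512-bit) Montgomery multiplication: each of the 8 passes calls
+## l_mulPv512x64 once for y[i]*x and once for the reduction step derived from
+## the constant at -8(%rcx), ending with a conditional subtraction of the
+## modulus.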
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $1256, %rsp ## imm = 0x4E8
+ movq %rcx, %r13
+ movq %rdx, 64(%rsp) ## 8-byte Spill
+ movq %rsi, 72(%rsp) ## 8-byte Spill
+ movq %rdi, 96(%rsp) ## 8-byte Spill
+ movq -8(%r13), %rbx
+ movq %rbx, 80(%rsp) ## 8-byte Spill
+ movq %r13, 56(%rsp) ## 8-byte Spill
+ movq (%rdx), %rdx
+ leaq 1184(%rsp), %rdi
+ callq l_mulPv512x64
+ movq 1184(%rsp), %r15
+ movq 1192(%rsp), %r14
+ movq %r15, %rdx
+ imulq %rbx, %rdx
+ movq 1248(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ movq 1240(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ movq 1232(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 1224(%rsp), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq 1216(%rsp), %r12
+ movq 1208(%rsp), %rbx
+ movq 1200(%rsp), %rbp
+ leaq 1112(%rsp), %rdi
+ movq %r13, %rsi
+ callq l_mulPv512x64
+ addq 1112(%rsp), %r15
+ adcq 1120(%rsp), %r14
+ adcq 1128(%rsp), %rbp
+ movq %rbp, 88(%rsp) ## 8-byte Spill
+ adcq 1136(%rsp), %rbx
+ movq %rbx, 32(%rsp) ## 8-byte Spill
+ adcq 1144(%rsp), %r12
+ movq %r12, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %r13 ## 8-byte Reload
+ adcq 1152(%rsp), %r13
+ movq (%rsp), %rbx ## 8-byte Reload
+ adcq 1160(%rsp), %rbx
+ movq 40(%rsp), %rbp ## 8-byte Reload
+ adcq 1168(%rsp), %rbp
+ movq 24(%rsp), %rax ## 8-byte Reload
+ adcq 1176(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ sbbq %r15, %r15
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq 8(%rax), %rdx
+ leaq 1040(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ andl $1, %r15d
+ addq 1040(%rsp), %r14
+ movq 88(%rsp), %rax ## 8-byte Reload
+ adcq 1048(%rsp), %rax
+ movq %rax, 88(%rsp) ## 8-byte Spill
+ movq 32(%rsp), %rax ## 8-byte Reload
+ adcq 1056(%rsp), %rax
+ movq %rax, %r12
+ movq 8(%rsp), %rax ## 8-byte Reload
+ adcq 1064(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ adcq 1072(%rsp), %r13
+ movq %r13, 16(%rsp) ## 8-byte Spill
+ adcq 1080(%rsp), %rbx
+ movq %rbx, (%rsp) ## 8-byte Spill
+ adcq 1088(%rsp), %rbp
+ movq 24(%rsp), %rax ## 8-byte Reload
+ adcq 1096(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ adcq 1104(%rsp), %r15
+ movq %r15, 48(%rsp) ## 8-byte Spill
+ sbbq %r15, %r15
+ movq %r14, %rdx
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 968(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ andl $1, %r15d
+ addq 968(%rsp), %r14
+ movq 88(%rsp), %r13 ## 8-byte Reload
+ adcq 976(%rsp), %r13
+ adcq 984(%rsp), %r12
+ movq %r12, 32(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r14 ## 8-byte Reload
+ adcq 992(%rsp), %r14
+ movq 16(%rsp), %rbx ## 8-byte Reload
+ adcq 1000(%rsp), %rbx
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 1008(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ adcq 1016(%rsp), %rbp
+ movq %rbp, %r12
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq 1024(%rsp), %rbp
+ movq 48(%rsp), %rax ## 8-byte Reload
+ adcq 1032(%rsp), %rax
+ movq %rax, 48(%rsp) ## 8-byte Spill
+ adcq $0, %r15
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rdx
+ leaq 896(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq %r13, %rcx
+ addq 896(%rsp), %rcx
+ movq 32(%rsp), %r13 ## 8-byte Reload
+ adcq 904(%rsp), %r13
+ adcq 912(%rsp), %r14
+ adcq 920(%rsp), %rbx
+ movq %rbx, 16(%rsp) ## 8-byte Spill
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 928(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ adcq 936(%rsp), %r12
+ movq %r12, 40(%rsp) ## 8-byte Spill
+ adcq 944(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %r12 ## 8-byte Reload
+ adcq 952(%rsp), %r12
+ adcq 960(%rsp), %r15
+ sbbq %rbx, %rbx
+ movq %rcx, %rdx
+ movq %rcx, %rbp
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 824(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ andl $1, %ebx
+ addq 824(%rsp), %rbp
+ adcq 832(%rsp), %r13
+ movq %r13, 32(%rsp) ## 8-byte Spill
+ adcq 840(%rsp), %r14
+ movq %r14, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %r13 ## 8-byte Reload
+ adcq 848(%rsp), %r13
+ movq (%rsp), %rbp ## 8-byte Reload
+ adcq 856(%rsp), %rbp
+ movq 40(%rsp), %r14 ## 8-byte Reload
+ adcq 864(%rsp), %r14
+ movq 24(%rsp), %rax ## 8-byte Reload
+ adcq 872(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ adcq 880(%rsp), %r12
+ adcq 888(%rsp), %r15
+ adcq $0, %rbx
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rdx
+ leaq 752(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq 32(%rsp), %rax ## 8-byte Reload
+ addq 752(%rsp), %rax
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 760(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ adcq 768(%rsp), %r13
+ movq %r13, 16(%rsp) ## 8-byte Spill
+ adcq 776(%rsp), %rbp
+ movq %rbp, (%rsp) ## 8-byte Spill
+ adcq 784(%rsp), %r14
+ movq %r14, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq 792(%rsp), %rbp
+ adcq 800(%rsp), %r12
+ adcq 808(%rsp), %r15
+ adcq 816(%rsp), %rbx
+ movq %rbx, 32(%rsp) ## 8-byte Spill
+ sbbq %r13, %r13
+ movq %rax, %rdx
+ movq %rax, %rbx
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 680(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq %r13, %rax
+ andl $1, %eax
+ addq 680(%rsp), %rbx
+ movq 8(%rsp), %r14 ## 8-byte Reload
+ adcq 688(%rsp), %r14
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ adcq 696(%rsp), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ movq (%rsp), %r13 ## 8-byte Reload
+ adcq 704(%rsp), %r13
+ movq 40(%rsp), %rbx ## 8-byte Reload
+ adcq 712(%rsp), %rbx
+ adcq 720(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ movq %r12, %rbp
+ adcq 728(%rsp), %rbp
+ adcq 736(%rsp), %r15
+ movq 32(%rsp), %r12 ## 8-byte Reload
+ adcq 744(%rsp), %r12
+ adcq $0, %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq 32(%rax), %rdx
+ leaq 608(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq %r14, %rax
+ addq 608(%rsp), %rax
+ movq 16(%rsp), %r14 ## 8-byte Reload
+ adcq 616(%rsp), %r14
+ adcq 624(%rsp), %r13
+ movq %r13, (%rsp) ## 8-byte Spill
+ adcq 632(%rsp), %rbx
+ movq %rbx, %r13
+ movq 24(%rsp), %rcx ## 8-byte Reload
+ adcq 640(%rsp), %rcx
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq 648(%rsp), %rbp
+ movq %rbp, 48(%rsp) ## 8-byte Spill
+ adcq 656(%rsp), %r15
+ adcq 664(%rsp), %r12
+ movq %r12, 32(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 672(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ sbbq %rbp, %rbp
+ movq %rax, %rdx
+ movq %rax, %rbx
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 536(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq %rbp, %rax
+ andl $1, %eax
+ addq 536(%rsp), %rbx
+ adcq 544(%rsp), %r14
+ movq %r14, 16(%rsp) ## 8-byte Spill
+ movq (%rsp), %rbx ## 8-byte Reload
+ adcq 552(%rsp), %rbx
+ adcq 560(%rsp), %r13
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq 568(%rsp), %rbp
+ movq 48(%rsp), %r12 ## 8-byte Reload
+ adcq 576(%rsp), %r12
+ adcq 584(%rsp), %r15
+ movq 32(%rsp), %rcx ## 8-byte Reload
+ adcq 592(%rsp), %rcx
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r14 ## 8-byte Reload
+ adcq 600(%rsp), %r14
+ adcq $0, %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq 40(%rax), %rdx
+ leaq 464(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq 16(%rsp), %rax ## 8-byte Reload
+ addq 464(%rsp), %rax
+ adcq 472(%rsp), %rbx
+ adcq 480(%rsp), %r13
+ movq %r13, 40(%rsp) ## 8-byte Spill
+ adcq 488(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ adcq 496(%rsp), %r12
+ adcq 504(%rsp), %r15
+ movq %r15, 16(%rsp) ## 8-byte Spill
+ movq 32(%rsp), %r15 ## 8-byte Reload
+ adcq 512(%rsp), %r15
+ adcq 520(%rsp), %r14
+ movq %r14, 8(%rsp) ## 8-byte Spill
+ movq (%rsp), %r14 ## 8-byte Reload
+ adcq 528(%rsp), %r14
+ sbbq %r13, %r13
+ movq %rax, %rdx
+ movq %rax, %rbp
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 392(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq %r13, %rax
+ andl $1, %eax
+ addq 392(%rsp), %rbp
+ adcq 400(%rsp), %rbx
+ movq %rbx, (%rsp) ## 8-byte Spill
+ movq 40(%rsp), %rbp ## 8-byte Reload
+ adcq 408(%rsp), %rbp
+ movq 24(%rsp), %rbx ## 8-byte Reload
+ adcq 416(%rsp), %rbx
+ adcq 424(%rsp), %r12
+ movq 16(%rsp), %r13 ## 8-byte Reload
+ adcq 432(%rsp), %r13
+ adcq 440(%rsp), %r15
+ movq %r15, 32(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r15 ## 8-byte Reload
+ adcq 448(%rsp), %r15
+ adcq 456(%rsp), %r14
+ adcq $0, %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq 48(%rax), %rdx
+ leaq 320(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq (%rsp), %rax ## 8-byte Reload
+ addq 320(%rsp), %rax
+ adcq 328(%rsp), %rbp
+ movq %rbp, 40(%rsp) ## 8-byte Spill
+ adcq 336(%rsp), %rbx
+ movq %rbx, 24(%rsp) ## 8-byte Spill
+ movq %r12, %rbp
+ adcq 344(%rsp), %rbp
+ adcq 352(%rsp), %r13
+ movq 32(%rsp), %r12 ## 8-byte Reload
+ adcq 360(%rsp), %r12
+ adcq 368(%rsp), %r15
+ movq %r15, 8(%rsp) ## 8-byte Spill
+ adcq 376(%rsp), %r14
+ movq %r14, (%rsp) ## 8-byte Spill
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ adcq 384(%rsp), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ sbbq %r15, %r15
+ movq %rax, %rdx
+ movq %rax, %rbx
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 248(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ andl $1, %r15d
+ addq 248(%rsp), %rbx
+ movq 40(%rsp), %rax ## 8-byte Reload
+ adcq 256(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %r14 ## 8-byte Reload
+ adcq 264(%rsp), %r14
+ adcq 272(%rsp), %rbp
+ movq %rbp, 48(%rsp) ## 8-byte Spill
+ movq %r13, %rbx
+ adcq 280(%rsp), %rbx
+ movq %r12, %rbp
+ adcq 288(%rsp), %rbp
+ movq 8(%rsp), %r13 ## 8-byte Reload
+ adcq 296(%rsp), %r13
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 304(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 16(%rsp), %r12 ## 8-byte Reload
+ adcq 312(%rsp), %r12
+ adcq $0, %r15
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq 56(%rax), %rdx
+ leaq 176(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq 40(%rsp), %rax ## 8-byte Reload
+ addq 176(%rsp), %rax
+ adcq 184(%rsp), %r14
+ movq %r14, 24(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rcx ## 8-byte Reload
+ adcq 192(%rsp), %rcx
+ movq %rcx, 48(%rsp) ## 8-byte Spill
+ adcq 200(%rsp), %rbx
+ movq %rbx, 16(%rsp) ## 8-byte Spill
+ adcq 208(%rsp), %rbp
+ adcq 216(%rsp), %r13
+ movq %r13, 8(%rsp) ## 8-byte Spill
+ movq (%rsp), %r14 ## 8-byte Reload
+ adcq 224(%rsp), %r14
+ adcq 232(%rsp), %r12
+ adcq 240(%rsp), %r15
+ sbbq %rbx, %rbx
+ movq 80(%rsp), %rdx ## 8-byte Reload
+ imulq %rax, %rdx
+ movq %rax, %r13
+ leaq 104(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ andl $1, %ebx
+ addq 104(%rsp), %r13
+ movq 24(%rsp), %rcx ## 8-byte Reload
+ adcq 112(%rsp), %rcx
+ movq 48(%rsp), %rdx ## 8-byte Reload
+ adcq 120(%rsp), %rdx
+ movq 16(%rsp), %rsi ## 8-byte Reload
+ adcq 128(%rsp), %rsi
+ movq %rbp, %rdi
+ adcq 136(%rsp), %rdi
+ movq %rdi, 32(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r8 ## 8-byte Reload
+ adcq 144(%rsp), %r8
+ movq %r8, 8(%rsp) ## 8-byte Spill
+ movq %r14, %r9
+ adcq 152(%rsp), %r9
+ movq %r9, (%rsp) ## 8-byte Spill
+ adcq 160(%rsp), %r12
+ adcq 168(%rsp), %r15
+ adcq $0, %rbx
+ movq %rcx, %rax
+ movq %rcx, %r11
+ movq 56(%rsp), %rbp ## 8-byte Reload
+ subq (%rbp), %rax
+ movq %rdx, %rcx
+ movq %rdx, %r14
+ sbbq 8(%rbp), %rcx
+ movq %rsi, %rdx
+ movq %rsi, %r13
+ sbbq 16(%rbp), %rdx
+ movq %rdi, %rsi
+ sbbq 24(%rbp), %rsi
+ movq %r8, %rdi
+ sbbq 32(%rbp), %rdi
+ movq %r9, %r10
+ sbbq 40(%rbp), %r10
+ movq %r12, %r8
+ sbbq 48(%rbp), %r8
+ movq %r15, %r9
+ sbbq 56(%rbp), %r9
+ sbbq $0, %rbx
+ andl $1, %ebx
+ cmovneq %r15, %r9
+ testb %bl, %bl
+ cmovneq %r11, %rax
+ movq 96(%rsp), %rbx ## 8-byte Reload
+ movq %rax, (%rbx)
+ cmovneq %r14, %rcx
+ movq %rcx, 8(%rbx)
+ cmovneq %r13, %rdx
+ movq %rdx, 16(%rbx)
+ cmovneq 32(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 24(%rbx)
+ cmovneq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 32(%rbx)
+ cmovneq (%rsp), %r10 ## 8-byte Folded Reload
+ movq %r10, 40(%rbx)
+ cmovneq %r12, %r8
+ movq %r8, 48(%rbx)
+ movq %r9, 56(%rbx)
+ addq $1256, %rsp ## imm = 0x4E8
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montNF8Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_montNF8Lbmi2: ## @mcl_fp_montNF8Lbmi2
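+## 8-limb Montgomery multiplication, NF variant of mcl_fp_mont8Lbmi2, with the
+## same two l_mulPv512x64 calls per pass (multiply by y[i], then reduce with the
+## constant at -8(%rcx)).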
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $1240, %rsp ## imm = 0x4D8
+ movq %rcx, 40(%rsp) ## 8-byte Spill
+ movq %rdx, 48(%rsp) ## 8-byte Spill
+ movq %rsi, 56(%rsp) ## 8-byte Spill
+ movq %rdi, 80(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %rbx
+ movq %rbx, 64(%rsp) ## 8-byte Spill
+ movq (%rdx), %rdx
+ leaq 1168(%rsp), %rdi
+ callq l_mulPv512x64
+ movq 1168(%rsp), %r15
+ movq 1176(%rsp), %r12
+ movq %r15, %rdx
+ imulq %rbx, %rdx
+ movq 1232(%rsp), %rax
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ movq 1224(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 1216(%rsp), %r13
+ movq 1208(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq 1200(%rsp), %r14
+ movq 1192(%rsp), %rbp
+ movq 1184(%rsp), %rbx
+ leaq 1096(%rsp), %rdi
+ movq 40(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 1096(%rsp), %r15
+ adcq 1104(%rsp), %r12
+ movq %r12, 16(%rsp) ## 8-byte Spill
+ adcq 1112(%rsp), %rbx
+ adcq 1120(%rsp), %rbp
+ adcq 1128(%rsp), %r14
+ movq %r14, %r12
+ movq 8(%rsp), %r14 ## 8-byte Reload
+ adcq 1136(%rsp), %r14
+ adcq 1144(%rsp), %r13
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 1152(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 32(%rsp), %rax ## 8-byte Reload
+ adcq 1160(%rsp), %rax
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rax ## 8-byte Reload
+ movq 8(%rax), %rdx
+ leaq 1024(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq 1088(%rsp), %r15
+ movq 16(%rsp), %rax ## 8-byte Reload
+ addq 1024(%rsp), %rax
+ adcq 1032(%rsp), %rbx
+ movq %rbx, 72(%rsp) ## 8-byte Spill
+ movq %rbp, %rbx
+ adcq 1040(%rsp), %rbx
+ adcq 1048(%rsp), %r12
+ adcq 1056(%rsp), %r14
+ movq %r14, 8(%rsp) ## 8-byte Spill
+ movq %r13, %rbp
+ adcq 1064(%rsp), %rbp
+ movq (%rsp), %rcx ## 8-byte Reload
+ adcq 1072(%rsp), %rcx
+ movq %rcx, (%rsp) ## 8-byte Spill
+ movq 32(%rsp), %r14 ## 8-byte Reload
+ adcq 1080(%rsp), %r14
+ adcq $0, %r15
+ movq %rax, %rdx
+ movq %rax, %r13
+ imulq 64(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 952(%rsp), %rdi
+ movq 40(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 952(%rsp), %r13
+ movq 72(%rsp), %rax ## 8-byte Reload
+ adcq 960(%rsp), %rax
+ movq %rax, 72(%rsp) ## 8-byte Spill
+ adcq 968(%rsp), %rbx
+ movq %rbx, 16(%rsp) ## 8-byte Spill
+ movq %r12, %rbx
+ adcq 976(%rsp), %rbx
+ movq 8(%rsp), %r12 ## 8-byte Reload
+ adcq 984(%rsp), %r12
+ adcq 992(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ movq (%rsp), %r13 ## 8-byte Reload
+ adcq 1000(%rsp), %r13
+ movq %r14, %rbp
+ adcq 1008(%rsp), %rbp
+ adcq 1016(%rsp), %r15
+ movq 48(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rdx
+ leaq 880(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq 944(%rsp), %r14
+ movq 72(%rsp), %rax ## 8-byte Reload
+ addq 880(%rsp), %rax
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ adcq 888(%rsp), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ adcq 896(%rsp), %rbx
+ adcq 904(%rsp), %r12
+ movq %r12, 8(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rcx ## 8-byte Reload
+ adcq 912(%rsp), %rcx
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq 920(%rsp), %r13
+ movq %r13, (%rsp) ## 8-byte Spill
+ adcq 928(%rsp), %rbp
+ movq %rbp, 32(%rsp) ## 8-byte Spill
+ adcq 936(%rsp), %r15
+ adcq $0, %r14
+ movq %rax, %rdx
+ movq %rax, %rbp
+ imulq 64(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 808(%rsp), %rdi
+ movq 40(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 808(%rsp), %rbp
+ movq 16(%rsp), %r13 ## 8-byte Reload
+ adcq 816(%rsp), %r13
+ movq %rbx, %r12
+ adcq 824(%rsp), %r12
+ movq 8(%rsp), %rbx ## 8-byte Reload
+ adcq 832(%rsp), %rbx
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq 840(%rsp), %rbp
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 848(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 32(%rsp), %rax ## 8-byte Reload
+ adcq 856(%rsp), %rax
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ adcq 864(%rsp), %r15
+ adcq 872(%rsp), %r14
+ movq 48(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rdx
+ leaq 736(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq 800(%rsp), %rax
+ movq %r13, %rcx
+ addq 736(%rsp), %rcx
+ adcq 744(%rsp), %r12
+ movq %r12, 24(%rsp) ## 8-byte Spill
+ adcq 752(%rsp), %rbx
+ movq %rbx, 8(%rsp) ## 8-byte Spill
+ adcq 760(%rsp), %rbp
+ movq %rbp, %r13
+ movq (%rsp), %rbp ## 8-byte Reload
+ adcq 768(%rsp), %rbp
+ movq 32(%rsp), %rbx ## 8-byte Reload
+ adcq 776(%rsp), %rbx
+ adcq 784(%rsp), %r15
+ adcq 792(%rsp), %r14
+ adcq $0, %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq %rcx, %rdx
+ movq %rcx, %r12
+ imulq 64(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 664(%rsp), %rdi
+ movq 40(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 664(%rsp), %r12
+ movq 24(%rsp), %rax ## 8-byte Reload
+ adcq 672(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %rax ## 8-byte Reload
+ adcq 680(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ adcq 688(%rsp), %r13
+ adcq 696(%rsp), %rbp
+ movq %rbp, (%rsp) ## 8-byte Spill
+ adcq 704(%rsp), %rbx
+ adcq 712(%rsp), %r15
+ adcq 720(%rsp), %r14
+ movq 16(%rsp), %r12 ## 8-byte Reload
+ adcq 728(%rsp), %r12
+ movq 48(%rsp), %rax ## 8-byte Reload
+ movq 32(%rax), %rdx
+ leaq 592(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq 656(%rsp), %rcx
+ movq 24(%rsp), %rax ## 8-byte Reload
+ addq 592(%rsp), %rax
+ movq 8(%rsp), %rbp ## 8-byte Reload
+ adcq 600(%rsp), %rbp
+ adcq 608(%rsp), %r13
+ movq %r13, 24(%rsp) ## 8-byte Spill
+ movq (%rsp), %r13 ## 8-byte Reload
+ adcq 616(%rsp), %r13
+ adcq 624(%rsp), %rbx
+ adcq 632(%rsp), %r15
+ adcq 640(%rsp), %r14
+ adcq 648(%rsp), %r12
+ movq %r12, 16(%rsp) ## 8-byte Spill
+ adcq $0, %rcx
+ movq %rcx, (%rsp) ## 8-byte Spill
+ movq %rax, %rdx
+ movq %rax, %r12
+ imulq 64(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 520(%rsp), %rdi
+ movq 40(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 520(%rsp), %r12
+ adcq 528(%rsp), %rbp
+ movq %rbp, 8(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %r12 ## 8-byte Reload
+ adcq 536(%rsp), %r12
+ movq %r13, %rbp
+ adcq 544(%rsp), %rbp
+ adcq 552(%rsp), %rbx
+ adcq 560(%rsp), %r15
+ adcq 568(%rsp), %r14
+ movq 16(%rsp), %r13 ## 8-byte Reload
+ adcq 576(%rsp), %r13
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 584(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rax ## 8-byte Reload
+ movq 40(%rax), %rdx
+ leaq 448(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq 512(%rsp), %rcx
+ movq 8(%rsp), %rax ## 8-byte Reload
+ addq 448(%rsp), %rax
+ adcq 456(%rsp), %r12
+ movq %r12, 24(%rsp) ## 8-byte Spill
+ adcq 464(%rsp), %rbp
+ adcq 472(%rsp), %rbx
+ adcq 480(%rsp), %r15
+ adcq 488(%rsp), %r14
+ adcq 496(%rsp), %r13
+ movq %r13, 16(%rsp) ## 8-byte Spill
+ movq (%rsp), %r13 ## 8-byte Reload
+ adcq 504(%rsp), %r13
+ adcq $0, %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ movq %rax, %rdx
+ movq %rax, %r12
+ imulq 64(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 376(%rsp), %rdi
+ movq 40(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 376(%rsp), %r12
+ movq 24(%rsp), %rax ## 8-byte Reload
+ adcq 384(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ adcq 392(%rsp), %rbp
+ adcq 400(%rsp), %rbx
+ adcq 408(%rsp), %r15
+ adcq 416(%rsp), %r14
+ movq 16(%rsp), %r12 ## 8-byte Reload
+ adcq 424(%rsp), %r12
+ adcq 432(%rsp), %r13
+ movq 8(%rsp), %rax ## 8-byte Reload
+ adcq 440(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rax ## 8-byte Reload
+ movq 48(%rax), %rdx
+ leaq 304(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq 368(%rsp), %rcx
+ movq 24(%rsp), %rax ## 8-byte Reload
+ addq 304(%rsp), %rax
+ adcq 312(%rsp), %rbp
+ movq %rbp, (%rsp) ## 8-byte Spill
+ adcq 320(%rsp), %rbx
+ adcq 328(%rsp), %r15
+ adcq 336(%rsp), %r14
+ adcq 344(%rsp), %r12
+ movq %r12, 16(%rsp) ## 8-byte Spill
+ adcq 352(%rsp), %r13
+ movq 8(%rsp), %rbp ## 8-byte Reload
+ adcq 360(%rsp), %rbp
+ adcq $0, %rcx
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ movq %rax, %rdx
+ movq %rax, %r12
+ imulq 64(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 232(%rsp), %rdi
+ movq 40(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 232(%rsp), %r12
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 240(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ adcq 248(%rsp), %rbx
+ adcq 256(%rsp), %r15
+ adcq 264(%rsp), %r14
+ movq 16(%rsp), %r12 ## 8-byte Reload
+ adcq 272(%rsp), %r12
+ adcq 280(%rsp), %r13
+ adcq 288(%rsp), %rbp
+ movq %rbp, 8(%rsp) ## 8-byte Spill
+ movq 32(%rsp), %rbp ## 8-byte Reload
+ adcq 296(%rsp), %rbp
+ movq 48(%rsp), %rax ## 8-byte Reload
+ movq 56(%rax), %rdx
+ leaq 160(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq 224(%rsp), %rcx
+ movq (%rsp), %rax ## 8-byte Reload
+ addq 160(%rsp), %rax
+ adcq 168(%rsp), %rbx
+ movq %rbx, 32(%rsp) ## 8-byte Spill
+ adcq 176(%rsp), %r15
+ adcq 184(%rsp), %r14
+ adcq 192(%rsp), %r12
+ movq %r12, 16(%rsp) ## 8-byte Spill
+ adcq 200(%rsp), %r13
+ movq 8(%rsp), %rbx ## 8-byte Reload
+ adcq 208(%rsp), %rbx
+ adcq 216(%rsp), %rbp
+ movq %rbp, %r12
+ adcq $0, %rcx
+ movq %rcx, (%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rdx ## 8-byte Reload
+ imulq %rax, %rdx
+ movq %rax, %rbp
+ leaq 88(%rsp), %rdi
+ movq 40(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 88(%rsp), %rbp
+ movq 32(%rsp), %r11 ## 8-byte Reload
+ adcq 96(%rsp), %r11
+ adcq 104(%rsp), %r15
+ adcq 112(%rsp), %r14
+ movq 16(%rsp), %rsi ## 8-byte Reload
+ adcq 120(%rsp), %rsi
+ movq %rsi, 16(%rsp) ## 8-byte Spill
+ adcq 128(%rsp), %r13
+ adcq 136(%rsp), %rbx
+ movq %rbx, 8(%rsp) ## 8-byte Spill
+ adcq 144(%rsp), %r12
+ movq (%rsp), %r8 ## 8-byte Reload
+ adcq 152(%rsp), %r8
+ movq %r11, %rax
+ movq 40(%rsp), %rbp ## 8-byte Reload
+ subq (%rbp), %rax
+ movq %r15, %rcx
+ sbbq 8(%rbp), %rcx
+ movq %r14, %rdx
+ sbbq 16(%rbp), %rdx
+ sbbq 24(%rbp), %rsi
+ movq %r13, %rdi
+ sbbq 32(%rbp), %rdi
+ movq %rbx, %r9
+ sbbq 40(%rbp), %r9
+ movq %r12, %r10
+ sbbq 48(%rbp), %r10
+ movq %rbp, %rbx
+ movq %r8, %rbp
+ sbbq 56(%rbx), %rbp
+ testq %rbp, %rbp
+ cmovsq %r11, %rax
+ movq 80(%rsp), %rbx ## 8-byte Reload
+ movq %rax, (%rbx)
+ cmovsq %r15, %rcx
+ movq %rcx, 8(%rbx)
+ cmovsq %r14, %rdx
+ movq %rdx, 16(%rbx)
+ cmovsq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 24(%rbx)
+ cmovsq %r13, %rdi
+ movq %rdi, 32(%rbx)
+ cmovsq 8(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, 40(%rbx)
+ cmovsq %r12, %r10
+ movq %r10, 48(%rbx)
+ cmovsq %r8, %rbp
+ movq %rbp, 56(%rbx)
+ addq $1240, %rsp ## imm = 0x4D8
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
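+## The routine below appears to perform Montgomery reduction of a 16-limb
+## (1024-bit) input back to 8 limbs, folding in the constant read from -8 off
+## the modulus argument once per limb before a final conditional subtraction.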
+ .globl _mcl_fp_montRed8Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_montRed8Lbmi2: ## @mcl_fp_montRed8Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $776, %rsp ## imm = 0x308
+ movq %rdx, %rax
+ movq %rdi, 192(%rsp) ## 8-byte Spill
+ movq -8(%rax), %rcx
+ movq %rcx, 104(%rsp) ## 8-byte Spill
+ movq (%rsi), %r15
+ movq 8(%rsi), %rdx
+ movq %rdx, 8(%rsp) ## 8-byte Spill
+ movq %r15, %rdx
+ imulq %rcx, %rdx
+ movq 120(%rsi), %rcx
+ movq %rcx, 112(%rsp) ## 8-byte Spill
+ movq 112(%rsi), %rcx
+ movq %rcx, 56(%rsp) ## 8-byte Spill
+ movq 104(%rsi), %rcx
+ movq %rcx, 96(%rsp) ## 8-byte Spill
+ movq 96(%rsi), %rcx
+ movq %rcx, 48(%rsp) ## 8-byte Spill
+ movq 88(%rsi), %rcx
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ movq 80(%rsi), %rcx
+ movq %rcx, 40(%rsp) ## 8-byte Spill
+ movq 72(%rsi), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ movq 64(%rsi), %r13
+ movq 56(%rsi), %rcx
+ movq %rcx, 64(%rsp) ## 8-byte Spill
+ movq 48(%rsi), %r14
+ movq 40(%rsi), %rcx
+ movq %rcx, 72(%rsp) ## 8-byte Spill
+ movq 32(%rsi), %r12
+ movq 24(%rsi), %rbx
+ movq 16(%rsi), %rbp
+ movq %rax, %rcx
+ movq (%rcx), %rax
+ movq %rax, 136(%rsp) ## 8-byte Spill
+ movq 56(%rcx), %rax
+ movq %rax, 184(%rsp) ## 8-byte Spill
+ movq 48(%rcx), %rax
+ movq %rax, 176(%rsp) ## 8-byte Spill
+ movq 40(%rcx), %rax
+ movq %rax, 168(%rsp) ## 8-byte Spill
+ movq 32(%rcx), %rax
+ movq %rax, 160(%rsp) ## 8-byte Spill
+ movq 24(%rcx), %rax
+ movq %rax, 152(%rsp) ## 8-byte Spill
+ movq 16(%rcx), %rax
+ movq %rax, 144(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %rax
+ movq %rax, 128(%rsp) ## 8-byte Spill
+ movq %rcx, %rsi
+ movq %rsi, 88(%rsp) ## 8-byte Spill
+ leaq 704(%rsp), %rdi
+ callq l_mulPv512x64
+ addq 704(%rsp), %r15
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 712(%rsp), %rcx
+ adcq 720(%rsp), %rbp
+ movq %rbp, 80(%rsp) ## 8-byte Spill
+ adcq 728(%rsp), %rbx
+ movq %rbx, 32(%rsp) ## 8-byte Spill
+ adcq 736(%rsp), %r12
+ movq %r12, 120(%rsp) ## 8-byte Spill
+ movq 72(%rsp), %rax ## 8-byte Reload
+ adcq 744(%rsp), %rax
+ movq %rax, 72(%rsp) ## 8-byte Spill
+ adcq 752(%rsp), %r14
+ movq %r14, %r12
+ movq 64(%rsp), %rax ## 8-byte Reload
+ adcq 760(%rsp), %rax
+ movq %rax, 64(%rsp) ## 8-byte Spill
+ adcq 768(%rsp), %r13
+ movq %r13, 8(%rsp) ## 8-byte Spill
+ adcq $0, 16(%rsp) ## 8-byte Folded Spill
+ movq 40(%rsp), %r15 ## 8-byte Reload
+ adcq $0, %r15
+ adcq $0, 24(%rsp) ## 8-byte Folded Spill
+ adcq $0, 48(%rsp) ## 8-byte Folded Spill
+ adcq $0, 96(%rsp) ## 8-byte Folded Spill
+ movq 56(%rsp), %r13 ## 8-byte Reload
+ adcq $0, %r13
+ movq 112(%rsp), %r14 ## 8-byte Reload
+ adcq $0, %r14
+ sbbq %rbx, %rbx
+ movq %rcx, %rbp
+ movq %rbp, %rdx
+ imulq 104(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 632(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ andl $1, %ebx
+ movq %rbx, %rax
+ addq 632(%rsp), %rbp
+ movq 80(%rsp), %rsi ## 8-byte Reload
+ adcq 640(%rsp), %rsi
+ movq 32(%rsp), %rcx ## 8-byte Reload
+ adcq 648(%rsp), %rcx
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ movq 120(%rsp), %rcx ## 8-byte Reload
+ adcq 656(%rsp), %rcx
+ movq %rcx, 120(%rsp) ## 8-byte Spill
+ movq 72(%rsp), %rcx ## 8-byte Reload
+ adcq 664(%rsp), %rcx
+ movq %rcx, 72(%rsp) ## 8-byte Spill
+ adcq 672(%rsp), %r12
+ movq 64(%rsp), %rcx ## 8-byte Reload
+ adcq 680(%rsp), %rcx
+ movq %rcx, 64(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 688(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ adcq 696(%rsp), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ adcq $0, %r15
+ movq %r15, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rbx ## 8-byte Reload
+ adcq $0, %rbx
+ movq 48(%rsp), %r15 ## 8-byte Reload
+ adcq $0, %r15
+ adcq $0, 96(%rsp) ## 8-byte Folded Spill
+ adcq $0, %r13
+ movq %r13, 56(%rsp) ## 8-byte Spill
+ adcq $0, %r14
+ movq %r14, 112(%rsp) ## 8-byte Spill
+ movq %rax, %rbp
+ adcq $0, %rbp
+ movq %rsi, %rdx
+ movq %rsi, %r14
+ imulq 104(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 560(%rsp), %rdi
+ movq 88(%rsp), %r13 ## 8-byte Reload
+ movq %r13, %rsi
+ callq l_mulPv512x64
+ addq 560(%rsp), %r14
+ movq 32(%rsp), %rcx ## 8-byte Reload
+ adcq 568(%rsp), %rcx
+ movq 120(%rsp), %rax ## 8-byte Reload
+ adcq 576(%rsp), %rax
+ movq %rax, 120(%rsp) ## 8-byte Spill
+ movq 72(%rsp), %rax ## 8-byte Reload
+ adcq 584(%rsp), %rax
+ movq %rax, 72(%rsp) ## 8-byte Spill
+ adcq 592(%rsp), %r12
+ movq %r12, 32(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %r14 ## 8-byte Reload
+ adcq 600(%rsp), %r14
+ movq 8(%rsp), %rax ## 8-byte Reload
+ adcq 608(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %rax ## 8-byte Reload
+ adcq 616(%rsp), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %rax ## 8-byte Reload
+ adcq 624(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ adcq $0, %rbx
+ movq %rbx, 24(%rsp) ## 8-byte Spill
+ adcq $0, %r15
+ movq %r15, 48(%rsp) ## 8-byte Spill
+ movq 96(%rsp), %rbx ## 8-byte Reload
+ adcq $0, %rbx
+ movq 56(%rsp), %r15 ## 8-byte Reload
+ adcq $0, %r15
+ adcq $0, 112(%rsp) ## 8-byte Folded Spill
+ adcq $0, %rbp
+ movq %rbp, 80(%rsp) ## 8-byte Spill
+ movq %rcx, %rbp
+ movq %rbp, %rdx
+ movq 104(%rsp), %r12 ## 8-byte Reload
+ imulq %r12, %rdx
+ leaq 488(%rsp), %rdi
+ movq %r13, %rsi
+ callq l_mulPv512x64
+ addq 488(%rsp), %rbp
+ movq 120(%rsp), %rax ## 8-byte Reload
+ adcq 496(%rsp), %rax
+ movq 72(%rsp), %rbp ## 8-byte Reload
+ adcq 504(%rsp), %rbp
+ movq 32(%rsp), %rcx ## 8-byte Reload
+ adcq 512(%rsp), %rcx
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ adcq 520(%rsp), %r14
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 528(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ adcq 536(%rsp), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %r13 ## 8-byte Reload
+ adcq 544(%rsp), %r13
+ movq 24(%rsp), %rcx ## 8-byte Reload
+ adcq 552(%rsp), %rcx
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq $0, 48(%rsp) ## 8-byte Folded Spill
+ adcq $0, %rbx
+ movq %rbx, 96(%rsp) ## 8-byte Spill
+ movq %r15, %rbx
+ adcq $0, %rbx
+ adcq $0, 112(%rsp) ## 8-byte Folded Spill
+ adcq $0, 80(%rsp) ## 8-byte Folded Spill
+ movq %rax, %rdx
+ movq %rax, %r15
+ imulq %r12, %rdx
+ leaq 416(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 416(%rsp), %r15
+ adcq 424(%rsp), %rbp
+ movq %rbp, %rax
+ movq 32(%rsp), %rcx ## 8-byte Reload
+ adcq 432(%rsp), %rcx
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ movq %r14, %r12
+ adcq 440(%rsp), %r12
+ movq 8(%rsp), %r14 ## 8-byte Reload
+ adcq 448(%rsp), %r14
+ movq 16(%rsp), %rbp ## 8-byte Reload
+ adcq 456(%rsp), %rbp
+ adcq 464(%rsp), %r13
+ movq 24(%rsp), %rcx ## 8-byte Reload
+ adcq 472(%rsp), %rcx
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rcx ## 8-byte Reload
+ adcq 480(%rsp), %rcx
+ movq %rcx, 48(%rsp) ## 8-byte Spill
+ adcq $0, 96(%rsp) ## 8-byte Folded Spill
+ adcq $0, %rbx
+ movq %rbx, 56(%rsp) ## 8-byte Spill
+ movq 112(%rsp), %r15 ## 8-byte Reload
+ adcq $0, %r15
+ adcq $0, 80(%rsp) ## 8-byte Folded Spill
+ movq %rax, %rbx
+ movq %rbx, %rdx
+ imulq 104(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 344(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 344(%rsp), %rbx
+ movq 32(%rsp), %rax ## 8-byte Reload
+ adcq 352(%rsp), %rax
+ adcq 360(%rsp), %r12
+ movq %r12, 64(%rsp) ## 8-byte Spill
+ adcq 368(%rsp), %r14
+ movq %r14, 8(%rsp) ## 8-byte Spill
+ adcq 376(%rsp), %rbp
+ movq %rbp, 16(%rsp) ## 8-byte Spill
+ adcq 384(%rsp), %r13
+ movq %r13, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %r13 ## 8-byte Reload
+ adcq 392(%rsp), %r13
+ movq 48(%rsp), %r12 ## 8-byte Reload
+ adcq 400(%rsp), %r12
+ movq 96(%rsp), %r14 ## 8-byte Reload
+ adcq 408(%rsp), %r14
+ movq 56(%rsp), %rbp ## 8-byte Reload
+ adcq $0, %rbp
+ movq %r15, %rbx
+ adcq $0, %rbx
+ adcq $0, 80(%rsp) ## 8-byte Folded Spill
+ movq %rax, %rdx
+ movq %rax, %r15
+ imulq 104(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 272(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 272(%rsp), %r15
+ movq 64(%rsp), %rcx ## 8-byte Reload
+ adcq 280(%rsp), %rcx
+ movq 8(%rsp), %rax ## 8-byte Reload
+ adcq 288(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %rax ## 8-byte Reload
+ adcq 296(%rsp), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %rax ## 8-byte Reload
+ adcq 304(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ adcq 312(%rsp), %r13
+ movq %r13, 24(%rsp) ## 8-byte Spill
+ adcq 320(%rsp), %r12
+ movq %r12, 48(%rsp) ## 8-byte Spill
+ adcq 328(%rsp), %r14
+ movq %r14, %r13
+ adcq 336(%rsp), %rbp
+ movq %rbp, %r12
+ adcq $0, %rbx
+ movq %rbx, %r14
+ movq 80(%rsp), %r15 ## 8-byte Reload
+ adcq $0, %r15
+ movq 104(%rsp), %rdx ## 8-byte Reload
+ movq %rcx, %rbx
+ imulq %rbx, %rdx
+ leaq 200(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 200(%rsp), %rbx
+ movq 8(%rsp), %rax ## 8-byte Reload
+ adcq 208(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %r8 ## 8-byte Reload
+ adcq 216(%rsp), %r8
+ movq %r8, 16(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %rdx ## 8-byte Reload
+ adcq 224(%rsp), %rdx
+ movq 24(%rsp), %rsi ## 8-byte Reload
+ adcq 232(%rsp), %rsi
+ movq 48(%rsp), %rdi ## 8-byte Reload
+ adcq 240(%rsp), %rdi
+ movq %r13, %rbp
+ adcq 248(%rsp), %rbp
+ movq %r12, %rbx
+ adcq 256(%rsp), %rbx
+ movq %rbx, 56(%rsp) ## 8-byte Spill
+ movq %r14, %r9
+ adcq 264(%rsp), %r9
+ adcq $0, %r15
+ movq %r15, %r10
+ subq 136(%rsp), %rax ## 8-byte Folded Reload
+ movq %r8, %rcx
+ sbbq 128(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rdx, %r13
+ sbbq 144(%rsp), %r13 ## 8-byte Folded Reload
+ movq %rsi, %r12
+ sbbq 152(%rsp), %r12 ## 8-byte Folded Reload
+ movq %rdi, %r14
+ sbbq 160(%rsp), %r14 ## 8-byte Folded Reload
+ movq %rbp, %r11
+ sbbq 168(%rsp), %r11 ## 8-byte Folded Reload
+ movq %rbx, %r8
+ sbbq 176(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r9, %r15
+ sbbq 184(%rsp), %r9 ## 8-byte Folded Reload
+ sbbq $0, %r10
+ andl $1, %r10d
+ cmovneq %r15, %r9
+ testb %r10b, %r10b
+ cmovneq 8(%rsp), %rax ## 8-byte Folded Reload
+ movq 192(%rsp), %rbx ## 8-byte Reload
+ movq %rax, (%rbx)
+ cmovneq 16(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 8(%rbx)
+ cmovneq %rdx, %r13
+ movq %r13, 16(%rbx)
+ cmovneq %rsi, %r12
+ movq %r12, 24(%rbx)
+ cmovneq %rdi, %r14
+ movq %r14, 32(%rbx)
+ cmovneq %rbp, %r11
+ movq %r11, 40(%rbx)
+ cmovneq 56(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, 48(%rbx)
+ movq %r9, 56(%rbx)
+ addq $776, %rsp ## imm = 0x308
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
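+## The routine below appears to add two 8-limb operands without modular
+## reduction, storing the 512-bit sum and returning the carry-out in %rax.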
+ .globl _mcl_fp_addPre8Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_addPre8Lbmi2: ## @mcl_fp_addPre8Lbmi2
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r8
+ movq 56(%rsi), %r15
+ movq 48(%rdx), %r9
+ movq 48(%rsi), %r12
+ movq 40(%rdx), %r10
+ movq 32(%rdx), %r11
+ movq 24(%rdx), %r14
+ movq 16(%rdx), %rbx
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rbx
+ movq 40(%rsi), %r13
+ movq 24(%rsi), %rax
+ movq 32(%rsi), %rsi
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rbx, 16(%rdi)
+ adcq %r14, %rax
+ movq %rax, 24(%rdi)
+ adcq %r11, %rsi
+ movq %rsi, 32(%rdi)
+ adcq %r10, %r13
+ movq %r13, 40(%rdi)
+ adcq %r9, %r12
+ movq %r12, 48(%rdi)
+ adcq %r8, %r15
+ movq %r15, 56(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+
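+## The routine below appears to subtract two 8-limb operands without
+## reduction, storing the raw difference and returning the borrow in %rax.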
+ .globl _mcl_fp_subPre8Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_subPre8Lbmi2: ## @mcl_fp_subPre8Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r8
+ movq 56(%rsi), %r15
+ movq 48(%rdx), %r9
+ movq 40(%rdx), %r10
+ movq 24(%rdx), %r11
+ movq 32(%rdx), %r14
+ movq (%rsi), %rbx
+ movq 8(%rsi), %r12
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %r12
+ movq 16(%rsi), %rcx
+ sbbq 16(%rdx), %rcx
+ movq 48(%rsi), %r13
+ movq 40(%rsi), %rdx
+ movq 32(%rsi), %rbp
+ movq 24(%rsi), %rsi
+ movq %rbx, (%rdi)
+ movq %r12, 8(%rdi)
+ movq %rcx, 16(%rdi)
+ sbbq %r11, %rsi
+ movq %rsi, 24(%rdi)
+ sbbq %r14, %rbp
+ movq %rbp, 32(%rdi)
+ sbbq %r10, %rdx
+ movq %rdx, 40(%rdi)
+ sbbq %r9, %r13
+ movq %r13, 48(%rdi)
+ sbbq %r8, %r15
+ movq %r15, 56(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
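+## The routine below appears to shift an 8-limb value right by one bit,
+## chaining shrdq across adjacent limbs.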
+ .globl _mcl_fp_shr1_8Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_shr1_8Lbmi2: ## @mcl_fp_shr1_8Lbmi2
+## BB#0:
+ movq 56(%rsi), %r8
+ movq 48(%rsi), %r9
+ movq 40(%rsi), %r10
+ movq 32(%rsi), %r11
+ movq 24(%rsi), %rcx
+ movq 16(%rsi), %rdx
+ movq (%rsi), %rax
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rax
+ movq %rax, (%rdi)
+ shrdq $1, %rdx, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rcx, %rdx
+ movq %rdx, 16(%rdi)
+ shrdq $1, %r11, %rcx
+ movq %rcx, 24(%rdi)
+ shrdq $1, %r10, %r11
+ movq %r11, 32(%rdi)
+ shrdq $1, %r9, %r10
+ movq %r10, 40(%rdi)
+ shrdq $1, %r8, %r9
+ movq %r9, 48(%rdi)
+ shrq %r8
+ movq %r8, 56(%rdi)
+ retq
+
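+## The routine below appears to be modular addition: the raw sum is stored
+## first, then the modulus is subtracted and that result kept only when no
+## borrow occurs (the %nocarry block); otherwise the unreduced sum remains.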
+ .globl _mcl_fp_add8Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_add8Lbmi2: ## @mcl_fp_add8Lbmi2
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r15
+ movq 56(%rsi), %r8
+ movq 48(%rdx), %r12
+ movq 48(%rsi), %r9
+ movq 40(%rsi), %r13
+ movq 24(%rsi), %r11
+ movq 32(%rsi), %r10
+ movq (%rdx), %r14
+ movq 8(%rdx), %rbx
+ addq (%rsi), %r14
+ adcq 8(%rsi), %rbx
+ movq 16(%rdx), %rax
+ adcq 16(%rsi), %rax
+ adcq 24(%rdx), %r11
+ movq 40(%rdx), %rsi
+ adcq 32(%rdx), %r10
+ movq %r14, (%rdi)
+ movq %rbx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ movq %r11, 24(%rdi)
+ movq %r10, 32(%rdi)
+ adcq %r13, %rsi
+ movq %rsi, 40(%rdi)
+ adcq %r12, %r9
+ movq %r9, 48(%rdi)
+ adcq %r15, %r8
+ movq %r8, 56(%rdi)
+ sbbq %rdx, %rdx
+ andl $1, %edx
+ subq (%rcx), %r14
+ sbbq 8(%rcx), %rbx
+ sbbq 16(%rcx), %rax
+ sbbq 24(%rcx), %r11
+ sbbq 32(%rcx), %r10
+ sbbq 40(%rcx), %rsi
+ sbbq 48(%rcx), %r9
+ sbbq 56(%rcx), %r8
+ sbbq $0, %rdx
+ testb $1, %dl
+ jne LBB120_2
+## BB#1: ## %nocarry
+ movq %r14, (%rdi)
+ movq %rbx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ movq %r11, 24(%rdi)
+ movq %r10, 32(%rdi)
+ movq %rsi, 40(%rdi)
+ movq %r9, 48(%rdi)
+ movq %r8, 56(%rdi)
+LBB120_2: ## %carry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+
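+## The routine below appears to be the branchless ("NF") modular addition:
+## it always computes sum - p and picks between the sum and the reduced value
+## with cmovs based on the sign of that subtraction.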
+ .globl _mcl_fp_addNF8Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_addNF8Lbmi2: ## @mcl_fp_addNF8Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r8
+ movq 48(%rdx), %rbp
+ movq 40(%rdx), %rbx
+ movq 32(%rdx), %rax
+ movq 24(%rdx), %r11
+ movq 16(%rdx), %r15
+ movq (%rdx), %r13
+ movq 8(%rdx), %r12
+ addq (%rsi), %r13
+ adcq 8(%rsi), %r12
+ adcq 16(%rsi), %r15
+ adcq 24(%rsi), %r11
+ adcq 32(%rsi), %rax
+ movq %rax, %r10
+ movq %r10, -24(%rsp) ## 8-byte Spill
+ adcq 40(%rsi), %rbx
+ movq %rbx, %r9
+ movq %r9, -16(%rsp) ## 8-byte Spill
+ adcq 48(%rsi), %rbp
+ movq %rbp, %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ adcq 56(%rsi), %r8
+ movq %r13, %rsi
+ subq (%rcx), %rsi
+ movq %r12, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r15, %rbx
+ sbbq 16(%rcx), %rbx
+ movq %r11, %r14
+ sbbq 24(%rcx), %r14
+ movq %r10, %rbp
+ sbbq 32(%rcx), %rbp
+ movq %r9, %r10
+ sbbq 40(%rcx), %r10
+ movq %rax, %r9
+ sbbq 48(%rcx), %r9
+ movq %r8, %rax
+ sbbq 56(%rcx), %rax
+ testq %rax, %rax
+ cmovsq %r13, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r12, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r15, %rbx
+ movq %rbx, 16(%rdi)
+ cmovsq %r11, %r14
+ movq %r14, 24(%rdi)
+ cmovsq -24(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 32(%rdi)
+ cmovsq -16(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r10, 40(%rdi)
+ cmovsq -8(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, 48(%rdi)
+ cmovsq %r8, %rax
+ movq %rax, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
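+## The routine below appears to be modular subtraction: the raw difference is
+## stored first, and the modulus is added back in the %carry block when the
+## subtraction borrows.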
+ .globl _mcl_fp_sub8Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_sub8Lbmi2: ## @mcl_fp_sub8Lbmi2
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r12
+ movq 56(%rsi), %r8
+ movq 48(%rdx), %r13
+ movq (%rsi), %rax
+ movq 8(%rsi), %r10
+ xorl %ebx, %ebx
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r10
+ movq 16(%rsi), %r11
+ sbbq 16(%rdx), %r11
+ movq 24(%rsi), %r15
+ sbbq 24(%rdx), %r15
+ movq 32(%rsi), %r14
+ sbbq 32(%rdx), %r14
+ movq 48(%rsi), %r9
+ movq 40(%rsi), %rsi
+ sbbq 40(%rdx), %rsi
+ movq %rax, (%rdi)
+ movq %r10, 8(%rdi)
+ movq %r11, 16(%rdi)
+ movq %r15, 24(%rdi)
+ movq %r14, 32(%rdi)
+ movq %rsi, 40(%rdi)
+ sbbq %r13, %r9
+ movq %r9, 48(%rdi)
+ sbbq %r12, %r8
+ movq %r8, 56(%rdi)
+ sbbq $0, %rbx
+ testb $1, %bl
+ je LBB122_2
+## BB#1: ## %carry
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ movq 8(%rcx), %rax
+ adcq %r10, %rax
+ movq %rax, 8(%rdi)
+ movq 16(%rcx), %rax
+ adcq %r11, %rax
+ movq %rax, 16(%rdi)
+ movq 24(%rcx), %rax
+ adcq %r15, %rax
+ movq %rax, 24(%rdi)
+ movq 32(%rcx), %rax
+ adcq %r14, %rax
+ movq %rax, 32(%rdi)
+ movq 40(%rcx), %rax
+ adcq %rsi, %rax
+ movq %rax, 40(%rdi)
+ movq 48(%rcx), %rax
+ adcq %r9, %rax
+ movq %rax, 48(%rdi)
+ movq 56(%rcx), %rax
+ adcq %r8, %rax
+ movq %rax, 56(%rdi)
+LBB122_2: ## %nocarry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+
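+## The routine below appears to be the branchless ("NF") modular subtraction:
+## it builds a sign mask (sarq $63) from the borrow, ANDs it with the modulus,
+## and adds the masked modulus back so no branch is needed.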
+ .globl _mcl_fp_subNF8Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_subNF8Lbmi2: ## @mcl_fp_subNF8Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq %rdi, %r9
+ movdqu (%rdx), %xmm0
+ movdqu 16(%rdx), %xmm1
+ movdqu 32(%rdx), %xmm2
+ movdqu 48(%rdx), %xmm3
+ pshufd $78, %xmm3, %xmm4 ## xmm4 = xmm3[2,3,0,1]
+ movd %xmm4, %r12
+ movdqu (%rsi), %xmm4
+ movdqu 16(%rsi), %xmm5
+ movdqu 32(%rsi), %xmm8
+ movdqu 48(%rsi), %xmm7
+ pshufd $78, %xmm7, %xmm6 ## xmm6 = xmm7[2,3,0,1]
+ movd %xmm6, %rcx
+ movd %xmm3, %r13
+ movd %xmm7, %rdi
+ pshufd $78, %xmm2, %xmm3 ## xmm3 = xmm2[2,3,0,1]
+ movd %xmm3, %rbp
+ pshufd $78, %xmm8, %xmm3 ## xmm3 = xmm8[2,3,0,1]
+ movd %xmm3, %rdx
+ movd %xmm2, %rsi
+ pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1]
+ movd %xmm2, %r11
+ pshufd $78, %xmm5, %xmm2 ## xmm2 = xmm5[2,3,0,1]
+ movd %xmm1, %r15
+ pshufd $78, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,0,1]
+ movd %xmm1, %rbx
+ pshufd $78, %xmm4, %xmm1 ## xmm1 = xmm4[2,3,0,1]
+ movd %xmm0, %rax
+ movd %xmm4, %r14
+ subq %rax, %r14
+ movd %xmm1, %r10
+ sbbq %rbx, %r10
+ movd %xmm5, %rbx
+ sbbq %r15, %rbx
+ movd %xmm2, %r15
+ sbbq %r11, %r15
+ movd %xmm8, %r11
+ sbbq %rsi, %r11
+ sbbq %rbp, %rdx
+ movq %rdx, -24(%rsp) ## 8-byte Spill
+ sbbq %r13, %rdi
+ movq %rdi, -16(%rsp) ## 8-byte Spill
+ sbbq %r12, %rcx
+ movq %rcx, -8(%rsp) ## 8-byte Spill
+ movq %rcx, %rbp
+ sarq $63, %rbp
+ movq 56(%r8), %r12
+ andq %rbp, %r12
+ movq 48(%r8), %r13
+ andq %rbp, %r13
+ movq 40(%r8), %rdi
+ andq %rbp, %rdi
+ movq 32(%r8), %rsi
+ andq %rbp, %rsi
+ movq 24(%r8), %rdx
+ andq %rbp, %rdx
+ movq 16(%r8), %rcx
+ andq %rbp, %rcx
+ movq 8(%r8), %rax
+ andq %rbp, %rax
+ andq (%r8), %rbp
+ addq %r14, %rbp
+ adcq %r10, %rax
+ movq %rbp, (%r9)
+ adcq %rbx, %rcx
+ movq %rax, 8(%r9)
+ movq %rcx, 16(%r9)
+ adcq %r15, %rdx
+ movq %rdx, 24(%r9)
+ adcq %r11, %rsi
+ movq %rsi, 32(%r9)
+ adcq -24(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 40(%r9)
+ adcq -16(%rsp), %r13 ## 8-byte Folded Reload
+ movq %r13, 48(%r9)
+ adcq -8(%rsp), %r12 ## 8-byte Folded Reload
+ movq %r12, 56(%r9)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
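+## The routine below appears to add two double-width (16-limb) operands: the
+## low 8 limbs are stored as-is, while the high half is conditionally reduced
+## by the modulus via cmovne selects.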
+ .globl _mcl_fpDbl_add8Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_add8Lbmi2: ## @mcl_fpDbl_add8Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq 120(%rdx), %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ movq 112(%rdx), %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ movq 104(%rdx), %rax
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ movq 96(%rdx), %r14
+ movq 24(%rsi), %r15
+ movq 32(%rsi), %r11
+ movq 16(%rdx), %r12
+ movq (%rdx), %rbx
+ movq 8(%rdx), %rax
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %rax
+ adcq 16(%rsi), %r12
+ adcq 24(%rdx), %r15
+ adcq 32(%rdx), %r11
+ movq 88(%rdx), %rbp
+ movq 80(%rdx), %r13
+ movq %rbx, (%rdi)
+ movq 72(%rdx), %r10
+ movq %rax, 8(%rdi)
+ movq 64(%rdx), %r9
+ movq %r12, 16(%rdi)
+ movq 40(%rdx), %r12
+ movq %r15, 24(%rdi)
+ movq 40(%rsi), %rbx
+ adcq %r12, %rbx
+ movq 56(%rdx), %r15
+ movq 48(%rdx), %r12
+ movq %r11, 32(%rdi)
+ movq 48(%rsi), %rdx
+ adcq %r12, %rdx
+ movq 120(%rsi), %r12
+ movq %rbx, 40(%rdi)
+ movq 56(%rsi), %rax
+ adcq %r15, %rax
+ movq 112(%rsi), %rcx
+ movq %rdx, 48(%rdi)
+ movq 64(%rsi), %rbx
+ adcq %r9, %rbx
+ movq 104(%rsi), %rdx
+ movq %rax, 56(%rdi)
+ movq 72(%rsi), %r9
+ adcq %r10, %r9
+ movq 80(%rsi), %r11
+ adcq %r13, %r11
+ movq 96(%rsi), %rax
+ movq 88(%rsi), %r15
+ adcq %rbp, %r15
+ adcq %r14, %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq %rdx, %rax
+ adcq -24(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ adcq -16(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -16(%rsp) ## 8-byte Spill
+ adcq -32(%rsp), %r12 ## 8-byte Folded Reload
+ movq %r12, -32(%rsp) ## 8-byte Spill
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ movq %rbx, %rsi
+ subq (%r8), %rsi
+ movq %r9, %rdx
+ sbbq 8(%r8), %rdx
+ movq %r11, %r10
+ sbbq 16(%r8), %r10
+ movq %r15, %r14
+ sbbq 24(%r8), %r14
+ movq -8(%rsp), %r13 ## 8-byte Reload
+ sbbq 32(%r8), %r13
+ movq %rax, %r12
+ sbbq 40(%r8), %r12
+ movq %rcx, %rax
+ sbbq 48(%r8), %rax
+ movq -32(%rsp), %rcx ## 8-byte Reload
+ sbbq 56(%r8), %rcx
+ sbbq $0, %rbp
+ andl $1, %ebp
+ cmovneq %rbx, %rsi
+ movq %rsi, 64(%rdi)
+ testb %bpl, %bpl
+ cmovneq %r9, %rdx
+ movq %rdx, 72(%rdi)
+ cmovneq %r11, %r10
+ movq %r10, 80(%rdi)
+ cmovneq %r15, %r14
+ movq %r14, 88(%rdi)
+ cmovneq -8(%rsp), %r13 ## 8-byte Folded Reload
+ movq %r13, 96(%rdi)
+ cmovneq -24(%rsp), %r12 ## 8-byte Folded Reload
+ movq %r12, 104(%rdi)
+ cmovneq -16(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 112(%rdi)
+ cmovneq -32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 120(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
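+## The routine below appears to subtract two double-width (16-limb) operands:
+## the low half is stored directly, and the modulus (or zero, chosen with
+## cmove) is added back into the high half when the subtraction borrows.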
+ .globl _mcl_fpDbl_sub8Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_sub8Lbmi2: ## @mcl_fpDbl_sub8Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r15
+ movq 120(%rdx), %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq 112(%rdx), %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ movq 104(%rdx), %rax
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ movq 16(%rsi), %r9
+ movq (%rsi), %r12
+ movq 8(%rsi), %r14
+ xorl %r8d, %r8d
+ subq (%rdx), %r12
+ sbbq 8(%rdx), %r14
+ sbbq 16(%rdx), %r9
+ movq 24(%rsi), %rbx
+ sbbq 24(%rdx), %rbx
+ movq 32(%rsi), %r13
+ sbbq 32(%rdx), %r13
+ movq 96(%rdx), %rbp
+ movq 88(%rdx), %r11
+ movq %r12, (%rdi)
+ movq 80(%rdx), %r12
+ movq %r14, 8(%rdi)
+ movq 72(%rdx), %r10
+ movq %r9, 16(%rdi)
+ movq 40(%rdx), %r9
+ movq %rbx, 24(%rdi)
+ movq 40(%rsi), %rbx
+ sbbq %r9, %rbx
+ movq 48(%rdx), %r9
+ movq %r13, 32(%rdi)
+ movq 48(%rsi), %r14
+ sbbq %r9, %r14
+ movq 64(%rdx), %r13
+ movq 56(%rdx), %r9
+ movq %rbx, 40(%rdi)
+ movq 56(%rsi), %rdx
+ sbbq %r9, %rdx
+ movq 120(%rsi), %rcx
+ movq %r14, 48(%rdi)
+ movq 64(%rsi), %rbx
+ sbbq %r13, %rbx
+ movq 112(%rsi), %rax
+ movq %rdx, 56(%rdi)
+ movq 72(%rsi), %r9
+ sbbq %r10, %r9
+ movq 80(%rsi), %r13
+ sbbq %r12, %r13
+ movq 88(%rsi), %r12
+ sbbq %r11, %r12
+ movq 104(%rsi), %rdx
+ movq 96(%rsi), %r14
+ sbbq %rbp, %r14
+ sbbq -24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -24(%rsp) ## 8-byte Spill
+ sbbq -16(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ sbbq -8(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -8(%rsp) ## 8-byte Spill
+ movl $0, %ebp
+ sbbq $0, %rbp
+ andl $1, %ebp
+ movq (%r15), %r11
+ cmoveq %r8, %r11
+ testb %bpl, %bpl
+ movq 16(%r15), %rbp
+ cmoveq %r8, %rbp
+ movq 8(%r15), %rsi
+ cmoveq %r8, %rsi
+ movq 56(%r15), %r10
+ cmoveq %r8, %r10
+ movq 48(%r15), %rdx
+ cmoveq %r8, %rdx
+ movq 40(%r15), %rcx
+ cmoveq %r8, %rcx
+ movq 32(%r15), %rax
+ cmoveq %r8, %rax
+ cmovneq 24(%r15), %r8
+ addq %rbx, %r11
+ adcq %r9, %rsi
+ movq %r11, 64(%rdi)
+ adcq %r13, %rbp
+ movq %rsi, 72(%rdi)
+ movq %rbp, 80(%rdi)
+ adcq %r12, %r8
+ movq %r8, 88(%rdi)
+ adcq %r14, %rax
+ movq %rax, 96(%rdi)
+ adcq -24(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 104(%rdi)
+ adcq -16(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 112(%rdi)
+ adcq -8(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r10, 120(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
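+## The local helper below appears to multiply a 9-limb (576-bit) operand by
+## the 64-bit scalar in %rdx using mulx, writing a 10-limb product to (%rdi).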
+ .p2align 4, 0x90
+l_mulPv576x64: ## @mulPv576x64
+## BB#0:
+ mulxq (%rsi), %rcx, %rax
+ movq %rcx, (%rdi)
+ mulxq 8(%rsi), %rcx, %r8
+ addq %rax, %rcx
+ movq %rcx, 8(%rdi)
+ mulxq 16(%rsi), %rcx, %r9
+ adcq %r8, %rcx
+ movq %rcx, 16(%rdi)
+ mulxq 24(%rsi), %rax, %rcx
+ adcq %r9, %rax
+ movq %rax, 24(%rdi)
+ mulxq 32(%rsi), %rax, %r8
+ adcq %rcx, %rax
+ movq %rax, 32(%rdi)
+ mulxq 40(%rsi), %rcx, %r9
+ adcq %r8, %rcx
+ movq %rcx, 40(%rdi)
+ mulxq 48(%rsi), %rax, %rcx
+ adcq %r9, %rax
+ movq %rax, 48(%rdi)
+ mulxq 56(%rsi), %rax, %r8
+ adcq %rcx, %rax
+ movq %rax, 56(%rdi)
+ mulxq 64(%rsi), %rax, %rcx
+ adcq %r8, %rax
+ movq %rax, 64(%rdi)
+ adcq $0, %rcx
+ movq %rcx, 72(%rdi)
+ movq %rdi, %rax
+ retq
+
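+## The routine below appears to multiply a 9-limb operand by a 64-bit scalar
+## through l_mulPv576x64 and copy the 10-limb product to the destination.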
+ .globl _mcl_fp_mulUnitPre9Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_mulUnitPre9Lbmi2: ## @mcl_fp_mulUnitPre9Lbmi2
+## BB#0:
+ pushq %r14
+ pushq %rbx
+ subq $88, %rsp
+ movq %rdi, %rbx
+ leaq 8(%rsp), %rdi
+ callq l_mulPv576x64
+ movq 80(%rsp), %r8
+ movq 72(%rsp), %r9
+ movq 64(%rsp), %r10
+ movq 56(%rsp), %r11
+ movq 48(%rsp), %r14
+ movq 40(%rsp), %rax
+ movq 32(%rsp), %rcx
+ movq 24(%rsp), %rdx
+ movq 8(%rsp), %rsi
+ movq 16(%rsp), %rdi
+ movq %rsi, (%rbx)
+ movq %rdi, 8(%rbx)
+ movq %rdx, 16(%rbx)
+ movq %rcx, 24(%rbx)
+ movq %rax, 32(%rbx)
+ movq %r14, 40(%rbx)
+ movq %r11, 48(%rbx)
+ movq %r10, 56(%rbx)
+ movq %r9, 64(%rbx)
+ movq %r8, 72(%rbx)
+ addq $88, %rsp
+ popq %rbx
+ popq %r14
+ retq
+
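+## The routine below appears to be the schoolbook 9x9-limb multiplication:
+## one l_mulPv576x64 call per multiplier limb, accumulating an 18-limb
+## (1152-bit) product with no modular reduction.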
+ .globl _mcl_fpDbl_mulPre9Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_mulPre9Lbmi2: ## @mcl_fpDbl_mulPre9Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $808, %rsp ## imm = 0x328
+ movq %rdx, %rax
+ movq %rdi, %r12
+ movq (%rax), %rdx
+ movq %rax, %rbx
+ movq %rbx, 80(%rsp) ## 8-byte Spill
+ leaq 728(%rsp), %rdi
+ movq %rsi, %rbp
+ movq %rbp, 72(%rsp) ## 8-byte Spill
+ callq l_mulPv576x64
+ movq 800(%rsp), %r13
+ movq 792(%rsp), %rax
+ movq %rax, 48(%rsp) ## 8-byte Spill
+ movq 784(%rsp), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq 776(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq 768(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 760(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ movq 752(%rsp), %rax
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ movq 744(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ movq 728(%rsp), %rax
+ movq 736(%rsp), %r14
+ movq %rax, (%r12)
+ movq %r12, 64(%rsp) ## 8-byte Spill
+ movq 8(%rbx), %rdx
+ leaq 648(%rsp), %rdi
+ movq %rbp, %rsi
+ callq l_mulPv576x64
+ movq 720(%rsp), %r8
+ movq 712(%rsp), %rcx
+ movq 704(%rsp), %rdx
+ movq 696(%rsp), %rsi
+ movq 688(%rsp), %rdi
+ movq 680(%rsp), %rbp
+ addq 648(%rsp), %r14
+ movq 672(%rsp), %rax
+ movq 656(%rsp), %rbx
+ movq 664(%rsp), %r15
+ movq %r14, 8(%r12)
+ adcq 24(%rsp), %rbx ## 8-byte Folded Reload
+ adcq 32(%rsp), %r15 ## 8-byte Folded Reload
+ adcq 40(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, %r14
+ adcq (%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 32(%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 40(%rsp) ## 8-byte Spill
+ adcq 48(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, (%rsp) ## 8-byte Spill
+ adcq %r13, %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 16(%rsp) ## 8-byte Spill
+ movq 80(%rsp), %r13 ## 8-byte Reload
+ movq 16(%r13), %rdx
+ leaq 568(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 640(%rsp), %r8
+ movq 632(%rsp), %r9
+ movq 624(%rsp), %r10
+ movq 616(%rsp), %rdi
+ movq 608(%rsp), %rbp
+ movq 600(%rsp), %rcx
+ addq 568(%rsp), %rbx
+ movq 592(%rsp), %rdx
+ movq 576(%rsp), %r12
+ movq 584(%rsp), %rsi
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq %rbx, 16(%rax)
+ adcq %r15, %r12
+ adcq %r14, %rsi
+ movq %rsi, 48(%rsp) ## 8-byte Spill
+ adcq 24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 56(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq 40(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 32(%rsp) ## 8-byte Spill
+ adcq (%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 40(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r10, (%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, 8(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 16(%rsp) ## 8-byte Spill
+ movq 24(%r13), %rdx
+ leaq 488(%rsp), %rdi
+ movq 72(%rsp), %r15 ## 8-byte Reload
+ movq %r15, %rsi
+ callq l_mulPv576x64
+ movq 560(%rsp), %r8
+ movq 552(%rsp), %rcx
+ movq 544(%rsp), %rdx
+ movq 536(%rsp), %rsi
+ movq 528(%rsp), %rdi
+ movq 520(%rsp), %rbp
+ addq 488(%rsp), %r12
+ movq 512(%rsp), %rax
+ movq 496(%rsp), %rbx
+ movq 504(%rsp), %r13
+ movq 64(%rsp), %r14 ## 8-byte Reload
+ movq %r12, 24(%r14)
+ adcq 48(%rsp), %rbx ## 8-byte Folded Reload
+ adcq 56(%rsp), %r13 ## 8-byte Folded Reload
+ adcq 24(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 32(%rsp) ## 8-byte Spill
+ adcq 40(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 40(%rsp) ## 8-byte Spill
+ adcq (%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, (%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 8(%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 48(%rsp) ## 8-byte Spill
+ movq 80(%rsp), %r12 ## 8-byte Reload
+ movq 32(%r12), %rdx
+ leaq 408(%rsp), %rdi
+ movq %r15, %rsi
+ callq l_mulPv576x64
+ movq 480(%rsp), %r8
+ movq 472(%rsp), %r9
+ movq 464(%rsp), %rdx
+ movq 456(%rsp), %rsi
+ movq 448(%rsp), %rdi
+ movq 440(%rsp), %rbp
+ addq 408(%rsp), %rbx
+ movq 432(%rsp), %rax
+ movq 416(%rsp), %r15
+ movq 424(%rsp), %rcx
+ movq %rbx, 32(%r14)
+ adcq %r13, %r15
+ adcq 24(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 56(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ adcq 40(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 32(%rsp) ## 8-byte Spill
+ adcq (%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 40(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, (%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 8(%rsp) ## 8-byte Spill
+ adcq 48(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, 16(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 48(%rsp) ## 8-byte Spill
+ movq %r12, %r14
+ movq 40(%r14), %rdx
+ leaq 328(%rsp), %rdi
+ movq 72(%rsp), %r13 ## 8-byte Reload
+ movq %r13, %rsi
+ callq l_mulPv576x64
+ movq 400(%rsp), %r8
+ movq 392(%rsp), %r9
+ movq 384(%rsp), %rsi
+ movq 376(%rsp), %rdi
+ movq 368(%rsp), %rbx
+ movq 360(%rsp), %rbp
+ addq 328(%rsp), %r15
+ movq 352(%rsp), %rcx
+ movq 336(%rsp), %r12
+ movq 344(%rsp), %rdx
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq %r15, 40(%rax)
+ adcq 56(%rsp), %r12 ## 8-byte Folded Reload
+ adcq 24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 56(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq 40(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 32(%rsp) ## 8-byte Spill
+ adcq (%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, 40(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, (%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 8(%rsp) ## 8-byte Spill
+ adcq 48(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, 16(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 48(%rsp) ## 8-byte Spill
+ movq 48(%r14), %rdx
+ leaq 248(%rsp), %rdi
+ movq %r13, %rsi
+ movq %r13, %r15
+ callq l_mulPv576x64
+ movq 320(%rsp), %r8
+ movq 312(%rsp), %r9
+ movq 304(%rsp), %rsi
+ movq 296(%rsp), %rdi
+ movq 288(%rsp), %rbx
+ movq 280(%rsp), %rbp
+ addq 248(%rsp), %r12
+ movq 272(%rsp), %rcx
+ movq 256(%rsp), %r13
+ movq 264(%rsp), %rdx
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq %r12, 48(%rax)
+ adcq 56(%rsp), %r13 ## 8-byte Folded Reload
+ adcq 24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 56(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq 40(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 32(%rsp) ## 8-byte Spill
+ adcq (%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, 40(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, (%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 8(%rsp) ## 8-byte Spill
+ adcq 48(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, 16(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 48(%rsp) ## 8-byte Spill
+ movq 56(%r14), %rdx
+ leaq 168(%rsp), %rdi
+ movq %r15, %rsi
+ callq l_mulPv576x64
+ movq 240(%rsp), %rcx
+ movq 232(%rsp), %rdx
+ movq 224(%rsp), %rsi
+ movq 216(%rsp), %rdi
+ movq 208(%rsp), %rbx
+ addq 168(%rsp), %r13
+ movq 200(%rsp), %r12
+ movq 192(%rsp), %rbp
+ movq 176(%rsp), %r14
+ movq 184(%rsp), %r15
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq %r13, 56(%rax)
+ adcq 56(%rsp), %r14 ## 8-byte Folded Reload
+ adcq 24(%rsp), %r15 ## 8-byte Folded Reload
+ adcq 32(%rsp), %rbp ## 8-byte Folded Reload
+ adcq 40(%rsp), %r12 ## 8-byte Folded Reload
+ adcq (%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, %r13
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, (%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 8(%rsp) ## 8-byte Spill
+ adcq 48(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ adcq $0, %rcx
+ movq %rcx, 48(%rsp) ## 8-byte Spill
+ movq 80(%rsp), %rax ## 8-byte Reload
+ movq 64(%rax), %rdx
+ leaq 88(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 88(%rsp), %r14
+ adcq 96(%rsp), %r15
+ movq 160(%rsp), %r8
+ adcq 104(%rsp), %rbp
+ movq 152(%rsp), %r9
+ movq 144(%rsp), %rdx
+ movq 136(%rsp), %rsi
+ movq 128(%rsp), %rdi
+ movq 120(%rsp), %rbx
+ movq 112(%rsp), %rax
+ movq 64(%rsp), %rcx ## 8-byte Reload
+ movq %r14, 64(%rcx)
+ movq %r15, 72(%rcx)
+ adcq %r12, %rax
+ movq %rbp, 80(%rcx)
+ movq %rax, 88(%rcx)
+ adcq %r13, %rbx
+ movq %rbx, 96(%rcx)
+ adcq (%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 104(%rcx)
+ adcq 8(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 112(%rcx)
+ adcq 16(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 120(%rcx)
+ adcq 48(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, 128(%rcx)
+ adcq $0, %r8
+ movq %r8, 136(%rcx)
+ addq $808, %rsp ## imm = 0x328
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
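+## The routine below appears to square a 9-limb operand by running the same
+## limb-by-limb l_mulPv576x64 schedule with both inputs pointing at the
+## operand; no squaring-specific shortcut seems to be taken here.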
+ .globl _mcl_fpDbl_sqrPre9Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_sqrPre9Lbmi2: ## @mcl_fpDbl_sqrPre9Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $808, %rsp ## imm = 0x328
+ movq %rsi, %r15
+ movq %rdi, %r14
+ movq (%r15), %rdx
+ leaq 728(%rsp), %rdi
+ callq l_mulPv576x64
+ movq 800(%rsp), %rax
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ movq 792(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ movq 784(%rsp), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq 776(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq 768(%rsp), %rax
+ movq %rax, 56(%rsp) ## 8-byte Spill
+ movq 760(%rsp), %rax
+ movq %rax, 48(%rsp) ## 8-byte Spill
+ movq 752(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ movq 744(%rsp), %rax
+ movq %rax, 80(%rsp) ## 8-byte Spill
+ movq 728(%rsp), %rax
+ movq 736(%rsp), %r12
+ movq %rax, (%r14)
+ movq %r14, 72(%rsp) ## 8-byte Spill
+ movq 8(%r15), %rdx
+ leaq 648(%rsp), %rdi
+ movq %r15, %rsi
+ callq l_mulPv576x64
+ movq 720(%rsp), %r8
+ movq 712(%rsp), %rcx
+ movq 704(%rsp), %rdx
+ movq 696(%rsp), %rsi
+ movq 688(%rsp), %rdi
+ movq 680(%rsp), %rbp
+ addq 648(%rsp), %r12
+ movq 672(%rsp), %rax
+ movq 656(%rsp), %rbx
+ movq 664(%rsp), %r13
+ movq %r12, 8(%r14)
+ adcq 80(%rsp), %rbx ## 8-byte Folded Reload
+ adcq 40(%rsp), %r13 ## 8-byte Folded Reload
+ adcq 48(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ adcq 56(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 48(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 56(%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 8(%rsp) ## 8-byte Spill
+ adcq 24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 32(%rsp) ## 8-byte Spill
+ movq %r15, 64(%rsp) ## 8-byte Spill
+ movq 16(%r15), %rdx
+ leaq 568(%rsp), %rdi
+ movq %r15, %rsi
+ callq l_mulPv576x64
+ movq 640(%rsp), %r8
+ movq 632(%rsp), %rcx
+ movq 624(%rsp), %rdx
+ movq 616(%rsp), %rsi
+ movq 608(%rsp), %rdi
+ movq 600(%rsp), %rbp
+ addq 568(%rsp), %rbx
+ movq 592(%rsp), %rax
+ movq 576(%rsp), %r14
+ movq 584(%rsp), %r12
+ movq 72(%rsp), %r15 ## 8-byte Reload
+ movq %rbx, 16(%r15)
+ adcq %r13, %r14
+ adcq 40(%rsp), %r12 ## 8-byte Folded Reload
+ adcq 48(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ adcq 56(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 48(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 56(%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 8(%rsp) ## 8-byte Spill
+ adcq 24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 32(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rsi ## 8-byte Reload
+ movq 24(%rsi), %rdx
+ leaq 488(%rsp), %rdi
+ callq l_mulPv576x64
+ movq 560(%rsp), %r8
+ movq 552(%rsp), %rcx
+ movq 544(%rsp), %rdx
+ movq 536(%rsp), %rsi
+ movq 528(%rsp), %rdi
+ movq 520(%rsp), %rbp
+ addq 488(%rsp), %r14
+ movq 512(%rsp), %rax
+ movq 496(%rsp), %rbx
+ movq 504(%rsp), %r13
+ movq %r14, 24(%r15)
+ adcq %r12, %rbx
+ adcq 40(%rsp), %r13 ## 8-byte Folded Reload
+ adcq 48(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ adcq 56(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 48(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 56(%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 8(%rsp) ## 8-byte Spill
+ adcq 24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 32(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rsi ## 8-byte Reload
+ movq 32(%rsi), %rdx
+ leaq 408(%rsp), %rdi
+ callq l_mulPv576x64
+ movq 480(%rsp), %r8
+ movq 472(%rsp), %rcx
+ movq 464(%rsp), %rdx
+ movq 456(%rsp), %rsi
+ movq 448(%rsp), %rdi
+ movq 440(%rsp), %rbp
+ addq 408(%rsp), %rbx
+ movq 432(%rsp), %rax
+ movq 416(%rsp), %r14
+ movq 424(%rsp), %r12
+ movq %rbx, 32(%r15)
+ adcq %r13, %r14
+ adcq 40(%rsp), %r12 ## 8-byte Folded Reload
+ adcq 48(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ adcq 56(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 48(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 56(%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 8(%rsp) ## 8-byte Spill
+ adcq 24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 32(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rsi ## 8-byte Reload
+ movq 40(%rsi), %rdx
+ leaq 328(%rsp), %rdi
+ callq l_mulPv576x64
+ movq 400(%rsp), %r8
+ movq 392(%rsp), %rcx
+ movq 384(%rsp), %rdx
+ movq 376(%rsp), %rsi
+ movq 368(%rsp), %rdi
+ movq 360(%rsp), %rbp
+ addq 328(%rsp), %r14
+ movq 352(%rsp), %rax
+ movq 336(%rsp), %rbx
+ movq 344(%rsp), %r13
+ movq %r14, 40(%r15)
+ adcq %r12, %rbx
+ adcq 40(%rsp), %r13 ## 8-byte Folded Reload
+ adcq 48(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ adcq 56(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 48(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 56(%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 8(%rsp) ## 8-byte Spill
+ adcq 24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 32(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rsi ## 8-byte Reload
+ movq 48(%rsi), %rdx
+ leaq 248(%rsp), %rdi
+ callq l_mulPv576x64
+ movq 320(%rsp), %r8
+ movq 312(%rsp), %rcx
+ movq 304(%rsp), %rdx
+ movq 296(%rsp), %rsi
+ movq 288(%rsp), %rdi
+ movq 280(%rsp), %rbp
+ addq 248(%rsp), %rbx
+ movq 272(%rsp), %rax
+ movq 256(%rsp), %r12
+ movq 264(%rsp), %r14
+ movq %rbx, 48(%r15)
+ adcq %r13, %r12
+ adcq 40(%rsp), %r14 ## 8-byte Folded Reload
+ adcq 48(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ adcq 56(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 48(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 56(%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 8(%rsp) ## 8-byte Spill
+ adcq 24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 32(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rsi ## 8-byte Reload
+ movq 56(%rsi), %rdx
+ leaq 168(%rsp), %rdi
+ callq l_mulPv576x64
+ movq 240(%rsp), %r8
+ movq 232(%rsp), %rdx
+ movq 224(%rsp), %rsi
+ movq 216(%rsp), %rdi
+ movq 208(%rsp), %rbx
+ movq 200(%rsp), %rcx
+ addq 168(%rsp), %r12
+ movq 192(%rsp), %r15
+ movq 176(%rsp), %r13
+ movq 184(%rsp), %rbp
+ movq 72(%rsp), %rax ## 8-byte Reload
+ movq %r12, 56(%rax)
+ adcq %r14, %r13
+ adcq 40(%rsp), %rbp ## 8-byte Folded Reload
+ adcq 48(%rsp), %r15 ## 8-byte Folded Reload
+ adcq 56(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, %r12
+ adcq 8(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, %r14
+ adcq 16(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 8(%rsp) ## 8-byte Spill
+ adcq 24(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 16(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 24(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 32(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rsi ## 8-byte Reload
+ movq 64(%rsi), %rdx
+ leaq 88(%rsp), %rdi
+ callq l_mulPv576x64
+ addq 88(%rsp), %r13
+ adcq 96(%rsp), %rbp
+ movq 160(%rsp), %r8
+ adcq 104(%rsp), %r15
+ movq 152(%rsp), %r9
+ movq 144(%rsp), %rdx
+ movq 136(%rsp), %rsi
+ movq 128(%rsp), %rdi
+ movq 120(%rsp), %rbx
+ movq 112(%rsp), %rax
+ movq 72(%rsp), %rcx ## 8-byte Reload
+ movq %r13, 64(%rcx)
+ movq %rbp, 72(%rcx)
+ adcq %r12, %rax
+ movq %r15, 80(%rcx)
+ movq %rax, 88(%rcx)
+ adcq %r14, %rbx
+ movq %rbx, 96(%rcx)
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 104(%rcx)
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 112(%rcx)
+ adcq 24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 120(%rcx)
+ adcq 32(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, 128(%rcx)
+ adcq $0, %r8
+ movq %r8, 136(%rcx)
+ addq $808, %rsp ## imm = 0x328
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
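+## The routine below appears to be the 9-limb (576-bit) Montgomery
+## multiplication: each multiplier limb is folded in with l_mulPv576x64 and
+## reduced using the constant loaded from -8 off the modulus argument.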
+ .globl _mcl_fp_mont9Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_mont9Lbmi2: ## @mcl_fp_mont9Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $1560, %rsp ## imm = 0x618
+ movq %rcx, 72(%rsp) ## 8-byte Spill
+ movq %rdx, 96(%rsp) ## 8-byte Spill
+ movq %rsi, 88(%rsp) ## 8-byte Spill
+ movq %rdi, 112(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %rbx
+ movq %rbx, 80(%rsp) ## 8-byte Spill
+ movq (%rdx), %rdx
+ leaq 1480(%rsp), %rdi
+ callq l_mulPv576x64
+ movq 1480(%rsp), %r14
+ movq 1488(%rsp), %r15
+ movq %r14, %rdx
+ imulq %rbx, %rdx
+ movq 1552(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ movq 1544(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ movq 1536(%rsp), %rax
+ movq %rax, 56(%rsp) ## 8-byte Spill
+ movq 1528(%rsp), %r12
+ movq 1520(%rsp), %r13
+ movq 1512(%rsp), %rbx
+ movq 1504(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 1496(%rsp), %rbp
+ leaq 1400(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 1400(%rsp), %r14
+ adcq 1408(%rsp), %r15
+ adcq 1416(%rsp), %rbp
+ movq %rbp, 8(%rsp) ## 8-byte Spill
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 1424(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ adcq 1432(%rsp), %rbx
+ movq %rbx, 32(%rsp) ## 8-byte Spill
+ adcq 1440(%rsp), %r13
+ movq %r13, 16(%rsp) ## 8-byte Spill
+ adcq 1448(%rsp), %r12
+ movq %r12, 48(%rsp) ## 8-byte Spill
+ movq 56(%rsp), %rbx ## 8-byte Reload
+ adcq 1456(%rsp), %rbx
+ movq 40(%rsp), %r14 ## 8-byte Reload
+ adcq 1464(%rsp), %r14
+ movq 24(%rsp), %r13 ## 8-byte Reload
+ adcq 1472(%rsp), %r13
+ sbbq %rbp, %rbp
+ movq 96(%rsp), %rax ## 8-byte Reload
+ movq 8(%rax), %rdx
+ leaq 1320(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ andl $1, %ebp
+ addq 1320(%rsp), %r15
+ movq 8(%rsp), %rax ## 8-byte Reload
+ adcq 1328(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 1336(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 32(%rsp), %r12 ## 8-byte Reload
+ adcq 1344(%rsp), %r12
+ movq 16(%rsp), %rax ## 8-byte Reload
+ adcq 1352(%rsp), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rax ## 8-byte Reload
+ adcq 1360(%rsp), %rax
+ movq %rax, 48(%rsp) ## 8-byte Spill
+ adcq 1368(%rsp), %rbx
+ adcq 1376(%rsp), %r14
+ movq %r14, 40(%rsp) ## 8-byte Spill
+ adcq 1384(%rsp), %r13
+ movq %r13, 24(%rsp) ## 8-byte Spill
+ adcq 1392(%rsp), %rbp
+ sbbq %r14, %r14
+ movq %r15, %rdx
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 1240(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq %r14, %rax
+ andl $1, %eax
+ addq 1240(%rsp), %r15
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 1248(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ movq (%rsp), %r14 ## 8-byte Reload
+ adcq 1256(%rsp), %r14
+ adcq 1264(%rsp), %r12
+ movq %r12, 32(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %r12 ## 8-byte Reload
+ adcq 1272(%rsp), %r12
+ movq 48(%rsp), %r13 ## 8-byte Reload
+ adcq 1280(%rsp), %r13
+ adcq 1288(%rsp), %rbx
+ movq %rbx, 56(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %r15 ## 8-byte Reload
+ adcq 1296(%rsp), %r15
+ movq 24(%rsp), %rbx ## 8-byte Reload
+ adcq 1304(%rsp), %rbx
+ adcq 1312(%rsp), %rbp
+ adcq $0, %rax
+ movq %rax, 64(%rsp) ## 8-byte Spill
+ movq 96(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rdx
+ leaq 1160(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 8(%rsp), %rax ## 8-byte Reload
+ addq 1160(%rsp), %rax
+ adcq 1168(%rsp), %r14
+ movq %r14, (%rsp) ## 8-byte Spill
+ movq 32(%rsp), %r14 ## 8-byte Reload
+ adcq 1176(%rsp), %r14
+ adcq 1184(%rsp), %r12
+ movq %r12, 16(%rsp) ## 8-byte Spill
+ movq %r13, %r12
+ adcq 1192(%rsp), %r12
+ movq 56(%rsp), %rcx ## 8-byte Reload
+ adcq 1200(%rsp), %rcx
+ movq %rcx, 56(%rsp) ## 8-byte Spill
+ adcq 1208(%rsp), %r15
+ movq %r15, %r13
+ adcq 1216(%rsp), %rbx
+ movq %rbx, 24(%rsp) ## 8-byte Spill
+ adcq 1224(%rsp), %rbp
+ movq 64(%rsp), %rcx ## 8-byte Reload
+ adcq 1232(%rsp), %rcx
+ movq %rcx, 64(%rsp) ## 8-byte Spill
+ sbbq %r15, %r15
+ movq %rax, %rdx
+ movq %rax, %rbx
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 1080(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq %r15, %rax
+ andl $1, %eax
+ addq 1080(%rsp), %rbx
+ movq (%rsp), %rcx ## 8-byte Reload
+ adcq 1088(%rsp), %rcx
+ movq %rcx, (%rsp) ## 8-byte Spill
+ movq %r14, %r15
+ adcq 1096(%rsp), %r15
+ movq 16(%rsp), %r14 ## 8-byte Reload
+ adcq 1104(%rsp), %r14
+ movq %r12, %rbx
+ adcq 1112(%rsp), %rbx
+ movq 56(%rsp), %rcx ## 8-byte Reload
+ adcq 1120(%rsp), %rcx
+ movq %rcx, 56(%rsp) ## 8-byte Spill
+ adcq 1128(%rsp), %r13
+ movq %r13, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %r13 ## 8-byte Reload
+ adcq 1136(%rsp), %r13
+ adcq 1144(%rsp), %rbp
+ movq 64(%rsp), %r12 ## 8-byte Reload
+ adcq 1152(%rsp), %r12
+ adcq $0, %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq 96(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rdx
+ leaq 1000(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq (%rsp), %rax ## 8-byte Reload
+ addq 1000(%rsp), %rax
+ adcq 1008(%rsp), %r15
+ movq %r15, 32(%rsp) ## 8-byte Spill
+ adcq 1016(%rsp), %r14
+ movq %r14, %r15
+ adcq 1024(%rsp), %rbx
+ movq %rbx, 48(%rsp) ## 8-byte Spill
+ movq 56(%rsp), %r14 ## 8-byte Reload
+ adcq 1032(%rsp), %r14
+ movq 40(%rsp), %rcx ## 8-byte Reload
+ adcq 1040(%rsp), %rcx
+ movq %rcx, 40(%rsp) ## 8-byte Spill
+ adcq 1048(%rsp), %r13
+ movq %r13, 24(%rsp) ## 8-byte Spill
+ adcq 1056(%rsp), %rbp
+ adcq 1064(%rsp), %r12
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 1072(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ sbbq %rbx, %rbx
+ movq %rax, %rdx
+ movq %rax, %r13
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 920(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ andl $1, %ebx
+ movq %rbx, %rax
+ addq 920(%rsp), %r13
+ movq 32(%rsp), %rcx ## 8-byte Reload
+ adcq 928(%rsp), %rcx
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ adcq 936(%rsp), %r15
+ movq %r15, 16(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %r15 ## 8-byte Reload
+ adcq 944(%rsp), %r15
+ movq %r14, %r13
+ adcq 952(%rsp), %r13
+ movq 40(%rsp), %r14 ## 8-byte Reload
+ adcq 960(%rsp), %r14
+ movq 24(%rsp), %rbx ## 8-byte Reload
+ adcq 968(%rsp), %rbx
+ adcq 976(%rsp), %rbp
+ adcq 984(%rsp), %r12
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 992(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ adcq $0, %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 96(%rsp), %rax ## 8-byte Reload
+ movq 32(%rax), %rdx
+ leaq 840(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 32(%rsp), %rax ## 8-byte Reload
+ addq 840(%rsp), %rax
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ adcq 848(%rsp), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ adcq 856(%rsp), %r15
+ adcq 864(%rsp), %r13
+ movq %r13, 56(%rsp) ## 8-byte Spill
+ adcq 872(%rsp), %r14
+ movq %r14, 40(%rsp) ## 8-byte Spill
+ adcq 880(%rsp), %rbx
+ movq %rbx, 24(%rsp) ## 8-byte Spill
+ adcq 888(%rsp), %rbp
+ adcq 896(%rsp), %r12
+ movq 8(%rsp), %r13 ## 8-byte Reload
+ adcq 904(%rsp), %r13
+ movq (%rsp), %rcx ## 8-byte Reload
+ adcq 912(%rsp), %rcx
+ movq %rcx, (%rsp) ## 8-byte Spill
+ sbbq %rbx, %rbx
+ movq %rax, %rdx
+ movq %rax, %r14
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 760(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ andl $1, %ebx
+ movq %rbx, %rax
+ addq 760(%rsp), %r14
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ adcq 768(%rsp), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ adcq 776(%rsp), %r15
+ movq 56(%rsp), %r14 ## 8-byte Reload
+ adcq 784(%rsp), %r14
+ movq 40(%rsp), %rcx ## 8-byte Reload
+ adcq 792(%rsp), %rcx
+ movq %rcx, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rcx ## 8-byte Reload
+ adcq 800(%rsp), %rcx
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq 808(%rsp), %rbp
+ movq %r12, %rbx
+ adcq 816(%rsp), %rbx
+ movq %r13, %r12
+ adcq 824(%rsp), %r12
+ movq (%rsp), %r13 ## 8-byte Reload
+ adcq 832(%rsp), %r13
+ adcq $0, %rax
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ movq 96(%rsp), %rax ## 8-byte Reload
+ movq 40(%rax), %rdx
+ leaq 680(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 16(%rsp), %rax ## 8-byte Reload
+ addq 680(%rsp), %rax
+ adcq 688(%rsp), %r15
+ movq %r15, 48(%rsp) ## 8-byte Spill
+ adcq 696(%rsp), %r14
+ movq %r14, 56(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %rcx ## 8-byte Reload
+ adcq 704(%rsp), %rcx
+ movq %rcx, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %r15 ## 8-byte Reload
+ adcq 712(%rsp), %r15
+ adcq 720(%rsp), %rbp
+ adcq 728(%rsp), %rbx
+ movq %rbx, 64(%rsp) ## 8-byte Spill
+ adcq 736(%rsp), %r12
+ movq %r12, 8(%rsp) ## 8-byte Spill
+ adcq 744(%rsp), %r13
+ movq %r13, (%rsp) ## 8-byte Spill
+ movq 32(%rsp), %r13 ## 8-byte Reload
+ adcq 752(%rsp), %r13
+ sbbq %r14, %r14
+ movq %rax, %rdx
+ movq %rax, %rbx
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 600(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ andl $1, %r14d
+ addq 600(%rsp), %rbx
+ movq 48(%rsp), %rax ## 8-byte Reload
+ adcq 608(%rsp), %rax
+ movq %rax, 48(%rsp) ## 8-byte Spill
+ movq 56(%rsp), %rax ## 8-byte Reload
+ adcq 616(%rsp), %rax
+ movq %rax, 56(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %rbx ## 8-byte Reload
+ adcq 624(%rsp), %rbx
+ adcq 632(%rsp), %r15
+ movq %r15, 24(%rsp) ## 8-byte Spill
+ adcq 640(%rsp), %rbp
+ movq 64(%rsp), %r12 ## 8-byte Reload
+ adcq 648(%rsp), %r12
+ movq 8(%rsp), %rax ## 8-byte Reload
+ adcq 656(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq (%rsp), %r15 ## 8-byte Reload
+ adcq 664(%rsp), %r15
+ adcq 672(%rsp), %r13
+ adcq $0, %r14
+ movq %r14, 16(%rsp) ## 8-byte Spill
+ movq 96(%rsp), %rax ## 8-byte Reload
+ movq 48(%rax), %rdx
+ leaq 520(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 48(%rsp), %rax ## 8-byte Reload
+ addq 520(%rsp), %rax
+ movq 56(%rsp), %r14 ## 8-byte Reload
+ adcq 528(%rsp), %r14
+ adcq 536(%rsp), %rbx
+ movq %rbx, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rcx ## 8-byte Reload
+ adcq 544(%rsp), %rcx
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq 552(%rsp), %rbp
+ adcq 560(%rsp), %r12
+ movq %r12, 64(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r12 ## 8-byte Reload
+ adcq 568(%rsp), %r12
+ adcq 576(%rsp), %r15
+ movq %r15, (%rsp) ## 8-byte Spill
+ adcq 584(%rsp), %r13
+ movq %r13, 32(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %r15 ## 8-byte Reload
+ adcq 592(%rsp), %r15
+ sbbq %rbx, %rbx
+ movq %rax, %rdx
+ movq %rax, %r13
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 440(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ andl $1, %ebx
+ movq %rbx, %rax
+ addq 440(%rsp), %r13
+ adcq 448(%rsp), %r14
+ movq %r14, 56(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %r14 ## 8-byte Reload
+ adcq 456(%rsp), %r14
+ movq 24(%rsp), %rbx ## 8-byte Reload
+ adcq 464(%rsp), %rbx
+ adcq 472(%rsp), %rbp
+ movq %rbp, 104(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rcx ## 8-byte Reload
+ adcq 480(%rsp), %rcx
+ movq %rcx, 64(%rsp) ## 8-byte Spill
+ adcq 488(%rsp), %r12
+ movq %r12, 8(%rsp) ## 8-byte Spill
+ movq (%rsp), %rbp ## 8-byte Reload
+ adcq 496(%rsp), %rbp
+ movq 32(%rsp), %r12 ## 8-byte Reload
+ adcq 504(%rsp), %r12
+ adcq 512(%rsp), %r15
+ movq %r15, %r13
+ adcq $0, %rax
+ movq %rax, 48(%rsp) ## 8-byte Spill
+ movq 96(%rsp), %rax ## 8-byte Reload
+ movq 56(%rax), %rdx
+ leaq 360(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 56(%rsp), %rax ## 8-byte Reload
+ addq 360(%rsp), %rax
+ adcq 368(%rsp), %r14
+ adcq 376(%rsp), %rbx
+ movq %rbx, 24(%rsp) ## 8-byte Spill
+ movq 104(%rsp), %rcx ## 8-byte Reload
+ adcq 384(%rsp), %rcx
+ movq %rcx, 104(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rbx ## 8-byte Reload
+ adcq 392(%rsp), %rbx
+ movq 8(%rsp), %r15 ## 8-byte Reload
+ adcq 400(%rsp), %r15
+ adcq 408(%rsp), %rbp
+ movq %rbp, (%rsp) ## 8-byte Spill
+ adcq 416(%rsp), %r12
+ movq %r12, %rbp
+ adcq 424(%rsp), %r13
+ movq %r13, 16(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rcx ## 8-byte Reload
+ adcq 432(%rsp), %rcx
+ movq %rcx, 48(%rsp) ## 8-byte Spill
+ sbbq %r13, %r13
+ movq %rax, %rdx
+ movq %rax, %r12
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 280(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ andl $1, %r13d
+ addq 280(%rsp), %r12
+ adcq 288(%rsp), %r14
+ movq %r14, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rax ## 8-byte Reload
+ adcq 296(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ movq 104(%rsp), %r14 ## 8-byte Reload
+ adcq 304(%rsp), %r14
+ adcq 312(%rsp), %rbx
+ movq %rbx, 64(%rsp) ## 8-byte Spill
+ adcq 320(%rsp), %r15
+ movq %r15, 8(%rsp) ## 8-byte Spill
+ movq (%rsp), %rbx ## 8-byte Reload
+ adcq 328(%rsp), %rbx
+ adcq 336(%rsp), %rbp
+ movq %rbp, 32(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %r12 ## 8-byte Reload
+ adcq 344(%rsp), %r12
+ movq 48(%rsp), %rbp ## 8-byte Reload
+ adcq 352(%rsp), %rbp
+ adcq $0, %r13
+ movq 96(%rsp), %rax ## 8-byte Reload
+ movq 64(%rax), %rdx
+ leaq 200(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 40(%rsp), %rax ## 8-byte Reload
+ addq 200(%rsp), %rax
+ movq 24(%rsp), %r15 ## 8-byte Reload
+ adcq 208(%rsp), %r15
+ adcq 216(%rsp), %r14
+ movq %r14, 104(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %r14 ## 8-byte Reload
+ adcq 224(%rsp), %r14
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 232(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ adcq 240(%rsp), %rbx
+ movq %rbx, (%rsp) ## 8-byte Spill
+ movq 32(%rsp), %rcx ## 8-byte Reload
+ adcq 248(%rsp), %rcx
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ adcq 256(%rsp), %r12
+ movq %r12, 16(%rsp) ## 8-byte Spill
+ adcq 264(%rsp), %rbp
+ movq %rbp, 48(%rsp) ## 8-byte Spill
+ adcq 272(%rsp), %r13
+ sbbq %rbx, %rbx
+ movq 80(%rsp), %rdx ## 8-byte Reload
+ imulq %rax, %rdx
+ movq %rax, %r12
+ leaq 120(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ andl $1, %ebx
+ addq 120(%rsp), %r12
+ adcq 128(%rsp), %r15
+ movq 104(%rsp), %rbp ## 8-byte Reload
+ adcq 136(%rsp), %rbp
+ movq %r14, %rcx
+ adcq 144(%rsp), %rcx
+ movq %rcx, 64(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r8 ## 8-byte Reload
+ adcq 152(%rsp), %r8
+ movq %r8, 8(%rsp) ## 8-byte Spill
+ movq (%rsp), %r9 ## 8-byte Reload
+ adcq 160(%rsp), %r9
+ movq %r9, (%rsp) ## 8-byte Spill
+ movq 32(%rsp), %r10 ## 8-byte Reload
+ adcq 168(%rsp), %r10
+ movq %r10, 32(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %rdi ## 8-byte Reload
+ adcq 176(%rsp), %rdi
+ movq %rdi, 16(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %r14 ## 8-byte Reload
+ adcq 184(%rsp), %r14
+ adcq 192(%rsp), %r13
+ adcq $0, %rbx
+ movq %r15, %rsi
+ movq %r15, %r12
+ movq 72(%rsp), %rdx ## 8-byte Reload
+ subq (%rdx), %rsi
+ movq %rbp, %rax
+ movq %rbp, %r15
+ sbbq 8(%rdx), %rax
+ movq %rcx, %rbp
+ sbbq 16(%rdx), %rbp
+ movq %r8, %rcx
+ sbbq 24(%rdx), %rcx
+ movq %r9, %r8
+ sbbq 32(%rdx), %r8
+ movq %r10, %r11
+ sbbq 40(%rdx), %r11
+ movq %rdi, %r10
+ sbbq 48(%rdx), %r10
+ movq %r14, %rdi
+ sbbq 56(%rdx), %rdi
+ movq %r13, %r9
+ sbbq 64(%rdx), %r9
+ sbbq $0, %rbx
+ andl $1, %ebx
+ cmovneq %r13, %r9
+ testb %bl, %bl
+ cmovneq %r12, %rsi
+ movq 112(%rsp), %rbx ## 8-byte Reload
+ movq %rsi, (%rbx)
+ cmovneq %r15, %rax
+ movq %rax, 8(%rbx)
+ cmovneq 64(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 16(%rbx)
+ cmovneq 8(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 24(%rbx)
+ cmovneq (%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, 32(%rbx)
+ cmovneq 32(%rsp), %r11 ## 8-byte Folded Reload
+ movq %r11, 40(%rbx)
+ cmovneq 16(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r10, 48(%rbx)
+ cmovneq %r14, %rdi
+ movq %rdi, 56(%rbx)
+ movq %r9, 64(%rbx)
+ addq $1560, %rsp ## imm = 0x618
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montNF9Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_montNF9Lbmi2: ## @mcl_fp_montNF9Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $1560, %rsp ## imm = 0x618
+ movq %rcx, 72(%rsp) ## 8-byte Spill
+ movq %rdx, 80(%rsp) ## 8-byte Spill
+ movq %rsi, 88(%rsp) ## 8-byte Spill
+ movq %rdi, 112(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %rbx
+ movq %rbx, 96(%rsp) ## 8-byte Spill
+ movq (%rdx), %rdx
+ leaq 1480(%rsp), %rdi
+ callq l_mulPv576x64
+ movq 1480(%rsp), %r12
+ movq 1488(%rsp), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq %r12, %rdx
+ imulq %rbx, %rdx
+ movq 1552(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ movq 1544(%rsp), %r13
+ movq 1536(%rsp), %rax
+ movq %rax, 48(%rsp) ## 8-byte Spill
+ movq 1528(%rsp), %rax
+ movq %rax, 64(%rsp) ## 8-byte Spill
+ movq 1520(%rsp), %r14
+ movq 1512(%rsp), %r15
+ movq 1504(%rsp), %rbx
+ movq 1496(%rsp), %rbp
+ leaq 1400(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 1400(%rsp), %r12
+ movq 16(%rsp), %rax ## 8-byte Reload
+ adcq 1408(%rsp), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ adcq 1416(%rsp), %rbp
+ movq %rbp, 104(%rsp) ## 8-byte Spill
+ adcq 1424(%rsp), %rbx
+ movq %rbx, (%rsp) ## 8-byte Spill
+ adcq 1432(%rsp), %r15
+ movq %r15, 8(%rsp) ## 8-byte Spill
+ adcq 1440(%rsp), %r14
+ movq %r14, 32(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rbx ## 8-byte Reload
+ adcq 1448(%rsp), %rbx
+ movq 48(%rsp), %r12 ## 8-byte Reload
+ adcq 1456(%rsp), %r12
+ adcq 1464(%rsp), %r13
+ movq %r13, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq 1472(%rsp), %rbp
+ movq 80(%rsp), %rax ## 8-byte Reload
+ movq 8(%rax), %rdx
+ leaq 1320(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 1392(%rsp), %rax
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ addq 1320(%rsp), %rcx
+ movq 104(%rsp), %r15 ## 8-byte Reload
+ adcq 1328(%rsp), %r15
+ movq (%rsp), %r14 ## 8-byte Reload
+ adcq 1336(%rsp), %r14
+ movq 8(%rsp), %rdx ## 8-byte Reload
+ adcq 1344(%rsp), %rdx
+ movq %rdx, 8(%rsp) ## 8-byte Spill
+ movq 32(%rsp), %r13 ## 8-byte Reload
+ adcq 1352(%rsp), %r13
+ adcq 1360(%rsp), %rbx
+ movq %rbx, 64(%rsp) ## 8-byte Spill
+ adcq 1368(%rsp), %r12
+ movq %r12, 48(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %rdx ## 8-byte Reload
+ adcq 1376(%rsp), %rdx
+ movq %rdx, 40(%rsp) ## 8-byte Spill
+ adcq 1384(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ adcq $0, %rax
+ movq %rax, %rbp
+ movq %rcx, %rdx
+ movq %rcx, %rbx
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 1240(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 1240(%rsp), %rbx
+ adcq 1248(%rsp), %r15
+ movq %r15, 104(%rsp) ## 8-byte Spill
+ adcq 1256(%rsp), %r14
+ movq %r14, (%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r12 ## 8-byte Reload
+ adcq 1264(%rsp), %r12
+ adcq 1272(%rsp), %r13
+ movq %r13, %r14
+ movq 64(%rsp), %r13 ## 8-byte Reload
+ adcq 1280(%rsp), %r13
+ movq 48(%rsp), %rbx ## 8-byte Reload
+ adcq 1288(%rsp), %rbx
+ movq 40(%rsp), %r15 ## 8-byte Reload
+ adcq 1296(%rsp), %r15
+ movq 24(%rsp), %rax ## 8-byte Reload
+ adcq 1304(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ adcq 1312(%rsp), %rbp
+ movq %rbp, 56(%rsp) ## 8-byte Spill
+ movq 80(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rdx
+ leaq 1160(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 1232(%rsp), %rax
+ movq 104(%rsp), %rcx ## 8-byte Reload
+ addq 1160(%rsp), %rcx
+ movq (%rsp), %rbp ## 8-byte Reload
+ adcq 1168(%rsp), %rbp
+ adcq 1176(%rsp), %r12
+ movq %r12, 8(%rsp) ## 8-byte Spill
+ adcq 1184(%rsp), %r14
+ adcq 1192(%rsp), %r13
+ movq %r13, %r12
+ adcq 1200(%rsp), %rbx
+ movq %rbx, 48(%rsp) ## 8-byte Spill
+ adcq 1208(%rsp), %r15
+ movq %r15, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rbx ## 8-byte Reload
+ adcq 1216(%rsp), %rbx
+ movq 56(%rsp), %rdx ## 8-byte Reload
+ adcq 1224(%rsp), %rdx
+ movq %rdx, 56(%rsp) ## 8-byte Spill
+ movq %rax, %r15
+ adcq $0, %r15
+ movq %rcx, %rdx
+ movq %rcx, %r13
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 1080(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 1080(%rsp), %r13
+ adcq 1088(%rsp), %rbp
+ movq %rbp, (%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r13 ## 8-byte Reload
+ adcq 1096(%rsp), %r13
+ adcq 1104(%rsp), %r14
+ adcq 1112(%rsp), %r12
+ movq %r12, 64(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %r12 ## 8-byte Reload
+ adcq 1120(%rsp), %r12
+ movq 40(%rsp), %rbp ## 8-byte Reload
+ adcq 1128(%rsp), %rbp
+ adcq 1136(%rsp), %rbx
+ movq %rbx, 24(%rsp) ## 8-byte Spill
+ movq 56(%rsp), %rbx ## 8-byte Reload
+ adcq 1144(%rsp), %rbx
+ adcq 1152(%rsp), %r15
+ movq 80(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rdx
+ leaq 1000(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 1072(%rsp), %rax
+ movq (%rsp), %rcx ## 8-byte Reload
+ addq 1000(%rsp), %rcx
+ adcq 1008(%rsp), %r13
+ movq %r13, 8(%rsp) ## 8-byte Spill
+ adcq 1016(%rsp), %r14
+ movq %r14, 32(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %r14 ## 8-byte Reload
+ adcq 1024(%rsp), %r14
+ adcq 1032(%rsp), %r12
+ adcq 1040(%rsp), %rbp
+ movq %rbp, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %r13 ## 8-byte Reload
+ adcq 1048(%rsp), %r13
+ adcq 1056(%rsp), %rbx
+ movq %rbx, 56(%rsp) ## 8-byte Spill
+ adcq 1064(%rsp), %r15
+ movq %r15, 16(%rsp) ## 8-byte Spill
+ adcq $0, %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq %rcx, %rdx
+ movq %rcx, %rbx
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 920(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 920(%rsp), %rbx
+ movq 8(%rsp), %rax ## 8-byte Reload
+ adcq 928(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq 32(%rsp), %rbp ## 8-byte Reload
+ adcq 936(%rsp), %rbp
+ movq %r14, %rbx
+ adcq 944(%rsp), %rbx
+ adcq 952(%rsp), %r12
+ movq 40(%rsp), %rax ## 8-byte Reload
+ adcq 960(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ adcq 968(%rsp), %r13
+ movq %r13, %r15
+ movq 56(%rsp), %r13 ## 8-byte Reload
+ adcq 976(%rsp), %r13
+ movq 16(%rsp), %r14 ## 8-byte Reload
+ adcq 984(%rsp), %r14
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 992(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 80(%rsp), %rax ## 8-byte Reload
+ movq 32(%rax), %rdx
+ leaq 840(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 912(%rsp), %rax
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ addq 840(%rsp), %rcx
+ adcq 848(%rsp), %rbp
+ movq %rbp, 32(%rsp) ## 8-byte Spill
+ adcq 856(%rsp), %rbx
+ movq %rbx, 64(%rsp) ## 8-byte Spill
+ adcq 864(%rsp), %r12
+ movq 40(%rsp), %rbp ## 8-byte Reload
+ adcq 872(%rsp), %rbp
+ adcq 880(%rsp), %r15
+ movq %r15, 24(%rsp) ## 8-byte Spill
+ adcq 888(%rsp), %r13
+ adcq 896(%rsp), %r14
+ movq %r14, 16(%rsp) ## 8-byte Spill
+ movq (%rsp), %rdx ## 8-byte Reload
+ adcq 904(%rsp), %rdx
+ movq %rdx, (%rsp) ## 8-byte Spill
+ adcq $0, %rax
+ movq %rax, %r14
+ movq %rcx, %rdx
+ movq %rcx, %rbx
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 760(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 760(%rsp), %rbx
+ movq 32(%rsp), %rax ## 8-byte Reload
+ adcq 768(%rsp), %rax
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %r15 ## 8-byte Reload
+ adcq 776(%rsp), %r15
+ adcq 784(%rsp), %r12
+ movq %r12, 48(%rsp) ## 8-byte Spill
+ movq %rbp, %rbx
+ adcq 792(%rsp), %rbx
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq 800(%rsp), %rbp
+ adcq 808(%rsp), %r13
+ movq 16(%rsp), %rax ## 8-byte Reload
+ adcq 816(%rsp), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq (%rsp), %r12 ## 8-byte Reload
+ adcq 824(%rsp), %r12
+ adcq 832(%rsp), %r14
+ movq 80(%rsp), %rax ## 8-byte Reload
+ movq 40(%rax), %rdx
+ leaq 680(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 752(%rsp), %rcx
+ movq 32(%rsp), %rax ## 8-byte Reload
+ addq 680(%rsp), %rax
+ adcq 688(%rsp), %r15
+ movq %r15, 64(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rdx ## 8-byte Reload
+ adcq 696(%rsp), %rdx
+ movq %rdx, 48(%rsp) ## 8-byte Spill
+ adcq 704(%rsp), %rbx
+ movq %rbx, 40(%rsp) ## 8-byte Spill
+ adcq 712(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ adcq 720(%rsp), %r13
+ movq %r13, %r15
+ movq 16(%rsp), %rbx ## 8-byte Reload
+ adcq 728(%rsp), %rbx
+ adcq 736(%rsp), %r12
+ movq %r12, (%rsp) ## 8-byte Spill
+ adcq 744(%rsp), %r14
+ movq %r14, 32(%rsp) ## 8-byte Spill
+ adcq $0, %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ movq %rax, %rdx
+ movq %rax, %r13
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 600(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 600(%rsp), %r13
+ movq 64(%rsp), %r13 ## 8-byte Reload
+ adcq 608(%rsp), %r13
+ movq 48(%rsp), %r12 ## 8-byte Reload
+ adcq 616(%rsp), %r12
+ movq 40(%rsp), %rbp ## 8-byte Reload
+ adcq 624(%rsp), %rbp
+ movq 24(%rsp), %rax ## 8-byte Reload
+ adcq 632(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ adcq 640(%rsp), %r15
+ movq %r15, 56(%rsp) ## 8-byte Spill
+ adcq 648(%rsp), %rbx
+ movq %rbx, 16(%rsp) ## 8-byte Spill
+ movq (%rsp), %r14 ## 8-byte Reload
+ adcq 656(%rsp), %r14
+ movq 32(%rsp), %rbx ## 8-byte Reload
+ adcq 664(%rsp), %rbx
+ movq 8(%rsp), %r15 ## 8-byte Reload
+ adcq 672(%rsp), %r15
+ movq 80(%rsp), %rax ## 8-byte Reload
+ movq 48(%rax), %rdx
+ leaq 520(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 592(%rsp), %rcx
+ movq %r13, %rax
+ addq 520(%rsp), %rax
+ adcq 528(%rsp), %r12
+ movq %r12, 48(%rsp) ## 8-byte Spill
+ movq %rbp, %r12
+ adcq 536(%rsp), %r12
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq 544(%rsp), %rbp
+ movq 56(%rsp), %rdx ## 8-byte Reload
+ adcq 552(%rsp), %rdx
+ movq %rdx, 56(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %rdx ## 8-byte Reload
+ adcq 560(%rsp), %rdx
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ adcq 568(%rsp), %r14
+ movq %r14, (%rsp) ## 8-byte Spill
+ adcq 576(%rsp), %rbx
+ movq %rbx, 32(%rsp) ## 8-byte Spill
+ adcq 584(%rsp), %r15
+ movq %r15, 8(%rsp) ## 8-byte Spill
+ adcq $0, %rcx
+ movq %rcx, %r13
+ movq %rax, %rdx
+ movq %rax, %r14
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 440(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 440(%rsp), %r14
+ movq 48(%rsp), %rax ## 8-byte Reload
+ adcq 448(%rsp), %rax
+ movq %rax, 48(%rsp) ## 8-byte Spill
+ adcq 456(%rsp), %r12
+ adcq 464(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ movq 56(%rsp), %r14 ## 8-byte Reload
+ adcq 472(%rsp), %r14
+ movq 16(%rsp), %r15 ## 8-byte Reload
+ adcq 480(%rsp), %r15
+ movq (%rsp), %rbp ## 8-byte Reload
+ adcq 488(%rsp), %rbp
+ movq 32(%rsp), %rbx ## 8-byte Reload
+ adcq 496(%rsp), %rbx
+ movq 8(%rsp), %rax ## 8-byte Reload
+ adcq 504(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ adcq 512(%rsp), %r13
+ movq 80(%rsp), %rax ## 8-byte Reload
+ movq 56(%rax), %rdx
+ leaq 360(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 432(%rsp), %rcx
+ movq 48(%rsp), %rax ## 8-byte Reload
+ addq 360(%rsp), %rax
+ adcq 368(%rsp), %r12
+ movq %r12, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rdx ## 8-byte Reload
+ adcq 376(%rsp), %rdx
+ movq %rdx, 24(%rsp) ## 8-byte Spill
+ adcq 384(%rsp), %r14
+ movq %r14, 56(%rsp) ## 8-byte Spill
+ adcq 392(%rsp), %r15
+ movq %r15, 16(%rsp) ## 8-byte Spill
+ adcq 400(%rsp), %rbp
+ movq %rbp, (%rsp) ## 8-byte Spill
+ adcq 408(%rsp), %rbx
+ movq %rbx, 32(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r14 ## 8-byte Reload
+ adcq 416(%rsp), %r14
+ adcq 424(%rsp), %r13
+ movq %r13, %r15
+ adcq $0, %rcx
+ movq %rcx, 48(%rsp) ## 8-byte Spill
+ movq %rax, %rdx
+ movq %rax, %r12
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 280(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 280(%rsp), %r12
+ movq 40(%rsp), %rax ## 8-byte Reload
+ adcq 288(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq 296(%rsp), %rbp
+ movq 56(%rsp), %rax ## 8-byte Reload
+ adcq 304(%rsp), %rax
+ movq %rax, 56(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %r13 ## 8-byte Reload
+ adcq 312(%rsp), %r13
+ movq (%rsp), %r12 ## 8-byte Reload
+ adcq 320(%rsp), %r12
+ movq 32(%rsp), %rbx ## 8-byte Reload
+ adcq 328(%rsp), %rbx
+ adcq 336(%rsp), %r14
+ movq %r14, 8(%rsp) ## 8-byte Spill
+ adcq 344(%rsp), %r15
+ movq %r15, 64(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %r14 ## 8-byte Reload
+ adcq 352(%rsp), %r14
+ movq 80(%rsp), %rax ## 8-byte Reload
+ movq 64(%rax), %rdx
+ leaq 200(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 272(%rsp), %rcx
+ movq 40(%rsp), %rax ## 8-byte Reload
+ addq 200(%rsp), %rax
+ adcq 208(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ movq 56(%rsp), %rbp ## 8-byte Reload
+ adcq 216(%rsp), %rbp
+ adcq 224(%rsp), %r13
+ movq %r13, 16(%rsp) ## 8-byte Spill
+ adcq 232(%rsp), %r12
+ movq %r12, (%rsp) ## 8-byte Spill
+ adcq 240(%rsp), %rbx
+ movq %rbx, 32(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r15 ## 8-byte Reload
+ adcq 248(%rsp), %r15
+ movq 64(%rsp), %r12 ## 8-byte Reload
+ adcq 256(%rsp), %r12
+ adcq 264(%rsp), %r14
+ adcq $0, %rcx
+ movq %rcx, 40(%rsp) ## 8-byte Spill
+ movq 96(%rsp), %rdx ## 8-byte Reload
+ imulq %rax, %rdx
+ movq %rax, %rbx
+ leaq 120(%rsp), %rdi
+ movq 72(%rsp), %r13 ## 8-byte Reload
+ movq %r13, %rsi
+ callq l_mulPv576x64
+ addq 120(%rsp), %rbx
+ movq 24(%rsp), %rcx ## 8-byte Reload
+ adcq 128(%rsp), %rcx
+ movq %rbp, %rdx
+ adcq 136(%rsp), %rdx
+ movq 16(%rsp), %rsi ## 8-byte Reload
+ adcq 144(%rsp), %rsi
+ movq %rsi, 16(%rsp) ## 8-byte Spill
+ movq (%rsp), %rdi ## 8-byte Reload
+ adcq 152(%rsp), %rdi
+ movq %rdi, (%rsp) ## 8-byte Spill
+ movq 32(%rsp), %rbx ## 8-byte Reload
+ adcq 160(%rsp), %rbx
+ movq %rbx, 32(%rsp) ## 8-byte Spill
+ movq %r15, %r8
+ adcq 168(%rsp), %r8
+ movq %r8, 8(%rsp) ## 8-byte Spill
+ movq %r12, %r15
+ adcq 176(%rsp), %r15
+ adcq 184(%rsp), %r14
+ movq 40(%rsp), %r9 ## 8-byte Reload
+ adcq 192(%rsp), %r9
+ movq %rcx, %rax
+ movq %rcx, %r11
+ movq %r13, %rbp
+ subq (%rbp), %rax
+ movq %rdx, %rcx
+ movq %rdx, %r12
+ sbbq 8(%rbp), %rcx
+ movq %rsi, %rdx
+ sbbq 16(%rbp), %rdx
+ movq %rdi, %rsi
+ sbbq 24(%rbp), %rsi
+ movq %rbx, %rdi
+ sbbq 32(%rbp), %rdi
+ movq %r8, %r10
+ sbbq 40(%rbp), %r10
+ movq %r15, %r13
+ sbbq 48(%rbp), %r13
+ movq %r14, %r8
+ sbbq 56(%rbp), %r8
+ movq %rbp, %rbx
+ movq %r9, %rbp
+ sbbq 64(%rbx), %rbp
+ movq %rbp, %rbx
+ sarq $63, %rbx
+ cmovsq %r11, %rax
+ movq 112(%rsp), %rbx ## 8-byte Reload
+ movq %rax, (%rbx)
+ cmovsq %r12, %rcx
+ movq %rcx, 8(%rbx)
+ cmovsq 16(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 16(%rbx)
+ cmovsq (%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 24(%rbx)
+ cmovsq 32(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 32(%rbx)
+ cmovsq 8(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r10, 40(%rbx)
+ cmovsq %r15, %r13
+ movq %r13, 48(%rbx)
+ cmovsq %r14, %r8
+ movq %r8, 56(%rbx)
+ cmovsq %r9, %rbp
+ movq %rbp, 64(%rbx)
+ addq $1560, %rsp ## imm = 0x618
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montRed9Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_montRed9Lbmi2: ## @mcl_fp_montRed9Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $936, %rsp ## imm = 0x3A8
+ movq %rdx, %rax
+ movq %rdi, 208(%rsp) ## 8-byte Spill
+ movq -8(%rax), %rcx
+ movq %rcx, 96(%rsp) ## 8-byte Spill
+ movq (%rsi), %r14
+ movq 8(%rsi), %rdx
+ movq %rdx, (%rsp) ## 8-byte Spill
+ movq %r14, %rdx
+ imulq %rcx, %rdx
+ movq 136(%rsi), %rcx
+ movq %rcx, 88(%rsp) ## 8-byte Spill
+ movq 128(%rsi), %rcx
+ movq %rcx, 56(%rsp) ## 8-byte Spill
+ movq 120(%rsi), %rcx
+ movq %rcx, 80(%rsp) ## 8-byte Spill
+ movq 112(%rsi), %rcx
+ movq %rcx, 72(%rsp) ## 8-byte Spill
+ movq 104(%rsi), %rcx
+ movq %rcx, 48(%rsp) ## 8-byte Spill
+ movq 96(%rsi), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ movq 88(%rsi), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ movq 80(%rsi), %rcx
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ movq 72(%rsi), %r12
+ movq 64(%rsi), %rcx
+ movq %rcx, 40(%rsp) ## 8-byte Spill
+ movq 56(%rsi), %rcx
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ movq 48(%rsi), %rcx
+ movq %rcx, 64(%rsp) ## 8-byte Spill
+ movq 40(%rsi), %rbp
+ movq 32(%rsi), %rbx
+ movq 24(%rsi), %r13
+ movq 16(%rsi), %r15
+ movq %rax, %rcx
+ movq (%rcx), %rax
+ movq %rax, 144(%rsp) ## 8-byte Spill
+ movq 64(%rcx), %rax
+ movq %rax, 200(%rsp) ## 8-byte Spill
+ movq 56(%rcx), %rax
+ movq %rax, 192(%rsp) ## 8-byte Spill
+ movq 48(%rcx), %rax
+ movq %rax, 184(%rsp) ## 8-byte Spill
+ movq 40(%rcx), %rax
+ movq %rax, 176(%rsp) ## 8-byte Spill
+ movq 32(%rcx), %rax
+ movq %rax, 168(%rsp) ## 8-byte Spill
+ movq 24(%rcx), %rax
+ movq %rax, 160(%rsp) ## 8-byte Spill
+ movq 16(%rcx), %rax
+ movq %rax, 152(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %rax
+ movq %rax, 136(%rsp) ## 8-byte Spill
+ movq %rcx, %rsi
+ movq %rsi, 104(%rsp) ## 8-byte Spill
+ leaq 856(%rsp), %rdi
+ callq l_mulPv576x64
+ addq 856(%rsp), %r14
+ movq (%rsp), %rcx ## 8-byte Reload
+ adcq 864(%rsp), %rcx
+ adcq 872(%rsp), %r15
+ adcq 880(%rsp), %r13
+ adcq 888(%rsp), %rbx
+ movq %rbx, 120(%rsp) ## 8-byte Spill
+ adcq 896(%rsp), %rbp
+ movq %rbp, 112(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rax ## 8-byte Reload
+ adcq 904(%rsp), %rax
+ movq %rax, 64(%rsp) ## 8-byte Spill
+ movq 32(%rsp), %rax ## 8-byte Reload
+ adcq 912(%rsp), %rax
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %rax ## 8-byte Reload
+ adcq 920(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ adcq 928(%rsp), %r12
+ movq %r12, (%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq $0, %rbp
+ adcq $0, 8(%rsp) ## 8-byte Folded Spill
+ adcq $0, 16(%rsp) ## 8-byte Folded Spill
+ adcq $0, 48(%rsp) ## 8-byte Folded Spill
+ adcq $0, 72(%rsp) ## 8-byte Folded Spill
+ adcq $0, 80(%rsp) ## 8-byte Folded Spill
+ adcq $0, 56(%rsp) ## 8-byte Folded Spill
+ movq 88(%rsp), %r14 ## 8-byte Reload
+ adcq $0, %r14
+ sbbq %r12, %r12
+ movq %rcx, %rdx
+ movq %rcx, %rbx
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 776(%rsp), %rdi
+ movq 104(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ andl $1, %r12d
+ addq 776(%rsp), %rbx
+ adcq 784(%rsp), %r15
+ adcq 792(%rsp), %r13
+ movq %r13, 128(%rsp) ## 8-byte Spill
+ movq 120(%rsp), %rax ## 8-byte Reload
+ adcq 800(%rsp), %rax
+ movq %rax, 120(%rsp) ## 8-byte Spill
+ movq 112(%rsp), %rax ## 8-byte Reload
+ adcq 808(%rsp), %rax
+ movq %rax, 112(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rax ## 8-byte Reload
+ adcq 816(%rsp), %rax
+ movq %rax, 64(%rsp) ## 8-byte Spill
+ movq 32(%rsp), %rax ## 8-byte Reload
+ adcq 824(%rsp), %rax
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %rax ## 8-byte Reload
+ adcq 832(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 840(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ adcq 848(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r13 ## 8-byte Reload
+ adcq $0, %r13
+ adcq $0, 16(%rsp) ## 8-byte Folded Spill
+ adcq $0, 48(%rsp) ## 8-byte Folded Spill
+ adcq $0, 72(%rsp) ## 8-byte Folded Spill
+ adcq $0, 80(%rsp) ## 8-byte Folded Spill
+ movq 56(%rsp), %rbx ## 8-byte Reload
+ adcq $0, %rbx
+ adcq $0, %r14
+ movq %r14, 88(%rsp) ## 8-byte Spill
+ adcq $0, %r12
+ movq %r15, %rdx
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 696(%rsp), %rdi
+ movq 104(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 696(%rsp), %r15
+ movq 128(%rsp), %rcx ## 8-byte Reload
+ adcq 704(%rsp), %rcx
+ movq 120(%rsp), %rax ## 8-byte Reload
+ adcq 712(%rsp), %rax
+ movq %rax, 120(%rsp) ## 8-byte Spill
+ movq 112(%rsp), %rax ## 8-byte Reload
+ adcq 720(%rsp), %rax
+ movq %rax, 112(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rbp ## 8-byte Reload
+ adcq 728(%rsp), %rbp
+ movq 32(%rsp), %r14 ## 8-byte Reload
+ adcq 736(%rsp), %r14
+ movq 40(%rsp), %r15 ## 8-byte Reload
+ adcq 744(%rsp), %r15
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 752(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rax ## 8-byte Reload
+ adcq 760(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ adcq 768(%rsp), %r13
+ movq %r13, 8(%rsp) ## 8-byte Spill
+ adcq $0, 16(%rsp) ## 8-byte Folded Spill
+ movq 48(%rsp), %r13 ## 8-byte Reload
+ adcq $0, %r13
+ adcq $0, 72(%rsp) ## 8-byte Folded Spill
+ adcq $0, 80(%rsp) ## 8-byte Folded Spill
+ adcq $0, %rbx
+ movq %rbx, 56(%rsp) ## 8-byte Spill
+ adcq $0, 88(%rsp) ## 8-byte Folded Spill
+ adcq $0, %r12
+ movq %rcx, %rbx
+ movq %rbx, %rdx
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 616(%rsp), %rdi
+ movq 104(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 616(%rsp), %rbx
+ movq 120(%rsp), %rax ## 8-byte Reload
+ adcq 624(%rsp), %rax
+ movq 112(%rsp), %rcx ## 8-byte Reload
+ adcq 632(%rsp), %rcx
+ movq %rcx, 112(%rsp) ## 8-byte Spill
+ adcq 640(%rsp), %rbp
+ movq %rbp, 64(%rsp) ## 8-byte Spill
+ adcq 648(%rsp), %r14
+ movq %r14, 32(%rsp) ## 8-byte Spill
+ adcq 656(%rsp), %r15
+ movq (%rsp), %r14 ## 8-byte Reload
+ adcq 664(%rsp), %r14
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq 672(%rsp), %rbp
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 680(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ adcq 688(%rsp), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ adcq $0, %r13
+ movq %r13, 48(%rsp) ## 8-byte Spill
+ adcq $0, 72(%rsp) ## 8-byte Folded Spill
+ adcq $0, 80(%rsp) ## 8-byte Folded Spill
+ adcq $0, 56(%rsp) ## 8-byte Folded Spill
+ adcq $0, 88(%rsp) ## 8-byte Folded Spill
+ adcq $0, %r12
+ movq %rax, %rbx
+ movq %rbx, %rdx
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 536(%rsp), %rdi
+ movq 104(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 536(%rsp), %rbx
+ movq 112(%rsp), %rax ## 8-byte Reload
+ adcq 544(%rsp), %rax
+ movq 64(%rsp), %rcx ## 8-byte Reload
+ adcq 552(%rsp), %rcx
+ movq %rcx, 64(%rsp) ## 8-byte Spill
+ movq 32(%rsp), %rcx ## 8-byte Reload
+ adcq 560(%rsp), %rcx
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ adcq 568(%rsp), %r15
+ movq %r15, 40(%rsp) ## 8-byte Spill
+ adcq 576(%rsp), %r14
+ movq %r14, (%rsp) ## 8-byte Spill
+ adcq 584(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r13 ## 8-byte Reload
+ adcq 592(%rsp), %r13
+ movq 16(%rsp), %r15 ## 8-byte Reload
+ adcq 600(%rsp), %r15
+ movq 48(%rsp), %rbp ## 8-byte Reload
+ adcq 608(%rsp), %rbp
+ movq 72(%rsp), %rbx ## 8-byte Reload
+ adcq $0, %rbx
+ adcq $0, 80(%rsp) ## 8-byte Folded Spill
+ adcq $0, 56(%rsp) ## 8-byte Folded Spill
+ adcq $0, 88(%rsp) ## 8-byte Folded Spill
+ adcq $0, %r12
+ movq %rax, %rdx
+ movq %rax, %r14
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 456(%rsp), %rdi
+ movq 104(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 456(%rsp), %r14
+ movq 64(%rsp), %rax ## 8-byte Reload
+ adcq 464(%rsp), %rax
+ movq 32(%rsp), %rcx ## 8-byte Reload
+ adcq 472(%rsp), %rcx
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %rcx ## 8-byte Reload
+ adcq 480(%rsp), %rcx
+ movq %rcx, 40(%rsp) ## 8-byte Spill
+ movq (%rsp), %rcx ## 8-byte Reload
+ adcq 488(%rsp), %rcx
+ movq %rcx, (%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rcx ## 8-byte Reload
+ adcq 496(%rsp), %rcx
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq 504(%rsp), %r13
+ movq %r13, 8(%rsp) ## 8-byte Spill
+ adcq 512(%rsp), %r15
+ movq %r15, 16(%rsp) ## 8-byte Spill
+ adcq 520(%rsp), %rbp
+ movq %rbp, 48(%rsp) ## 8-byte Spill
+ adcq 528(%rsp), %rbx
+ movq %rbx, 72(%rsp) ## 8-byte Spill
+ movq 80(%rsp), %r14 ## 8-byte Reload
+ adcq $0, %r14
+ movq 56(%rsp), %r13 ## 8-byte Reload
+ adcq $0, %r13
+ movq 88(%rsp), %rbx ## 8-byte Reload
+ adcq $0, %rbx
+ adcq $0, %r12
+ movq %rax, %rdx
+ movq %rax, %r15
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 376(%rsp), %rdi
+ movq 104(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 376(%rsp), %r15
+ movq 32(%rsp), %rax ## 8-byte Reload
+ adcq 384(%rsp), %rax
+ movq 40(%rsp), %rcx ## 8-byte Reload
+ adcq 392(%rsp), %rcx
+ movq %rcx, 40(%rsp) ## 8-byte Spill
+ movq (%rsp), %rcx ## 8-byte Reload
+ adcq 400(%rsp), %rcx
+ movq %rcx, (%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq 408(%rsp), %rbp
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 416(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ adcq 424(%rsp), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rcx ## 8-byte Reload
+ adcq 432(%rsp), %rcx
+ movq %rcx, 48(%rsp) ## 8-byte Spill
+ movq 72(%rsp), %r15 ## 8-byte Reload
+ adcq 440(%rsp), %r15
+ adcq 448(%rsp), %r14
+ movq %r14, 80(%rsp) ## 8-byte Spill
+ adcq $0, %r13
+ movq %r13, %r14
+ adcq $0, %rbx
+ movq %rbx, 88(%rsp) ## 8-byte Spill
+ adcq $0, %r12
+ movq %rax, %rbx
+ movq %rbx, %rdx
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 296(%rsp), %rdi
+ movq 104(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 296(%rsp), %rbx
+ movq 40(%rsp), %rax ## 8-byte Reload
+ adcq 304(%rsp), %rax
+ movq (%rsp), %r13 ## 8-byte Reload
+ adcq 312(%rsp), %r13
+ adcq 320(%rsp), %rbp
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 328(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ adcq 336(%rsp), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rcx ## 8-byte Reload
+ adcq 344(%rsp), %rcx
+ movq %rcx, 48(%rsp) ## 8-byte Spill
+ adcq 352(%rsp), %r15
+ movq %r15, 72(%rsp) ## 8-byte Spill
+ movq 80(%rsp), %r15 ## 8-byte Reload
+ adcq 360(%rsp), %r15
+ adcq 368(%rsp), %r14
+ movq %r14, 56(%rsp) ## 8-byte Spill
+ movq 88(%rsp), %r14 ## 8-byte Reload
+ adcq $0, %r14
+ adcq $0, %r12
+ movq 96(%rsp), %rdx ## 8-byte Reload
+ imulq %rax, %rdx
+ movq %rax, %rbx
+ leaq 216(%rsp), %rdi
+ movq 104(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 216(%rsp), %rbx
+ movq %r13, %rsi
+ adcq 224(%rsp), %rsi
+ movq %rsi, (%rsp) ## 8-byte Spill
+ adcq 232(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r9 ## 8-byte Reload
+ adcq 240(%rsp), %r9
+ movq %r9, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %r8 ## 8-byte Reload
+ adcq 248(%rsp), %r8
+ movq %r8, 16(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rbx ## 8-byte Reload
+ adcq 256(%rsp), %rbx
+ movq 72(%rsp), %rax ## 8-byte Reload
+ adcq 264(%rsp), %rax
+ movq %r15, %rcx
+ adcq 272(%rsp), %rcx
+ movq 56(%rsp), %rdx ## 8-byte Reload
+ adcq 280(%rsp), %rdx
+ movq %rdx, 56(%rsp) ## 8-byte Spill
+ adcq 288(%rsp), %r14
+ movq %r14, %r11
+ adcq $0, %r12
+ subq 144(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rbp, %rdi
+ sbbq 136(%rsp), %rdi ## 8-byte Folded Reload
+ movq %r9, %rbp
+ sbbq 152(%rsp), %rbp ## 8-byte Folded Reload
+ movq %r8, %r13
+ sbbq 160(%rsp), %r13 ## 8-byte Folded Reload
+ movq %rbx, %r15
+ sbbq 168(%rsp), %r15 ## 8-byte Folded Reload
+ movq %rax, %r14
+ sbbq 176(%rsp), %r14 ## 8-byte Folded Reload
+ movq %rcx, %r10
+ sbbq 184(%rsp), %r10 ## 8-byte Folded Reload
+ movq %rdx, %r8
+ sbbq 192(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r11, %r9
+ sbbq 200(%rsp), %r9 ## 8-byte Folded Reload
+ sbbq $0, %r12
+ andl $1, %r12d
+ cmovneq %r11, %r9
+ testb %r12b, %r12b
+ cmovneq (%rsp), %rsi ## 8-byte Folded Reload
+ movq 208(%rsp), %rdx ## 8-byte Reload
+ movq %rsi, (%rdx)
+ cmovneq 24(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 8(%rdx)
+ cmovneq 8(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 16(%rdx)
+ cmovneq 16(%rsp), %r13 ## 8-byte Folded Reload
+ movq %r13, 24(%rdx)
+ cmovneq %rbx, %r15
+ movq %r15, 32(%rdx)
+ cmovneq %rax, %r14
+ movq %r14, 40(%rdx)
+ cmovneq %rcx, %r10
+ movq %r10, 48(%rdx)
+ cmovneq 56(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, 56(%rdx)
+ movq %r9, 64(%rdx)
+ addq $936, %rsp ## imm = 0x3A8
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_addPre9Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_addPre9Lbmi2: ## @mcl_fp_addPre9Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 64(%rdx), %r8
+ movq 64(%rsi), %r15
+ movq 56(%rsi), %r9
+ movq 48(%rsi), %r10
+ movq 40(%rsi), %r11
+ movq 24(%rsi), %r12
+ movq 32(%rsi), %r14
+ movq (%rdx), %rbx
+ movq 8(%rdx), %rcx
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %rcx
+ movq 16(%rdx), %rax
+ adcq 16(%rsi), %rax
+ adcq 24(%rdx), %r12
+ movq 56(%rdx), %r13
+ movq 48(%rdx), %rsi
+ movq 40(%rdx), %rbp
+ movq 32(%rdx), %rdx
+ movq %rbx, (%rdi)
+ movq %rcx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ movq %r12, 24(%rdi)
+ adcq %r14, %rdx
+ movq %rdx, 32(%rdi)
+ adcq %r11, %rbp
+ movq %rbp, 40(%rdi)
+ adcq %r10, %rsi
+ movq %rsi, 48(%rdi)
+ adcq %r9, %r13
+ movq %r13, 56(%rdi)
+ adcq %r8, %r15
+ movq %r15, 64(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_subPre9Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_subPre9Lbmi2: ## @mcl_fp_subPre9Lbmi2
+## BB#0:
+ movq 32(%rdx), %r8
+ movq (%rsi), %rcx
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ movq %rcx, (%rdi)
+ movq 8(%rsi), %rcx
+ sbbq 8(%rdx), %rcx
+ movq %rcx, 8(%rdi)
+ movq 16(%rsi), %rcx
+ sbbq 16(%rdx), %rcx
+ movq %rcx, 16(%rdi)
+ movq 24(%rsi), %rcx
+ sbbq 24(%rdx), %rcx
+ movq %rcx, 24(%rdi)
+ movq 32(%rsi), %rcx
+ sbbq %r8, %rcx
+ movq 40(%rdx), %r8
+ movq %rcx, 32(%rdi)
+ movq 40(%rsi), %rcx
+ sbbq %r8, %rcx
+ movq 48(%rdx), %r8
+ movq %rcx, 40(%rdi)
+ movq 48(%rsi), %rcx
+ sbbq %r8, %rcx
+ movq 56(%rdx), %r8
+ movq %rcx, 48(%rdi)
+ movq 56(%rsi), %rcx
+ sbbq %r8, %rcx
+ movq %rcx, 56(%rdi)
+ movq 64(%rdx), %rcx
+ movq 64(%rsi), %rdx
+ sbbq %rcx, %rdx
+ movq %rdx, 64(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+
+ .globl _mcl_fp_shr1_9Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_shr1_9Lbmi2: ## @mcl_fp_shr1_9Lbmi2
+## BB#0:
+ pushq %rbx
+ movq 64(%rsi), %r8
+ movq 56(%rsi), %r9
+ movq 48(%rsi), %r10
+ movq 40(%rsi), %r11
+ movq 32(%rsi), %rcx
+ movq 24(%rsi), %rdx
+ movq 16(%rsi), %rax
+ movq (%rsi), %rbx
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rbx
+ movq %rbx, (%rdi)
+ shrdq $1, %rax, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rdx, %rax
+ movq %rax, 16(%rdi)
+ shrdq $1, %rcx, %rdx
+ movq %rdx, 24(%rdi)
+ shrdq $1, %r11, %rcx
+ movq %rcx, 32(%rdi)
+ shrdq $1, %r10, %r11
+ movq %r11, 40(%rdi)
+ shrdq $1, %r9, %r10
+ movq %r10, 48(%rdi)
+ shrdq $1, %r8, %r9
+ movq %r9, 56(%rdi)
+ shrq %r8
+ movq %r8, 64(%rdi)
+ popq %rbx
+ retq
+
+ .globl _mcl_fp_add9Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_add9Lbmi2: ## @mcl_fp_add9Lbmi2
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 64(%rdx), %r12
+ movq 64(%rsi), %r8
+ movq 56(%rsi), %r13
+ movq 48(%rsi), %r9
+ movq 40(%rsi), %r10
+ movq 24(%rsi), %r14
+ movq 32(%rsi), %r11
+ movq (%rdx), %rbx
+ movq 8(%rdx), %r15
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %r15
+ movq 16(%rdx), %rax
+ adcq 16(%rsi), %rax
+ adcq 24(%rdx), %r14
+ adcq 32(%rdx), %r11
+ adcq 40(%rdx), %r10
+ movq 56(%rdx), %rsi
+ adcq 48(%rdx), %r9
+ movq %rbx, (%rdi)
+ movq %r15, 8(%rdi)
+ movq %rax, 16(%rdi)
+ movq %r14, 24(%rdi)
+ movq %r11, 32(%rdi)
+ movq %r10, 40(%rdi)
+ movq %r9, 48(%rdi)
+ adcq %r13, %rsi
+ movq %rsi, 56(%rdi)
+ adcq %r12, %r8
+ movq %r8, 64(%rdi)
+ sbbq %rdx, %rdx
+ andl $1, %edx
+ subq (%rcx), %rbx
+ sbbq 8(%rcx), %r15
+ sbbq 16(%rcx), %rax
+ sbbq 24(%rcx), %r14
+ sbbq 32(%rcx), %r11
+ sbbq 40(%rcx), %r10
+ sbbq 48(%rcx), %r9
+ sbbq 56(%rcx), %rsi
+ sbbq 64(%rcx), %r8
+ sbbq $0, %rdx
+ testb $1, %dl
+ jne LBB136_2
+## BB#1: ## %nocarry
+ movq %rbx, (%rdi)
+ movq %r15, 8(%rdi)
+ movq %rax, 16(%rdi)
+ movq %r14, 24(%rdi)
+ movq %r11, 32(%rdi)
+ movq %r10, 40(%rdi)
+ movq %r9, 48(%rdi)
+ movq %rsi, 56(%rdi)
+ movq %r8, 64(%rdi)
+LBB136_2: ## %carry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_addNF9Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_addNF9Lbmi2: ## @mcl_fp_addNF9Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, %r8
+ movq 64(%rdx), %r10
+ movq 56(%rdx), %r11
+ movq 48(%rdx), %r9
+ movq 40(%rdx), %rax
+ movq 32(%rdx), %rdi
+ movq 24(%rdx), %rbp
+ movq 16(%rdx), %r15
+ movq (%rdx), %rbx
+ movq 8(%rdx), %r13
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %r13
+ adcq 16(%rsi), %r15
+ adcq 24(%rsi), %rbp
+ movq %rbp, -24(%rsp) ## 8-byte Spill
+ adcq 32(%rsi), %rdi
+ movq %rdi, -40(%rsp) ## 8-byte Spill
+ adcq 40(%rsi), %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ adcq 48(%rsi), %r9
+ movq %r9, %rdi
+ movq %rdi, -16(%rsp) ## 8-byte Spill
+ adcq 56(%rsi), %r11
+ movq %r11, %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ adcq 64(%rsi), %r10
+ movq %r10, %r9
+ movq %rbx, %rsi
+ subq (%rcx), %rsi
+ movq %r13, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r15, %r12
+ sbbq 16(%rcx), %r12
+ sbbq 24(%rcx), %rbp
+ movq -40(%rsp), %r14 ## 8-byte Reload
+ sbbq 32(%rcx), %r14
+ movq -32(%rsp), %r11 ## 8-byte Reload
+ sbbq 40(%rcx), %r11
+ movq %rdi, %r10
+ sbbq 48(%rcx), %r10
+ movq %rax, %rdi
+ sbbq 56(%rcx), %rdi
+ movq %r9, %rax
+ sbbq 64(%rcx), %rax
+ movq %rax, %rcx
+ sarq $63, %rcx
+ cmovsq %rbx, %rsi
+ movq %rsi, (%r8)
+ cmovsq %r13, %rdx
+ movq %rdx, 8(%r8)
+ cmovsq %r15, %r12
+ movq %r12, 16(%r8)
+ cmovsq -24(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 24(%r8)
+ cmovsq -40(%rsp), %r14 ## 8-byte Folded Reload
+ movq %r14, 32(%r8)
+ cmovsq -32(%rsp), %r11 ## 8-byte Folded Reload
+ movq %r11, 40(%r8)
+ cmovsq -16(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r10, 48(%r8)
+ cmovsq -8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 56(%r8)
+ cmovsq %r9, %rax
+ movq %rax, 64(%r8)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_sub9Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_sub9Lbmi2: ## @mcl_fp_sub9Lbmi2
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 64(%rdx), %r13
+ movq (%rsi), %rax
+ movq 8(%rsi), %r9
+ xorl %ebx, %ebx
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r9
+ movq 16(%rsi), %r10
+ sbbq 16(%rdx), %r10
+ movq 24(%rsi), %r11
+ sbbq 24(%rdx), %r11
+ movq 32(%rsi), %r12
+ sbbq 32(%rdx), %r12
+ movq 40(%rsi), %r14
+ sbbq 40(%rdx), %r14
+ movq 48(%rsi), %r15
+ sbbq 48(%rdx), %r15
+ movq 64(%rsi), %r8
+ movq 56(%rsi), %rsi
+ sbbq 56(%rdx), %rsi
+ movq %rax, (%rdi)
+ movq %r9, 8(%rdi)
+ movq %r10, 16(%rdi)
+ movq %r11, 24(%rdi)
+ movq %r12, 32(%rdi)
+ movq %r14, 40(%rdi)
+ movq %r15, 48(%rdi)
+ movq %rsi, 56(%rdi)
+ sbbq %r13, %r8
+ movq %r8, 64(%rdi)
+ sbbq $0, %rbx
+ testb $1, %bl
+ je LBB138_2
+## BB#1: ## %carry
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ movq 8(%rcx), %rax
+ adcq %r9, %rax
+ movq %rax, 8(%rdi)
+ movq 16(%rcx), %rax
+ adcq %r10, %rax
+ movq %rax, 16(%rdi)
+ movq 24(%rcx), %rax
+ adcq %r11, %rax
+ movq %rax, 24(%rdi)
+ movq 32(%rcx), %rax
+ adcq %r12, %rax
+ movq %rax, 32(%rdi)
+ movq 40(%rcx), %rax
+ adcq %r14, %rax
+ movq %rax, 40(%rdi)
+ movq 48(%rcx), %rax
+ adcq %r15, %rax
+ movq %rax, 48(%rdi)
+ movq 56(%rcx), %rax
+ adcq %rsi, %rax
+ movq %rax, 56(%rdi)
+ movq 64(%rcx), %rax
+ adcq %r8, %rax
+ movq %rax, 64(%rdi)
+LBB138_2: ## %nocarry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_subNF9Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_subNF9Lbmi2: ## @mcl_fp_subNF9Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r10
+ movq %rdi, %rbx
+ movq 64(%rsi), %r11
+ movdqu (%rdx), %xmm1
+ movdqu 16(%rdx), %xmm2
+ movdqu 32(%rdx), %xmm3
+ movdqu 48(%rdx), %xmm4
+ pshufd $78, %xmm4, %xmm0 ## xmm0 = xmm4[2,3,0,1]
+ movd %xmm0, %r8
+ movdqu (%rsi), %xmm5
+ movdqu 16(%rsi), %xmm6
+ movdqu 32(%rsi), %xmm7
+ movdqu 48(%rsi), %xmm8
+ pshufd $78, %xmm8, %xmm0 ## xmm0 = xmm8[2,3,0,1]
+ movd %xmm0, %rax
+ movd %xmm4, %r9
+ pshufd $78, %xmm3, %xmm0 ## xmm0 = xmm3[2,3,0,1]
+ movd %xmm0, %rdi
+ pshufd $78, %xmm7, %xmm0 ## xmm0 = xmm7[2,3,0,1]
+ movd %xmm3, %rcx
+ pshufd $78, %xmm2, %xmm3 ## xmm3 = xmm2[2,3,0,1]
+ movd %xmm3, %rbp
+ pshufd $78, %xmm6, %xmm3 ## xmm3 = xmm6[2,3,0,1]
+ movd %xmm2, %r13
+ pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1]
+ movd %xmm2, %r12
+ pshufd $78, %xmm5, %xmm2 ## xmm2 = xmm5[2,3,0,1]
+ movd %xmm1, %rsi
+ movd %xmm5, %r15
+ subq %rsi, %r15
+ movd %xmm2, %r14
+ sbbq %r12, %r14
+ movd %xmm6, %r12
+ sbbq %r13, %r12
+ movd %xmm3, %r13
+ sbbq %rbp, %r13
+ movd %xmm7, %rsi
+ sbbq %rcx, %rsi
+ movq %rsi, -16(%rsp) ## 8-byte Spill
+ movd %xmm0, %rcx
+ sbbq %rdi, %rcx
+ movq %rcx, -24(%rsp) ## 8-byte Spill
+ movd %xmm8, %rcx
+ sbbq %r9, %rcx
+ movq %rcx, -32(%rsp) ## 8-byte Spill
+ sbbq %r8, %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ sbbq 64(%rdx), %r11
+ movq %r11, -40(%rsp) ## 8-byte Spill
+ movq %r11, %rdx
+ sarq $63, %rdx
+ movq %rdx, %rbp
+ shldq $1, %r11, %rbp
+ movq 24(%r10), %r9
+ andq %rbp, %r9
+ movq 8(%r10), %rdi
+ andq %rbp, %rdi
+ andq (%r10), %rbp
+ movq 64(%r10), %r11
+ andq %rdx, %r11
+ rorxq $63, %rdx, %rax
+ andq 56(%r10), %rdx
+ movq 48(%r10), %r8
+ andq %rax, %r8
+ movq 40(%r10), %rsi
+ andq %rax, %rsi
+ movq 32(%r10), %rcx
+ andq %rax, %rcx
+ andq 16(%r10), %rax
+ addq %r15, %rbp
+ adcq %r14, %rdi
+ movq %rbp, (%rbx)
+ adcq %r12, %rax
+ movq %rdi, 8(%rbx)
+ adcq %r13, %r9
+ movq %rax, 16(%rbx)
+ movq %r9, 24(%rbx)
+ adcq -16(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 32(%rbx)
+ adcq -24(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 40(%rbx)
+ adcq -32(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, 48(%rbx)
+ adcq -8(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 56(%rbx)
+ adcq -40(%rsp), %r11 ## 8-byte Folded Reload
+ movq %r11, 64(%rbx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_add9Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_add9Lbmi2: ## @mcl_fpDbl_add9Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r15
+ movq 136(%rdx), %rax
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ movq 128(%rdx), %rax
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ movq 120(%rdx), %r10
+ movq 112(%rdx), %r11
+ movq 24(%rsi), %rcx
+ movq 32(%rsi), %r14
+ movq 16(%rdx), %rbp
+ movq (%rdx), %rax
+ movq 8(%rdx), %rbx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rbx
+ adcq 16(%rsi), %rbp
+ adcq 24(%rdx), %rcx
+ adcq 32(%rdx), %r14
+ movq 104(%rdx), %r9
+ movq 96(%rdx), %r13
+ movq %rax, (%rdi)
+ movq 88(%rdx), %r8
+ movq %rbx, 8(%rdi)
+ movq 80(%rdx), %r12
+ movq %rbp, 16(%rdi)
+ movq 40(%rdx), %rax
+ movq %rcx, 24(%rdi)
+ movq 40(%rsi), %rbp
+ adcq %rax, %rbp
+ movq 48(%rdx), %rcx
+ movq %r14, 32(%rdi)
+ movq 48(%rsi), %rax
+ adcq %rcx, %rax
+ movq 56(%rdx), %r14
+ movq %rbp, 40(%rdi)
+ movq 56(%rsi), %rbp
+ adcq %r14, %rbp
+ movq 72(%rdx), %rcx
+ movq 64(%rdx), %rdx
+ movq %rax, 48(%rdi)
+ movq 64(%rsi), %rax
+ adcq %rdx, %rax
+ movq 136(%rsi), %rbx
+ movq %rbp, 56(%rdi)
+ movq 72(%rsi), %rbp
+ adcq %rcx, %rbp
+ movq 128(%rsi), %rcx
+ movq %rax, 64(%rdi)
+ movq 80(%rsi), %rdx
+ adcq %r12, %rdx
+ movq 88(%rsi), %r12
+ adcq %r8, %r12
+ movq 96(%rsi), %r14
+ adcq %r13, %r14
+ movq %r14, -8(%rsp) ## 8-byte Spill
+ movq 104(%rsi), %rax
+ adcq %r9, %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ movq 120(%rsi), %rax
+ movq 112(%rsi), %rsi
+ adcq %r11, %rsi
+ movq %rsi, -24(%rsp) ## 8-byte Spill
+ adcq %r10, %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ adcq -40(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -40(%rsp) ## 8-byte Spill
+ adcq -48(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, -48(%rsp) ## 8-byte Spill
+ sbbq %r9, %r9
+ andl $1, %r9d
+ movq %rbp, %r10
+ subq (%r15), %r10
+ movq %rdx, %r11
+ sbbq 8(%r15), %r11
+ movq %r12, %rbx
+ sbbq 16(%r15), %rbx
+ sbbq 24(%r15), %r14
+ movq -32(%rsp), %r13 ## 8-byte Reload
+ sbbq 32(%r15), %r13
+ movq -24(%rsp), %rsi ## 8-byte Reload
+ sbbq 40(%r15), %rsi
+ movq -16(%rsp), %rax ## 8-byte Reload
+ sbbq 48(%r15), %rax
+ sbbq 56(%r15), %rcx
+ movq -48(%rsp), %r8 ## 8-byte Reload
+ sbbq 64(%r15), %r8
+ sbbq $0, %r9
+ andl $1, %r9d
+ cmovneq %rbp, %r10
+ movq %r10, 72(%rdi)
+ testb %r9b, %r9b
+ cmovneq %rdx, %r11
+ movq %r11, 80(%rdi)
+ cmovneq %r12, %rbx
+ movq %rbx, 88(%rdi)
+ cmovneq -8(%rsp), %r14 ## 8-byte Folded Reload
+ movq %r14, 96(%rdi)
+ cmovneq -32(%rsp), %r13 ## 8-byte Folded Reload
+ movq %r13, 104(%rdi)
+ cmovneq -24(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 112(%rdi)
+ cmovneq -16(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 120(%rdi)
+ cmovneq -40(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 128(%rdi)
+ cmovneq -48(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, 136(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_sub9Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_sub9Lbmi2: ## @mcl_fpDbl_sub9Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r14
+ movq 136(%rdx), %rax
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ movq 128(%rdx), %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ movq 120(%rdx), %rax
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ movq 16(%rsi), %r11
+ movq (%rsi), %r12
+ movq 8(%rsi), %r13
+ xorl %r9d, %r9d
+ subq (%rdx), %r12
+ sbbq 8(%rdx), %r13
+ sbbq 16(%rdx), %r11
+ movq 24(%rsi), %rbx
+ sbbq 24(%rdx), %rbx
+ movq 32(%rsi), %rbp
+ sbbq 32(%rdx), %rbp
+ movq 112(%rdx), %r10
+ movq 104(%rdx), %rcx
+ movq %r12, (%rdi)
+ movq 96(%rdx), %rax
+ movq %r13, 8(%rdi)
+ movq 88(%rdx), %r13
+ movq %r11, 16(%rdi)
+ movq 40(%rdx), %r11
+ movq %rbx, 24(%rdi)
+ movq 40(%rsi), %rbx
+ sbbq %r11, %rbx
+ movq 48(%rdx), %r11
+ movq %rbp, 32(%rdi)
+ movq 48(%rsi), %rbp
+ sbbq %r11, %rbp
+ movq 56(%rdx), %r11
+ movq %rbx, 40(%rdi)
+ movq 56(%rsi), %rbx
+ sbbq %r11, %rbx
+ movq 64(%rdx), %r11
+ movq %rbp, 48(%rdi)
+ movq 64(%rsi), %rbp
+ sbbq %r11, %rbp
+ movq 80(%rdx), %r8
+ movq 72(%rdx), %r11
+ movq %rbx, 56(%rdi)
+ movq 72(%rsi), %r15
+ sbbq %r11, %r15
+ movq 136(%rsi), %rdx
+ movq %rbp, 64(%rdi)
+ movq 80(%rsi), %rbp
+ sbbq %r8, %rbp
+ movq 88(%rsi), %r12
+ sbbq %r13, %r12
+ movq 96(%rsi), %r13
+ sbbq %rax, %r13
+ movq 104(%rsi), %rax
+ sbbq %rcx, %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ movq 112(%rsi), %rax
+ sbbq %r10, %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq 128(%rsi), %rax
+ movq 120(%rsi), %rcx
+ sbbq -40(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -40(%rsp) ## 8-byte Spill
+ sbbq -32(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ sbbq -24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -24(%rsp) ## 8-byte Spill
+ movl $0, %r8d
+ sbbq $0, %r8
+ andl $1, %r8d
+ movq (%r14), %r10
+ cmoveq %r9, %r10
+ testb %r8b, %r8b
+ movq 16(%r14), %r8
+ cmoveq %r9, %r8
+ movq 8(%r14), %rdx
+ cmoveq %r9, %rdx
+ movq 64(%r14), %rbx
+ cmoveq %r9, %rbx
+ movq 56(%r14), %r11
+ cmoveq %r9, %r11
+ movq 48(%r14), %rsi
+ cmoveq %r9, %rsi
+ movq 40(%r14), %rcx
+ cmoveq %r9, %rcx
+ movq 32(%r14), %rax
+ cmoveq %r9, %rax
+ cmovneq 24(%r14), %r9
+ addq %r15, %r10
+ adcq %rbp, %rdx
+ movq %r10, 72(%rdi)
+ adcq %r12, %r8
+ movq %rdx, 80(%rdi)
+ adcq %r13, %r9
+ movq %r8, 88(%rdi)
+ movq %r9, 96(%rdi)
+ adcq -16(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 104(%rdi)
+ adcq -8(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 112(%rdi)
+ adcq -40(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 120(%rdi)
+ adcq -32(%rsp), %r11 ## 8-byte Folded Reload
+ movq %r11, 128(%rdi)
+ adcq -24(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, 136(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+
+.subsections_via_symbols
diff --git a/vendor/github.com/tangerine-network/mcl/src/asm/x86-64mac.s b/vendor/github.com/tangerine-network/mcl/src/asm/x86-64mac.s
new file mode 100644
index 000000000..0dc7014a3
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/src/asm/x86-64mac.s
@@ -0,0 +1,16313 @@
+ .section __TEXT,__text,regular,pure_instructions
+ .macosx_version_min 10, 12
+ .globl _makeNIST_P192L
+ .p2align 4, 0x90
+_makeNIST_P192L: ## @makeNIST_P192L
+## BB#0:
+ movq $-1, %rax
+ movq $-2, %rdx
+ movq $-1, %rcx
+ retq
+
+ .globl _mcl_fpDbl_mod_NIST_P192L
+ .p2align 4, 0x90
+_mcl_fpDbl_mod_NIST_P192L: ## @mcl_fpDbl_mod_NIST_P192L
+## BB#0:
+ pushq %r14
+ pushq %rbx
+ movq 16(%rsi), %r10
+ movq 24(%rsi), %r8
+ movq 40(%rsi), %r9
+ movq 8(%rsi), %rax
+ addq %r9, %rax
+ adcq $0, %r10
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ movq 32(%rsi), %r11
+ movq (%rsi), %r14
+ addq %r8, %r14
+ adcq %r11, %rax
+ adcq %r9, %r10
+ adcq $0, %rcx
+ addq %r9, %r14
+ adcq %r8, %rax
+ adcq %r11, %r10
+ adcq $0, %rcx
+ addq %rcx, %r14
+ adcq %rax, %rcx
+ adcq $0, %r10
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %r14, %rsi
+ addq $1, %rsi
+ movq %rcx, %rdx
+ adcq $1, %rdx
+ movq %r10, %rbx
+ adcq $0, %rbx
+ adcq $-1, %rax
+ andl $1, %eax
+ cmovneq %r14, %rsi
+ movq %rsi, (%rdi)
+ testb %al, %al
+ cmovneq %rcx, %rdx
+ movq %rdx, 8(%rdi)
+ cmovneq %r10, %rbx
+ movq %rbx, 16(%rdi)
+ popq %rbx
+ popq %r14
+ retq
+
+ .globl _mcl_fp_sqr_NIST_P192L
+ .p2align 4, 0x90
+_mcl_fp_sqr_NIST_P192L: ## @mcl_fp_sqr_NIST_P192L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq 16(%rsi), %r11
+ movq (%rsi), %rbx
+ movq 8(%rsi), %rcx
+ movq %r11, %rax
+ mulq %rcx
+ movq %rdx, %rdi
+ movq %rax, %r14
+ movq %rcx, %rax
+ mulq %rcx
+ movq %rdx, %r15
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq %rbx
+ movq %rax, %r13
+ movq %rdx, %rcx
+ addq %rcx, %r12
+ adcq %r14, %r15
+ movq %rdi, %r10
+ adcq $0, %r10
+ movq %r11, %rax
+ mulq %rbx
+ movq %rdx, %r9
+ movq %rax, %rbp
+ movq %rbx, %rax
+ mulq %rbx
+ movq %rax, %r8
+ movq %rdx, %rsi
+ addq %r13, %rsi
+ adcq %rbp, %rcx
+ movq %r9, %rbx
+ adcq $0, %rbx
+ addq %r13, %rsi
+ adcq %r12, %rcx
+ adcq %r15, %rbx
+ adcq $0, %r10
+ movq %r11, %rax
+ mulq %r11
+ addq %r14, %r9
+ adcq %rdi, %rax
+ adcq $0, %rdx
+ addq %rbp, %rcx
+ adcq %rbx, %r9
+ adcq %r10, %rax
+ adcq $0, %rdx
+ addq %rdx, %rsi
+ adcq $0, %rcx
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ addq %r9, %r8
+ adcq %rax, %rsi
+ adcq %rdx, %rcx
+ adcq $0, %rbp
+ addq %rdx, %r8
+ adcq %r9, %rsi
+ adcq %rax, %rcx
+ adcq $0, %rbp
+ addq %rbp, %r8
+ adcq %rsi, %rbp
+ adcq $0, %rcx
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %r8, %rdx
+ addq $1, %rdx
+ movq %rbp, %rsi
+ adcq $1, %rsi
+ movq %rcx, %rdi
+ adcq $0, %rdi
+ adcq $-1, %rax
+ andl $1, %eax
+ cmovneq %r8, %rdx
+ movq -8(%rsp), %rbx ## 8-byte Reload
+ movq %rdx, (%rbx)
+ testb %al, %al
+ cmovneq %rbp, %rsi
+ movq %rsi, 8(%rbx)
+ cmovneq %rcx, %rdi
+ movq %rdi, 16(%rbx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_mulNIST_P192L
+ .p2align 4, 0x90
+_mcl_fp_mulNIST_P192L: ## @mcl_fp_mulNIST_P192L
+## BB#0:
+ pushq %r14
+ pushq %rbx
+ subq $56, %rsp
+ movq %rdi, %r14
+ leaq 8(%rsp), %rdi
+ callq _mcl_fpDbl_mulPre3L
+ movq 24(%rsp), %r9
+ movq 32(%rsp), %r8
+ movq 48(%rsp), %rdi
+ movq 16(%rsp), %rbx
+ addq %rdi, %rbx
+ adcq $0, %r9
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ movq 40(%rsp), %rsi
+ movq 8(%rsp), %rdx
+ addq %r8, %rdx
+ adcq %rsi, %rbx
+ adcq %rdi, %r9
+ adcq $0, %rcx
+ addq %rdi, %rdx
+ adcq %r8, %rbx
+ adcq %rsi, %r9
+ adcq $0, %rcx
+ addq %rcx, %rdx
+ adcq %rbx, %rcx
+ adcq $0, %r9
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq %rdx, %rdi
+ addq $1, %rdi
+ movq %rcx, %rbx
+ adcq $1, %rbx
+ movq %r9, %rax
+ adcq $0, %rax
+ adcq $-1, %rsi
+ andl $1, %esi
+ cmovneq %rdx, %rdi
+ movq %rdi, (%r14)
+ testb %sil, %sil
+ cmovneq %rcx, %rbx
+ movq %rbx, 8(%r14)
+ cmovneq %r9, %rax
+ movq %rax, 16(%r14)
+ addq $56, %rsp
+ popq %rbx
+ popq %r14
+ retq
+
+ .globl _mcl_fpDbl_mod_NIST_P521L
+ .p2align 4, 0x90
+_mcl_fpDbl_mod_NIST_P521L: ## @mcl_fpDbl_mod_NIST_P521L
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 120(%rsi), %r9
+ movq 128(%rsi), %r14
+ movq %r14, %r8
+ shldq $55, %r9, %r8
+ movq 112(%rsi), %r10
+ shldq $55, %r10, %r9
+ movq 104(%rsi), %r11
+ shldq $55, %r11, %r10
+ movq 96(%rsi), %r15
+ shldq $55, %r15, %r11
+ movq 88(%rsi), %r12
+ shldq $55, %r12, %r15
+ movq 80(%rsi), %rcx
+ shldq $55, %rcx, %r12
+ movq 64(%rsi), %rbx
+ movq 72(%rsi), %rax
+ shldq $55, %rax, %rcx
+ shrq $9, %r14
+ shldq $55, %rbx, %rax
+ ## kill: %EBX<def> %EBX<kill> %RBX<kill> %RBX<def>
+ andl $511, %ebx ## imm = 0x1FF
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rcx
+ adcq 16(%rsi), %r12
+ adcq 24(%rsi), %r15
+ adcq 32(%rsi), %r11
+ adcq 40(%rsi), %r10
+ adcq 48(%rsi), %r9
+ adcq 56(%rsi), %r8
+ adcq %r14, %rbx
+ movl %ebx, %esi
+ shrl $9, %esi
+ andl $1, %esi
+ addq %rax, %rsi
+ adcq $0, %rcx
+ adcq $0, %r12
+ adcq $0, %r15
+ adcq $0, %r11
+ adcq $0, %r10
+ adcq $0, %r9
+ adcq $0, %r8
+ adcq $0, %rbx
+ movq %rsi, %rax
+ andq %r12, %rax
+ andq %r15, %rax
+ andq %r11, %rax
+ andq %r10, %rax
+ andq %r9, %rax
+ andq %r8, %rax
+ movq %rbx, %rdx
+ orq $-512, %rdx ## imm = 0xFE00
+ andq %rax, %rdx
+ andq %rcx, %rdx
+ cmpq $-1, %rdx
+ je LBB4_1
+## BB#3: ## %nonzero
+ movq %rsi, (%rdi)
+ movq %rcx, 8(%rdi)
+ movq %r12, 16(%rdi)
+ movq %r15, 24(%rdi)
+ movq %r11, 32(%rdi)
+ movq %r10, 40(%rdi)
+ movq %r9, 48(%rdi)
+ movq %r8, 56(%rdi)
+ andl $511, %ebx ## imm = 0x1FF
+ movq %rbx, 64(%rdi)
+ jmp LBB4_2
+LBB4_1: ## %zero
+ movq $0, 64(%rdi)
+ movq $0, 56(%rdi)
+ movq $0, 48(%rdi)
+ movq $0, 40(%rdi)
+ movq $0, 32(%rdi)
+ movq $0, 24(%rdi)
+ movq $0, 16(%rdi)
+ movq $0, 8(%rdi)
+ movq $0, (%rdi)
+LBB4_2: ## %zero
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_mulUnitPre1L
+ .p2align 4, 0x90
+_mcl_fp_mulUnitPre1L: ## @mcl_fp_mulUnitPre1L
+## BB#0:
+ movq %rdx, %rax
+ mulq (%rsi)
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ retq
+
+ .globl _mcl_fpDbl_mulPre1L
+ .p2align 4, 0x90
+_mcl_fpDbl_mulPre1L: ## @mcl_fpDbl_mulPre1L
+## BB#0:
+ movq (%rdx), %rax
+ mulq (%rsi)
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ retq
+
+ .globl _mcl_fpDbl_sqrPre1L
+ .p2align 4, 0x90
+_mcl_fpDbl_sqrPre1L: ## @mcl_fpDbl_sqrPre1L
+## BB#0:
+ movq (%rsi), %rax
+ mulq %rax
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ retq
+
+ .globl _mcl_fp_mont1L
+ .p2align 4, 0x90
+_mcl_fp_mont1L: ## @mcl_fp_mont1L
+## BB#0:
+ movq (%rsi), %rax
+ mulq (%rdx)
+ movq %rax, %rsi
+ movq %rdx, %r8
+ movq -8(%rcx), %rax
+ imulq %rsi, %rax
+ movq (%rcx), %rcx
+ mulq %rcx
+ addq %rsi, %rax
+ adcq %r8, %rdx
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rdx, %rsi
+ subq %rcx, %rsi
+ sbbq $0, %rax
+ testb $1, %al
+ cmovneq %rdx, %rsi
+ movq %rsi, (%rdi)
+ retq
+
+ .globl _mcl_fp_montNF1L
+ .p2align 4, 0x90
+_mcl_fp_montNF1L: ## @mcl_fp_montNF1L
+## BB#0:
+ movq (%rsi), %rax
+ mulq (%rdx)
+ movq %rax, %rsi
+ movq %rdx, %r8
+ movq -8(%rcx), %rax
+ imulq %rsi, %rax
+ movq (%rcx), %rcx
+ mulq %rcx
+ addq %rsi, %rax
+ adcq %r8, %rdx
+ movq %rdx, %rax
+ subq %rcx, %rax
+ cmovsq %rdx, %rax
+ movq %rax, (%rdi)
+ retq
+
+ .globl _mcl_fp_montRed1L
+ .p2align 4, 0x90
+_mcl_fp_montRed1L: ## @mcl_fp_montRed1L
+## BB#0:
+ movq (%rsi), %rcx
+ movq -8(%rdx), %rax
+ imulq %rcx, %rax
+ movq (%rdx), %r8
+ mulq %r8
+ addq %rcx, %rax
+ adcq 8(%rsi), %rdx
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rdx, %rcx
+ subq %r8, %rcx
+ sbbq $0, %rax
+ testb $1, %al
+ cmovneq %rdx, %rcx
+ movq %rcx, (%rdi)
+ retq
+
+ .globl _mcl_fp_addPre1L
+ .p2align 4, 0x90
+_mcl_fp_addPre1L: ## @mcl_fp_addPre1L
+## BB#0:
+ movq (%rdx), %rax
+ addq (%rsi), %rax
+ movq %rax, (%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+
+ .globl _mcl_fp_subPre1L
+ .p2align 4, 0x90
+_mcl_fp_subPre1L: ## @mcl_fp_subPre1L
+## BB#0:
+ movq (%rsi), %rcx
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ movq %rcx, (%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+
+ .globl _mcl_fp_shr1_1L
+ .p2align 4, 0x90
+_mcl_fp_shr1_1L: ## @mcl_fp_shr1_1L
+## BB#0:
+ movq (%rsi), %rax
+ shrq %rax
+ movq %rax, (%rdi)
+ retq
+
+ .globl _mcl_fp_add1L
+ .p2align 4, 0x90
+_mcl_fp_add1L: ## @mcl_fp_add1L
+## BB#0:
+ movq (%rdx), %rax
+ addq (%rsi), %rax
+ movq %rax, (%rdi)
+ sbbq %rdx, %rdx
+ andl $1, %edx
+ subq (%rcx), %rax
+ sbbq $0, %rdx
+ testb $1, %dl
+ jne LBB14_2
+## BB#1: ## %nocarry
+ movq %rax, (%rdi)
+LBB14_2: ## %carry
+ retq
+
+ .globl _mcl_fp_addNF1L
+ .p2align 4, 0x90
+_mcl_fp_addNF1L: ## @mcl_fp_addNF1L
+## BB#0:
+ movq (%rdx), %rax
+ addq (%rsi), %rax
+ movq %rax, %rdx
+ subq (%rcx), %rdx
+ cmovsq %rax, %rdx
+ movq %rdx, (%rdi)
+ retq
+
+ .globl _mcl_fp_sub1L
+ .p2align 4, 0x90
+_mcl_fp_sub1L: ## @mcl_fp_sub1L
+## BB#0:
+ movq (%rsi), %rax
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ movq %rax, (%rdi)
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne LBB16_2
+## BB#1: ## %nocarry
+ retq
+LBB16_2: ## %carry
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ retq
+
+ .globl _mcl_fp_subNF1L
+ .p2align 4, 0x90
+_mcl_fp_subNF1L: ## @mcl_fp_subNF1L
+## BB#0:
+ movq (%rsi), %rax
+ subq (%rdx), %rax
+ movq %rax, %rdx
+ sarq $63, %rdx
+ andq (%rcx), %rdx
+ addq %rax, %rdx
+ movq %rdx, (%rdi)
+ retq
+
+ .globl _mcl_fpDbl_add1L
+ .p2align 4, 0x90
+_mcl_fpDbl_add1L: ## @mcl_fpDbl_add1L
+## BB#0:
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ movq %rax, (%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rdx, %rsi
+ subq (%rcx), %rsi
+ sbbq $0, %rax
+ testb $1, %al
+ cmovneq %rdx, %rsi
+ movq %rsi, 8(%rdi)
+ retq
+
+ .globl _mcl_fpDbl_sub1L
+ .p2align 4, 0x90
+_mcl_fpDbl_sub1L: ## @mcl_fpDbl_sub1L
+## BB#0:
+ movq (%rsi), %rax
+ movq 8(%rsi), %r8
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r8
+ movq %rax, (%rdi)
+ movl $0, %eax
+ sbbq $0, %rax
+ testb $1, %al
+ cmovneq (%rcx), %rsi
+ addq %r8, %rsi
+ movq %rsi, 8(%rdi)
+ retq
+
+ .globl _mcl_fp_mulUnitPre2L
+ .p2align 4, 0x90
+_mcl_fp_mulUnitPre2L: ## @mcl_fp_mulUnitPre2L
+## BB#0:
+ movq %rdx, %r8
+ movq %r8, %rax
+ mulq 8(%rsi)
+ movq %rdx, %rcx
+ movq %rax, %r9
+ movq %r8, %rax
+ mulq (%rsi)
+ movq %rax, (%rdi)
+ addq %r9, %rdx
+ movq %rdx, 8(%rdi)
+ adcq $0, %rcx
+ movq %rcx, 16(%rdi)
+ retq
+
+ .globl _mcl_fpDbl_mulPre2L
+ .p2align 4, 0x90
+_mcl_fpDbl_mulPre2L: ## @mcl_fpDbl_mulPre2L
+## BB#0:
+ pushq %r14
+ pushq %rbx
+ movq %rdx, %r10
+ movq (%rsi), %r8
+ movq 8(%rsi), %r11
+ movq (%r10), %rcx
+ movq %r8, %rax
+ mulq %rcx
+ movq %rdx, %r9
+ movq %rax, (%rdi)
+ movq %r11, %rax
+ mulq %rcx
+ movq %rdx, %r14
+ movq %rax, %rsi
+ addq %r9, %rsi
+ adcq $0, %r14
+ movq 8(%r10), %rbx
+ movq %r11, %rax
+ mulq %rbx
+ movq %rdx, %r9
+ movq %rax, %rcx
+ movq %r8, %rax
+ mulq %rbx
+ addq %rsi, %rax
+ movq %rax, 8(%rdi)
+ adcq %r14, %rcx
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %rdx, %rcx
+ movq %rcx, 16(%rdi)
+ adcq %r9, %rax
+ movq %rax, 24(%rdi)
+ popq %rbx
+ popq %r14
+ retq
+
+ .globl _mcl_fpDbl_sqrPre2L
+ .p2align 4, 0x90
+_mcl_fpDbl_sqrPre2L: ## @mcl_fpDbl_sqrPre2L
+## BB#0:
+ movq (%rsi), %rcx
+ movq 8(%rsi), %r8
+ movq %rcx, %rax
+ mulq %rcx
+ movq %rdx, %rsi
+ movq %rax, (%rdi)
+ movq %r8, %rax
+ mulq %rcx
+ movq %rdx, %r9
+ movq %rax, %r10
+ addq %r10, %rsi
+ movq %r9, %rcx
+ adcq $0, %rcx
+ movq %r8, %rax
+ mulq %r8
+ addq %r10, %rsi
+ movq %rsi, 8(%rdi)
+ adcq %rcx, %rax
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %r9, %rax
+ movq %rax, 16(%rdi)
+ adcq %rdx, %rcx
+ movq %rcx, 24(%rdi)
+ retq
+
+ .globl _mcl_fp_mont2L
+ .p2align 4, 0x90
+_mcl_fp_mont2L: ## @mcl_fp_mont2L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq (%rsi), %r8
+ movq 8(%rsi), %r11
+ movq (%rdx), %rsi
+ movq 8(%rdx), %r9
+ movq %r11, %rax
+ mulq %rsi
+ movq %rdx, %r15
+ movq %rax, %r10
+ movq %r8, %rax
+ mulq %rsi
+ movq %rax, %r14
+ movq %rdx, %r13
+ addq %r10, %r13
+ adcq $0, %r15
+ movq -8(%rcx), %r10
+ movq (%rcx), %rbp
+ movq %r14, %rsi
+ imulq %r10, %rsi
+ movq 8(%rcx), %rdi
+ movq %rsi, %rax
+ mulq %rdi
+ movq %rdx, %rcx
+ movq %rax, %r12
+ movq %rsi, %rax
+ mulq %rbp
+ movq %rdx, %rbx
+ addq %r12, %rbx
+ adcq $0, %rcx
+ addq %r14, %rax
+ adcq %r13, %rbx
+ adcq %r15, %rcx
+ sbbq %r15, %r15
+ andl $1, %r15d
+ movq %r9, %rax
+ mulq %r11
+ movq %rdx, %r14
+ movq %rax, %r11
+ movq %r9, %rax
+ mulq %r8
+ movq %rax, %r8
+ movq %rdx, %rsi
+ addq %r11, %rsi
+ adcq $0, %r14
+ addq %rbx, %r8
+ adcq %rcx, %rsi
+ adcq %r15, %r14
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ imulq %r8, %r10
+ movq %r10, %rax
+ mulq %rdi
+ movq %rdx, %rcx
+ movq %rax, %r9
+ movq %r10, %rax
+ mulq %rbp
+ addq %r9, %rdx
+ adcq $0, %rcx
+ addq %r8, %rax
+ adcq %rsi, %rdx
+ adcq %r14, %rcx
+ adcq $0, %rbx
+ movq %rdx, %rax
+ subq %rbp, %rax
+ movq %rcx, %rsi
+ sbbq %rdi, %rsi
+ sbbq $0, %rbx
+ andl $1, %ebx
+ cmovneq %rcx, %rsi
+ testb %bl, %bl
+ cmovneq %rdx, %rax
+ movq -8(%rsp), %rcx ## 8-byte Reload
+ movq %rax, (%rcx)
+ movq %rsi, 8(%rcx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montNF2L
+ .p2align 4, 0x90
+_mcl_fp_montNF2L: ## @mcl_fp_montNF2L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq (%rsi), %r8
+ movq 8(%rsi), %r11
+ movq (%rdx), %rbp
+ movq 8(%rdx), %r9
+ movq %r8, %rax
+ mulq %rbp
+ movq %rax, %rsi
+ movq %rdx, %r14
+ movq -8(%rcx), %r10
+ movq (%rcx), %r15
+ movq %rsi, %rbx
+ imulq %r10, %rbx
+ movq 8(%rcx), %rdi
+ movq %rbx, %rax
+ mulq %rdi
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ movq %rax, %r13
+ movq %rbx, %rax
+ mulq %r15
+ movq %rdx, %r12
+ movq %rax, %rbx
+ movq %r11, %rax
+ mulq %rbp
+ movq %rdx, %rcx
+ movq %rax, %rbp
+ addq %r14, %rbp
+ adcq $0, %rcx
+ addq %rsi, %rbx
+ adcq %r13, %rbp
+ adcq $0, %rcx
+ addq %r12, %rbp
+ adcq -16(%rsp), %rcx ## 8-byte Folded Reload
+ movq %r9, %rax
+ mulq %r11
+ movq %rdx, %rsi
+ movq %rax, %r11
+ movq %r9, %rax
+ mulq %r8
+ movq %rax, %r8
+ movq %rdx, %rbx
+ addq %r11, %rbx
+ adcq $0, %rsi
+ addq %rbp, %r8
+ adcq %rcx, %rbx
+ adcq $0, %rsi
+ imulq %r8, %r10
+ movq %r10, %rax
+ mulq %rdi
+ movq %rdx, %rcx
+ movq %rax, %rbp
+ movq %r10, %rax
+ mulq %r15
+ addq %r8, %rax
+ adcq %rbx, %rbp
+ adcq $0, %rsi
+ addq %rdx, %rbp
+ adcq %rcx, %rsi
+ movq %rbp, %rax
+ subq %r15, %rax
+ movq %rsi, %rcx
+ sbbq %rdi, %rcx
+ cmovsq %rbp, %rax
+ movq -8(%rsp), %rdx ## 8-byte Reload
+ movq %rax, (%rdx)
+ cmovsq %rsi, %rcx
+ movq %rcx, 8(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montRed2L
+ .p2align 4, 0x90
+_mcl_fp_montRed2L: ## @mcl_fp_montRed2L
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq -8(%rdx), %r9
+ movq (%rdx), %r11
+ movq (%rsi), %rbx
+ movq %rbx, %rcx
+ imulq %r9, %rcx
+ movq 8(%rdx), %r14
+ movq %rcx, %rax
+ mulq %r14
+ movq %rdx, %r8
+ movq %rax, %r10
+ movq %rcx, %rax
+ mulq %r11
+ movq %rdx, %rcx
+ addq %r10, %rcx
+ adcq $0, %r8
+ movq 24(%rsi), %r15
+ addq %rbx, %rax
+ adcq 8(%rsi), %rcx
+ adcq 16(%rsi), %r8
+ adcq $0, %r15
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ imulq %rcx, %r9
+ movq %r9, %rax
+ mulq %r14
+ movq %rdx, %rsi
+ movq %rax, %r10
+ movq %r9, %rax
+ mulq %r11
+ addq %r10, %rdx
+ adcq $0, %rsi
+ addq %rcx, %rax
+ adcq %r8, %rdx
+ adcq %r15, %rsi
+ adcq $0, %rbx
+ movq %rdx, %rax
+ subq %r11, %rax
+ movq %rsi, %rcx
+ sbbq %r14, %rcx
+ sbbq $0, %rbx
+ andl $1, %ebx
+ cmovneq %rsi, %rcx
+ testb %bl, %bl
+ cmovneq %rdx, %rax
+ movq %rax, (%rdi)
+ movq %rcx, 8(%rdi)
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_addPre2L
+ .p2align 4, 0x90
+_mcl_fp_addPre2L: ## @mcl_fp_addPre2L
+## BB#0:
+ movq (%rdx), %rax
+ movq 8(%rdx), %rcx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rcx
+ movq %rax, (%rdi)
+ movq %rcx, 8(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+
+ .globl _mcl_fp_subPre2L
+ .p2align 4, 0x90
+_mcl_fp_subPre2L: ## @mcl_fp_subPre2L
+## BB#0:
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ sbbq 8(%rdx), %rsi
+ movq %rcx, (%rdi)
+ movq %rsi, 8(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+
+ .globl _mcl_fp_shr1_2L
+ .p2align 4, 0x90
+_mcl_fp_shr1_2L: ## @mcl_fp_shr1_2L
+## BB#0:
+ movq (%rsi), %rax
+ movq 8(%rsi), %rcx
+ shrdq $1, %rcx, %rax
+ movq %rax, (%rdi)
+ shrq %rcx
+ movq %rcx, 8(%rdi)
+ retq
+
+ .globl _mcl_fp_add2L
+ .p2align 4, 0x90
+_mcl_fp_add2L: ## @mcl_fp_add2L
+## BB#0:
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne LBB29_2
+## BB#1: ## %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+LBB29_2: ## %carry
+ retq
+
+ .globl _mcl_fp_addNF2L
+ .p2align 4, 0x90
+_mcl_fp_addNF2L: ## @mcl_fp_addNF2L
+## BB#0:
+ movq (%rdx), %rax
+ movq 8(%rdx), %r8
+ addq (%rsi), %rax
+ adcq 8(%rsi), %r8
+ movq %rax, %rsi
+ subq (%rcx), %rsi
+ movq %r8, %rdx
+ sbbq 8(%rcx), %rdx
+ testq %rdx, %rdx
+ cmovsq %rax, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r8, %rdx
+ movq %rdx, 8(%rdi)
+ retq
+
+ .globl _mcl_fp_sub2L
+ .p2align 4, 0x90
+_mcl_fp_sub2L: ## @mcl_fp_sub2L
+## BB#0:
+ movq (%rsi), %rax
+ movq 8(%rsi), %r8
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r8
+ movq %rax, (%rdi)
+ movq %r8, 8(%rdi)
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne LBB31_2
+## BB#1: ## %nocarry
+ retq
+LBB31_2: ## %carry
+ movq 8(%rcx), %rdx
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %r8, %rdx
+ movq %rdx, 8(%rdi)
+ retq
+
+ .globl _mcl_fp_subNF2L
+ .p2align 4, 0x90
+_mcl_fp_subNF2L: ## @mcl_fp_subNF2L
+## BB#0:
+ movq (%rsi), %r8
+ movq 8(%rsi), %rsi
+ subq (%rdx), %r8
+ sbbq 8(%rdx), %rsi
+ movq %rsi, %rdx
+ sarq $63, %rdx
+ movq 8(%rcx), %rax
+ andq %rdx, %rax
+ andq (%rcx), %rdx
+ addq %r8, %rdx
+ movq %rdx, (%rdi)
+ adcq %rsi, %rax
+ movq %rax, 8(%rdi)
+ retq
+
+ .globl _mcl_fpDbl_add2L
+ .p2align 4, 0x90
+_mcl_fpDbl_add2L: ## @mcl_fpDbl_add2L
+## BB#0:
+ movq 24(%rdx), %r8
+ movq 24(%rsi), %r9
+ movq 16(%rdx), %r10
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r10
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ adcq %r8, %r9
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %r10, %rdx
+ subq (%rcx), %rdx
+ movq %r9, %rsi
+ sbbq 8(%rcx), %rsi
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %r10, %rdx
+ movq %rdx, 16(%rdi)
+ testb %al, %al
+ cmovneq %r9, %rsi
+ movq %rsi, 24(%rdi)
+ retq
+
+ .globl _mcl_fpDbl_sub2L
+ .p2align 4, 0x90
+_mcl_fpDbl_sub2L: ## @mcl_fpDbl_sub2L
+## BB#0:
+ movq 24(%rdx), %r8
+ movq 24(%rsi), %r9
+ movq 16(%rsi), %r10
+ movq (%rsi), %r11
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %r11
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %r10
+ movq %r11, (%rdi)
+ movq %rsi, 8(%rdi)
+ sbbq %r8, %r9
+ movl $0, %edx
+ sbbq $0, %rdx
+ andl $1, %edx
+ movq (%rcx), %rsi
+ cmoveq %rax, %rsi
+ testb %dl, %dl
+ cmovneq 8(%rcx), %rax
+ addq %r10, %rsi
+ movq %rsi, 16(%rdi)
+ adcq %r9, %rax
+ movq %rax, 24(%rdi)
+ retq
+
+ .globl _mcl_fp_mulUnitPre3L
+ .p2align 4, 0x90
+_mcl_fp_mulUnitPre3L: ## @mcl_fp_mulUnitPre3L
+## BB#0:
+ movq %rdx, %rcx
+ movq %rcx, %rax
+ mulq 16(%rsi)
+ movq %rdx, %r8
+ movq %rax, %r9
+ movq %rcx, %rax
+ mulq 8(%rsi)
+ movq %rdx, %r10
+ movq %rax, %r11
+ movq %rcx, %rax
+ mulq (%rsi)
+ movq %rax, (%rdi)
+ addq %r11, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r9, %r10
+ movq %r10, 16(%rdi)
+ adcq $0, %r8
+ movq %r8, 24(%rdi)
+ retq
+
+ .globl _mcl_fpDbl_mulPre3L
+ .p2align 4, 0x90
+_mcl_fpDbl_mulPre3L: ## @mcl_fpDbl_mulPre3L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %r10
+ movq (%rsi), %r8
+ movq 8(%rsi), %r9
+ movq (%r10), %rbx
+ movq %r8, %rax
+ mulq %rbx
+ movq %rdx, %rcx
+ movq 16(%rsi), %r11
+ movq %rax, (%rdi)
+ movq %r11, %rax
+ mulq %rbx
+ movq %rdx, %r14
+ movq %rax, %rsi
+ movq %r9, %rax
+ mulq %rbx
+ movq %rdx, %r15
+ movq %rax, %rbx
+ addq %rcx, %rbx
+ adcq %rsi, %r15
+ adcq $0, %r14
+ movq 8(%r10), %rcx
+ movq %r11, %rax
+ mulq %rcx
+ movq %rdx, %r12
+ movq %rax, %rbp
+ movq %r9, %rax
+ mulq %rcx
+ movq %rdx, %r13
+ movq %rax, %rsi
+ movq %r8, %rax
+ mulq %rcx
+ addq %rbx, %rax
+ movq %rax, 8(%rdi)
+ adcq %r15, %rsi
+ adcq %r14, %rbp
+ sbbq %r14, %r14
+ andl $1, %r14d
+ addq %rdx, %rsi
+ adcq %r13, %rbp
+ adcq %r12, %r14
+ movq 16(%r10), %r15
+ movq %r11, %rax
+ mulq %r15
+ movq %rdx, %r10
+ movq %rax, %rbx
+ movq %r9, %rax
+ mulq %r15
+ movq %rdx, %r9
+ movq %rax, %rcx
+ movq %r8, %rax
+ mulq %r15
+ addq %rsi, %rax
+ movq %rax, 16(%rdi)
+ adcq %rbp, %rcx
+ adcq %r14, %rbx
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %rdx, %rcx
+ movq %rcx, 24(%rdi)
+ adcq %r9, %rbx
+ movq %rbx, 32(%rdi)
+ adcq %r10, %rax
+ movq %rax, 40(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_sqrPre3L
+ .p2align 4, 0x90
+_mcl_fpDbl_sqrPre3L: ## @mcl_fpDbl_sqrPre3L
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 16(%rsi), %r10
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ movq %rcx, %rax
+ mulq %rcx
+ movq %rdx, %rbx
+ movq %rax, (%rdi)
+ movq %r10, %rax
+ mulq %rcx
+ movq %rdx, %r8
+ movq %rax, %r11
+ movq %rsi, %rax
+ mulq %rcx
+ movq %rdx, %r14
+ movq %rax, %r12
+ addq %r12, %rbx
+ movq %r14, %r13
+ adcq %r11, %r13
+ movq %r8, %rcx
+ adcq $0, %rcx
+ movq %r10, %rax
+ mulq %rsi
+ movq %rdx, %r9
+ movq %rax, %r15
+ movq %rsi, %rax
+ mulq %rsi
+ movq %rax, %rsi
+ addq %r12, %rbx
+ movq %rbx, 8(%rdi)
+ adcq %r13, %rsi
+ adcq %r15, %rcx
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ addq %r14, %rsi
+ adcq %rdx, %rcx
+ adcq %r9, %rbx
+ movq %r10, %rax
+ mulq %r10
+ addq %r11, %rsi
+ movq %rsi, 16(%rdi)
+ adcq %r15, %rcx
+ adcq %rbx, %rax
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ addq %r8, %rcx
+ movq %rcx, 24(%rdi)
+ adcq %r9, %rax
+ movq %rax, 32(%rdi)
+ adcq %rdx, %rsi
+ movq %rsi, 40(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_mont3L
+ .p2align 4, 0x90
+_mcl_fp_mont3L: ## @mcl_fp_mont3L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq 16(%rsi), %r10
+ movq (%rdx), %rdi
+ movq %rdx, %r11
+ movq %r11, -16(%rsp) ## 8-byte Spill
+ movq %r10, %rax
+ movq %r10, -24(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %rbx
+ movq %rdx, %r15
+ movq (%rsi), %rbp
+ movq %rbp, -64(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rdx, %r12
+ movq %rax, %rsi
+ movq %rbp, %rax
+ mulq %rdi
+ movq %rax, %r8
+ movq %rdx, %r13
+ addq %rsi, %r13
+ adcq %rbx, %r12
+ adcq $0, %r15
+ movq -8(%rcx), %r14
+ movq %r8, %rbp
+ imulq %r14, %rbp
+ movq 16(%rcx), %rdx
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdx
+ movq %rax, %r9
+ movq %rdx, %rbx
+ movq (%rcx), %rdi
+ movq %rdi, -40(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, -48(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq %rcx
+ movq %rdx, %rsi
+ movq %rax, %rcx
+ movq %rbp, %rax
+ mulq %rdi
+ movq %rdx, %rbp
+ addq %rcx, %rbp
+ adcq %r9, %rsi
+ adcq $0, %rbx
+ addq %r8, %rax
+ adcq %r13, %rbp
+ movq 8(%r11), %rcx
+ adcq %r12, %rsi
+ adcq %r15, %rbx
+ sbbq %rdi, %rdi
+ andl $1, %edi
+ movq %rcx, %rax
+ mulq %r10
+ movq %rdx, %r15
+ movq %rax, %r8
+ movq %rcx, %rax
+ movq -32(%rsp), %r10 ## 8-byte Reload
+ mulq %r10
+ movq %rdx, %r12
+ movq %rax, %r9
+ movq %rcx, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r13
+ movq %rdx, %rcx
+ addq %r9, %rcx
+ adcq %r8, %r12
+ adcq $0, %r15
+ addq %rbp, %r13
+ adcq %rsi, %rcx
+ adcq %rbx, %r12
+ adcq %rdi, %r15
+ sbbq %r11, %r11
+ andl $1, %r11d
+ movq %r13, %rdi
+ imulq %r14, %rdi
+ movq %rdi, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r8
+ movq %rdi, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r9
+ movq %rdi, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ addq %r9, %rbp
+ adcq %r8, %rsi
+ adcq $0, %rbx
+ addq %r13, %rax
+ adcq %rcx, %rbp
+ adcq %r12, %rsi
+ adcq %r15, %rbx
+ adcq $0, %r11
+ movq -16(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rcx
+ movq %rcx, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r15
+ movq %rcx, %rax
+ mulq %r10
+ movq %rdx, %r10
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r9
+ movq %rdx, %rcx
+ addq %rdi, %rcx
+ adcq %r15, %r10
+ adcq $0, %r8
+ addq %rbp, %r9
+ adcq %rsi, %rcx
+ adcq %rbx, %r10
+ adcq %r11, %r8
+ sbbq %rdi, %rdi
+ andl $1, %edi
+ imulq %r9, %r14
+ movq %r14, %rax
+ movq -56(%rsp), %r15 ## 8-byte Reload
+ mulq %r15
+ movq %rdx, %rbx
+ movq %rax, %r11
+ movq %r14, %rax
+ movq -48(%rsp), %r12 ## 8-byte Reload
+ mulq %r12
+ movq %rdx, %rsi
+ movq %rax, %r13
+ movq %r14, %rax
+ movq -40(%rsp), %rbp ## 8-byte Reload
+ mulq %rbp
+ addq %r13, %rdx
+ adcq %r11, %rsi
+ adcq $0, %rbx
+ addq %r9, %rax
+ adcq %rcx, %rdx
+ adcq %r10, %rsi
+ adcq %r8, %rbx
+ adcq $0, %rdi
+ movq %rdx, %rax
+ subq %rbp, %rax
+ movq %rsi, %rcx
+ sbbq %r12, %rcx
+ movq %rbx, %rbp
+ sbbq %r15, %rbp
+ sbbq $0, %rdi
+ andl $1, %edi
+ cmovneq %rbx, %rbp
+ testb %dil, %dil
+ cmovneq %rdx, %rax
+ movq -8(%rsp), %rdx ## 8-byte Reload
+ movq %rax, (%rdx)
+ cmovneq %rsi, %rcx
+ movq %rcx, 8(%rdx)
+ movq %rbp, 16(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montNF3L
+ .p2align 4, 0x90
+_mcl_fp_montNF3L: ## @mcl_fp_montNF3L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %r10
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq 16(%rsi), %r11
+ movq (%r10), %rbp
+ movq %r10, -16(%rsp) ## 8-byte Spill
+ movq %r11, %rax
+ movq %r11, -24(%rsp) ## 8-byte Spill
+ mulq %rbp
+ movq %rax, %r14
+ movq %rdx, %r15
+ movq (%rsi), %rbx
+ movq %rbx, -48(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ mulq %rbp
+ movq %rdx, %rdi
+ movq %rax, %r8
+ movq %rbx, %rax
+ mulq %rbp
+ movq %rax, %r13
+ movq %rdx, %rbp
+ addq %r8, %rbp
+ adcq %r14, %rdi
+ adcq $0, %r15
+ movq -8(%rcx), %r14
+ movq %r13, %rbx
+ imulq %r14, %rbx
+ movq 16(%rcx), %rdx
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq %rdx
+ movq %rax, %r12
+ movq %rdx, %r8
+ movq (%rcx), %rsi
+ movq %rsi, -32(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, -40(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq %rcx
+ movq %rdx, %r9
+ movq %rax, %rcx
+ movq %rbx, %rax
+ mulq %rsi
+ addq %r13, %rax
+ adcq %rbp, %rcx
+ adcq %rdi, %r12
+ adcq $0, %r15
+ addq %rdx, %rcx
+ movq 8(%r10), %rbp
+ adcq %r9, %r12
+ adcq %r8, %r15
+ movq %rbp, %rax
+ mulq %r11
+ movq %rdx, %rsi
+ movq %rax, %r8
+ movq %rbp, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r9
+ movq %rbp, %rax
+ movq -48(%rsp), %r10 ## 8-byte Reload
+ mulq %r10
+ movq %rax, %r13
+ movq %rdx, %rbp
+ addq %r9, %rbp
+ adcq %r8, %rbx
+ adcq $0, %rsi
+ addq %rcx, %r13
+ adcq %r12, %rbp
+ adcq %r15, %rbx
+ adcq $0, %rsi
+ movq %r13, %rcx
+ imulq %r14, %rcx
+ movq %rcx, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r15
+ movq %rcx, %rax
+ movq -40(%rsp), %rdi ## 8-byte Reload
+ mulq %rdi
+ movq %rdx, %r9
+ movq %rax, %r12
+ movq %rcx, %rax
+ movq -32(%rsp), %r11 ## 8-byte Reload
+ mulq %r11
+ addq %r13, %rax
+ adcq %rbp, %r12
+ adcq %rbx, %r15
+ adcq $0, %rsi
+ addq %rdx, %r12
+ adcq %r9, %r15
+ adcq %r8, %rsi
+ movq -16(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rbx
+ movq %rbx, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r8
+ movq %rbx, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r9
+ movq %rbx, %rax
+ mulq %r10
+ movq %rax, %r10
+ movq %rdx, %rbx
+ addq %r9, %rbx
+ adcq %r8, %rcx
+ adcq $0, %rbp
+ addq %r12, %r10
+ adcq %r15, %rbx
+ adcq %rsi, %rcx
+ adcq $0, %rbp
+ imulq %r10, %r14
+ movq %r14, %rax
+ movq -56(%rsp), %r15 ## 8-byte Reload
+ mulq %r15
+ movq %rdx, %r8
+ movq %rax, %rsi
+ movq %r14, %rax
+ movq %rdi, %r12
+ mulq %r12
+ movq %rdx, %r9
+ movq %rax, %rdi
+ movq %r14, %rax
+ mulq %r11
+ addq %r10, %rax
+ adcq %rbx, %rdi
+ adcq %rcx, %rsi
+ adcq $0, %rbp
+ addq %rdx, %rdi
+ adcq %r9, %rsi
+ adcq %r8, %rbp
+ movq %rdi, %rax
+ subq %r11, %rax
+ movq %rsi, %rcx
+ sbbq %r12, %rcx
+ movq %rbp, %rbx
+ sbbq %r15, %rbx
+ movq %rbx, %rdx
+ sarq $63, %rdx
+ cmovsq %rdi, %rax
+ movq -8(%rsp), %rdx ## 8-byte Reload
+ movq %rax, (%rdx)
+ cmovsq %rsi, %rcx
+ movq %rcx, 8(%rdx)
+ cmovsq %rbp, %rbx
+ movq %rbx, 16(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montRed3L
+ .p2align 4, 0x90
+_mcl_fp_montRed3L: ## @mcl_fp_montRed3L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %r9
+ movq (%rcx), %rdi
+ movq (%rsi), %r15
+ movq %r15, %rbx
+ imulq %r9, %rbx
+ movq 16(%rcx), %rbp
+ movq %rbx, %rax
+ mulq %rbp
+ movq %rbp, -24(%rsp) ## 8-byte Spill
+ movq %rax, %r11
+ movq %rdx, %r8
+ movq 8(%rcx), %rcx
+ movq %rbx, %rax
+ mulq %rcx
+ movq %rcx, %r12
+ movq %r12, -32(%rsp) ## 8-byte Spill
+ movq %rdx, %r10
+ movq %rax, %r14
+ movq %rbx, %rax
+ mulq %rdi
+ movq %rdi, %rbx
+ movq %rbx, -16(%rsp) ## 8-byte Spill
+ movq %rdx, %rcx
+ addq %r14, %rcx
+ adcq %r11, %r10
+ adcq $0, %r8
+ movq 40(%rsi), %rdi
+ movq 32(%rsi), %r13
+ addq %r15, %rax
+ adcq 8(%rsi), %rcx
+ adcq 16(%rsi), %r10
+ adcq 24(%rsi), %r8
+ adcq $0, %r13
+ adcq $0, %rdi
+ sbbq %r15, %r15
+ andl $1, %r15d
+ movq %rcx, %rsi
+ imulq %r9, %rsi
+ movq %rsi, %rax
+ mulq %rbp
+ movq %rdx, %r11
+ movq %rax, %rbp
+ movq %rsi, %rax
+ mulq %r12
+ movq %rdx, %r14
+ movq %rax, %r12
+ movq %rsi, %rax
+ mulq %rbx
+ movq %rdx, %rbx
+ addq %r12, %rbx
+ adcq %rbp, %r14
+ adcq $0, %r11
+ addq %rcx, %rax
+ adcq %r10, %rbx
+ adcq %r8, %r14
+ adcq %r13, %r11
+ adcq $0, %rdi
+ adcq $0, %r15
+ imulq %rbx, %r9
+ movq %r9, %rax
+ movq -24(%rsp), %r12 ## 8-byte Reload
+ mulq %r12
+ movq %rdx, %rbp
+ movq %rax, %r8
+ movq %r9, %rax
+ movq -32(%rsp), %r13 ## 8-byte Reload
+ mulq %r13
+ movq %rdx, %rsi
+ movq %rax, %r10
+ movq %r9, %rax
+ movq -16(%rsp), %rcx ## 8-byte Reload
+ mulq %rcx
+ addq %r10, %rdx
+ adcq %r8, %rsi
+ adcq $0, %rbp
+ addq %rbx, %rax
+ adcq %r14, %rdx
+ adcq %r11, %rsi
+ adcq %rdi, %rbp
+ adcq $0, %r15
+ movq %rdx, %rax
+ subq %rcx, %rax
+ movq %rsi, %rdi
+ sbbq %r13, %rdi
+ movq %rbp, %rcx
+ sbbq %r12, %rcx
+ sbbq $0, %r15
+ andl $1, %r15d
+ cmovneq %rbp, %rcx
+ testb %r15b, %r15b
+ cmovneq %rdx, %rax
+ movq -8(%rsp), %rdx ## 8-byte Reload
+ movq %rax, (%rdx)
+ cmovneq %rsi, %rdi
+ movq %rdi, 8(%rdx)
+ movq %rcx, 16(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_addPre3L
+ .p2align 4, 0x90
+_mcl_fp_addPre3L: ## @mcl_fp_addPre3L
+## BB#0:
+ movq 16(%rdx), %rax
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rax
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+
+ .globl _mcl_fp_subPre3L
+ .p2align 4, 0x90
+_mcl_fp_subPre3L: ## @mcl_fp_subPre3L
+## BB#0:
+ movq 16(%rsi), %r8
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %r8
+ movq %rcx, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %r8, 16(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+
+ .globl _mcl_fp_shr1_3L
+ .p2align 4, 0x90
+_mcl_fp_shr1_3L: ## @mcl_fp_shr1_3L
+## BB#0:
+ movq 16(%rsi), %rax
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rdx
+ shrdq $1, %rdx, %rcx
+ movq %rcx, (%rdi)
+ shrdq $1, %rax, %rdx
+ movq %rdx, 8(%rdi)
+ shrq %rax
+ movq %rax, 16(%rdi)
+ retq
+
+ .globl _mcl_fp_add3L
+ .p2align 4, 0x90
+_mcl_fp_add3L: ## @mcl_fp_add3L
+## BB#0:
+ movq 16(%rdx), %r8
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r8
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r8, 16(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne LBB44_2
+## BB#1: ## %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r8, 16(%rdi)
+LBB44_2: ## %carry
+ retq
+
+ .globl _mcl_fp_addNF3L
+ .p2align 4, 0x90
+_mcl_fp_addNF3L: ## @mcl_fp_addNF3L
+## BB#0:
+ movq 16(%rdx), %r8
+ movq (%rdx), %r10
+ movq 8(%rdx), %r9
+ addq (%rsi), %r10
+ adcq 8(%rsi), %r9
+ adcq 16(%rsi), %r8
+ movq %r10, %rsi
+ subq (%rcx), %rsi
+ movq %r9, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r8, %rax
+ sbbq 16(%rcx), %rax
+ movq %rax, %rcx
+ sarq $63, %rcx
+ cmovsq %r10, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r9, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r8, %rax
+ movq %rax, 16(%rdi)
+ retq
+
+ .globl _mcl_fp_sub3L
+ .p2align 4, 0x90
+_mcl_fp_sub3L: ## @mcl_fp_sub3L
+## BB#0:
+ movq 16(%rsi), %r8
+ movq (%rsi), %rax
+ movq 8(%rsi), %r9
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r9
+ sbbq 16(%rdx), %r8
+ movq %rax, (%rdi)
+ movq %r9, 8(%rdi)
+ movq %r8, 16(%rdi)
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne LBB46_2
+## BB#1: ## %nocarry
+ retq
+LBB46_2: ## %carry
+ movq 8(%rcx), %rdx
+ movq 16(%rcx), %rsi
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %r9, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r8, %rsi
+ movq %rsi, 16(%rdi)
+ retq
+
+ .globl _mcl_fp_subNF3L
+ .p2align 4, 0x90
+_mcl_fp_subNF3L: ## @mcl_fp_subNF3L
+## BB#0:
+ movq 16(%rsi), %r10
+ movq (%rsi), %r8
+ movq 8(%rsi), %r9
+ subq (%rdx), %r8
+ sbbq 8(%rdx), %r9
+ sbbq 16(%rdx), %r10
+ movq %r10, %rdx
+ sarq $63, %rdx
+ movq %rdx, %rsi
+ shldq $1, %r10, %rsi
+ andq (%rcx), %rsi
+ movq 16(%rcx), %rax
+ andq %rdx, %rax
+ andq 8(%rcx), %rdx
+ addq %r8, %rsi
+ movq %rsi, (%rdi)
+ adcq %r9, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r10, %rax
+ movq %rax, 16(%rdi)
+ retq
+
+ .globl _mcl_fpDbl_add3L
+ .p2align 4, 0x90
+_mcl_fpDbl_add3L: ## @mcl_fpDbl_add3L
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq 40(%rdx), %r10
+ movq 40(%rsi), %r8
+ movq 32(%rdx), %r11
+ movq 24(%rdx), %r14
+ movq 24(%rsi), %r15
+ movq 32(%rsi), %r9
+ movq 16(%rdx), %rbx
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rbx
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rbx, 16(%rdi)
+ adcq %r14, %r15
+ adcq %r11, %r9
+ adcq %r10, %r8
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %r15, %rdx
+ subq (%rcx), %rdx
+ movq %r9, %rsi
+ sbbq 8(%rcx), %rsi
+ movq %r8, %rbx
+ sbbq 16(%rcx), %rbx
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %r15, %rdx
+ movq %rdx, 24(%rdi)
+ testb %al, %al
+ cmovneq %r9, %rsi
+ movq %rsi, 32(%rdi)
+ cmovneq %r8, %rbx
+ movq %rbx, 40(%rdi)
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fpDbl_sub3L
+ .p2align 4, 0x90
+_mcl_fpDbl_sub3L: ## @mcl_fpDbl_sub3L
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 40(%rdx), %r10
+ movq 40(%rsi), %r8
+ movq 32(%rsi), %r9
+ movq 24(%rsi), %r11
+ movq 16(%rsi), %r14
+ movq (%rsi), %rbx
+ movq 8(%rsi), %rax
+ xorl %esi, %esi
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %rax
+ movq 24(%rdx), %r15
+ movq 32(%rdx), %r12
+ sbbq 16(%rdx), %r14
+ movq %rbx, (%rdi)
+ movq %rax, 8(%rdi)
+ movq %r14, 16(%rdi)
+ sbbq %r15, %r11
+ sbbq %r12, %r9
+ sbbq %r10, %r8
+ movl $0, %eax
+ sbbq $0, %rax
+ andl $1, %eax
+ movq (%rcx), %rdx
+ cmoveq %rsi, %rdx
+ testb %al, %al
+ movq 16(%rcx), %rax
+ cmoveq %rsi, %rax
+ cmovneq 8(%rcx), %rsi
+ addq %r11, %rdx
+ movq %rdx, 24(%rdi)
+ adcq %r9, %rsi
+ movq %rsi, 32(%rdi)
+ adcq %r8, %rax
+ movq %rax, 40(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_mulUnitPre4L
+ .p2align 4, 0x90
+_mcl_fp_mulUnitPre4L: ## @mcl_fp_mulUnitPre4L
+## BB#0:
+ pushq %r14
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rcx, %rax
+ mulq 24(%rsi)
+ movq %rdx, %r8
+ movq %rax, %r9
+ movq %rcx, %rax
+ mulq 16(%rsi)
+ movq %rdx, %r10
+ movq %rax, %r11
+ movq %rcx, %rax
+ mulq 8(%rsi)
+ movq %rdx, %rbx
+ movq %rax, %r14
+ movq %rcx, %rax
+ mulq (%rsi)
+ movq %rax, (%rdi)
+ addq %r14, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r11, %rbx
+ movq %rbx, 16(%rdi)
+ adcq %r9, %r10
+ movq %r10, 24(%rdi)
+ adcq $0, %r8
+ movq %r8, 32(%rdi)
+ popq %rbx
+ popq %r14
+ retq
+
+ .globl _mcl_fpDbl_mulPre4L
+ .p2align 4, 0x90
+_mcl_fpDbl_mulPre4L: ## @mcl_fpDbl_mulPre4L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq (%rsi), %rax
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %r8
+ movq %r8, -56(%rsp) ## 8-byte Spill
+ movq (%rdx), %rbx
+ movq %rdx, %rbp
+ mulq %rbx
+ movq %rdx, %r15
+ movq 16(%rsi), %rcx
+ movq 24(%rsi), %r11
+ movq %rax, (%rdi)
+ movq %r11, %rax
+ mulq %rbx
+ movq %rdx, %r12
+ movq %rax, %r14
+ movq %rcx, %rax
+ movq %rcx, -16(%rsp) ## 8-byte Spill
+ mulq %rbx
+ movq %rdx, %r10
+ movq %rax, %r9
+ movq %r8, %rax
+ mulq %rbx
+ movq %rdx, %r13
+ movq %rax, %r8
+ addq %r15, %r8
+ adcq %r9, %r13
+ adcq %r14, %r10
+ adcq $0, %r12
+ movq %rbp, %r9
+ movq %r9, -8(%rsp) ## 8-byte Spill
+ movq 8(%r9), %rbp
+ movq %r11, %rax
+ mulq %rbp
+ movq %rdx, -24(%rsp) ## 8-byte Spill
+ movq %rax, %r15
+ movq %rcx, %rax
+ mulq %rbp
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ movq %rax, %rcx
+ movq -56(%rsp), %r14 ## 8-byte Reload
+ movq %r14, %rax
+ mulq %rbp
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ movq %rax, %rbx
+ movq -64(%rsp), %rax ## 8-byte Reload
+ mulq %rbp
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ addq %r8, %rax
+ movq %rax, 8(%rdi)
+ adcq %r13, %rbx
+ adcq %r10, %rcx
+ adcq %r12, %r15
+ sbbq %r13, %r13
+ movq 16(%r9), %rbp
+ movq %r14, %rax
+ mulq %rbp
+ movq %rax, %r12
+ movq %rdx, %r14
+ andl $1, %r13d
+ addq -48(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -40(%rsp), %rcx ## 8-byte Folded Reload
+ adcq -32(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -24(%rsp), %r13 ## 8-byte Folded Reload
+ movq %r11, %rax
+ mulq %rbp
+ movq %rdx, %r8
+ movq %rax, %r11
+ movq -16(%rsp), %rax ## 8-byte Reload
+ mulq %rbp
+ movq %rdx, %r9
+ movq %rax, %r10
+ movq -64(%rsp), %rax ## 8-byte Reload
+ mulq %rbp
+ addq %rbx, %rax
+ movq %rax, 16(%rdi)
+ adcq %r12, %rcx
+ adcq %r15, %r10
+ adcq %r13, %r11
+ sbbq %r13, %r13
+ andl $1, %r13d
+ addq %rdx, %rcx
+ adcq %r14, %r10
+ adcq %r9, %r11
+ adcq %r8, %r13
+ movq -8(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rbx
+ movq %rbx, %rax
+ mulq 24(%rsi)
+ movq %rdx, %r8
+ movq %rax, %r14
+ movq %rbx, %rax
+ mulq 16(%rsi)
+ movq %rdx, %r9
+ movq %rax, %r12
+ movq %rbx, %rax
+ mulq 8(%rsi)
+ movq %rdx, %r15
+ movq %rax, %rbp
+ movq %rbx, %rax
+ mulq (%rsi)
+ addq %rcx, %rax
+ movq %rax, 24(%rdi)
+ adcq %r10, %rbp
+ adcq %r11, %r12
+ adcq %r13, %r14
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %rdx, %rbp
+ movq %rbp, 32(%rdi)
+ adcq %r15, %r12
+ movq %r12, 40(%rdi)
+ adcq %r9, %r14
+ movq %r14, 48(%rdi)
+ adcq %r8, %rax
+ movq %rax, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_sqrPre4L
+ .p2align 4, 0x90
+_mcl_fpDbl_sqrPre4L: ## @mcl_fpDbl_sqrPre4L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rsi, %r10
+ movq 16(%r10), %r9
+ movq 24(%r10), %r11
+ movq (%r10), %r15
+ movq 8(%r10), %r8
+ movq %r15, %rax
+ mulq %r15
+ movq %rdx, %rbp
+ movq %rax, (%rdi)
+ movq %r11, %rax
+ mulq %r8
+ movq %rdx, -8(%rsp) ## 8-byte Spill
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ movq %r9, %rax
+ mulq %r8
+ movq %rdx, -24(%rsp) ## 8-byte Spill
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ movq %r11, %rax
+ mulq %r15
+ movq %rdx, %rbx
+ movq %rax, %rcx
+ movq %r9, %rax
+ mulq %r15
+ movq %rdx, %rsi
+ movq %rsi, -16(%rsp) ## 8-byte Spill
+ movq %rax, %r12
+ movq %r8, %rax
+ mulq %r8
+ movq %rdx, %r13
+ movq %rax, %r14
+ movq %r8, %rax
+ mulq %r15
+ addq %rax, %rbp
+ movq %rdx, %r8
+ adcq %r12, %r8
+ adcq %rsi, %rcx
+ adcq $0, %rbx
+ addq %rax, %rbp
+ movq %rbp, 8(%rdi)
+ adcq %r14, %r8
+ movq -40(%rsp), %rsi ## 8-byte Reload
+ adcq %rsi, %rcx
+ adcq -32(%rsp), %rbx ## 8-byte Folded Reload
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ addq %rdx, %r8
+ adcq %r13, %rcx
+ movq -24(%rsp), %r15 ## 8-byte Reload
+ adcq %r15, %rbx
+ adcq -8(%rsp), %rbp ## 8-byte Folded Reload
+ movq %r11, %rax
+ mulq %r9
+ movq %rdx, %r14
+ movq %rax, %r11
+ movq %r9, %rax
+ mulq %r9
+ movq %rax, %r9
+ addq %r12, %r8
+ movq %r8, 16(%rdi)
+ adcq %rsi, %rcx
+ adcq %rbx, %r9
+ adcq %rbp, %r11
+ sbbq %r12, %r12
+ andl $1, %r12d
+ addq -16(%rsp), %rcx ## 8-byte Folded Reload
+ adcq %r15, %r9
+ adcq %rdx, %r11
+ adcq %r14, %r12
+ movq 24(%r10), %rbp
+ movq %rbp, %rax
+ mulq 16(%r10)
+ movq %rdx, %r8
+ movq %rax, %r14
+ movq %rbp, %rax
+ mulq 8(%r10)
+ movq %rdx, %r13
+ movq %rax, %rbx
+ movq %rbp, %rax
+ mulq (%r10)
+ movq %rdx, %r15
+ movq %rax, %rsi
+ movq %rbp, %rax
+ mulq %rbp
+ addq %rcx, %rsi
+ movq %rsi, 24(%rdi)
+ adcq %r9, %rbx
+ adcq %r11, %r14
+ adcq %r12, %rax
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %r15, %rbx
+ movq %rbx, 32(%rdi)
+ adcq %r13, %r14
+ movq %r14, 40(%rdi)
+ adcq %r8, %rax
+ movq %rax, 48(%rdi)
+ adcq %rdx, %rcx
+ movq %rcx, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_mont4L
+ .p2align 4, 0x90
+_mcl_fp_mont4L: ## @mcl_fp_mont4L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq 24(%rsi), %rax
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ movq (%rdx), %rbp
+ mulq %rbp
+ movq %rax, %r9
+ movq %rdx, %r8
+ movq 16(%rsi), %rax
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ mulq %rbp
+ movq %rax, %rbx
+ movq %rdx, %r11
+ movq (%rsi), %rdi
+ movq %rdi, -56(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ mulq %rbp
+ movq %rdx, %r12
+ movq %rax, %rsi
+ movq %rdi, %rax
+ mulq %rbp
+ movq %rax, %r13
+ movq %rdx, %r15
+ addq %rsi, %r15
+ adcq %rbx, %r12
+ adcq %r9, %r11
+ adcq $0, %r8
+ movq -8(%rcx), %rax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %r13, %rsi
+ imulq %rax, %rsi
+ movq 24(%rcx), %rdx
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq %rdx
+ movq %rax, %r10
+ movq %rdx, %r9
+ movq 16(%rcx), %rdx
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq %rdx
+ movq %rax, %r14
+ movq %rdx, %rbx
+ movq (%rcx), %rbp
+ movq %rbp, -24(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, -32(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq %rcx
+ movq %rdx, %rdi
+ movq %rax, %rcx
+ movq %rsi, %rax
+ mulq %rbp
+ movq %rdx, %rsi
+ addq %rcx, %rsi
+ adcq %r14, %rdi
+ adcq %r10, %rbx
+ adcq $0, %r9
+ addq %r13, %rax
+ adcq %r15, %rsi
+ adcq %r12, %rdi
+ adcq %r11, %rbx
+ adcq %r8, %r9
+ sbbq %r15, %r15
+ andl $1, %r15d
+ movq -96(%rsp), %rax ## 8-byte Reload
+ movq 8(%rax), %rbp
+ movq %rbp, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %r10
+ movq %rbp, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %r11
+ movq %rbp, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r14
+ movq %rbp, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r8
+ movq %rdx, %rbp
+ addq %r14, %rbp
+ adcq %r11, %rcx
+ adcq %r10, %r13
+ adcq $0, %r12
+ addq %rsi, %r8
+ adcq %rdi, %rbp
+ adcq %rbx, %rcx
+ adcq %r9, %r13
+ adcq %r15, %r12
+ sbbq %r15, %r15
+ andl $1, %r15d
+ movq %r8, %rsi
+ imulq -88(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, %rax
+ mulq -72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, %r10
+ movq %rsi, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r11
+ movq %rsi, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r14
+ movq %rsi, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ addq %r14, %rsi
+ adcq %r11, %rdi
+ adcq %r10, %rbx
+ adcq $0, %r9
+ addq %r8, %rax
+ adcq %rbp, %rsi
+ adcq %rcx, %rdi
+ adcq %r13, %rbx
+ adcq %r12, %r9
+ adcq $0, %r15
+ movq -96(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rbp
+ movq %rbp, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %r10
+ movq %rbp, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %r11
+ movq %rbp, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r14
+ movq %rbp, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rax, %rbp
+ movq %rdx, %r8
+ addq %r14, %r8
+ adcq %r11, %rcx
+ adcq %r10, %r13
+ adcq $0, %r12
+ addq %rsi, %rbp
+ adcq %rdi, %r8
+ adcq %rbx, %rcx
+ adcq %r9, %r13
+ adcq %r15, %r12
+ sbbq %r14, %r14
+ movq %rbp, %rsi
+ imulq -88(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, %rax
+ mulq -72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r10
+ movq %rsi, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, %r15
+ movq %rsi, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ andl $1, %r14d
+ addq %r15, %r11
+ adcq %r10, %r9
+ adcq -16(%rsp), %rbx ## 8-byte Folded Reload
+ adcq $0, %rdi
+ addq %rbp, %rax
+ adcq %r8, %r11
+ adcq %rcx, %r9
+ adcq %r13, %rbx
+ adcq %r12, %rdi
+ adcq $0, %r14
+ movq -96(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rcx
+ movq %rcx, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %r15
+ movq %rcx, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r13
+ movq %rcx, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r10
+ movq %rdx, %rbp
+ addq %r13, %rbp
+ adcq %r15, %rsi
+ adcq -96(%rsp), %r12 ## 8-byte Folded Reload
+ adcq $0, %r8
+ addq %r11, %r10
+ adcq %r9, %rbp
+ adcq %rbx, %rsi
+ adcq %rdi, %r12
+ adcq %r14, %r8
+ sbbq %rdi, %rdi
+ andl $1, %edi
+ movq -88(%rsp), %rcx ## 8-byte Reload
+ imulq %r10, %rcx
+ movq %rcx, %rax
+ mulq -72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %rbx
+ movq %rcx, %rax
+ movq %rcx, %r9
+ movq -32(%rsp), %r11 ## 8-byte Reload
+ mulq %r11
+ movq %rdx, %rcx
+ movq %rax, %r14
+ movq %r9, %rax
+ movq -24(%rsp), %r9 ## 8-byte Reload
+ mulq %r9
+ addq %r14, %rdx
+ adcq %rbx, %rcx
+ adcq -88(%rsp), %r15 ## 8-byte Folded Reload
+ adcq $0, %r13
+ addq %r10, %rax
+ adcq %rbp, %rdx
+ adcq %rsi, %rcx
+ adcq %r12, %r15
+ adcq %r8, %r13
+ adcq $0, %rdi
+ movq %rdx, %rax
+ subq %r9, %rax
+ movq %rcx, %rsi
+ sbbq %r11, %rsi
+ movq %r15, %rbp
+ sbbq -80(%rsp), %rbp ## 8-byte Folded Reload
+ movq %r13, %rbx
+ sbbq -72(%rsp), %rbx ## 8-byte Folded Reload
+ sbbq $0, %rdi
+ andl $1, %edi
+ cmovneq %r13, %rbx
+ testb %dil, %dil
+ cmovneq %rdx, %rax
+ movq -8(%rsp), %rdx ## 8-byte Reload
+ movq %rax, (%rdx)
+ cmovneq %rcx, %rsi
+ movq %rsi, 8(%rdx)
+ cmovneq %r15, %rbp
+ movq %rbp, 16(%rdx)
+ movq %rbx, 24(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montNF4L
+ .p2align 4, 0x90
+_mcl_fp_montNF4L: ## @mcl_fp_montNF4L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %r15
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq 24(%rsi), %rax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq (%r15), %rdi
+ movq %r15, -24(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %r8
+ movq %rdx, %r12
+ movq 16(%rsi), %rax
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %r14
+ movq %rdx, %r10
+ movq (%rsi), %rbp
+ movq %rbp, -56(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rdx, %rbx
+ movq %rax, %rsi
+ movq %rbp, %rax
+ mulq %rdi
+ movq %rax, %r11
+ movq %rdx, %r9
+ addq %rsi, %r9
+ adcq %r14, %rbx
+ adcq %r8, %r10
+ adcq $0, %r12
+ movq -8(%rcx), %rax
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %r11, %rsi
+ imulq %rax, %rsi
+ movq 24(%rcx), %rdx
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq %rdx
+ movq %rax, %r13
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ movq 16(%rcx), %rdx
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq %rdx
+ movq %rax, %r8
+ movq %rdx, %r14
+ movq (%rcx), %rdi
+ movq %rdi, -72(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, -32(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq %rcx
+ movq %rdx, %rcx
+ movq %rax, %rbp
+ movq %rsi, %rax
+ mulq %rdi
+ addq %r11, %rax
+ adcq %r9, %rbp
+ adcq %rbx, %r8
+ adcq %r10, %r13
+ adcq $0, %r12
+ addq %rdx, %rbp
+ adcq %rcx, %r8
+ adcq %r14, %r13
+ adcq -16(%rsp), %r12 ## 8-byte Folded Reload
+ movq 8(%r15), %rdi
+ movq %rdi, %rax
+ mulq -88(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %rsi
+ movq %rdi, %rax
+ mulq -96(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %r11
+ movq %rdi, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r14
+ movq %rdi, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rax, %rdi
+ movq %rdx, %r9
+ addq %r14, %r9
+ adcq %r11, %rcx
+ adcq %rsi, %r10
+ adcq $0, %rbx
+ addq %rbp, %rdi
+ adcq %r8, %r9
+ adcq %r13, %rcx
+ adcq %r12, %r10
+ adcq $0, %rbx
+ movq %rdi, %rsi
+ imulq -80(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r12
+ movq %rsi, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, %r13
+ movq %rsi, %rax
+ movq -32(%rsp), %r15 ## 8-byte Reload
+ mulq %r15
+ movq %rdx, %r14
+ movq %rax, %rbp
+ movq %rsi, %rax
+ mulq -72(%rsp) ## 8-byte Folded Reload
+ addq %rdi, %rax
+ adcq %r9, %rbp
+ adcq %rcx, %r13
+ adcq %r10, %r12
+ adcq $0, %rbx
+ addq %rdx, %rbp
+ adcq %r14, %r13
+ adcq %r11, %r12
+ adcq %r8, %rbx
+ movq -24(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rdi
+ movq %rdi, %rax
+ mulq -88(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r10
+ movq %rdi, %rax
+ mulq -96(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r11
+ movq %rdi, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r14
+ movq %rdi, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r9
+ movq %rdx, %rdi
+ addq %r14, %rdi
+ adcq %r11, %rcx
+ adcq %r10, %r8
+ adcq $0, %rsi
+ addq %rbp, %r9
+ adcq %r13, %rdi
+ adcq %r12, %rcx
+ adcq %rbx, %r8
+ adcq $0, %rsi
+ movq %r9, %rbx
+ imulq -80(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %r12
+ movq %rbx, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, %r13
+ movq %rbx, %rax
+ mulq %r15
+ movq %rdx, %r14
+ movq %rax, %rbp
+ movq %rbx, %rax
+ mulq -72(%rsp) ## 8-byte Folded Reload
+ addq %r9, %rax
+ adcq %rdi, %rbp
+ adcq %rcx, %r13
+ adcq %r8, %r12
+ adcq $0, %rsi
+ addq %rdx, %rbp
+ adcq %r14, %r13
+ adcq %r11, %r12
+ adcq %r10, %rsi
+ movq -24(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rdi
+ movq %rdi, %rax
+ mulq -88(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %rcx
+ movq %rdi, %rax
+ mulq -96(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r11
+ movq %rdi, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %r14
+ movq %rdi, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r9
+ movq %rdx, %rdi
+ addq %r14, %rdi
+ adcq %r11, %r10
+ adcq %rcx, %r8
+ adcq $0, %rbx
+ addq %rbp, %r9
+ adcq %r13, %rdi
+ adcq %r12, %r10
+ adcq %rsi, %r8
+ adcq $0, %rbx
+ movq -80(%rsp), %rcx ## 8-byte Reload
+ imulq %r9, %rcx
+ movq %rcx, %rax
+ movq -40(%rsp), %r12 ## 8-byte Reload
+ mulq %r12
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rax, %r13
+ movq %rcx, %rax
+ movq -48(%rsp), %r11 ## 8-byte Reload
+ mulq %r11
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, %rbp
+ movq %rcx, %rax
+ movq %rcx, %r15
+ movq -72(%rsp), %rsi ## 8-byte Reload
+ mulq %rsi
+ movq %rdx, %r14
+ movq %rax, %rcx
+ movq %r15, %rax
+ movq -32(%rsp), %r15 ## 8-byte Reload
+ mulq %r15
+ addq %r9, %rcx
+ adcq %rdi, %rax
+ adcq %r10, %rbp
+ adcq %r8, %r13
+ adcq $0, %rbx
+ addq %r14, %rax
+ adcq %rdx, %rbp
+ adcq -96(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -88(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rax, %rcx
+ subq %rsi, %rcx
+ movq %rbp, %rdx
+ sbbq %r15, %rdx
+ movq %r13, %rdi
+ sbbq %r11, %rdi
+ movq %rbx, %rsi
+ sbbq %r12, %rsi
+ cmovsq %rax, %rcx
+ movq -8(%rsp), %rax ## 8-byte Reload
+ movq %rcx, (%rax)
+ cmovsq %rbp, %rdx
+ movq %rdx, 8(%rax)
+ cmovsq %r13, %rdi
+ movq %rdi, 16(%rax)
+ cmovsq %rbx, %rsi
+ movq %rsi, 24(%rax)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montRed4L
+ .p2align 4, 0x90
+_mcl_fp_montRed4L: ## @mcl_fp_montRed4L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %rax
+ movq (%rcx), %rdi
+ movq %rdi, -32(%rsp) ## 8-byte Spill
+ movq (%rsi), %r12
+ movq %r12, %rbx
+ imulq %rax, %rbx
+ movq %rax, %r9
+ movq %r9, -64(%rsp) ## 8-byte Spill
+ movq 24(%rcx), %rdx
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq %rdx
+ movq %rax, %r11
+ movq %rdx, %r8
+ movq 16(%rcx), %rbp
+ movq %rbx, %rax
+ mulq %rbp
+ movq %rbp, %r13
+ movq %r13, -24(%rsp) ## 8-byte Spill
+ movq %rax, %r14
+ movq %rdx, %r10
+ movq 8(%rcx), %rcx
+ movq %rbx, %rax
+ mulq %rcx
+ movq %rcx, %rbp
+ movq %rbp, -16(%rsp) ## 8-byte Spill
+ movq %rdx, %r15
+ movq %rax, %rcx
+ movq %rbx, %rax
+ mulq %rdi
+ movq %rdx, %rbx
+ addq %rcx, %rbx
+ adcq %r14, %r15
+ adcq %r11, %r10
+ adcq $0, %r8
+ movq 56(%rsi), %rcx
+ movq 48(%rsi), %rdx
+ addq %r12, %rax
+ movq 40(%rsi), %rax
+ adcq 8(%rsi), %rbx
+ adcq 16(%rsi), %r15
+ adcq 24(%rsi), %r10
+ adcq 32(%rsi), %r8
+ adcq $0, %rax
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ adcq $0, %rdx
+ movq %rdx, %r12
+ adcq $0, %rcx
+ movq %rcx, -72(%rsp) ## 8-byte Spill
+ sbbq %rdi, %rdi
+ andl $1, %edi
+ movq %rbx, %rsi
+ imulq %r9, %rsi
+ movq %rsi, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq %r13
+ movq %rdx, %r14
+ movq %rax, %r9
+ movq %rsi, %rax
+ mulq %rbp
+ movq %rdx, %rcx
+ movq %rax, %rbp
+ movq %rsi, %rax
+ movq -32(%rsp), %r13 ## 8-byte Reload
+ mulq %r13
+ movq %rdx, %rsi
+ addq %rbp, %rsi
+ adcq %r9, %rcx
+ adcq -56(%rsp), %r14 ## 8-byte Folded Reload
+ adcq $0, %r11
+ addq %rbx, %rax
+ adcq %r15, %rsi
+ adcq %r10, %rcx
+ adcq %r8, %r14
+ adcq -48(%rsp), %r11 ## 8-byte Folded Reload
+ adcq $0, %r12
+ movq %r12, -48(%rsp) ## 8-byte Spill
+ movq -72(%rsp), %rbp ## 8-byte Reload
+ adcq $0, %rbp
+ adcq $0, %rdi
+ movq %rsi, %rbx
+ imulq -64(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, %rax
+ movq -40(%rsp), %r12 ## 8-byte Reload
+ mulq %r12
+ movq %rdx, %r8
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %r9
+ movq %rbx, %rax
+ mulq %r13
+ movq %rdx, %rbx
+ addq %r9, %rbx
+ adcq -56(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -72(%rsp), %r10 ## 8-byte Folded Reload
+ adcq $0, %r8
+ addq %rsi, %rax
+ adcq %rcx, %rbx
+ adcq %r14, %r15
+ adcq %r11, %r10
+ adcq -48(%rsp), %r8 ## 8-byte Folded Reload
+ adcq $0, %rbp
+ movq %rbp, -72(%rsp) ## 8-byte Spill
+ adcq $0, %rdi
+ movq -64(%rsp), %rcx ## 8-byte Reload
+ imulq %rbx, %rcx
+ movq %rcx, %rax
+ mulq %r12
+ movq %rdx, %r13
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ movq -24(%rsp), %r14 ## 8-byte Reload
+ mulq %r14
+ movq %rdx, %r11
+ movq %rax, %r12
+ movq %rcx, %rax
+ movq %rcx, %r9
+ movq -16(%rsp), %rsi ## 8-byte Reload
+ mulq %rsi
+ movq %rdx, %rbp
+ movq %rax, %rcx
+ movq %r9, %rax
+ movq -32(%rsp), %r9 ## 8-byte Reload
+ mulq %r9
+ addq %rcx, %rdx
+ adcq %r12, %rbp
+ adcq -64(%rsp), %r11 ## 8-byte Folded Reload
+ adcq $0, %r13
+ addq %rbx, %rax
+ adcq %r15, %rdx
+ adcq %r10, %rbp
+ adcq %r8, %r11
+ adcq -72(%rsp), %r13 ## 8-byte Folded Reload
+ adcq $0, %rdi
+ movq %rdx, %rax
+ subq %r9, %rax
+ movq %rbp, %rcx
+ sbbq %rsi, %rcx
+ movq %r11, %rbx
+ sbbq %r14, %rbx
+ movq %r13, %rsi
+ sbbq -40(%rsp), %rsi ## 8-byte Folded Reload
+ sbbq $0, %rdi
+ andl $1, %edi
+ cmovneq %r13, %rsi
+ testb %dil, %dil
+ cmovneq %rdx, %rax
+ movq -8(%rsp), %rdx ## 8-byte Reload
+ movq %rax, (%rdx)
+ cmovneq %rbp, %rcx
+ movq %rcx, 8(%rdx)
+ cmovneq %r11, %rbx
+ movq %rbx, 16(%rdx)
+ movq %rsi, 24(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_addPre4L
+ .p2align 4, 0x90
+_mcl_fp_addPre4L: ## @mcl_fp_addPre4L
+## BB#0:
+ movq 24(%rdx), %r8
+ movq 24(%rsi), %r9
+ movq 16(%rdx), %rax
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rax
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ adcq %r8, %r9
+ movq %r9, 24(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+
+ .globl _mcl_fp_subPre4L
+ .p2align 4, 0x90
+_mcl_fp_subPre4L: ## @mcl_fp_subPre4L
+## BB#0:
+ movq 24(%rdx), %r8
+ movq 24(%rsi), %r9
+ movq 16(%rsi), %r10
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %r10
+ movq %rcx, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %r10, 16(%rdi)
+ sbbq %r8, %r9
+ movq %r9, 24(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+
+ .globl _mcl_fp_shr1_4L
+ .p2align 4, 0x90
+_mcl_fp_shr1_4L: ## @mcl_fp_shr1_4L
+## BB#0:
+ movq 24(%rsi), %rax
+ movq 16(%rsi), %rcx
+ movq (%rsi), %rdx
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rdx
+ movq %rdx, (%rdi)
+ shrdq $1, %rcx, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rax, %rcx
+ movq %rcx, 16(%rdi)
+ shrq %rax
+ movq %rax, 24(%rdi)
+ retq
+
+ .globl _mcl_fp_add4L
+ .p2align 4, 0x90
+_mcl_fp_add4L: ## @mcl_fp_add4L
+## BB#0:
+ movq 24(%rdx), %r10
+ movq 24(%rsi), %r8
+ movq 16(%rdx), %r9
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r9
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r9, 16(%rdi)
+ adcq %r10, %r8
+ movq %r8, 24(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r9
+ sbbq 24(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne LBB59_2
+## BB#1: ## %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r9, 16(%rdi)
+ movq %r8, 24(%rdi)
+LBB59_2: ## %carry
+ retq
+
+ .globl _mcl_fp_addNF4L
+ .p2align 4, 0x90
+_mcl_fp_addNF4L: ## @mcl_fp_addNF4L
+## BB#0:
+ pushq %rbx
+ movq 24(%rdx), %r8
+ movq 16(%rdx), %r9
+ movq (%rdx), %r11
+ movq 8(%rdx), %r10
+ addq (%rsi), %r11
+ adcq 8(%rsi), %r10
+ adcq 16(%rsi), %r9
+ adcq 24(%rsi), %r8
+ movq %r11, %rsi
+ subq (%rcx), %rsi
+ movq %r10, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r9, %rax
+ sbbq 16(%rcx), %rax
+ movq %r8, %rbx
+ sbbq 24(%rcx), %rbx
+ testq %rbx, %rbx
+ cmovsq %r11, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r10, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r9, %rax
+ movq %rax, 16(%rdi)
+ cmovsq %r8, %rbx
+ movq %rbx, 24(%rdi)
+ popq %rbx
+ retq
+
+ .globl _mcl_fp_sub4L
+ .p2align 4, 0x90
+_mcl_fp_sub4L: ## @mcl_fp_sub4L
+## BB#0:
+ movq 24(%rdx), %r10
+ movq 24(%rsi), %r8
+ movq 16(%rsi), %r9
+ movq (%rsi), %rax
+ movq 8(%rsi), %r11
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r11
+ sbbq 16(%rdx), %r9
+ movq %rax, (%rdi)
+ movq %r11, 8(%rdi)
+ movq %r9, 16(%rdi)
+ sbbq %r10, %r8
+ movq %r8, 24(%rdi)
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne LBB61_2
+## BB#1: ## %nocarry
+ retq
+LBB61_2: ## %carry
+ movq 24(%rcx), %r10
+ movq 8(%rcx), %rsi
+ movq 16(%rcx), %rdx
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %r11, %rsi
+ movq %rsi, 8(%rdi)
+ adcq %r9, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r8, %r10
+ movq %r10, 24(%rdi)
+ retq
+
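+## 4-limb subtract where the sign of the result masks the modulus at %rcx,
+## which is then added to fold a negative difference back into range.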
+ .globl _mcl_fp_subNF4L
+ .p2align 4, 0x90
+_mcl_fp_subNF4L: ## @mcl_fp_subNF4L
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movdqu (%rdx), %xmm0
+ movdqu 16(%rdx), %xmm1
+ pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1]
+ movd %xmm2, %r8
+ movdqu (%rsi), %xmm2
+ movdqu 16(%rsi), %xmm3
+ pshufd $78, %xmm3, %xmm4 ## xmm4 = xmm3[2,3,0,1]
+ movd %xmm4, %r15
+ movd %xmm1, %r9
+ movd %xmm3, %r11
+ pshufd $78, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,0,1]
+ movd %xmm1, %r10
+ pshufd $78, %xmm2, %xmm1 ## xmm1 = xmm2[2,3,0,1]
+ movd %xmm1, %r14
+ movd %xmm0, %rdx
+ movd %xmm2, %r12
+ subq %rdx, %r12
+ sbbq %r10, %r14
+ sbbq %r9, %r11
+ sbbq %r8, %r15
+ movq %r15, %rdx
+ sarq $63, %rdx
+ movq 24(%rcx), %rsi
+ andq %rdx, %rsi
+ movq 16(%rcx), %rax
+ andq %rdx, %rax
+ movq 8(%rcx), %rbx
+ andq %rdx, %rbx
+ andq (%rcx), %rdx
+ addq %r12, %rdx
+ movq %rdx, (%rdi)
+ adcq %r14, %rbx
+ movq %rbx, 8(%rdi)
+ adcq %r11, %rax
+ movq %rax, 16(%rdi)
+ adcq %r15, %rsi
+ movq %rsi, 24(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+
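+## Double-width (8-limb) add: the low 4 limbs are stored directly at %rdi and
+## the high 4 limbs are conditionally reduced by the modulus at %rcx.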
+ .globl _mcl_fpDbl_add4L
+ .p2align 4, 0x90
+_mcl_fpDbl_add4L: ## @mcl_fpDbl_add4L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r9
+ movq 56(%rsi), %r8
+ movq 48(%rdx), %r10
+ movq 48(%rsi), %r12
+ movq 40(%rdx), %r11
+ movq 32(%rdx), %r14
+ movq 24(%rdx), %r15
+ movq 16(%rdx), %rbx
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rbx
+ movq 40(%rsi), %r13
+ movq 24(%rsi), %rbp
+ movq 32(%rsi), %rsi
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rbx, 16(%rdi)
+ adcq %r15, %rbp
+ movq %rbp, 24(%rdi)
+ adcq %r14, %rsi
+ adcq %r11, %r13
+ adcq %r10, %r12
+ adcq %r9, %r8
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rsi, %rdx
+ subq (%rcx), %rdx
+ movq %r13, %rbp
+ sbbq 8(%rcx), %rbp
+ movq %r12, %rbx
+ sbbq 16(%rcx), %rbx
+ movq %r8, %r9
+ sbbq 24(%rcx), %r9
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %rsi, %rdx
+ movq %rdx, 32(%rdi)
+ testb %al, %al
+ cmovneq %r13, %rbp
+ movq %rbp, 40(%rdi)
+ cmovneq %r12, %rbx
+ movq %rbx, 48(%rdi)
+ cmovneq %r8, %r9
+ movq %r9, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
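+## Double-width (8-limb) subtract: the low 4 limbs are stored directly; the
+## borrow into the high half is compensated by conditionally adding the modulus at %rcx.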
+ .globl _mcl_fpDbl_sub4L
+ .p2align 4, 0x90
+_mcl_fpDbl_sub4L: ## @mcl_fpDbl_sub4L
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r9
+ movq 56(%rsi), %r8
+ movq 48(%rdx), %r10
+ movq 24(%rdx), %r11
+ movq (%rsi), %rbx
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ movq %rbx, (%rdi)
+ movq 8(%rsi), %rbx
+ sbbq 8(%rdx), %rbx
+ movq %rbx, 8(%rdi)
+ movq 16(%rsi), %rbx
+ sbbq 16(%rdx), %rbx
+ movq %rbx, 16(%rdi)
+ movq 24(%rsi), %rbx
+ sbbq %r11, %rbx
+ movq 40(%rdx), %r11
+ movq 32(%rdx), %rdx
+ movq %rbx, 24(%rdi)
+ movq 32(%rsi), %r12
+ sbbq %rdx, %r12
+ movq 48(%rsi), %r14
+ movq 40(%rsi), %r15
+ sbbq %r11, %r15
+ sbbq %r10, %r14
+ sbbq %r9, %r8
+ movl $0, %edx
+ sbbq $0, %rdx
+ andl $1, %edx
+ movq (%rcx), %rsi
+ cmoveq %rax, %rsi
+ testb %dl, %dl
+ movq 16(%rcx), %rdx
+ cmoveq %rax, %rdx
+ movq 24(%rcx), %rbx
+ cmoveq %rax, %rbx
+ cmovneq 8(%rcx), %rax
+ addq %r12, %rsi
+ movq %rsi, 32(%rdi)
+ adcq %r15, %rax
+ movq %rax, 40(%rdi)
+ adcq %r14, %rdx
+ movq %rdx, 48(%rdi)
+ adcq %r8, %rbx
+ movq %rbx, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+
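+## Multiplies the 5-limb operand at %rsi by the 64-bit scalar in %rdx and
+## stores the 6-limb product at %rdi.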
+ .globl _mcl_fp_mulUnitPre5L
+ .p2align 4, 0x90
+_mcl_fp_mulUnitPre5L: ## @mcl_fp_mulUnitPre5L
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rcx, %rax
+ mulq 32(%rsi)
+ movq %rdx, %r8
+ movq %rax, %r9
+ movq %rcx, %rax
+ mulq 24(%rsi)
+ movq %rdx, %r10
+ movq %rax, %r11
+ movq %rcx, %rax
+ mulq 16(%rsi)
+ movq %rdx, %r15
+ movq %rax, %r14
+ movq %rcx, %rax
+ mulq 8(%rsi)
+ movq %rdx, %rbx
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq (%rsi)
+ movq %rax, (%rdi)
+ addq %r12, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r14, %rbx
+ movq %rbx, 16(%rdi)
+ adcq %r11, %r15
+ movq %r15, 24(%rdi)
+ adcq %r9, %r10
+ movq %r10, 32(%rdi)
+ adcq $0, %r8
+ movq %r8, 40(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+
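+## Schoolbook 5x5-limb multiplication: operands at %rsi and %rdx, 10-limb
+## product at %rdi, no modular reduction.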
+ .globl _mcl_fpDbl_mulPre5L
+ .p2align 4, 0x90
+_mcl_fpDbl_mulPre5L: ## @mcl_fpDbl_mulPre5L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq %rsi, %r9
+ movq %rdi, -48(%rsp) ## 8-byte Spill
+ movq (%r9), %rax
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq (%rdx), %rbp
+ mulq %rbp
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq 16(%r9), %r13
+ movq 24(%r9), %r15
+ movq 32(%r9), %rbx
+ movq %rax, (%rdi)
+ movq %rbx, %rax
+ mulq %rbp
+ movq %rdx, %r11
+ movq %rax, %r10
+ movq %r15, %rax
+ mulq %rbp
+ movq %rdx, %r14
+ movq %rax, %rdi
+ movq %r13, %rax
+ mulq %rbp
+ movq %rax, %rsi
+ movq %rdx, %rcx
+ movq 8(%r9), %r8
+ movq %r8, %rax
+ mulq %rbp
+ movq %rdx, %rbp
+ movq %rax, %r12
+ addq -88(%rsp), %r12 ## 8-byte Folded Reload
+ adcq %rsi, %rbp
+ adcq %rdi, %rcx
+ adcq %r10, %r14
+ adcq $0, %r11
+ movq -72(%rsp), %r10 ## 8-byte Reload
+ movq 8(%r10), %rdi
+ movq %rbx, %rax
+ mulq %rdi
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rax, %rsi
+ movq %r15, %rax
+ mulq %rdi
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, %r15
+ movq %r13, %rax
+ mulq %rdi
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq %rax, %r13
+ movq %r8, %rax
+ mulq %rdi
+ movq %rdx, %r8
+ movq %rax, %rbx
+ movq -80(%rsp), %rax ## 8-byte Reload
+ mulq %rdi
+ addq %r12, %rax
+ movq -48(%rsp), %rdi ## 8-byte Reload
+ movq %rax, 8(%rdi)
+ adcq %rbp, %rbx
+ adcq %rcx, %r13
+ adcq %r14, %r15
+ adcq %r11, %rsi
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %rdx, %rbx
+ adcq %r8, %r13
+ adcq -56(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -96(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -88(%rsp), %rcx ## 8-byte Folded Reload
+ movq 32(%r9), %rax
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq 16(%r10), %r12
+ mulq %r12
+ movq %rax, %r11
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq 24(%r9), %rax
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ mulq %r12
+ movq %rax, %r10
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq 16(%r9), %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ mulq %r12
+ movq %rax, %r8
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq 8(%r9), %rdi
+ movq %rdi, %rax
+ mulq %r12
+ movq %rax, %rbp
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ movq (%r9), %r14
+ movq %r14, %rax
+ mulq %r12
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ addq %rbx, %rax
+ movq -48(%rsp), %rbx ## 8-byte Reload
+ movq %rax, 16(%rbx)
+ adcq %r13, %rbp
+ adcq %r15, %r8
+ adcq %rsi, %r10
+ adcq %rcx, %r11
+ sbbq %rsi, %rsi
+ movq -72(%rsp), %r12 ## 8-byte Reload
+ movq 24(%r12), %rcx
+ movq -96(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq %rcx
+ movq %rdx, -8(%rsp) ## 8-byte Spill
+ movq %rax, %r15
+ movq %r14, %rax
+ mulq %rcx
+ movq %rdx, %r13
+ movq %rax, %rdi
+ movq -64(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ movq %rax, %r14
+ movq -32(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ andl $1, %esi
+ addq -40(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -16(%rsp), %r8 ## 8-byte Folded Reload
+ adcq -56(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -88(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -80(%rsp), %rsi ## 8-byte Folded Reload
+ addq %rdi, %rbp
+ movq %rbp, 24(%rbx)
+ adcq %r15, %r8
+ adcq %rax, %r10
+ adcq %r14, %r11
+ adcq -24(%rsp), %rsi ## 8-byte Folded Reload
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %r13, %r8
+ adcq -8(%rsp), %r10 ## 8-byte Folded Reload
+ adcq %rdx, %r11
+ adcq -64(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -96(%rsp), %rcx ## 8-byte Folded Reload
+ movq 32(%r12), %rdi
+ movq %rdi, %rax
+ mulq 32(%r9)
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq %rax, %r15
+ movq %rdi, %rax
+ mulq 24(%r9)
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq %rax, %r13
+ movq %rdi, %rax
+ mulq 16(%r9)
+ movq %rdx, %r14
+ movq %rax, %rbx
+ movq %rdi, %rax
+ mulq 8(%r9)
+ movq %rdx, %r12
+ movq %rax, %rbp
+ movq %rdi, %rax
+ mulq (%r9)
+ addq %r8, %rax
+ movq -48(%rsp), %rdi ## 8-byte Reload
+ movq %rax, 32(%rdi)
+ adcq %r10, %rbp
+ adcq %r11, %rbx
+ adcq %rsi, %r13
+ adcq %rcx, %r15
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %rdx, %rbp
+ movq %rbp, 40(%rdi)
+ adcq %r12, %rbx
+ movq %rbx, 48(%rdi)
+ adcq %r14, %r13
+ movq %r13, 56(%rdi)
+ adcq -80(%rsp), %r15 ## 8-byte Folded Reload
+ movq %r15, 64(%rdi)
+ adcq -72(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 72(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
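+## Squares the 5-limb operand at %rsi into a 10-limb result at %rdi, no reduction.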
+ .globl _mcl_fpDbl_sqrPre5L
+ .p2align 4, 0x90
+_mcl_fpDbl_sqrPre5L: ## @mcl_fpDbl_sqrPre5L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 32(%rsi), %r11
+ movq (%rsi), %rbp
+ movq 8(%rsi), %r13
+ movq %r11, %rax
+ mulq %r13
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ movq 24(%rsi), %rbx
+ movq %rbx, %rax
+ mulq %r13
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq 16(%rsi), %rcx
+ movq %rcx, %rax
+ mulq %r13
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ movq %r11, %rax
+ mulq %rbp
+ movq %rdx, %r8
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq %rbp
+ movq %rdx, %r9
+ movq %rax, %r15
+ movq %rcx, %rax
+ mulq %rbp
+ movq %rdx, %r10
+ movq %rax, %r12
+ movq %r13, %rax
+ mulq %r13
+ movq %rdx, -8(%rsp) ## 8-byte Spill
+ movq %rax, %r14
+ movq %r13, %rax
+ mulq %rbp
+ movq %rdx, %r13
+ movq %rax, %rbx
+ movq %rbp, %rax
+ mulq %rbp
+ movq %rdi, -24(%rsp) ## 8-byte Spill
+ movq %rax, (%rdi)
+ addq %rbx, %rdx
+ adcq %r13, %r12
+ adcq %r15, %r10
+ adcq -16(%rsp), %r9 ## 8-byte Folded Reload
+ adcq $0, %r8
+ addq %rbx, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r14, %r12
+ adcq -32(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -64(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -56(%rsp), %r8 ## 8-byte Folded Reload
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ addq %r13, %r12
+ adcq -8(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -48(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -72(%rsp), %r8 ## 8-byte Folded Reload
+ adcq -40(%rsp), %rbp ## 8-byte Folded Reload
+ movq %r11, %rax
+ mulq %rcx
+ movq %rax, %r11
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ movq 24(%rsi), %rbx
+ movq %rbx, %rax
+ mulq %rcx
+ movq %rax, %r14
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ mulq %rcx
+ movq %rax, %r15
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ movq (%rsi), %rax
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ mulq %rcx
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq %rcx
+ movq %rax, %r13
+ addq %r12, %rdi
+ movq -24(%rsp), %r12 ## 8-byte Reload
+ movq %rdi, 16(%r12)
+ adcq %r10, %r15
+ adcq %r9, %r13
+ adcq %r8, %r14
+ adcq %rbp, %r11
+ sbbq %rdi, %rdi
+ andl $1, %edi
+ addq -32(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -64(%rsp), %r13 ## 8-byte Folded Reload
+ adcq %rdx, %r14
+ adcq -72(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -40(%rsp), %rdi ## 8-byte Folded Reload
+ movq -56(%rsp), %rax ## 8-byte Reload
+ mulq %rbx
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq %rax, %r8
+ movq -48(%rsp), %rax ## 8-byte Reload
+ mulq %rbx
+ movq %rax, %rbp
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq 32(%rsi), %rcx
+ movq %rcx, %rax
+ mulq %rbx
+ movq %rax, %r9
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ movq 16(%rsi), %rax
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ mulq %rbx
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ movq %rax, %r10
+ movq %rbx, %rax
+ mulq %rbx
+ movq %rax, %rbx
+ addq %r15, %rbp
+ movq %rbp, 24(%r12)
+ adcq %r13, %r8
+ adcq %r14, %r10
+ adcq %r11, %rbx
+ adcq %rdi, %r9
+ sbbq %r12, %r12
+ andl $1, %r12d
+ addq -56(%rsp), %r8 ## 8-byte Folded Reload
+ adcq -72(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -64(%rsp), %rbx ## 8-byte Folded Reload
+ adcq %rdx, %r9
+ adcq -48(%rsp), %r12 ## 8-byte Folded Reload
+ movq %rcx, %rax
+ mulq 24(%rsi)
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq %rax, %rbp
+ movq %rcx, %rax
+ mulq 8(%rsi)
+ movq %rdx, %r14
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq (%rsi)
+ movq %rdx, %r13
+ movq %rax, %rsi
+ movq %rcx, %rax
+ mulq %rcx
+ movq %rdx, %r15
+ movq %rax, %r11
+ movq -40(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ addq %r8, %rsi
+ movq -24(%rsp), %r8 ## 8-byte Reload
+ movq %rsi, 32(%r8)
+ adcq %r10, %rdi
+ adcq %rbx, %rax
+ adcq %r9, %rbp
+ adcq %r12, %r11
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %r13, %rdi
+ movq %r8, %rsi
+ movq %rdi, 40(%rsi)
+ adcq %r14, %rax
+ movq %rax, 48(%rsi)
+ adcq %rdx, %rbp
+ movq %rbp, 56(%rsi)
+ adcq -72(%rsp), %r11 ## 8-byte Folded Reload
+ movq %r11, 64(%rsi)
+ adcq %r15, %rcx
+ movq %rcx, 72(%rsi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
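+## 5-limb Montgomery multiplication: multiplies the operands at %rsi and %rdx
+## with interleaved reduction steps driven by the constant at -8(%rcx) and the
+## modulus limbs at (%rcx), finishing with a conditional subtraction; result at %rdi.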
+ .globl _mcl_fp_mont5L
+ .p2align 4, 0x90
+_mcl_fp_mont5L: ## @mcl_fp_mont5L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ pushq %rax
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rdi, (%rsp) ## 8-byte Spill
+ movq 32(%rsi), %rax
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq (%rdx), %rdi
+ mulq %rdi
+ movq %rax, %r8
+ movq %rdx, %r15
+ movq 24(%rsi), %rax
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %r10
+ movq %rdx, %rbx
+ movq 16(%rsi), %rax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %r11
+ movq %rdx, %r14
+ movq (%rsi), %rbp
+ movq %rbp, -24(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rdx, %r12
+ movq %rax, %rsi
+ movq %rbp, %rax
+ mulq %rdi
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %rdx, %r9
+ addq %rsi, %r9
+ adcq %r11, %r12
+ adcq %r10, %r14
+ adcq %r8, %rbx
+ movq %rbx, -120(%rsp) ## 8-byte Spill
+ adcq $0, %r15
+ movq %r15, -112(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %rdx
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq %rax, %rbp
+ imulq %rdx, %rbp
+ movq 32(%rcx), %rdx
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdx
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq %rdx, %r8
+ movq 24(%rcx), %rdx
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdx
+ movq %rax, %r13
+ movq %rdx, %rsi
+ movq 16(%rcx), %rdx
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdx
+ movq %rax, %r11
+ movq %rdx, %rbx
+ movq (%rcx), %rdi
+ movq %rdi, -16(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, -64(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq %rcx
+ movq %rdx, %r10
+ movq %rax, %r15
+ movq %rbp, %rax
+ mulq %rdi
+ movq %rdx, %rcx
+ addq %r15, %rcx
+ adcq %r11, %r10
+ adcq %r13, %rbx
+ adcq -8(%rsp), %rsi ## 8-byte Folded Reload
+ adcq $0, %r8
+ addq -128(%rsp), %rax ## 8-byte Folded Reload
+ adcq %r9, %rcx
+ adcq %r12, %r10
+ adcq %r14, %rbx
+ adcq -120(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -112(%rsp), %r8 ## 8-byte Folded Reload
+ sbbq %r15, %r15
+ andl $1, %r15d
+ movq -96(%rsp), %rax ## 8-byte Reload
+ movq 8(%rax), %rdi
+ movq %rdi, %rax
+ mulq -104(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -88(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, %r12
+ movq %rdi, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rax, %rdi
+ movq %rdx, %r11
+ addq %r12, %r11
+ adcq -128(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -120(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -112(%rsp), %r14 ## 8-byte Folded Reload
+ adcq $0, %r13
+ addq %rcx, %rdi
+ adcq %r10, %r11
+ adcq %rbx, %r9
+ adcq %rsi, %rbp
+ adcq %r8, %r14
+ adcq %r15, %r13
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %rdi, %rbx
+ imulq -72(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r15
+ movq %rbx, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r12
+ movq %rbx, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ addq %r12, %rbx
+ adcq %r15, %rcx
+ adcq -128(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -120(%rsp), %r8 ## 8-byte Folded Reload
+ adcq $0, %r10
+ addq %rdi, %rax
+ adcq %r11, %rbx
+ adcq %r9, %rcx
+ adcq %rbp, %rsi
+ adcq %r14, %r8
+ adcq %r13, %r10
+ adcq $0, -112(%rsp) ## 8-byte Folded Spill
+ movq -96(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rbp
+ movq %rbp, %rax
+ mulq -104(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq -88(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, %r14
+ movq %rbp, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r12
+ movq %rbp, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r15
+ movq %rdx, %rbp
+ addq %r12, %rbp
+ adcq %r14, %rdi
+ adcq -128(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -120(%rsp), %r9 ## 8-byte Folded Reload
+ adcq $0, %r13
+ addq %rbx, %r15
+ adcq %rcx, %rbp
+ adcq %rsi, %rdi
+ adcq %r8, %r11
+ adcq %r10, %r9
+ adcq -112(%rsp), %r13 ## 8-byte Folded Reload
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %r15, %rsi
+ imulq -72(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r8
+ movq %rsi, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ addq %r8, %r12
+ adcq -8(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -128(%rsp), %rcx ## 8-byte Folded Reload
+ adcq -120(%rsp), %r14 ## 8-byte Folded Reload
+ adcq $0, %r10
+ addq %r15, %rax
+ adcq %rbp, %r12
+ adcq %rdi, %rbx
+ adcq %r11, %rcx
+ adcq %r9, %r14
+ adcq %r13, %r10
+ adcq $0, -112(%rsp) ## 8-byte Folded Spill
+ movq -96(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rsi
+ movq %rsi, %rax
+ mulq -104(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -88(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r15
+ movq %rsi, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r13
+ movq %rsi, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r11
+ movq %rdx, %rsi
+ addq %r13, %rsi
+ adcq %r15, %rdi
+ adcq -128(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -120(%rsp), %r9 ## 8-byte Folded Reload
+ adcq $0, %r8
+ addq %r12, %r11
+ adcq %rbx, %rsi
+ adcq %rcx, %rdi
+ adcq %r14, %rbp
+ adcq %r10, %r9
+ adcq -112(%rsp), %r8 ## 8-byte Folded Reload
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %r11, %rbx
+ imulq -72(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %r14
+ movq %rbx, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r12
+ movq %rbx, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ addq %r12, %rbx
+ adcq %r14, %rcx
+ adcq -128(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -120(%rsp), %r10 ## 8-byte Folded Reload
+ adcq $0, %r13
+ addq %r11, %rax
+ adcq %rsi, %rbx
+ adcq %rdi, %rcx
+ adcq %rbp, %r15
+ adcq %r9, %r10
+ adcq %r8, %r13
+ movq -112(%rsp), %r8 ## 8-byte Reload
+ adcq $0, %r8
+ movq -96(%rsp), %rax ## 8-byte Reload
+ movq 32(%rax), %rsi
+ movq %rsi, %rax
+ mulq -104(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -88(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %rdi
+ movq %rsi, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r14
+ movq %rdx, %rbp
+ addq %rdi, %rbp
+ adcq -88(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -80(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -104(%rsp), %r9 ## 8-byte Folded Reload
+ movq -96(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %rbx, %r14
+ adcq %rcx, %rbp
+ adcq %r15, %r12
+ adcq %r10, %r11
+ adcq %r13, %r9
+ adcq %r8, %rax
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ sbbq %rcx, %rcx
+ movq -72(%rsp), %rdi ## 8-byte Reload
+ imulq %r14, %rdi
+ movq %rdi, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r13
+ movq %rdi, %rax
+ movq %rdi, %r15
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r10
+ movq %r15, %rax
+ movq -16(%rsp), %r15 ## 8-byte Reload
+ mulq %r15
+ addq %r10, %rdx
+ adcq %r13, %rdi
+ adcq -104(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -72(%rsp), %rbx ## 8-byte Folded Reload
+ adcq $0, %r8
+ andl $1, %ecx
+ addq %r14, %rax
+ adcq %rbp, %rdx
+ adcq %r12, %rdi
+ adcq %r11, %rsi
+ adcq %r9, %rbx
+ adcq -96(%rsp), %r8 ## 8-byte Folded Reload
+ adcq $0, %rcx
+ movq %rdx, %rax
+ subq %r15, %rax
+ movq %rdi, %rbp
+ sbbq -64(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rsi, %r9
+ sbbq -56(%rsp), %r9 ## 8-byte Folded Reload
+ movq %rbx, %r10
+ sbbq -48(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r8, %r11
+ sbbq -40(%rsp), %r11 ## 8-byte Folded Reload
+ sbbq $0, %rcx
+ andl $1, %ecx
+ cmovneq %rbx, %r10
+ testb %cl, %cl
+ cmovneq %rdx, %rax
+ movq (%rsp), %rcx ## 8-byte Reload
+ movq %rax, (%rcx)
+ cmovneq %rdi, %rbp
+ movq %rbp, 8(%rcx)
+ cmovneq %rsi, %r9
+ movq %r9, 16(%rcx)
+ movq %r10, 24(%rcx)
+ cmovneq %r8, %r11
+ movq %r11, 32(%rcx)
+ addq $8, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
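+## 5-limb Montgomery multiplication variant whose final reduction is selected
+## by the sign of a trial subtraction (cmovs) rather than a carry test.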
+ .globl _mcl_fp_montNF5L
+ .p2align 4, 0x90
+_mcl_fp_montNF5L: ## @mcl_fp_montNF5L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq 32(%rsi), %rax
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq (%rdx), %rbp
+ mulq %rbp
+ movq %rax, %r8
+ movq %rdx, %r13
+ movq 24(%rsi), %rax
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ mulq %rbp
+ movq %rax, %r10
+ movq %rdx, %r11
+ movq 16(%rsi), %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ mulq %rbp
+ movq %rax, %r15
+ movq %rdx, %r9
+ movq (%rsi), %rdi
+ movq %rdi, -48(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ mulq %rbp
+ movq %rdx, %r12
+ movq %rax, %rbx
+ movq %rdi, %rax
+ mulq %rbp
+ movq %rax, %r14
+ movq %rdx, %rbp
+ addq %rbx, %rbp
+ adcq %r15, %r12
+ adcq %r10, %r9
+ adcq %r8, %r11
+ adcq $0, %r13
+ movq -8(%rcx), %rax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %r14, %rsi
+ imulq %rax, %rsi
+ movq 32(%rcx), %rdx
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq %rdx
+ movq %rax, %r10
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq 24(%rcx), %rdx
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq %rdx
+ movq %rax, %rbx
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq 16(%rcx), %rdx
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq %rdx
+ movq %rax, %r8
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ movq (%rcx), %rdi
+ movq %rdi, -40(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, -24(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq %rcx
+ movq %rdx, %r15
+ movq %rax, %rcx
+ movq %rsi, %rax
+ mulq %rdi
+ addq %r14, %rax
+ adcq %rbp, %rcx
+ adcq %r12, %r8
+ adcq %r9, %rbx
+ adcq %r11, %r10
+ adcq $0, %r13
+ addq %rdx, %rcx
+ adcq %r15, %r8
+ adcq -16(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -128(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -120(%rsp), %r13 ## 8-byte Folded Reload
+ movq -104(%rsp), %rax ## 8-byte Reload
+ movq 8(%rax), %rsi
+ movq %rsi, %rax
+ mulq -112(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -96(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, %rdi
+ movq %rsi, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, %r14
+ movq %rsi, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rax, %rsi
+ movq %rdx, %r15
+ addq %r14, %r15
+ adcq %rdi, %r11
+ adcq -128(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -120(%rsp), %rbp ## 8-byte Folded Reload
+ adcq $0, %r12
+ addq %rcx, %rsi
+ adcq %r8, %r15
+ adcq %rbx, %r11
+ adcq %r10, %r9
+ adcq %r13, %rbp
+ adcq $0, %r12
+ movq %rsi, %rdi
+ imulq -88(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, %r13
+ movq %rdi, %rax
+ mulq -72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, %r8
+ movq %rdi, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r14
+ movq %rdi, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r10
+ movq %rdi, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ addq %rsi, %rax
+ adcq %r15, %r10
+ adcq %r11, %r14
+ adcq %r9, %r8
+ adcq %rbp, %r13
+ adcq $0, %r12
+ addq %rdx, %r10
+ adcq %rbx, %r14
+ adcq %rcx, %r8
+ adcq -128(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -120(%rsp), %r12 ## 8-byte Folded Reload
+ movq -104(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rsi
+ movq %rsi, %rax
+ mulq -112(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -96(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %rbx
+ movq %rsi, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %rbp
+ movq %rsi, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r11
+ movq %rdx, %rsi
+ addq %rbp, %rsi
+ adcq %rbx, %rcx
+ adcq -128(%rsp), %rdi ## 8-byte Folded Reload
+ adcq -120(%rsp), %r9 ## 8-byte Folded Reload
+ adcq $0, %r15
+ addq %r10, %r11
+ adcq %r14, %rsi
+ adcq %r8, %rcx
+ adcq %r13, %rdi
+ adcq %r12, %r9
+ adcq $0, %r15
+ movq %r11, %rbx
+ imulq -88(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, %r13
+ movq %rbx, %rax
+ mulq -72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, %r8
+ movq %rbx, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, %r10
+ movq %rbx, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %rbp
+ movq %rbx, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ addq %r11, %rax
+ adcq %rsi, %rbp
+ adcq %rcx, %r10
+ adcq %rdi, %r8
+ adcq %r9, %r13
+ adcq $0, %r15
+ addq %rdx, %rbp
+ adcq %r12, %r10
+ adcq %r14, %r8
+ adcq -128(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -120(%rsp), %r15 ## 8-byte Folded Reload
+ movq -104(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rsi
+ movq %rsi, %rax
+ mulq -112(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -96(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %rbx
+ movq %rsi, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r12
+ movq %rsi, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r14
+ movq %rdx, %rsi
+ addq %r12, %rsi
+ adcq %rbx, %rcx
+ adcq -128(%rsp), %rdi ## 8-byte Folded Reload
+ adcq -120(%rsp), %r9 ## 8-byte Folded Reload
+ adcq $0, %r11
+ addq %rbp, %r14
+ adcq %r10, %rsi
+ adcq %r8, %rcx
+ adcq %r13, %rdi
+ adcq %r15, %r9
+ adcq $0, %r11
+ movq %r14, %rbx
+ imulq -88(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, %r13
+ movq %rbx, %rax
+ mulq -72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, %r8
+ movq %rbx, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %r10
+ movq %rbx, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %rbp
+ movq %rbx, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ addq %r14, %rax
+ adcq %rsi, %rbp
+ adcq %rcx, %r10
+ adcq %rdi, %r8
+ adcq %r9, %r13
+ adcq $0, %r11
+ addq %rdx, %rbp
+ adcq %r12, %r10
+ adcq %r15, %r8
+ adcq -128(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -120(%rsp), %r11 ## 8-byte Folded Reload
+ movq -104(%rsp), %rax ## 8-byte Reload
+ movq 32(%rax), %rcx
+ movq %rcx, %rax
+ mulq -112(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -96(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %rsi
+ movq %rcx, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r12
+ movq %rdx, %rdi
+ addq %rsi, %rdi
+ adcq -96(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -112(%rsp), %r14 ## 8-byte Folded Reload
+ adcq -104(%rsp), %r9 ## 8-byte Folded Reload
+ adcq $0, %rbx
+ addq %rbp, %r12
+ adcq %r10, %rdi
+ adcq %r8, %r15
+ adcq %r13, %r14
+ adcq %r11, %r9
+ adcq $0, %rbx
+ movq -88(%rsp), %r8 ## 8-byte Reload
+ imulq %r12, %r8
+ movq %r8, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rax, %rcx
+ movq %r8, %rax
+ mulq -72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq %rax, %rbp
+ movq %r8, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, %rsi
+ movq %r8, %rax
+ movq %r8, %r13
+ movq -40(%rsp), %r10 ## 8-byte Reload
+ mulq %r10
+ movq %rdx, %r11
+ movq %rax, %r8
+ movq %r13, %rax
+ movq -24(%rsp), %r13 ## 8-byte Reload
+ mulq %r13
+ addq %r12, %r8
+ adcq %rdi, %rax
+ adcq %r15, %rsi
+ adcq %r14, %rbp
+ adcq %r9, %rcx
+ adcq $0, %rbx
+ addq %r11, %rax
+ adcq %rdx, %rsi
+ adcq -112(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -104(%rsp), %rcx ## 8-byte Folded Reload
+ adcq -88(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rax, %r11
+ subq %r10, %r11
+ movq %rsi, %r10
+ sbbq %r13, %r10
+ movq %rbp, %r8
+ sbbq -80(%rsp), %r8 ## 8-byte Folded Reload
+ movq %rcx, %r9
+ sbbq -72(%rsp), %r9 ## 8-byte Folded Reload
+ movq %rbx, %rdx
+ sbbq -64(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ sarq $63, %rdi
+ cmovsq %rax, %r11
+ movq -8(%rsp), %rax ## 8-byte Reload
+ movq %r11, (%rax)
+ cmovsq %rsi, %r10
+ movq %r10, 8(%rax)
+ cmovsq %rbp, %r8
+ movq %r8, 16(%rax)
+ cmovsq %rcx, %r9
+ movq %r9, 24(%rax)
+ cmovsq %rbx, %rdx
+ movq %rdx, 32(%rax)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
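+## Montgomery reduction: folds the double-width (10-limb) value at %rsi modulo
+## the modulus at %rdx, using the constant stored just below the modulus
+## (-8 offset), and writes the 5-limb result to %rdi.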
+ .globl _mcl_fp_montRed5L
+ .p2align 4, 0x90
+_mcl_fp_montRed5L: ## @mcl_fp_montRed5L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %rax
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq (%rsi), %r9
+ movq %r9, %rdi
+ imulq %rax, %rdi
+ movq 32(%rcx), %rdx
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq %rdx
+ movq %rax, %r8
+ movq %rdx, %r13
+ movq 24(%rcx), %rdx
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq %rdx
+ movq %rax, %r11
+ movq %rdx, %r10
+ movq 16(%rcx), %rdx
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq %rdx
+ movq %rax, %r14
+ movq %rdx, %r15
+ movq (%rcx), %rbp
+ movq %rbp, -40(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, -72(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq %rcx
+ movq %rdx, %r12
+ movq %rax, %rbx
+ movq %rdi, %rax
+ mulq %rbp
+ movq %rdx, %rcx
+ addq %rbx, %rcx
+ adcq %r14, %r12
+ adcq %r11, %r15
+ adcq %r8, %r10
+ adcq $0, %r13
+ addq %r9, %rax
+ movq 72(%rsi), %rax
+ movq 64(%rsi), %rdx
+ adcq 8(%rsi), %rcx
+ adcq 16(%rsi), %r12
+ adcq 24(%rsi), %r15
+ adcq 32(%rsi), %r10
+ adcq 40(%rsi), %r13
+ movq %r13, -112(%rsp) ## 8-byte Spill
+ movq 56(%rsi), %rdi
+ movq 48(%rsi), %rsi
+ adcq $0, %rsi
+ movq %rsi, -24(%rsp) ## 8-byte Spill
+ adcq $0, %rdi
+ movq %rdi, -64(%rsp) ## 8-byte Spill
+ adcq $0, %rdx
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ adcq $0, %rax
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ sbbq %r8, %r8
+ andl $1, %r8d
+ movq %rcx, %rsi
+ movq -104(%rsp), %r9 ## 8-byte Reload
+ imulq %r9, %rsi
+ movq %rsi, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -88(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %rdi
+ movq %rsi, %rax
+ mulq -72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %rbp
+ movq %rsi, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ addq %rbp, %rsi
+ adcq %rdi, %rbx
+ adcq -16(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -32(%rsp), %r14 ## 8-byte Folded Reload
+ adcq $0, %r11
+ addq %rcx, %rax
+ adcq %r12, %rsi
+ adcq %r15, %rbx
+ adcq %r10, %r13
+ adcq -112(%rsp), %r14 ## 8-byte Folded Reload
+ adcq -24(%rsp), %r11 ## 8-byte Folded Reload
+ adcq $0, -64(%rsp) ## 8-byte Folded Spill
+ adcq $0, -96(%rsp) ## 8-byte Folded Spill
+ adcq $0, -48(%rsp) ## 8-byte Folded Spill
+ adcq $0, %r8
+ movq %rsi, %rcx
+ imulq %r9, %rcx
+ movq %rcx, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -88(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ movq -56(%rsp), %r9 ## 8-byte Reload
+ mulq %r9
+ movq %rdx, %r15
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ addq %rdi, %rcx
+ adcq -32(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -24(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -112(%rsp), %r10 ## 8-byte Folded Reload
+ adcq $0, %rbp
+ addq %rsi, %rax
+ adcq %rbx, %rcx
+ adcq %r13, %r12
+ adcq %r14, %r15
+ adcq %r11, %r10
+ adcq -64(%rsp), %rbp ## 8-byte Folded Reload
+ adcq $0, -96(%rsp) ## 8-byte Folded Spill
+ adcq $0, -48(%rsp) ## 8-byte Folded Spill
+ adcq $0, %r8
+ movq %rcx, %rsi
+ imulq -104(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -88(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq %r9
+ movq %rdx, %r13
+ movq %rax, %rbx
+ movq %rsi, %rax
+ mulq -72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, %rdi
+ movq %rsi, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ addq %rdi, %rsi
+ adcq %rbx, %r9
+ adcq -112(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -64(%rsp), %r14 ## 8-byte Folded Reload
+ adcq $0, %r11
+ addq %rcx, %rax
+ adcq %r12, %rsi
+ adcq %r15, %r9
+ adcq %r10, %r13
+ adcq %rbp, %r14
+ adcq -96(%rsp), %r11 ## 8-byte Folded Reload
+ adcq $0, -48(%rsp) ## 8-byte Folded Spill
+ adcq $0, %r8
+ movq -104(%rsp), %rdi ## 8-byte Reload
+ imulq %rsi, %rdi
+ movq %rdi, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -88(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r15
+ movq %rdi, %rax
+ movq %rdi, %r10
+ mulq -72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r12
+ movq %r10, %rax
+ movq -40(%rsp), %r10 ## 8-byte Reload
+ mulq %r10
+ addq %r12, %rdx
+ adcq %r15, %rdi
+ adcq -104(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -96(%rsp), %rcx ## 8-byte Folded Reload
+ adcq $0, %rbp
+ addq %rsi, %rax
+ adcq %r9, %rdx
+ adcq %r13, %rdi
+ adcq %r14, %rbx
+ adcq %r11, %rcx
+ adcq -48(%rsp), %rbp ## 8-byte Folded Reload
+ adcq $0, %r8
+ movq %rdx, %rax
+ subq %r10, %rax
+ movq %rdi, %rsi
+ sbbq -72(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rbx, %r9
+ sbbq -56(%rsp), %r9 ## 8-byte Folded Reload
+ movq %rcx, %r10
+ sbbq -88(%rsp), %r10 ## 8-byte Folded Reload
+ movq %rbp, %r11
+ sbbq -80(%rsp), %r11 ## 8-byte Folded Reload
+ sbbq $0, %r8
+ andl $1, %r8d
+ cmovneq %rbp, %r11
+ testb %r8b, %r8b
+ cmovneq %rdx, %rax
+ movq -8(%rsp), %rdx ## 8-byte Reload
+ movq %rax, (%rdx)
+ cmovneq %rdi, %rsi
+ movq %rsi, 8(%rdx)
+ cmovneq %rbx, %r9
+ movq %r9, 16(%rdx)
+ cmovneq %rcx, %r10
+ movq %r10, 24(%rdx)
+ movq %r11, 32(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
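+## 5-limb counterpart of _mcl_fp_addPre4L: add without reduction, carry returned in %rax.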
+ .globl _mcl_fp_addPre5L
+ .p2align 4, 0x90
+_mcl_fp_addPre5L: ## @mcl_fp_addPre5L
+## BB#0:
+ movq 32(%rdx), %r8
+ movq 24(%rdx), %r9
+ movq 24(%rsi), %r11
+ movq 32(%rsi), %r10
+ movq 16(%rdx), %rcx
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rcx
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rcx, 16(%rdi)
+ adcq %r9, %r11
+ movq %r11, 24(%rdi)
+ adcq %r8, %r10
+ movq %r10, 32(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+
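+## 5-limb counterpart of _mcl_fp_subPre4L: subtract without reduction, borrow returned in %rax.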
+ .globl _mcl_fp_subPre5L
+ .p2align 4, 0x90
+_mcl_fp_subPre5L: ## @mcl_fp_subPre5L
+## BB#0:
+ pushq %rbx
+ movq 32(%rsi), %r10
+ movq 24(%rdx), %r8
+ movq 32(%rdx), %r9
+ movq 24(%rsi), %r11
+ movq 16(%rsi), %rcx
+ movq (%rsi), %rbx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %rcx
+ movq %rbx, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %rcx, 16(%rdi)
+ sbbq %r8, %r11
+ movq %r11, 24(%rdi)
+ sbbq %r9, %r10
+ movq %r10, 32(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ popq %rbx
+ retq
+
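+## Shifts the 5-limb value at %rsi right by one bit and stores it at %rdi.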
+ .globl _mcl_fp_shr1_5L
+ .p2align 4, 0x90
+_mcl_fp_shr1_5L: ## @mcl_fp_shr1_5L
+## BB#0:
+ movq 32(%rsi), %r8
+ movq 24(%rsi), %rcx
+ movq 16(%rsi), %rdx
+ movq (%rsi), %rax
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rax
+ movq %rax, (%rdi)
+ shrdq $1, %rdx, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rcx, %rdx
+ movq %rdx, 16(%rdi)
+ shrdq $1, %r8, %rcx
+ movq %rcx, 24(%rdi)
+ shrq %r8
+ movq %r8, 32(%rdi)
+ retq
+
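+## 5-limb counterpart of _mcl_fp_add4L: add, then conditionally subtract the modulus at %rcx.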
+ .globl _mcl_fp_add5L
+ .p2align 4, 0x90
+_mcl_fp_add5L: ## @mcl_fp_add5L
+## BB#0:
+ pushq %rbx
+ movq 32(%rdx), %r11
+ movq 24(%rdx), %rbx
+ movq 24(%rsi), %r9
+ movq 32(%rsi), %r8
+ movq 16(%rdx), %r10
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r10
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r10, 16(%rdi)
+ adcq %rbx, %r9
+ movq %r9, 24(%rdi)
+ adcq %r11, %r8
+ movq %r8, 32(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r10
+ sbbq 24(%rcx), %r9
+ sbbq 32(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne LBB74_2
+## BB#1: ## %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r10, 16(%rdi)
+ movq %r9, 24(%rdi)
+ movq %r8, 32(%rdi)
+LBB74_2: ## %carry
+ popq %rbx
+ retq
+
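+## 5-limb counterpart of _mcl_fp_addNF4L: reduction selected by the sign of a trial subtraction.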
+ .globl _mcl_fp_addNF5L
+ .p2align 4, 0x90
+_mcl_fp_addNF5L: ## @mcl_fp_addNF5L
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq 32(%rdx), %r8
+ movq 24(%rdx), %r9
+ movq 16(%rdx), %r10
+ movq (%rdx), %r14
+ movq 8(%rdx), %r11
+ addq (%rsi), %r14
+ adcq 8(%rsi), %r11
+ adcq 16(%rsi), %r10
+ adcq 24(%rsi), %r9
+ adcq 32(%rsi), %r8
+ movq %r14, %rsi
+ subq (%rcx), %rsi
+ movq %r11, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r10, %rbx
+ sbbq 16(%rcx), %rbx
+ movq %r9, %r15
+ sbbq 24(%rcx), %r15
+ movq %r8, %rax
+ sbbq 32(%rcx), %rax
+ movq %rax, %rcx
+ sarq $63, %rcx
+ cmovsq %r14, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r11, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r10, %rbx
+ movq %rbx, 16(%rdi)
+ cmovsq %r9, %r15
+ movq %r15, 24(%rdi)
+ cmovsq %r8, %rax
+ movq %rax, 32(%rdi)
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+
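+## 5-limb counterpart of _mcl_fp_sub4L: subtract, adding the modulus back on borrow.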
+ .globl _mcl_fp_sub5L
+ .p2align 4, 0x90
+_mcl_fp_sub5L: ## @mcl_fp_sub5L
+## BB#0:
+ pushq %r14
+ pushq %rbx
+ movq 32(%rsi), %r8
+ movq 24(%rdx), %r11
+ movq 32(%rdx), %r14
+ movq 24(%rsi), %r9
+ movq 16(%rsi), %r10
+ movq (%rsi), %rax
+ movq 8(%rsi), %rsi
+ xorl %ebx, %ebx
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %r10
+ movq %rax, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %r10, 16(%rdi)
+ sbbq %r11, %r9
+ movq %r9, 24(%rdi)
+ sbbq %r14, %r8
+ movq %r8, 32(%rdi)
+ sbbq $0, %rbx
+ testb $1, %bl
+ je LBB76_2
+## BB#1: ## %carry
+ movq 32(%rcx), %r11
+ movq 24(%rcx), %r14
+ movq 8(%rcx), %rdx
+ movq 16(%rcx), %rbx
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %rsi, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r10, %rbx
+ movq %rbx, 16(%rdi)
+ adcq %r9, %r14
+ movq %r14, 24(%rdi)
+ adcq %r8, %r11
+ movq %r11, 32(%rdi)
+LBB76_2: ## %nocarry
+ popq %rbx
+ popq %r14
+ retq
+
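+## 5-limb counterpart of _mcl_fp_subNF4L: the sign of the result masks the modulus
+## at %rcx before it is added back.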
+ .globl _mcl_fp_subNF5L
+ .p2align 4, 0x90
+_mcl_fp_subNF5L: ## @mcl_fp_subNF5L
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 32(%rsi), %r13
+ movdqu (%rdx), %xmm0
+ movdqu 16(%rdx), %xmm1
+ pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1]
+ movd %xmm2, %r10
+ movdqu (%rsi), %xmm2
+ movdqu 16(%rsi), %xmm3
+ pshufd $78, %xmm3, %xmm4 ## xmm4 = xmm3[2,3,0,1]
+ movd %xmm4, %r8
+ movd %xmm1, %r11
+ movd %xmm3, %r9
+ pshufd $78, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,0,1]
+ movd %xmm1, %r14
+ pshufd $78, %xmm2, %xmm1 ## xmm1 = xmm2[2,3,0,1]
+ movd %xmm1, %r15
+ movd %xmm0, %rbx
+ movd %xmm2, %r12
+ subq %rbx, %r12
+ sbbq %r14, %r15
+ sbbq %r11, %r9
+ sbbq %r10, %r8
+ sbbq 32(%rdx), %r13
+ movq %r13, %rdx
+ sarq $63, %rdx
+ movq %rdx, %rbx
+ shldq $1, %r13, %rbx
+ movq 8(%rcx), %rsi
+ andq %rbx, %rsi
+ andq (%rcx), %rbx
+ movq 32(%rcx), %r10
+ andq %rdx, %r10
+ movq 24(%rcx), %rax
+ andq %rdx, %rax
+ rolq %rdx
+ andq 16(%rcx), %rdx
+ addq %r12, %rbx
+ movq %rbx, (%rdi)
+ adcq %r15, %rsi
+ movq %rsi, 8(%rdi)
+ adcq %r9, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r8, %rax
+ movq %rax, 24(%rdi)
+ adcq %r13, %r10
+ movq %r10, 32(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+
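+## Double-width (10-limb) add: low 5 limbs stored directly, high 5 limbs
+## conditionally reduced by the modulus at %rcx.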
+ .globl _mcl_fpDbl_add5L
+ .p2align 4, 0x90
+_mcl_fpDbl_add5L: ## @mcl_fpDbl_add5L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 72(%rdx), %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq 64(%rdx), %r11
+ movq 56(%rdx), %r14
+ movq 48(%rdx), %r15
+ movq 24(%rsi), %rbp
+ movq 32(%rsi), %r13
+ movq 16(%rdx), %r12
+ movq (%rdx), %rbx
+ movq 8(%rdx), %rax
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %rax
+ adcq 16(%rsi), %r12
+ adcq 24(%rdx), %rbp
+ adcq 32(%rdx), %r13
+ movq 40(%rdx), %r9
+ movq %rbx, (%rdi)
+ movq 72(%rsi), %r8
+ movq %rax, 8(%rdi)
+ movq 64(%rsi), %r10
+ movq %r12, 16(%rdi)
+ movq 56(%rsi), %r12
+ movq %rbp, 24(%rdi)
+ movq 48(%rsi), %rbp
+ movq 40(%rsi), %rbx
+ movq %r13, 32(%rdi)
+ adcq %r9, %rbx
+ adcq %r15, %rbp
+ adcq %r14, %r12
+ adcq %r11, %r10
+ adcq -8(%rsp), %r8 ## 8-byte Folded Reload
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq %rbx, %rax
+ subq (%rcx), %rax
+ movq %rbp, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r12, %r9
+ sbbq 16(%rcx), %r9
+ movq %r10, %r11
+ sbbq 24(%rcx), %r11
+ movq %r8, %r14
+ sbbq 32(%rcx), %r14
+ sbbq $0, %rsi
+ andl $1, %esi
+ cmovneq %rbx, %rax
+ movq %rax, 40(%rdi)
+ testb %sil, %sil
+ cmovneq %rbp, %rdx
+ movq %rdx, 48(%rdi)
+ cmovneq %r12, %r9
+ movq %r9, 56(%rdi)
+ cmovneq %r10, %r11
+ movq %r11, 64(%rdi)
+ cmovneq %r8, %r14
+ movq %r14, 72(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
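+## Double-width (10-limb) subtract: low 5 limbs stored directly; the borrow into
+## the high half is compensated with the modulus at %rcx.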
+ .globl _mcl_fpDbl_sub5L
+ .p2align 4, 0x90
+_mcl_fpDbl_sub5L: ## @mcl_fpDbl_sub5L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 72(%rdx), %r9
+ movq 64(%rdx), %r10
+ movq 56(%rdx), %r14
+ movq 16(%rsi), %r8
+ movq (%rsi), %r15
+ movq 8(%rsi), %r11
+ xorl %eax, %eax
+ subq (%rdx), %r15
+ sbbq 8(%rdx), %r11
+ sbbq 16(%rdx), %r8
+ movq 24(%rsi), %r12
+ sbbq 24(%rdx), %r12
+ movq %r15, (%rdi)
+ movq 32(%rsi), %rbx
+ sbbq 32(%rdx), %rbx
+ movq %r11, 8(%rdi)
+ movq 48(%rdx), %r15
+ movq 40(%rdx), %rdx
+ movq %r8, 16(%rdi)
+ movq 72(%rsi), %r8
+ movq %r12, 24(%rdi)
+ movq 64(%rsi), %r11
+ movq %rbx, 32(%rdi)
+ movq 40(%rsi), %rbp
+ sbbq %rdx, %rbp
+ movq 56(%rsi), %r12
+ movq 48(%rsi), %r13
+ sbbq %r15, %r13
+ sbbq %r14, %r12
+ sbbq %r10, %r11
+ sbbq %r9, %r8
+ movl $0, %edx
+ sbbq $0, %rdx
+ andl $1, %edx
+ movq (%rcx), %rsi
+ cmoveq %rax, %rsi
+ testb %dl, %dl
+ movq 16(%rcx), %rdx
+ cmoveq %rax, %rdx
+ movq 8(%rcx), %rbx
+ cmoveq %rax, %rbx
+ movq 32(%rcx), %r9
+ cmoveq %rax, %r9
+ cmovneq 24(%rcx), %rax
+ addq %rbp, %rsi
+ movq %rsi, 40(%rdi)
+ adcq %r13, %rbx
+ movq %rbx, 48(%rdi)
+ adcq %r12, %rdx
+ movq %rdx, 56(%rdi)
+ adcq %r11, %rax
+ movq %rax, 64(%rdi)
+ adcq %r8, %r9
+ movq %r9, 72(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
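+## Multiplies the 6-limb operand at %rsi by the 64-bit scalar in %rdx and
+## stores the 7-limb product at %rdi.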
+ .globl _mcl_fp_mulUnitPre6L
+ .p2align 4, 0x90
+_mcl_fp_mulUnitPre6L: ## @mcl_fp_mulUnitPre6L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rcx, %rax
+ mulq 40(%rsi)
+ movq %rdx, %r9
+ movq %rax, %r8
+ movq %rcx, %rax
+ mulq 32(%rsi)
+ movq %rdx, %r10
+ movq %rax, %r11
+ movq %rcx, %rax
+ mulq 24(%rsi)
+ movq %rdx, %r15
+ movq %rax, %r14
+ movq %rcx, %rax
+ mulq 16(%rsi)
+ movq %rdx, %r13
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq 8(%rsi)
+ movq %rdx, %rbx
+ movq %rax, %rbp
+ movq %rcx, %rax
+ mulq (%rsi)
+ movq %rax, (%rdi)
+ addq %rbp, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r12, %rbx
+ movq %rbx, 16(%rdi)
+ adcq %r14, %r13
+ movq %r13, 24(%rdi)
+ adcq %r11, %r15
+ movq %r15, 32(%rdi)
+ adcq %r8, %r10
+ movq %r10, 40(%rdi)
+ adcq $0, %r9
+ movq %r9, 48(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
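+## Schoolbook 6x6-limb multiplication: operands at %rsi and %rdx, 12-limb
+## product at %rdi, no modular reduction.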
+ .globl _mcl_fpDbl_mulPre6L
+ .p2align 4, 0x90
+_mcl_fpDbl_mulPre6L: ## @mcl_fpDbl_mulPre6L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ movq %rsi, %r12
+ movq %rdi, -16(%rsp) ## 8-byte Spill
+ movq (%r12), %rax
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq (%rdx), %rsi
+ mulq %rsi
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq 24(%r12), %rbp
+ movq %rbp, -104(%rsp) ## 8-byte Spill
+ movq 32(%r12), %rbx
+ movq 40(%r12), %r11
+ movq %rax, (%rdi)
+ movq %r11, %rax
+ mulq %rsi
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq %rsi
+ movq %rdx, %rcx
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq %rsi
+ movq %rax, %r9
+ movq %rdx, %rdi
+ movq 16(%r12), %r8
+ movq %r8, %rax
+ mulq %rsi
+ movq %rax, %r14
+ movq %rdx, %rbp
+ movq 8(%r12), %r10
+ movq %r10, %rax
+ mulq %rsi
+ movq %rdx, %r15
+ movq %rax, %r13
+ addq -88(%rsp), %r13 ## 8-byte Folded Reload
+ adcq %r14, %r15
+ adcq %r9, %rbp
+ adcq -112(%rsp), %rdi ## 8-byte Folded Reload
+ adcq -96(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -112(%rsp) ## 8-byte Spill
+ movq -120(%rsp), %rsi ## 8-byte Reload
+ adcq $0, %rsi
+ movq -64(%rsp), %r9 ## 8-byte Reload
+ movq 8(%r9), %rcx
+ movq %r11, %rax
+ mulq %rcx
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq %rcx
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, %r11
+ movq -104(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq %rax, %r14
+ movq %r8, %rax
+ mulq %rcx
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq %rax, %r8
+ movq %r10, %rax
+ mulq %rcx
+ movq %rdx, %r10
+ movq %rax, %rbx
+ movq -72(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ addq %r13, %rax
+ movq -16(%rsp), %r13 ## 8-byte Reload
+ movq %rax, 8(%r13)
+ adcq %r15, %rbx
+ adcq %rbp, %r8
+ adcq %rdi, %r14
+ adcq -112(%rsp), %r11 ## 8-byte Folded Reload
+ movq -120(%rsp), %rax ## 8-byte Reload
+ adcq %rsi, %rax
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ addq %rdx, %rbx
+ adcq %r10, %r8
+ adcq -80(%rsp), %r14 ## 8-byte Folded Reload
+ adcq -104(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -96(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ adcq -88(%rsp), %rsi ## 8-byte Folded Reload
+ movq 40(%r12), %rax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq 16(%r9), %rcx
+ mulq %rcx
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq 32(%r12), %rax
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ mulq %rcx
+ movq %rax, %r10
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ movq 24(%r12), %rax
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ mulq %rcx
+ movq %rax, %r9
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ movq 16(%r12), %rax
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ mulq %rcx
+ movq %rax, %rbp
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq 8(%r12), %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ mulq %rcx
+ movq %rax, %rdi
+ movq %rdx, %r15
+ movq (%r12), %rax
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ mulq %rcx
+ addq %rbx, %rax
+ movq %rax, 16(%r13)
+ adcq %r8, %rdi
+ adcq %r14, %rbp
+ adcq %r11, %r9
+ adcq -120(%rsp), %r10 ## 8-byte Folded Reload
+ movq -72(%rsp), %rax ## 8-byte Reload
+ adcq %rsi, %rax
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %rdx, %rdi
+ adcq %r15, %rbp
+ adcq -56(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -48(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -40(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ adcq -112(%rsp), %rcx ## 8-byte Folded Reload
+ movq -64(%rsp), %rbx ## 8-byte Reload
+ movq 24(%rbx), %rsi
+ movq -88(%rsp), %rax ## 8-byte Reload
+ mulq %rsi
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq -96(%rsp), %rax ## 8-byte Reload
+ mulq %rsi
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, %r14
+ movq -104(%rsp), %rax ## 8-byte Reload
+ mulq %rsi
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq %rax, %r15
+ movq -80(%rsp), %rax ## 8-byte Reload
+ mulq %rsi
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, %r13
+ movq -32(%rsp), %rax ## 8-byte Reload
+ mulq %rsi
+ movq %rdx, %r8
+ movq %rax, %r11
+ movq -24(%rsp), %rax ## 8-byte Reload
+ mulq %rsi
+ addq %rdi, %rax
+ movq -16(%rsp), %rsi ## 8-byte Reload
+ movq %rax, 24(%rsi)
+ adcq %rbp, %r11
+ adcq %r9, %r13
+ adcq %r10, %r15
+ adcq -72(%rsp), %r14 ## 8-byte Folded Reload
+ movq -120(%rsp), %rax ## 8-byte Reload
+ adcq %rcx, %rax
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %rdx, %r11
+ adcq %r8, %r13
+ adcq -112(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -104(%rsp), %r14 ## 8-byte Folded Reload
+ adcq -96(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ adcq -88(%rsp), %rcx ## 8-byte Folded Reload
+ movq 40(%r12), %rax
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq 32(%rbx), %rdi
+ mulq %rdi
+ movq %rax, %r9
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq 32(%r12), %rax
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %r10
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq 24(%r12), %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %r8
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq 16(%r12), %rax
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %rbx
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq (%r12), %rbp
+ movq 8(%r12), %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, %r12
+ movq %rbp, %rax
+ mulq %rdi
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ addq %r11, %rax
+ movq %rax, 32(%rsi)
+ adcq %r13, %r12
+ adcq %r15, %rbx
+ adcq %r14, %r8
+ adcq -120(%rsp), %r10 ## 8-byte Folded Reload
+ adcq %rcx, %r9
+ movq -64(%rsp), %rax ## 8-byte Reload
+ movq 40(%rax), %rcx
+ sbbq %rsi, %rsi
+ movq -80(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ movq -56(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, %r15
+ movq -8(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq %rax, %r11
+ movq %rbp, %rax
+ mulq %rcx
+ movq %rdx, %rbp
+ movq %rax, %rdi
+ movq -32(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, %r13
+ movq %rax, %r14
+ movq -40(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ andl $1, %esi
+ addq -48(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -112(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -104(%rsp), %r8 ## 8-byte Folded Reload
+ adcq -96(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -88(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -72(%rsp), %rsi ## 8-byte Folded Reload
+ addq %rdi, %r12
+ movq -16(%rsp), %rcx ## 8-byte Reload
+ movq %r12, 40(%rcx)
+ adcq %r11, %rbx
+ adcq %rax, %r8
+ adcq %r14, %r10
+ adcq %r15, %r9
+ adcq -24(%rsp), %rsi ## 8-byte Folded Reload
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %rbp, %rbx
+ movq %rbx, 48(%rcx)
+ adcq -80(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, 56(%rcx)
+ adcq %rdx, %r10
+ movq %r10, 64(%rcx)
+ adcq %r13, %r9
+ movq %r9, 72(%rcx)
+ adcq -120(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 80(%rcx)
+ adcq -64(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 88(%rcx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
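+## Squares the 6-limb operand at %rsi into a 12-limb result at %rdi, no reduction.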
+ .globl _mcl_fpDbl_sqrPre6L
+ .p2align 4, 0x90
+_mcl_fpDbl_sqrPre6L: ## @mcl_fpDbl_sqrPre6L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, -48(%rsp) ## 8-byte Spill
+ movq 16(%rsi), %r8
+ movq %r8, -120(%rsp) ## 8-byte Spill
+ movq 24(%rsi), %r11
+ movq %r11, -112(%rsp) ## 8-byte Spill
+ movq 32(%rsi), %r12
+ movq 40(%rsi), %r9
+ movq (%rsi), %rcx
+ movq %rcx, %rax
+ mulq %rcx
+ movq %rdx, %rbp
+ movq %rax, (%rdi)
+ movq %r9, %rax
+ mulq %rcx
+ movq %rdx, %rbx
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %r12, %rax
+ mulq %rcx
+ movq %rdx, %r10
+ movq %rax, %r13
+ movq %r11, %rax
+ mulq %rcx
+ movq %rdx, %rdi
+ movq %rax, %r15
+ movq %r8, %rax
+ mulq %rcx
+ movq %rax, %r11
+ movq %rdx, %r14
+ movq 8(%rsi), %r8
+ movq %r8, %rax
+ mulq %rcx
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq %rax, %rcx
+ addq %rcx, %rbp
+ adcq %rdx, %r11
+ adcq %r15, %r14
+ adcq %r13, %rdi
+ adcq -128(%rsp), %r10 ## 8-byte Folded Reload
+ adcq $0, %rbx
+ movq %rbx, -72(%rsp) ## 8-byte Spill
+ movq %r9, %rax
+ mulq %r8
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, %r13
+ movq %r12, %rax
+ mulq %r8
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ movq %rax, %r9
+ movq -112(%rsp), %rax ## 8-byte Reload
+ mulq %r8
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, %r15
+ movq -120(%rsp), %rax ## 8-byte Reload
+ mulq %r8
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, %r12
+ movq %r8, %rax
+ mulq %r8
+ movq %rax, %rbx
+ addq %rcx, %rbp
+ movq -48(%rsp), %rax ## 8-byte Reload
+ movq %rbp, 8(%rax)
+ adcq %r11, %rbx
+ adcq %r14, %r12
+ adcq %rdi, %r15
+ adcq %r10, %r9
+ movq %r13, %rax
+ adcq -72(%rsp), %rax ## 8-byte Folded Reload
+ sbbq %r13, %r13
+ andl $1, %r13d
+ addq -56(%rsp), %rbx ## 8-byte Folded Reload
+ adcq %rdx, %r12
+ adcq -120(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -112(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -64(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ adcq -128(%rsp), %r13 ## 8-byte Folded Reload
+ movq 40(%rsi), %rax
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ movq 16(%rsi), %rdi
+ mulq %rdi
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq 32(%rsi), %rax
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %r11
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq 24(%rsi), %rbp
+ movq %rbp, %rax
+ mulq %rdi
+ movq %rax, %r8
+ movq %r8, -24(%rsp) ## 8-byte Spill
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %r10
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ movq (%rsi), %rax
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq %rax, %r14
+ movq %rdi, %rax
+ mulq %rdi
+ movq %rax, %rcx
+ addq %rbx, %r14
+ movq -48(%rsp), %rax ## 8-byte Reload
+ movq %r14, 16(%rax)
+ adcq %r12, %r10
+ adcq %r15, %rcx
+ adcq %r8, %r9
+ adcq -88(%rsp), %r11 ## 8-byte Folded Reload
+ movq -96(%rsp), %r8 ## 8-byte Reload
+ adcq %r13, %r8
+ sbbq %rdi, %rdi
+ andl $1, %edi
+ addq -104(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -32(%rsp), %rcx ## 8-byte Folded Reload
+ adcq %rdx, %r9
+ adcq -128(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -80(%rsp), %r8 ## 8-byte Folded Reload
+ adcq -112(%rsp), %rdi ## 8-byte Folded Reload
+ movq -56(%rsp), %rax ## 8-byte Reload
+ mulq %rbp
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq %rax, %r12
+ movq -64(%rsp), %rax ## 8-byte Reload
+ mulq %rbp
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rax, %r15
+ movq -120(%rsp), %rax ## 8-byte Reload
+ mulq %rbp
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, %r14
+ movq -72(%rsp), %rax ## 8-byte Reload
+ mulq %rbp
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ movq %rax, %rbx
+ movq %rbp, %rax
+ mulq %rbp
+ movq %rax, %r13
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ addq %r10, %rbx
+ movq -48(%rsp), %rax ## 8-byte Reload
+ movq %rbx, 24(%rax)
+ adcq %rcx, %r14
+ adcq -24(%rsp), %r9 ## 8-byte Folded Reload
+ adcq %r11, %r13
+ adcq %r8, %r15
+ adcq %rdi, %r12
+ sbbq %rcx, %rcx
+ movq 8(%rsi), %rbp
+ movq 40(%rsi), %rbx
+ movq %rbp, %rax
+ mulq %rbx
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq (%rsi), %rdi
+ movq %rdi, %rax
+ mulq %rbx
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ movq 32(%rsi), %r10
+ movq %rbp, %rax
+ mulq %r10
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq %r10
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq %rdx, -24(%rsp) ## 8-byte Spill
+ andl $1, %ecx
+ addq -40(%rsp), %r14 ## 8-byte Folded Reload
+ adcq -96(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -128(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -104(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -88(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -80(%rsp), %rcx ## 8-byte Folded Reload
+ movq 24(%rsi), %rdi
+ movq %rdi, %rax
+ mulq %rbx
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq %r10
+ movq %rax, %rbp
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ movq 16(%rsi), %rsi
+ movq %rsi, %rax
+ mulq %rbx
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq %r10
+ movq %rdx, %r11
+ movq %rax, %rsi
+ movq %rbx, %rax
+ mulq %r10
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ movq %rax, %rdi
+ movq %rbx, %rax
+ mulq %rbx
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rax, %rbx
+ movq %r10, %rax
+ mulq %r10
+ movq %rdx, %r8
+ addq -8(%rsp), %r14 ## 8-byte Folded Reload
+ movq -48(%rsp), %rdx ## 8-byte Reload
+ movq %r14, 32(%rdx)
+ adcq -32(%rsp), %r9 ## 8-byte Folded Reload
+ adcq %r13, %rsi
+ adcq %r15, %rbp
+ adcq %r12, %rax
+ adcq %rdi, %rcx
+ sbbq %r10, %r10
+ andl $1, %r10d
+ addq -24(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -120(%rsp), %rsi ## 8-byte Folded Reload
+ adcq %r11, %rbp
+ adcq -40(%rsp), %rax ## 8-byte Folded Reload
+ adcq %r8, %rcx
+ movq -16(%rsp), %r8 ## 8-byte Reload
+ adcq %r8, %r10
+ addq -72(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, 40(%rdx)
+ adcq -112(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -104(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -96(%rsp), %rax ## 8-byte Folded Reload
+ adcq %rdi, %rcx
+ adcq %rbx, %r10
+ sbbq %rdi, %rdi
+ andl $1, %edi
+ addq -64(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 48(%rdx)
+ adcq -56(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 56(%rdx)
+ adcq -80(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 64(%rdx)
+ adcq -128(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 72(%rdx)
+ adcq %r8, %r10
+ movq %r10, 80(%rdx)
+ adcq -88(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 88(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
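+## Montgomery multiplication for a 6-limb modulus: rdi = result, rsi = x, rdx = y, rcx = modulus, with the Montgomery constant read from -8(%rcx); multiply and reduce are interleaved round by round, ending with a conditional subtraction of p.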
+ .globl _mcl_fp_mont6L
+ .p2align 4, 0x90
+_mcl_fp_mont6L: ## @mcl_fp_mont6L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $48, %rsp
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq %rdi, 40(%rsp) ## 8-byte Spill
+ movq 40(%rsi), %rax
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq (%rdx), %rdi
+ mulq %rdi
+ movq %rax, %r10
+ movq %rdx, %r11
+ movq 32(%rsi), %rax
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %r14
+ movq %rdx, %r15
+ movq 24(%rsi), %rax
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %r8
+ movq %rdx, %rbx
+ movq 16(%rsi), %rax
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %r9
+ movq %rdx, %r12
+ movq (%rsi), %rbp
+ movq %rbp, 32(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rdx, %r13
+ movq %rax, %rsi
+ movq %rbp, %rax
+ mulq %rdi
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rdx, %rdi
+ addq %rsi, %rdi
+ adcq %r9, %r13
+ adcq %r8, %r12
+ adcq %r14, %rbx
+ movq %rbx, -88(%rsp) ## 8-byte Spill
+ adcq %r10, %r15
+ movq %r15, -120(%rsp) ## 8-byte Spill
+ adcq $0, %r11
+ movq %r11, -112(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %rdx
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ movq %rax, %rbx
+ imulq %rdx, %rbx
+ movq 40(%rcx), %rdx
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq %rdx
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq 32(%rcx), %rdx
+ movq %rdx, 8(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq %rdx
+ movq %rax, %r9
+ movq %rdx, %r14
+ movq 24(%rcx), %rdx
+ movq %rdx, (%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq %rdx
+ movq %rax, %r8
+ movq %rdx, %r15
+ movq 16(%rcx), %rdx
+ movq %rdx, -8(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq %rdx
+ movq %rax, %r10
+ movq %rdx, %r11
+ movq (%rcx), %rsi
+ movq %rsi, -24(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, -16(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq %rcx
+ movq %rdx, %rbp
+ movq %rax, %rcx
+ movq %rbx, %rax
+ mulq %rsi
+ movq %rdx, %rbx
+ addq %rcx, %rbx
+ adcq %r10, %rbp
+ adcq %r8, %r11
+ adcq %r9, %r15
+ adcq -104(%rsp), %r14 ## 8-byte Folded Reload
+ movq -128(%rsp), %rcx ## 8-byte Reload
+ adcq $0, %rcx
+ addq -96(%rsp), %rax ## 8-byte Folded Reload
+ adcq %rdi, %rbx
+ adcq %r13, %rbp
+ adcq %r12, %r11
+ adcq -88(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -120(%rsp), %r14 ## 8-byte Folded Reload
+ adcq -112(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -128(%rsp) ## 8-byte Spill
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq -56(%rsp), %rax ## 8-byte Reload
+ movq 8(%rax), %rdi
+ movq %rdi, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %r9
+ movq %rdi, %rax
+ mulq 24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r10
+ movq %rdi, %rax
+ mulq 32(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r12
+ movq %rdx, %rdi
+ addq %r10, %rdi
+ adcq %r9, %rcx
+ adcq -104(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -96(%rsp), %r8 ## 8-byte Folded Reload
+ movq -120(%rsp), %rdx ## 8-byte Reload
+ adcq -88(%rsp), %rdx ## 8-byte Folded Reload
+ movq -112(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %rbx, %r12
+ adcq %rbp, %rdi
+ adcq %r11, %rcx
+ adcq %r15, %r13
+ adcq %r14, %r8
+ adcq -128(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ adcq %rsi, %rax
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %r12, %rbx
+ imulq -32(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, %rax
+ mulq 16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r10
+ movq %rbx, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r11
+ movq %rbx, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ addq %r11, %r9
+ adcq %r10, %rbp
+ adcq -48(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -104(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -96(%rsp), %r14 ## 8-byte Folded Reload
+ movq -128(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %r12, %rax
+ adcq %rdi, %r9
+ adcq %rcx, %rbp
+ adcq %r13, %rsi
+ adcq %r8, %r15
+ adcq -120(%rsp), %r14 ## 8-byte Folded Reload
+ adcq -112(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ adcq $0, -88(%rsp) ## 8-byte Folded Spill
+ movq -56(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rcx
+ movq %rcx, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %r8
+ movq %rcx, %rax
+ mulq 24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r10
+ movq %rcx, %rax
+ mulq 32(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r13
+ movq %rdx, %rcx
+ addq %r10, %rcx
+ adcq %r8, %rbx
+ adcq %rdi, %r12
+ adcq -104(%rsp), %r11 ## 8-byte Folded Reload
+ movq -120(%rsp), %rdx ## 8-byte Reload
+ adcq -96(%rsp), %rdx ## 8-byte Folded Reload
+ movq -112(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %r9, %r13
+ adcq %rbp, %rcx
+ adcq %rsi, %rbx
+ adcq %r15, %r12
+ adcq %r14, %r11
+ adcq -128(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ adcq -88(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %r13, %rdi
+ imulq -32(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, %rax
+ mulq 16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r15
+ movq %rdi, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r10
+ movq %rdi, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ addq %r10, %r8
+ adcq %r15, %rbp
+ adcq -48(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -104(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -96(%rsp), %r14 ## 8-byte Folded Reload
+ movq -128(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %r13, %rax
+ adcq %rcx, %r8
+ adcq %rbx, %rbp
+ adcq %r12, %rsi
+ adcq %r11, %r9
+ adcq -120(%rsp), %r14 ## 8-byte Folded Reload
+ adcq -112(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ adcq $0, -88(%rsp) ## 8-byte Folded Spill
+ movq -56(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rcx
+ movq %rcx, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %r10
+ movq %rcx, %rax
+ mulq 24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq 32(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r13
+ movq %rdx, %rcx
+ addq %r12, %rcx
+ adcq %r10, %rbx
+ adcq %rdi, %r15
+ adcq -104(%rsp), %r11 ## 8-byte Folded Reload
+ movq -120(%rsp), %rdx ## 8-byte Reload
+ adcq -96(%rsp), %rdx ## 8-byte Folded Reload
+ movq -112(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %r8, %r13
+ adcq %rbp, %rcx
+ adcq %rsi, %rbx
+ adcq %r9, %r15
+ adcq %r14, %r11
+ adcq -128(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ adcq -88(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %r13, %rsi
+ imulq -32(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, %rax
+ mulq 16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r10
+ movq %rsi, %rax
+ mulq -8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, %r8
+ movq %rsi, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %r9
+ movq %rsi, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ addq %r9, %rsi
+ adcq %r8, %r12
+ adcq %r10, %r14
+ adcq -104(%rsp), %rdi ## 8-byte Folded Reload
+ adcq -96(%rsp), %rbp ## 8-byte Folded Reload
+ movq -128(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %r13, %rax
+ adcq %rcx, %rsi
+ adcq %rbx, %r12
+ adcq %r15, %r14
+ adcq %r11, %rdi
+ adcq -120(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -112(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ adcq $0, -88(%rsp) ## 8-byte Folded Spill
+ movq -56(%rsp), %rax ## 8-byte Reload
+ movq 32(%rax), %rcx
+ movq %rcx, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %r11
+ movq %rcx, %rax
+ mulq 24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %r9
+ movq %rcx, %rax
+ mulq 32(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r8
+ movq %rdx, %r13
+ addq %r9, %r13
+ adcq %r11, %r15
+ adcq -48(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -104(%rsp), %rbx ## 8-byte Folded Reload
+ movq -120(%rsp), %rcx ## 8-byte Reload
+ adcq -96(%rsp), %rcx ## 8-byte Folded Reload
+ movq -112(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %rsi, %r8
+ adcq %r12, %r13
+ adcq %r14, %r15
+ adcq %rdi, %r10
+ adcq %rbp, %rbx
+ adcq -128(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -120(%rsp) ## 8-byte Spill
+ adcq -88(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %r8, %rcx
+ imulq -32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, %rax
+ mulq 16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r14
+ addq %r12, %r14
+ adcq %rdi, %rbp
+ adcq -48(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -104(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -96(%rsp), %r9 ## 8-byte Folded Reload
+ movq -128(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %r8, %rax
+ adcq %r13, %r14
+ adcq %r15, %rbp
+ adcq %r10, %rsi
+ adcq %rbx, %r11
+ adcq -120(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, -120(%rsp) ## 8-byte Spill
+ adcq -112(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq -88(%rsp), %rdi ## 8-byte Reload
+ adcq $0, %rdi
+ movq -56(%rsp), %rax ## 8-byte Reload
+ movq 40(%rax), %rcx
+ movq %rcx, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %rbx
+ movq %rcx, %rax
+ mulq 24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %r9
+ movq %rcx, %rax
+ mulq 32(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r15
+ movq %rdx, %r8
+ addq %r9, %r8
+ adcq %rbx, %r10
+ adcq -80(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -72(%rsp), %r12 ## 8-byte Folded Reload
+ movq -64(%rsp), %rax ## 8-byte Reload
+ adcq -112(%rsp), %rax ## 8-byte Folded Reload
+ movq -56(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %r14, %r15
+ adcq %rbp, %r8
+ adcq %rsi, %r10
+ adcq %r11, %r13
+ adcq -120(%rsp), %r12 ## 8-byte Folded Reload
+ movq %r12, -72(%rsp) ## 8-byte Spill
+ adcq -128(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ adcq %rdi, %rdx
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ sbbq %rcx, %rcx
+ movq -32(%rsp), %rdi ## 8-byte Reload
+ imulq %r15, %rdi
+ movq %rdi, %rax
+ mulq 16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, %r9
+ movq %rdi, %rax
+ mulq -8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r11
+ movq %rdi, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ andl $1, %ecx
+ addq %r14, %rax
+ adcq %r11, %rdx
+ adcq -40(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -80(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -32(%rsp), %r12 ## 8-byte Folded Reload
+ adcq $0, %rbp
+ addq %r15, %r9
+ adcq %r8, %rax
+ adcq %r10, %rdx
+ adcq %r13, %rbx
+ adcq -72(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -64(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -56(%rsp), %rbp ## 8-byte Folded Reload
+ adcq $0, %rcx
+ movq %rax, %r8
+ subq -24(%rsp), %r8 ## 8-byte Folded Reload
+ movq %rdx, %r9
+ sbbq -16(%rsp), %r9 ## 8-byte Folded Reload
+ movq %rbx, %r10
+ sbbq -8(%rsp), %r10 ## 8-byte Folded Reload
+ movq %rsi, %r11
+ sbbq (%rsp), %r11 ## 8-byte Folded Reload
+ movq %r12, %r14
+ sbbq 8(%rsp), %r14 ## 8-byte Folded Reload
+ movq %rbp, %r15
+ sbbq 16(%rsp), %r15 ## 8-byte Folded Reload
+ sbbq $0, %rcx
+ andl $1, %ecx
+ cmovneq %rsi, %r11
+ testb %cl, %cl
+ cmovneq %rax, %r8
+ movq 40(%rsp), %rax ## 8-byte Reload
+ movq %r8, (%rax)
+ cmovneq %rdx, %r9
+ movq %r9, 8(%rax)
+ cmovneq %rbx, %r10
+ movq %r10, 16(%rax)
+ movq %r11, 24(%rax)
+ cmovneq %r12, %r14
+ movq %r14, 32(%rax)
+ cmovneq %rbp, %r15
+ movq %r15, 40(%rax)
+ addq $48, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
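+## Montgomery multiplication, NF variant: same arguments as mcl_fp_mont6L, but no extra carry word is tracked; the final step computes result - p and selects the in-range value with cmovs on the sign bit.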
+ .globl _mcl_fp_montNF6L
+ .p2align 4, 0x90
+_mcl_fp_montNF6L: ## @mcl_fp_montNF6L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $40, %rsp
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq %rdi, 32(%rsp) ## 8-byte Spill
+ movq 40(%rsi), %rax
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq (%rdx), %rdi
+ mulq %rdi
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq %rdx, %r12
+ movq 32(%rsi), %rax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %r14
+ movq %rdx, %r10
+ movq 24(%rsi), %rax
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %r15
+ movq %rdx, %r9
+ movq 16(%rsi), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %r11
+ movq %rdx, %r8
+ movq (%rsi), %rbx
+ movq %rbx, 8(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rdx, %rbp
+ movq %rax, %rsi
+ movq %rbx, %rax
+ mulq %rdi
+ movq %rax, %r13
+ movq %rdx, %rdi
+ addq %rsi, %rdi
+ adcq %r11, %rbp
+ adcq %r15, %r8
+ adcq %r14, %r9
+ adcq -64(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r10, -128(%rsp) ## 8-byte Spill
+ adcq $0, %r12
+ movq %r12, -112(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %rax
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ movq %r13, %rbx
+ imulq %rax, %rbx
+ movq 40(%rcx), %rdx
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq %rdx
+ movq %rax, %r14
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq 32(%rcx), %rdx
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq %rdx
+ movq %rax, %r15
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq 24(%rcx), %rdx
+ movq %rdx, -24(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq %rdx
+ movq %rax, %r12
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq 16(%rcx), %rdx
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq %rdx
+ movq %rax, %r10
+ movq %rdx, -8(%rsp) ## 8-byte Spill
+ movq (%rcx), %rsi
+ movq %rsi, -32(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq %rcx
+ movq %rdx, %r11
+ movq %rax, %rcx
+ movq %rbx, %rax
+ mulq %rsi
+ addq %r13, %rax
+ adcq %rdi, %rcx
+ adcq %rbp, %r10
+ adcq %r8, %r12
+ adcq %r9, %r15
+ adcq -128(%rsp), %r14 ## 8-byte Folded Reload
+ movq -112(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %rdx, %rcx
+ adcq %r11, %r10
+ adcq -8(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -104(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -96(%rsp), %r14 ## 8-byte Folded Reload
+ movq %r14, -128(%rsp) ## 8-byte Spill
+ adcq -120(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq -72(%rsp), %rax ## 8-byte Reload
+ movq 8(%rax), %rdi
+ movq %rdi, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -88(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq 16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r14
+ movq %rdi, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r11
+ movq %rdi, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rax, %rdi
+ movq %rdx, %rbp
+ addq %r11, %rbp
+ adcq %r14, %rbx
+ adcq -104(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -96(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -120(%rsp), %r9 ## 8-byte Folded Reload
+ adcq $0, %r8
+ addq %rcx, %rdi
+ adcq %r10, %rbp
+ adcq %r12, %rbx
+ adcq %r15, %rsi
+ adcq -128(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -112(%rsp), %r9 ## 8-byte Folded Reload
+ adcq $0, %r8
+ movq %rdi, %r11
+ imulq -48(%rsp), %r11 ## 8-byte Folded Reload
+ movq %r11, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %r11, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, %r15
+ movq %r11, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, %rcx
+ movq %r11, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq %rax, %r10
+ movq %r11, %rax
+ mulq 24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %r14
+ movq %r11, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ addq %rdi, %rax
+ adcq %rbp, %r14
+ adcq %rbx, %r10
+ adcq %rsi, %rcx
+ adcq %r13, %r15
+ movq -112(%rsp), %rax ## 8-byte Reload
+ adcq %r9, %rax
+ adcq $0, %r8
+ addq %rdx, %r14
+ adcq %r12, %r10
+ adcq -104(%rsp), %rcx ## 8-byte Folded Reload
+ adcq -120(%rsp), %r15 ## 8-byte Folded Reload
+ movq %r15, -120(%rsp) ## 8-byte Spill
+ adcq -96(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ adcq -128(%rsp), %r8 ## 8-byte Folded Reload
+ movq -72(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rdi
+ movq %rdi, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -88(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq 16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r9
+ movq %rdi, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rax, %rbp
+ movq %rdx, %rbx
+ addq %r9, %rbx
+ adcq -8(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -104(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -96(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -128(%rsp), %r15 ## 8-byte Folded Reload
+ adcq $0, %r13
+ addq %r14, %rbp
+ adcq %r10, %rbx
+ adcq %rcx, %rsi
+ adcq -120(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -112(%rsp), %r11 ## 8-byte Folded Reload
+ adcq %r8, %r15
+ adcq $0, %r13
+ movq %rbp, %rcx
+ imulq -48(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, %r9
+ movq %rcx, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, %r10
+ movq %rcx, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq %rax, %r14
+ movq %rcx, %rax
+ mulq 24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ addq %rbp, %rax
+ adcq %rbx, %rdi
+ adcq %rsi, %r14
+ adcq %r12, %r10
+ adcq %r11, %r9
+ movq -112(%rsp), %rax ## 8-byte Reload
+ adcq %r15, %rax
+ adcq $0, %r13
+ addq %rdx, %rdi
+ adcq %r8, %r14
+ adcq -104(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -96(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -128(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ adcq -120(%rsp), %r13 ## 8-byte Folded Reload
+ movq -72(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rbp
+ movq %rbp, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq -88(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq 16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r12
+ movq %rbp, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r8
+ movq %rdx, %rbp
+ addq %r12, %rbp
+ adcq -104(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -96(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -128(%rsp), %rcx ## 8-byte Folded Reload
+ adcq -120(%rsp), %r11 ## 8-byte Folded Reload
+ adcq $0, %r15
+ addq %rdi, %r8
+ adcq %r14, %rbp
+ adcq %r10, %rbx
+ adcq %r9, %rsi
+ adcq -112(%rsp), %rcx ## 8-byte Folded Reload
+ adcq %r13, %r11
+ adcq $0, %r15
+ movq %r8, %r14
+ imulq -48(%rsp), %r14 ## 8-byte Folded Reload
+ movq %r14, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, %r9
+ movq %r14, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, %r13
+ movq %r14, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, %r10
+ movq %r14, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, %r12
+ movq %r14, %rax
+ mulq 24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq %rax, %rdi
+ movq %r14, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ addq %r8, %rax
+ adcq %rbp, %rdi
+ adcq %rbx, %r12
+ adcq %rsi, %r10
+ adcq %rcx, %r13
+ adcq %r11, %r9
+ adcq $0, %r15
+ addq %rdx, %rdi
+ adcq -104(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -96(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -120(%rsp), %r13 ## 8-byte Folded Reload
+ movq %r13, -120(%rsp) ## 8-byte Spill
+ adcq -112(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, -112(%rsp) ## 8-byte Spill
+ adcq -128(%rsp), %r15 ## 8-byte Folded Reload
+ movq -72(%rsp), %rax ## 8-byte Reload
+ movq 32(%rax), %rcx
+ movq %rcx, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -88(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq 16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r13
+ movq %rcx, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r11
+ movq %rdx, %rbp
+ addq %r13, %rbp
+ adcq -8(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -104(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -96(%rsp), %r8 ## 8-byte Folded Reload
+ adcq -128(%rsp), %r9 ## 8-byte Folded Reload
+ adcq $0, %r14
+ addq %rdi, %r11
+ adcq %r12, %rbp
+ adcq %r10, %rbx
+ adcq -120(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -112(%rsp), %r8 ## 8-byte Folded Reload
+ adcq %r15, %r9
+ adcq $0, %r14
+ movq %r11, %rcx
+ imulq -48(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, %r10
+ movq %rcx, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, %r15
+ movq %rcx, %rax
+ mulq 24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ addq %r11, %rax
+ adcq %rbp, %rdi
+ adcq %rbx, %r15
+ adcq %rsi, %r10
+ adcq %r8, %r12
+ movq -112(%rsp), %rcx ## 8-byte Reload
+ adcq %r9, %rcx
+ adcq $0, %r14
+ addq %rdx, %rdi
+ adcq %r13, %r15
+ adcq -128(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r10, -128(%rsp) ## 8-byte Spill
+ adcq -120(%rsp), %r12 ## 8-byte Folded Reload
+ movq %r12, -120(%rsp) ## 8-byte Spill
+ adcq -104(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -112(%rsp) ## 8-byte Spill
+ adcq -96(%rsp), %r14 ## 8-byte Folded Reload
+ movq -72(%rsp), %rax ## 8-byte Reload
+ movq 40(%rax), %rcx
+ movq %rcx, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -88(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq 16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %rbp
+ movq %rcx, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %rsi
+ movq %rcx, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r9
+ movq %rdx, %r8
+ addq %rsi, %r8
+ adcq %rbp, %r10
+ adcq -88(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -80(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -72(%rsp), %r11 ## 8-byte Folded Reload
+ adcq $0, %rbx
+ addq %rdi, %r9
+ adcq %r15, %r8
+ adcq -128(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -120(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -112(%rsp), %r12 ## 8-byte Folded Reload
+ adcq %r14, %r11
+ adcq $0, %rbx
+ movq -48(%rsp), %rcx ## 8-byte Reload
+ imulq %r9, %rcx
+ movq %rcx, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ movq %rax, %rsi
+ movq %rcx, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq %rax, %rbp
+ movq %rcx, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rax, %r14
+ movq %rcx, %rax
+ movq %rcx, %r15
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq %rax, %rcx
+ movq %r15, %rax
+ movq 24(%rsp), %r15 ## 8-byte Reload
+ mulq %r15
+ addq %r9, %r14
+ adcq %r8, %rax
+ adcq %r10, %rcx
+ adcq %r13, %rbp
+ adcq %r12, %rdi
+ adcq %r11, %rsi
+ adcq $0, %rbx
+ addq -88(%rsp), %rax ## 8-byte Folded Reload
+ adcq %rdx, %rcx
+ adcq -56(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -80(%rsp), %rdi ## 8-byte Folded Reload
+ adcq -72(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -48(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rax, %r14
+ subq -32(%rsp), %r14 ## 8-byte Folded Reload
+ movq %rcx, %r8
+ sbbq %r15, %r8
+ movq %rbp, %r9
+ sbbq -40(%rsp), %r9 ## 8-byte Folded Reload
+ movq %rdi, %r10
+ sbbq -24(%rsp), %r10 ## 8-byte Folded Reload
+ movq %rsi, %r11
+ sbbq -16(%rsp), %r11 ## 8-byte Folded Reload
+ movq %rbx, %r15
+ sbbq -64(%rsp), %r15 ## 8-byte Folded Reload
+ movq %r15, %rdx
+ sarq $63, %rdx
+ cmovsq %rax, %r14
+ movq 32(%rsp), %rax ## 8-byte Reload
+ movq %r14, (%rax)
+ cmovsq %rcx, %r8
+ movq %r8, 8(%rax)
+ cmovsq %rbp, %r9
+ movq %r9, 16(%rax)
+ cmovsq %rdi, %r10
+ movq %r10, 24(%rax)
+ cmovsq %rsi, %r11
+ movq %r11, 32(%rax)
+ cmovsq %rbx, %r15
+ movq %r15, 40(%rax)
+ addq $40, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
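+## Montgomery reduction: rdi = 6-limb result, rsi = 12-limb input, rdx = modulus; the low half is folded in six rounds using the constant stored at -8 of the modulus pointer, then p is conditionally subtracted.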
+ .globl _mcl_fp_montRed6L
+ .p2align 4, 0x90
+_mcl_fp_montRed6L: ## @mcl_fp_montRed6L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $24, %rsp
+ movq %rdx, %rbp
+ movq %rdi, 16(%rsp) ## 8-byte Spill
+ movq -8(%rbp), %rax
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq (%rsi), %r10
+ movq %r10, %rdi
+ imulq %rax, %rdi
+ movq 40(%rbp), %rcx
+ movq %rcx, -24(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq %rcx
+ movq %rax, %r14
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq 32(%rbp), %rcx
+ movq %rcx, -40(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq %rcx
+ movq %rax, %r15
+ movq %rdx, %r9
+ movq 24(%rbp), %rcx
+ movq %rcx, -48(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq %rcx
+ movq %rax, %r12
+ movq %rdx, %r11
+ movq 16(%rbp), %rcx
+ movq %rcx, -56(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq %rcx
+ movq %rax, %rcx
+ movq %rdx, %r13
+ movq (%rbp), %rbx
+ movq 8(%rbp), %rdx
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq %rdx
+ movq %rdx, %r8
+ movq %rax, %rbp
+ movq %rdi, %rax
+ mulq %rbx
+ movq %rbx, %rdi
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq %rdx, %rbx
+ addq %rbp, %rbx
+ adcq %rcx, %r8
+ adcq %r12, %r13
+ adcq %r15, %r11
+ adcq %r14, %r9
+ movq -128(%rsp), %rcx ## 8-byte Reload
+ adcq $0, %rcx
+ addq %r10, %rax
+ adcq 8(%rsi), %rbx
+ adcq 16(%rsi), %r8
+ adcq 24(%rsi), %r13
+ adcq 32(%rsi), %r11
+ adcq 40(%rsi), %r9
+ movq %r9, -120(%rsp) ## 8-byte Spill
+ adcq 48(%rsi), %rcx
+ movq %rcx, -128(%rsp) ## 8-byte Spill
+ movq 88(%rsi), %rax
+ movq 80(%rsi), %rcx
+ movq 72(%rsi), %rdx
+ movq 64(%rsi), %rbp
+ movq 56(%rsi), %rsi
+ adcq $0, %rsi
+ movq %rsi, -104(%rsp) ## 8-byte Spill
+ adcq $0, %rbp
+ movq %rbp, -72(%rsp) ## 8-byte Spill
+ adcq $0, %rdx
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ adcq $0, %rcx
+ movq %rcx, -64(%rsp) ## 8-byte Spill
+ adcq $0, %rax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ sbbq %r14, %r14
+ andl $1, %r14d
+ movq %rbx, %rsi
+ imulq -80(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r10
+ movq %rsi, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r9
+ movq %rsi, %rax
+ mulq %rdi
+ movq %rdx, %rdi
+ addq %r9, %rdi
+ adcq %r10, %rbp
+ adcq 8(%rsp), %rcx ## 8-byte Folded Reload
+ adcq (%rsp), %r12 ## 8-byte Folded Reload
+ adcq -32(%rsp), %r15 ## 8-byte Folded Reload
+ movq -112(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %rbx, %rax
+ adcq %r8, %rdi
+ adcq %r13, %rbp
+ adcq %r11, %rcx
+ adcq -120(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -128(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -104(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ adcq $0, -72(%rsp) ## 8-byte Folded Spill
+ adcq $0, -96(%rsp) ## 8-byte Folded Spill
+ adcq $0, -64(%rsp) ## 8-byte Folded Spill
+ adcq $0, -88(%rsp) ## 8-byte Folded Spill
+ adcq $0, %r14
+ movq %rdi, %rbx
+ imulq -80(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r9
+ movq %rbx, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r11
+ movq %rbx, %rax
+ mulq -8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r10
+ addq %r11, %r10
+ adcq %r9, %r8
+ adcq (%rsp), %rsi ## 8-byte Folded Reload
+ adcq -32(%rsp), %r13 ## 8-byte Folded Reload
+ movq -120(%rsp), %rbx ## 8-byte Reload
+ adcq -104(%rsp), %rbx ## 8-byte Folded Reload
+ movq -128(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %rdi, %rax
+ adcq %rbp, %r10
+ adcq %rcx, %r8
+ adcq %r12, %rsi
+ adcq %r15, %r13
+ adcq -112(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, -120(%rsp) ## 8-byte Spill
+ adcq -72(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ adcq $0, -96(%rsp) ## 8-byte Folded Spill
+ adcq $0, -64(%rsp) ## 8-byte Folded Spill
+ adcq $0, -88(%rsp) ## 8-byte Folded Spill
+ adcq $0, %r14
+ movq %r10, %rcx
+ imulq -80(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, %rax
+ movq -24(%rsp), %rbp ## 8-byte Reload
+ mulq %rbp
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %rbx
+ movq %rcx, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r9
+ movq %rcx, %rax
+ mulq -8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ addq %r9, %rcx
+ adcq %rbx, %rdi
+ adcq -32(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -104(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -72(%rsp), %r11 ## 8-byte Folded Reload
+ movq -112(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %r10, %rax
+ adcq %r8, %rcx
+ adcq %rsi, %rdi
+ adcq %r13, %r12
+ adcq -120(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -128(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -96(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ adcq $0, -64(%rsp) ## 8-byte Folded Spill
+ movq -88(%rsp), %r8 ## 8-byte Reload
+ adcq $0, %r8
+ adcq $0, %r14
+ movq %rcx, %rsi
+ imulq -80(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, %rax
+ mulq %rbp
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %rbp
+ movq %rsi, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, %r10
+ movq %rsi, %rax
+ mulq -8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ addq %r10, %rbx
+ adcq %rbp, %r9
+ adcq -104(%rsp), %r13 ## 8-byte Folded Reload
+ movq -120(%rsp), %rbp ## 8-byte Reload
+ adcq -72(%rsp), %rbp ## 8-byte Folded Reload
+ movq -128(%rsp), %rsi ## 8-byte Reload
+ adcq -88(%rsp), %rsi ## 8-byte Folded Reload
+ movq -96(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %rcx, %rax
+ adcq %rdi, %rbx
+ adcq %r12, %r9
+ adcq %r15, %r13
+ adcq %r11, %rbp
+ movq %rbp, -120(%rsp) ## 8-byte Spill
+ adcq -112(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, -128(%rsp) ## 8-byte Spill
+ adcq -64(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, -88(%rsp) ## 8-byte Spill
+ adcq $0, %r14
+ movq -80(%rsp), %r8 ## 8-byte Reload
+ imulq %rbx, %r8
+ movq %r8, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %r8, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq %r8, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %r8, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r10
+ movq %r8, %rax
+ movq -16(%rsp), %r12 ## 8-byte Reload
+ mulq %r12
+ movq %rdx, %rcx
+ movq %rax, %r15
+ movq %r8, %rax
+ movq -8(%rsp), %r8 ## 8-byte Reload
+ mulq %r8
+ addq %r15, %rdx
+ adcq %r10, %rcx
+ adcq -112(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -64(%rsp), %rdi ## 8-byte Folded Reload
+ adcq -80(%rsp), %r11 ## 8-byte Folded Reload
+ adcq $0, %rbp
+ addq %rbx, %rax
+ adcq %r9, %rdx
+ adcq %r13, %rcx
+ adcq -120(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -128(%rsp), %rdi ## 8-byte Folded Reload
+ adcq -96(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -88(%rsp), %rbp ## 8-byte Folded Reload
+ adcq $0, %r14
+ movq %rdx, %rax
+ subq %r8, %rax
+ movq %rcx, %rbx
+ sbbq %r12, %rbx
+ movq %rsi, %r8
+ sbbq -56(%rsp), %r8 ## 8-byte Folded Reload
+ movq %rdi, %r9
+ sbbq -48(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r11, %r10
+ sbbq -40(%rsp), %r10 ## 8-byte Folded Reload
+ movq %rbp, %r15
+ sbbq -24(%rsp), %r15 ## 8-byte Folded Reload
+ sbbq $0, %r14
+ andl $1, %r14d
+ cmovneq %rbp, %r15
+ testb %r14b, %r14b
+ cmovneq %rdx, %rax
+ movq 16(%rsp), %rdx ## 8-byte Reload
+ movq %rax, (%rdx)
+ cmovneq %rcx, %rbx
+ movq %rbx, 8(%rdx)
+ cmovneq %rsi, %r8
+ movq %r8, 16(%rdx)
+ cmovneq %rdi, %r9
+ movq %r9, 24(%rdx)
+ cmovneq %r11, %r10
+ movq %r10, 32(%rdx)
+ movq %r15, 40(%rdx)
+ addq $24, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
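+## 6-limb addition without reduction: rdi = rsi + rdx limb by limb; the carry-out is returned in rax.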
+ .globl _mcl_fp_addPre6L
+ .p2align 4, 0x90
+_mcl_fp_addPre6L: ## @mcl_fp_addPre6L
+## BB#0:
+ pushq %r14
+ pushq %rbx
+ movq 40(%rdx), %r8
+ movq 40(%rsi), %r11
+ movq 32(%rdx), %r9
+ movq 24(%rdx), %r10
+ movq 24(%rsi), %rax
+ movq 32(%rsi), %r14
+ movq 16(%rdx), %rbx
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rbx
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rbx, 16(%rdi)
+ adcq %r10, %rax
+ movq %rax, 24(%rdi)
+ adcq %r9, %r14
+ movq %r14, 32(%rdi)
+ adcq %r8, %r11
+ movq %r11, 40(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r14
+ retq
+
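+## 6-limb subtraction without reduction: rdi = rsi - rdx limb by limb; the borrow is returned in rax.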
+ .globl _mcl_fp_subPre6L
+ .p2align 4, 0x90
+_mcl_fp_subPre6L: ## @mcl_fp_subPre6L
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq 40(%rdx), %r8
+ movq 40(%rsi), %r9
+ movq 32(%rsi), %r10
+ movq 24(%rsi), %r11
+ movq 16(%rsi), %rcx
+ movq (%rsi), %rbx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %rsi
+ movq 24(%rdx), %r14
+ movq 32(%rdx), %r15
+ sbbq 16(%rdx), %rcx
+ movq %rbx, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %rcx, 16(%rdi)
+ sbbq %r14, %r11
+ movq %r11, 24(%rdi)
+ sbbq %r15, %r10
+ movq %r10, 32(%rdi)
+ sbbq %r8, %r9
+ movq %r9, 40(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+
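+## Right shift by one bit of a 6-limb value: rdi = rsi >> 1, carrying bits across limbs with shrd.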
+ .globl _mcl_fp_shr1_6L
+ .p2align 4, 0x90
+_mcl_fp_shr1_6L: ## @mcl_fp_shr1_6L
+## BB#0:
+ movq 40(%rsi), %r8
+ movq 32(%rsi), %r9
+ movq 24(%rsi), %rdx
+ movq 16(%rsi), %rax
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rcx
+ movq %rcx, (%rdi)
+ shrdq $1, %rax, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rdx, %rax
+ movq %rax, 16(%rdi)
+ shrdq $1, %r9, %rdx
+ movq %rdx, 24(%rdi)
+ shrdq $1, %r8, %r9
+ movq %r9, 32(%rdi)
+ shrq %r8
+ movq %r8, 40(%rdi)
+ retq
+
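+## Modular addition: rdi = (rsi + rdx) mod p with p at rcx; the raw sum is stored first, then overwritten by sum - p when that trial subtraction does not borrow.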
+ .globl _mcl_fp_add6L
+ .p2align 4, 0x90
+_mcl_fp_add6L: ## @mcl_fp_add6L
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq 40(%rdx), %r14
+ movq 40(%rsi), %r8
+ movq 32(%rdx), %r15
+ movq 24(%rdx), %rbx
+ movq 24(%rsi), %r10
+ movq 32(%rsi), %r9
+ movq 16(%rdx), %r11
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r11
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r11, 16(%rdi)
+ adcq %rbx, %r10
+ movq %r10, 24(%rdi)
+ adcq %r15, %r9
+ movq %r9, 32(%rdi)
+ adcq %r14, %r8
+ movq %r8, 40(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r11
+ sbbq 24(%rcx), %r10
+ sbbq 32(%rcx), %r9
+ sbbq 40(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne LBB89_2
+## BB#1: ## %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r11, 16(%rdi)
+ movq %r10, 24(%rdi)
+ movq %r9, 32(%rdi)
+ movq %r8, 40(%rdi)
+LBB89_2: ## %carry
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+
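+## Modular addition, NF variant: computes the sum and sum - p, then picks the in-range value with cmovs on the sign of the trial subtraction instead of a carry flag.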
+ .globl _mcl_fp_addNF6L
+ .p2align 4, 0x90
+_mcl_fp_addNF6L: ## @mcl_fp_addNF6L
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 40(%rdx), %r8
+ movq 32(%rdx), %r9
+ movq 24(%rdx), %r10
+ movq 16(%rdx), %r11
+ movq (%rdx), %r15
+ movq 8(%rdx), %r14
+ addq (%rsi), %r15
+ adcq 8(%rsi), %r14
+ adcq 16(%rsi), %r11
+ adcq 24(%rsi), %r10
+ adcq 32(%rsi), %r9
+ adcq 40(%rsi), %r8
+ movq %r15, %rsi
+ subq (%rcx), %rsi
+ movq %r14, %rbx
+ sbbq 8(%rcx), %rbx
+ movq %r11, %rdx
+ sbbq 16(%rcx), %rdx
+ movq %r10, %r13
+ sbbq 24(%rcx), %r13
+ movq %r9, %r12
+ sbbq 32(%rcx), %r12
+ movq %r8, %rax
+ sbbq 40(%rcx), %rax
+ movq %rax, %rcx
+ sarq $63, %rcx
+ cmovsq %r15, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r14, %rbx
+ movq %rbx, 8(%rdi)
+ cmovsq %r11, %rdx
+ movq %rdx, 16(%rdi)
+ cmovsq %r10, %r13
+ movq %r13, 24(%rdi)
+ cmovsq %r9, %r12
+ movq %r12, 32(%rdi)
+ cmovsq %r8, %rax
+ movq %rax, 40(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+
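+## Modular subtraction: rdi = (rsi - rdx) mod p with p at rcx; if the subtraction borrows, p is added back in the carry block.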
+ .globl _mcl_fp_sub6L
+ .p2align 4, 0x90
+_mcl_fp_sub6L: ## @mcl_fp_sub6L
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 40(%rdx), %r14
+ movq 40(%rsi), %r8
+ movq 32(%rsi), %r9
+ movq 24(%rsi), %r10
+ movq 16(%rsi), %r11
+ movq (%rsi), %rax
+ movq 8(%rsi), %rsi
+ xorl %ebx, %ebx
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %rsi
+ movq 24(%rdx), %r15
+ movq 32(%rdx), %r12
+ sbbq 16(%rdx), %r11
+ movq %rax, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %r11, 16(%rdi)
+ sbbq %r15, %r10
+ movq %r10, 24(%rdi)
+ sbbq %r12, %r9
+ movq %r9, 32(%rdi)
+ sbbq %r14, %r8
+ movq %r8, 40(%rdi)
+ sbbq $0, %rbx
+ testb $1, %bl
+ je LBB91_2
+## BB#1: ## %carry
+ movq 40(%rcx), %r14
+ movq 32(%rcx), %r15
+ movq 24(%rcx), %r12
+ movq 8(%rcx), %rbx
+ movq 16(%rcx), %rdx
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %rsi, %rbx
+ movq %rbx, 8(%rdi)
+ adcq %r11, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r10, %r12
+ movq %r12, 24(%rdi)
+ adcq %r9, %r15
+ movq %r15, 32(%rdi)
+ adcq %r8, %r14
+ movq %r14, 40(%rdi)
+LBB91_2: ## %nocarry
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+
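+## Modular subtraction, NF variant: forms x - y via SSE loads, builds an all-ones/all-zero mask from the sign of the top limb, and adds p AND mask so a negative difference wraps back into range.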
+ .globl _mcl_fp_subNF6L
+ .p2align 4, 0x90
+_mcl_fp_subNF6L: ## @mcl_fp_subNF6L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movdqu (%rdx), %xmm0
+ movdqu 16(%rdx), %xmm1
+ movdqu 32(%rdx), %xmm2
+ pshufd $78, %xmm2, %xmm3 ## xmm3 = xmm2[2,3,0,1]
+ movd %xmm3, %r11
+ movdqu (%rsi), %xmm3
+ movdqu 16(%rsi), %xmm4
+ movdqu 32(%rsi), %xmm5
+ pshufd $78, %xmm5, %xmm6 ## xmm6 = xmm5[2,3,0,1]
+ movd %xmm6, %rax
+ movd %xmm2, %r14
+ movd %xmm5, %r8
+ pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1]
+ movd %xmm2, %r15
+ pshufd $78, %xmm4, %xmm2 ## xmm2 = xmm4[2,3,0,1]
+ movd %xmm2, %r9
+ movd %xmm1, %r12
+ movd %xmm4, %r10
+ pshufd $78, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,0,1]
+ movd %xmm1, %rbx
+ pshufd $78, %xmm3, %xmm1 ## xmm1 = xmm3[2,3,0,1]
+ movd %xmm1, %r13
+ movd %xmm0, %rsi
+ movd %xmm3, %rbp
+ subq %rsi, %rbp
+ sbbq %rbx, %r13
+ sbbq %r12, %r10
+ sbbq %r15, %r9
+ sbbq %r14, %r8
+ sbbq %r11, %rax
+ movq %rax, %rsi
+ sarq $63, %rsi
+ movq %rsi, %rbx
+ shldq $1, %rax, %rbx
+ andq (%rcx), %rbx
+ movq 40(%rcx), %r11
+ andq %rsi, %r11
+ movq 32(%rcx), %r14
+ andq %rsi, %r14
+ movq 24(%rcx), %r15
+ andq %rsi, %r15
+ movq 16(%rcx), %rdx
+ andq %rsi, %rdx
+ rolq %rsi
+ andq 8(%rcx), %rsi
+ addq %rbp, %rbx
+ movq %rbx, (%rdi)
+ adcq %r13, %rsi
+ movq %rsi, 8(%rdi)
+ adcq %r10, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r9, %r15
+ movq %r15, 24(%rdi)
+ adcq %r8, %r14
+ movq %r14, 32(%rdi)
+ adcq %rax, %r11
+ movq %r11, 40(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
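+## Double-width (12-limb) addition: adds rsi and rdx, stores the low six limbs as-is, and reduces the high six limbs modulo p (rcx) with a conditional subtraction.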
+ .globl _mcl_fpDbl_add6L
+ .p2align 4, 0x90
+_mcl_fpDbl_add6L: ## @mcl_fpDbl_add6L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 88(%rdx), %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq 80(%rdx), %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ movq 72(%rdx), %r14
+ movq 64(%rdx), %r15
+ movq 24(%rsi), %rbp
+ movq 32(%rsi), %r13
+ movq 16(%rdx), %r12
+ movq (%rdx), %rbx
+ movq 8(%rdx), %rax
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %rax
+ adcq 16(%rsi), %r12
+ adcq 24(%rdx), %rbp
+ adcq 32(%rdx), %r13
+ movq 56(%rdx), %r11
+ movq 48(%rdx), %r9
+ movq 40(%rdx), %rdx
+ movq %rbx, (%rdi)
+ movq 88(%rsi), %r8
+ movq %rax, 8(%rdi)
+ movq 80(%rsi), %r10
+ movq %r12, 16(%rdi)
+ movq 72(%rsi), %r12
+ movq %rbp, 24(%rdi)
+ movq 40(%rsi), %rax
+ adcq %rdx, %rax
+ movq 64(%rsi), %rdx
+ movq %r13, 32(%rdi)
+ movq 56(%rsi), %r13
+ movq 48(%rsi), %rbp
+ adcq %r9, %rbp
+ movq %rax, 40(%rdi)
+ adcq %r11, %r13
+ adcq %r15, %rdx
+ adcq %r14, %r12
+ adcq -16(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -8(%rsp), %r8 ## 8-byte Folded Reload
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rbp, %rsi
+ subq (%rcx), %rsi
+ movq %r13, %rbx
+ sbbq 8(%rcx), %rbx
+ movq %rdx, %r9
+ sbbq 16(%rcx), %r9
+ movq %r12, %r11
+ sbbq 24(%rcx), %r11
+ movq %r10, %r14
+ sbbq 32(%rcx), %r14
+ movq %r8, %r15
+ sbbq 40(%rcx), %r15
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %rbp, %rsi
+ movq %rsi, 48(%rdi)
+ testb %al, %al
+ cmovneq %r13, %rbx
+ movq %rbx, 56(%rdi)
+ cmovneq %rdx, %r9
+ movq %r9, 64(%rdi)
+ cmovneq %r12, %r11
+ movq %r11, 72(%rdi)
+ cmovneq %r10, %r14
+ movq %r14, 80(%rdi)
+ cmovneq %r8, %r15
+ movq %r15, 88(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
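+## Double-width (12-limb) subtraction: rsi - rdx; the low six limbs are stored directly, and p (rcx) is conditionally added to the high six limbs when the subtraction borrows.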
+ .globl _mcl_fpDbl_sub6L
+ .p2align 4, 0x90
+_mcl_fpDbl_sub6L: ## @mcl_fpDbl_sub6L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 88(%rdx), %r9
+ movq 80(%rdx), %r10
+ movq 72(%rdx), %r14
+ movq 16(%rsi), %r8
+ movq (%rsi), %r15
+ movq 8(%rsi), %r11
+ xorl %eax, %eax
+ subq (%rdx), %r15
+ sbbq 8(%rdx), %r11
+ sbbq 16(%rdx), %r8
+ movq 24(%rsi), %rbx
+ sbbq 24(%rdx), %rbx
+ movq 32(%rsi), %r12
+ sbbq 32(%rdx), %r12
+ movq 64(%rdx), %r13
+ movq %r15, (%rdi)
+ movq 56(%rdx), %rbp
+ movq %r11, 8(%rdi)
+ movq 48(%rdx), %r15
+ movq 40(%rdx), %rdx
+ movq %r8, 16(%rdi)
+ movq 88(%rsi), %r8
+ movq %rbx, 24(%rdi)
+ movq 40(%rsi), %rbx
+ sbbq %rdx, %rbx
+ movq 80(%rsi), %r11
+ movq %r12, 32(%rdi)
+ movq 48(%rsi), %rdx
+ sbbq %r15, %rdx
+ movq 72(%rsi), %r15
+ movq %rbx, 40(%rdi)
+ movq 64(%rsi), %r12
+ movq 56(%rsi), %rsi
+ sbbq %rbp, %rsi
+ sbbq %r13, %r12
+ sbbq %r14, %r15
+ sbbq %r10, %r11
+ sbbq %r9, %r8
+ movl $0, %ebp
+ sbbq $0, %rbp
+ andl $1, %ebp
+ movq (%rcx), %r14
+ cmoveq %rax, %r14
+ testb %bpl, %bpl
+ movq 16(%rcx), %r9
+ cmoveq %rax, %r9
+ movq 8(%rcx), %rbp
+ cmoveq %rax, %rbp
+ movq 40(%rcx), %r10
+ cmoveq %rax, %r10
+ movq 32(%rcx), %rbx
+ cmoveq %rax, %rbx
+ cmovneq 24(%rcx), %rax
+ addq %rdx, %r14
+ movq %r14, 48(%rdi)
+ adcq %rsi, %rbp
+ movq %rbp, 56(%rdi)
+ adcq %r12, %r9
+ movq %r9, 64(%rdi)
+ adcq %r15, %rax
+ movq %rax, 72(%rdi)
+ adcq %r11, %rbx
+ movq %rbx, 80(%rdi)
+ adcq %r8, %r10
+ movq %r10, 88(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
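+## Multiply a 7-limb operand (rsi) by a single 64-bit word (rdx): rdi receives the 8-limb product; no reduction.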
+ .globl _mcl_fp_mulUnitPre7L
+ .p2align 4, 0x90
+_mcl_fp_mulUnitPre7L: ## @mcl_fp_mulUnitPre7L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rcx, %rax
+ mulq 48(%rsi)
+ movq %rdx, %r10
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq 40(%rsi)
+ movq %rdx, %r11
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq 32(%rsi)
+ movq %rdx, %r15
+ movq %rax, %r14
+ movq %rcx, %rax
+ mulq 24(%rsi)
+ movq %rdx, %r13
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq 16(%rsi)
+ movq %rdx, %rbx
+ movq %rax, %rbp
+ movq %rcx, %rax
+ mulq 8(%rsi)
+ movq %rdx, %r8
+ movq %rax, %r9
+ movq %rcx, %rax
+ mulq (%rsi)
+ movq %rax, (%rdi)
+ addq %r9, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %rbp, %r8
+ movq %r8, 16(%rdi)
+ adcq %r12, %rbx
+ movq %rbx, 24(%rdi)
+ adcq %r14, %r13
+ movq %r13, 32(%rdi)
+ adcq -16(%rsp), %r15 ## 8-byte Folded Reload
+ movq %r15, 40(%rdi)
+ adcq -8(%rsp), %r11 ## 8-byte Folded Reload
+ movq %r11, 48(%rdi)
+ adcq $0, %r10
+ movq %r10, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
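+## Schoolbook multiplication of two 7-limb operands: rdi = 14-limb product of the values at rsi and rdx; no modular reduction.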
+ .globl _mcl_fpDbl_mulPre7L
+ .p2align 4, 0x90
+_mcl_fpDbl_mulPre7L: ## @mcl_fpDbl_mulPre7L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $16, %rsp
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq %rsi, %r9
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq (%r9), %rax
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq (%rdx), %rsi
+ mulq %rsi
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq 32(%r9), %rbp
+ movq %rbp, -88(%rsp) ## 8-byte Spill
+ movq 40(%r9), %rcx
+ movq %rcx, -128(%rsp) ## 8-byte Spill
+ movq 48(%r9), %r14
+ movq %rax, (%rdi)
+ movq %r14, %rax
+ mulq %rsi
+ movq %rdx, %rdi
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq %rsi
+ movq %rdx, %rcx
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq %rsi
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rdx, %rbp
+ movq 24(%r9), %r8
+ movq %r8, %rax
+ mulq %rsi
+ movq %rax, %r15
+ movq %rdx, %rbx
+ movq 16(%r9), %rax
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ mulq %rsi
+ movq %rax, %r13
+ movq %rdx, %r12
+ movq 8(%r9), %r11
+ movq %r11, %rax
+ mulq %rsi
+ movq %rdx, %rsi
+ movq %rax, %r10
+ addq -120(%rsp), %r10 ## 8-byte Folded Reload
+ adcq %r13, %rsi
+ adcq %r15, %r12
+ adcq -104(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -96(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, -72(%rsp) ## 8-byte Spill
+ adcq -80(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -104(%rsp) ## 8-byte Spill
+ adcq $0, %rdi
+ movq %rdi, -96(%rsp) ## 8-byte Spill
+ movq -56(%rsp), %rax ## 8-byte Reload
+ movq 8(%rax), %rcx
+ movq %r14, %rax
+ mulq %rcx
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq -128(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, %r13
+ movq -88(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rax, %r15
+ movq %r8, %rax
+ mulq %rcx
+ movq %rdx, %r8
+ movq %rax, %r14
+ movq -112(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, %rbp
+ movq %r11, %rax
+ mulq %rcx
+ movq %rdx, %r11
+ movq %rax, %rdi
+ movq -64(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ addq %r10, %rax
+ movq -8(%rsp), %r10 ## 8-byte Reload
+ movq %rax, 8(%r10)
+ adcq %rsi, %rdi
+ adcq %r12, %rbp
+ adcq %rbx, %r14
+ adcq -72(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -104(%rsp), %r13 ## 8-byte Folded Reload
+ movq -80(%rsp), %rax ## 8-byte Reload
+ adcq -96(%rsp), %rax ## 8-byte Folded Reload
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ addq %rdx, %rdi
+ adcq %r11, %rbp
+ adcq -112(%rsp), %r14 ## 8-byte Folded Reload
+ adcq %r8, %r15
+ adcq -88(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -128(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ adcq -120(%rsp), %rsi ## 8-byte Folded Reload
+ movq 48(%r9), %rdx
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ movq -56(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rcx
+ movq %rdx, %rax
+ mulq %rcx
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq 40(%r9), %rax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ mulq %rcx
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ movq 32(%r9), %rax
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ mulq %rcx
+ movq %rax, %r12
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ movq 24(%r9), %rax
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ mulq %rcx
+ movq %rax, %rbx
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ movq 16(%r9), %rax
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ mulq %rcx
+ movq %rax, %r8
+ movq %rdx, 8(%rsp) ## 8-byte Spill
+ movq 8(%r9), %rax
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ mulq %rcx
+ movq %rax, %r11
+ movq %rdx, (%rsp) ## 8-byte Spill
+ movq (%r9), %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ mulq %rcx
+ addq %rdi, %rax
+ movq %rax, 16(%r10)
+ adcq %rbp, %r11
+ adcq %r14, %r8
+ adcq %r15, %rbx
+ adcq %r13, %r12
+ movq -128(%rsp), %rdi ## 8-byte Reload
+ adcq -80(%rsp), %rdi ## 8-byte Folded Reload
+ movq -120(%rsp), %rax ## 8-byte Reload
+ adcq %rsi, %rax
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %rdx, %r11
+ adcq (%rsp), %r8 ## 8-byte Folded Reload
+ adcq 8(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -48(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -40(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, -128(%rsp) ## 8-byte Spill
+ adcq -32(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ adcq -104(%rsp), %rcx ## 8-byte Folded Reload
+ movq -56(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rbp
+ movq -64(%rsp), %rax ## 8-byte Reload
+ mulq %rbp
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq -88(%rsp), %rax ## 8-byte Reload
+ mulq %rbp
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq -96(%rsp), %rax ## 8-byte Reload
+ mulq %rbp
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, %r13
+ movq -72(%rsp), %rax ## 8-byte Reload
+ mulq %rbp
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq %rax, %r15
+ movq -112(%rsp), %rax ## 8-byte Reload
+ mulq %rbp
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, %rdi
+ movq -24(%rsp), %rax ## 8-byte Reload
+ mulq %rbp
+ movq %rdx, %r14
+ movq %rax, %r10
+ movq -16(%rsp), %rax ## 8-byte Reload
+ mulq %rbp
+ addq %r11, %rax
+ movq -8(%rsp), %rsi ## 8-byte Reload
+ movq %rax, 24(%rsi)
+ adcq %r8, %r10
+ adcq %rbx, %rdi
+ adcq %r12, %r15
+ adcq -128(%rsp), %r13 ## 8-byte Folded Reload
+ movq -64(%rsp), %rbp ## 8-byte Reload
+ adcq -120(%rsp), %rbp ## 8-byte Folded Reload
+ movq -80(%rsp), %rax ## 8-byte Reload
+ adcq %rcx, %rax
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ addq %rdx, %r10
+ adcq %r14, %rdi
+ adcq -112(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -72(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -96(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, -64(%rsp) ## 8-byte Spill
+ adcq -88(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ adcq -104(%rsp), %rsi ## 8-byte Folded Reload
+ movq 48(%r9), %rax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq -56(%rsp), %rbx ## 8-byte Reload
+ movq 32(%rbx), %rcx
+ mulq %rcx
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq 40(%r9), %rax
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ mulq %rcx
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ movq 32(%r9), %rax
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ mulq %rcx
+ movq %rax, %r12
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ movq 24(%r9), %rax
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ mulq %rcx
+ movq %rax, %rbp
+ movq %rdx, 8(%rsp) ## 8-byte Spill
+ movq 16(%r9), %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ mulq %rcx
+ movq %rax, %r14
+ movq %rdx, (%rsp) ## 8-byte Spill
+ movq 8(%r9), %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ mulq %rcx
+ movq %rax, %r11
+ movq %rdx, %r8
+ movq (%r9), %rax
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ mulq %rcx
+ addq %r10, %rax
+ movq -8(%rsp), %rcx ## 8-byte Reload
+ movq %rax, 32(%rcx)
+ adcq %rdi, %r11
+ adcq %r15, %r14
+ adcq %r13, %rbp
+ adcq -64(%rsp), %r12 ## 8-byte Folded Reload
+ movq -128(%rsp), %rcx ## 8-byte Reload
+ adcq -80(%rsp), %rcx ## 8-byte Folded Reload
+ movq -120(%rsp), %rax ## 8-byte Reload
+ adcq %rsi, %rax
+ sbbq %r13, %r13
+ andl $1, %r13d
+ addq %rdx, %r11
+ adcq %r8, %r14
+ adcq (%rsp), %rbp ## 8-byte Folded Reload
+ adcq 8(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -48(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -128(%rsp) ## 8-byte Spill
+ adcq -40(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ adcq -72(%rsp), %r13 ## 8-byte Folded Reload
+ movq 40(%rbx), %rcx
+ movq -88(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq %rax, %rdi
+ movq -96(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ movq %rax, %r10
+ movq -104(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rax, %r15
+ movq -112(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, %rbx
+ movq -16(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq %rax, %rsi
+ movq -32(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ movq %rax, %r8
+ movq -24(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ addq %r11, %rax
+ movq -8(%rsp), %rcx ## 8-byte Reload
+ movq %rax, 40(%rcx)
+ adcq %r14, %r8
+ adcq %rbp, %rsi
+ adcq %r12, %rbx
+ adcq -128(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -120(%rsp), %r10 ## 8-byte Folded Reload
+ adcq %r13, %rdi
+ movq -56(%rsp), %rax ## 8-byte Reload
+ movq 48(%rax), %r11
+ sbbq %rcx, %rcx
+ movq %r11, %rax
+ mulq 48(%r9)
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %r11, %rax
+ mulq 40(%r9)
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ movq %r11, %rax
+ mulq 32(%r9)
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, %r13
+ movq %r11, %rax
+ mulq 24(%r9)
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq %rax, %rbp
+ movq %r11, %rax
+ mulq 16(%r9)
+ movq %rdx, -24(%rsp) ## 8-byte Spill
+ movq %rax, %r14
+ movq %r11, %rax
+ mulq 8(%r9)
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ movq %rax, %r12
+ movq %r11, %rax
+ mulq (%r9)
+ andl $1, %ecx
+ addq -40(%rsp), %r8 ## 8-byte Folded Reload
+ adcq -16(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -104(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -96(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -88(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -64(%rsp), %rdi ## 8-byte Folded Reload
+ adcq -80(%rsp), %rcx ## 8-byte Folded Reload
+ addq %rax, %r8
+ movq -8(%rsp), %r9 ## 8-byte Reload
+ movq %r8, 48(%r9)
+ adcq %r12, %rsi
+ adcq %r14, %rbx
+ adcq %rbp, %r15
+ adcq %r13, %r10
+ adcq -32(%rsp), %rdi ## 8-byte Folded Reload
+ adcq -112(%rsp), %rcx ## 8-byte Folded Reload
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %rdx, %rsi
+ adcq -48(%rsp), %rbx ## 8-byte Folded Reload
+ movq %r9, %rdx
+ movq %rsi, 56(%rdx)
+ movq %rbx, 64(%rdx)
+ adcq -24(%rsp), %r15 ## 8-byte Folded Reload
+ movq %r15, 72(%rdx)
+ adcq -72(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r10, 80(%rdx)
+ adcq -128(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 88(%rdx)
+ adcq -120(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 96(%rdx)
+ adcq -56(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 104(%rdx)
+ addq $16, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_sqrPre7L
+ .p2align 4, 0x90
+_mcl_fpDbl_sqrPre7L: ## @mcl_fpDbl_sqrPre7L
+## BB#0:
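+## Squares the 7-limb operand at (%rsi) and writes the 14-limb result to
+## (%rdi), using the same word-by-word accumulation as the multiply above.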
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $16, %rsp
+ movq %rsi, %r9
+ movq %rdi, -24(%rsp) ## 8-byte Spill
+ movq 24(%r9), %r10
+ movq %r10, -128(%rsp) ## 8-byte Spill
+ movq 32(%r9), %r14
+ movq %r14, -88(%rsp) ## 8-byte Spill
+ movq 40(%r9), %rsi
+ movq %rsi, -80(%rsp) ## 8-byte Spill
+ movq 48(%r9), %rbp
+ movq %rbp, -120(%rsp) ## 8-byte Spill
+ movq (%r9), %rbx
+ movq %rbx, %rax
+ mulq %rbx
+ movq %rdx, %rcx
+ movq %rax, (%rdi)
+ movq %rbp, %rax
+ mulq %rbx
+ movq %rdx, %r11
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq %rbx
+ movq %rdx, %r8
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %r14, %rax
+ mulq %rbx
+ movq %rdx, %r13
+ movq %rax, %rsi
+ movq %r10, %rax
+ mulq %rbx
+ movq %rax, %r14
+ movq %rdx, %rdi
+ movq 16(%r9), %r15
+ movq %r15, %rax
+ mulq %rbx
+ movq %rax, %r10
+ movq %rdx, %r12
+ movq 8(%r9), %rbp
+ movq %rbp, %rax
+ mulq %rbx
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ addq %rax, %rcx
+ adcq %rdx, %r10
+ adcq %r14, %r12
+ adcq %rsi, %rdi
+ adcq -104(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -96(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, -104(%rsp) ## 8-byte Spill
+ adcq $0, %r11
+ movq %r11, -96(%rsp) ## 8-byte Spill
+ movq -120(%rsp), %rax ## 8-byte Reload
+ mulq %rbp
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, %r8
+ movq -80(%rsp), %rax ## 8-byte Reload
+ mulq %rbp
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq %rax, %rsi
+ movq -88(%rsp), %rax ## 8-byte Reload
+ mulq %rbp
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rax, %r11
+ movq -128(%rsp), %rax ## 8-byte Reload
+ mulq %rbp
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, %r14
+ movq %r15, %rax
+ mulq %rbp
+ movq %rdx, %r15
+ movq %rax, %rbx
+ movq %rbp, %rax
+ mulq %rbp
+ movq %rax, %rbp
+ addq -72(%rsp), %rcx ## 8-byte Folded Reload
+ movq -24(%rsp), %rax ## 8-byte Reload
+ movq %rcx, 8(%rax)
+ adcq %r10, %rbp
+ adcq %r12, %rbx
+ adcq %rdi, %r14
+ adcq %r13, %r11
+ movq %rsi, %rax
+ adcq -104(%rsp), %rax ## 8-byte Folded Reload
+ adcq -96(%rsp), %r8 ## 8-byte Folded Reload
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ addq -112(%rsp), %rbp ## 8-byte Folded Reload
+ adcq %rdx, %rbx
+ adcq %r15, %r14
+ adcq -128(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -88(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ adcq -80(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, -40(%rsp) ## 8-byte Spill
+ adcq -120(%rsp), %rsi ## 8-byte Folded Reload
+ movq 48(%r9), %rax
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq 16(%r9), %rdi
+ mulq %rdi
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq 40(%r9), %rax
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq 32(%r9), %rax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %r13
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ movq 24(%r9), %rcx
+ movq %rcx, %rax
+ mulq %rdi
+ movq %rax, %r10
+ movq %r10, -8(%rsp) ## 8-byte Spill
+ movq %rdx, %r12
+ movq %r12, -72(%rsp) ## 8-byte Spill
+ movq 8(%r9), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %r15
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ movq (%r9), %rax
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ movq %rax, %r8
+ movq %rdi, %rax
+ mulq %rdi
+ movq %rax, %rdi
+ addq %rbp, %r8
+ movq -24(%rsp), %rax ## 8-byte Reload
+ movq %r8, 16(%rax)
+ adcq %rbx, %r15
+ adcq %r14, %rdi
+ adcq %r10, %r11
+ adcq -48(%rsp), %r13 ## 8-byte Folded Reload
+ movq -56(%rsp), %r10 ## 8-byte Reload
+ adcq -40(%rsp), %r10 ## 8-byte Folded Reload
+ movq -120(%rsp), %rax ## 8-byte Reload
+ adcq %rsi, %rax
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ addq -16(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -64(%rsp), %rdi ## 8-byte Folded Reload
+ adcq %rdx, %r11
+ adcq %r12, %r13
+ adcq -32(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -128(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ adcq -96(%rsp), %rbp ## 8-byte Folded Reload
+ movq -112(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, %r14
+ movq -80(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq -88(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ movq %rax, %r8
+ movq (%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ movq %rax, %r12
+ movq -104(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ movq %rax, %rbx
+ movq %rcx, %rax
+ mulq %rcx
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ addq %r15, %rbx
+ movq -24(%rsp), %rcx ## 8-byte Reload
+ movq %rbx, 24(%rcx)
+ adcq %rdi, %r12
+ adcq -8(%rsp), %r11 ## 8-byte Folded Reload
+ adcq %r13, %rax
+ movq %rax, %r15
+ movq %r8, %rsi
+ adcq %r10, %rsi
+ movq -112(%rsp), %rbx ## 8-byte Reload
+ adcq -120(%rsp), %rbx ## 8-byte Folded Reload
+ adcq %rbp, %r14
+ sbbq %r8, %r8
+ movq 8(%r9), %rcx
+ movq 40(%r9), %r13
+ movq %rcx, %rax
+ mulq %r13
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq (%r9), %rbp
+ movq %rbp, %rax
+ mulq %r13
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq 32(%r9), %rdi
+ movq %rcx, %rax
+ mulq %rdi
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdi
+ movq %rax, %rbp
+ movq %rdx, (%rsp) ## 8-byte Spill
+ andl $1, %r8d
+ addq -64(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -48(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -72(%rsp), %r15 ## 8-byte Folded Reload
+ movq %r15, -64(%rsp) ## 8-byte Spill
+ adcq -56(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, -56(%rsp) ## 8-byte Spill
+ adcq -40(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, -112(%rsp) ## 8-byte Spill
+ adcq -32(%rsp), %r14 ## 8-byte Folded Reload
+ adcq -128(%rsp), %r8 ## 8-byte Folded Reload
+ movq 48(%r9), %rax
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq %rax, %rcx
+ movq %r13, %rax
+ mulq %rdi
+ movq %rax, %rsi
+ movq %rsi, -48(%rsp) ## 8-byte Spill
+ movq %rdx, %rbx
+ movq 24(%r9), %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %r15
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ movq 16(%r9), %rax
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rdx, 8(%rsp) ## 8-byte Spill
+ movq %rax, %r10
+ movq %rdi, %rax
+ mulq %rdi
+ movq %rax, %rdi
+ addq %rbp, %r12
+ movq -24(%rsp), %rbp ## 8-byte Reload
+ movq %r12, 32(%rbp)
+ adcq -8(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -64(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -56(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -112(%rsp), %rdi ## 8-byte Folded Reload
+ adcq %rsi, %r14
+ adcq %r8, %rcx
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ addq (%rsp), %r11 ## 8-byte Folded Reload
+ adcq -120(%rsp), %r10 ## 8-byte Folded Reload
+ adcq 8(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -16(%rsp), %rdi ## 8-byte Folded Reload
+ adcq %rdx, %r14
+ adcq %rbx, %rcx
+ adcq -72(%rsp), %rsi ## 8-byte Folded Reload
+ movq -128(%rsp), %rax ## 8-byte Reload
+ mulq %r13
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq -32(%rsp), %rax ## 8-byte Reload
+ mulq %r13
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, %r8
+ movq -40(%rsp), %rax ## 8-byte Reload
+ mulq %r13
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq %rax, %r12
+ movq %r13, %rax
+ mulq %r13
+ movq %rax, %r13
+ addq -104(%rsp), %r11 ## 8-byte Folded Reload
+ movq %r11, 40(%rbp)
+ adcq -96(%rsp), %r10 ## 8-byte Folded Reload
+ adcq %r15, %r12
+ adcq %rdi, %r8
+ movq %r14, %rax
+ adcq -48(%rsp), %rax ## 8-byte Folded Reload
+ adcq %rcx, %r13
+ movq -120(%rsp), %rcx ## 8-byte Reload
+ adcq %rsi, %rcx
+ sbbq %r14, %r14
+ andl $1, %r14d
+ addq -88(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -80(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -72(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, -104(%rsp) ## 8-byte Spill
+ adcq -128(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ adcq %rbx, %r13
+ adcq %rdx, %rcx
+ movq %rcx, -120(%rsp) ## 8-byte Spill
+ adcq -112(%rsp), %r14 ## 8-byte Folded Reload
+ movq 48(%r9), %rcx
+ movq %rcx, %rax
+ mulq 40(%r9)
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, %r8
+ movq %rcx, %rax
+ mulq 32(%r9)
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq %rax, %rbx
+ movq %rcx, %rax
+ mulq 24(%r9)
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rax, %rbp
+ movq %rcx, %rax
+ mulq 16(%r9)
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, %r11
+ movq %rcx, %rax
+ mulq 8(%r9)
+ movq %rdx, %r15
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq (%r9)
+ movq %rdx, %r9
+ movq %rax, %rsi
+ movq %rcx, %rax
+ mulq %rcx
+ addq %r10, %rsi
+ movq -24(%rsp), %r10 ## 8-byte Reload
+ movq %rsi, 48(%r10)
+ adcq %r12, %rdi
+ adcq -104(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -96(%rsp), %rbp ## 8-byte Folded Reload
+ adcq %r13, %rbx
+ adcq -120(%rsp), %r8 ## 8-byte Folded Reload
+ adcq %r14, %rax
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %r9, %rdi
+ adcq %r15, %r11
+ movq %r10, %rsi
+ movq %rdi, 56(%rsi)
+ movq %r11, 64(%rsi)
+ adcq -128(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 72(%rsi)
+ adcq -88(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, 80(%rsi)
+ adcq -80(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, 88(%rsi)
+ adcq -112(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 96(%rsi)
+ adcq %rdx, %rcx
+ movq %rcx, 104(%rsi)
+ addq $16, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_mont7L
+ .p2align 4, 0x90
+_mcl_fp_mont7L: ## @mcl_fp_mont7L
+## BB#0:
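+## 7-limb Montgomery multiplication: for each word of the multiplier at
+## (%rdx) a partial product is accumulated and then reduced with a multiple
+## of the modulus at (%rcx), derived from the constant stored at -8(%rcx);
+## a final conditional subtraction brings the result below the modulus
+## before it is stored at (%rdi).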
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $88, %rsp
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ movq %rdi, 80(%rsp) ## 8-byte Spill
+ movq 48(%rsi), %rax
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ movq (%rdx), %rdi
+ mulq %rdi
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq %rdx, %r12
+ movq 40(%rsi), %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq %rdx, %r8
+ movq 32(%rsi), %rax
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq %rdx, %r9
+ movq 24(%rsi), %rax
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %r14
+ movq %rdx, %r11
+ movq 16(%rsi), %rax
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %r15
+ movq %rdx, %rbx
+ movq (%rsi), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rdx, %r13
+ movq %rax, %rsi
+ movq %rbp, %rax
+ mulq %rdi
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rdx, %r10
+ addq %rsi, %r10
+ adcq %r15, %r13
+ adcq %r14, %rbx
+ movq %rbx, -72(%rsp) ## 8-byte Spill
+ adcq -8(%rsp), %r11 ## 8-byte Folded Reload
+ movq %r11, -56(%rsp) ## 8-byte Spill
+ adcq (%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, -112(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, -104(%rsp) ## 8-byte Spill
+ adcq $0, %r12
+ movq %r12, -96(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %rdx
+ movq %rdx, 40(%rsp) ## 8-byte Spill
+ movq %rax, %rdi
+ imulq %rdx, %rdi
+ movq 48(%rcx), %rdx
+ movq %rdx, 8(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq %rdx
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq 40(%rcx), %rdx
+ movq %rdx, (%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq %rdx
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq 32(%rcx), %rdx
+ movq %rdx, -8(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq %rdx
+ movq %rax, %r14
+ movq %rdx, %r9
+ movq 24(%rcx), %rdx
+ movq %rdx, 64(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq %rdx
+ movq %rax, %r8
+ movq %rdx, %rbx
+ movq 16(%rcx), %rdx
+ movq %rdx, 56(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq %rdx
+ movq %rax, %r15
+ movq %rdx, %rbp
+ movq (%rcx), %rsi
+ movq %rsi, 48(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, 72(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq %rcx
+ movq %rdx, %rcx
+ movq %rax, %r12
+ movq %rdi, %rax
+ mulq %rsi
+ movq %rdx, %r11
+ addq %r12, %r11
+ adcq %r15, %rcx
+ adcq %r8, %rbp
+ adcq %r14, %rbx
+ adcq -64(%rsp), %r9 ## 8-byte Folded Reload
+ movq -128(%rsp), %rdx ## 8-byte Reload
+ adcq -88(%rsp), %rdx ## 8-byte Folded Reload
+ movq -120(%rsp), %rdi ## 8-byte Reload
+ adcq $0, %rdi
+ addq -80(%rsp), %rax ## 8-byte Folded Reload
+ adcq %r10, %r11
+ adcq %r13, %rcx
+ adcq -72(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -56(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -112(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, -56(%rsp) ## 8-byte Spill
+ adcq -104(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ adcq -96(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, -120(%rsp) ## 8-byte Spill
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq -16(%rsp), %rax ## 8-byte Reload
+ movq 8(%rax), %rdi
+ movq %rdi, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq 32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %r12
+ movq %rdi, %rax
+ mulq 16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %r9
+ movq %rdi, %rax
+ mulq 24(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r8
+ movq %rdx, %r14
+ addq %r9, %r14
+ adcq %r12, %r13
+ adcq -64(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -88(%rsp), %r10 ## 8-byte Folded Reload
+ movq -112(%rsp), %rdi ## 8-byte Reload
+ adcq -80(%rsp), %rdi ## 8-byte Folded Reload
+ movq -104(%rsp), %rdx ## 8-byte Reload
+ adcq -72(%rsp), %rdx ## 8-byte Folded Reload
+ movq -96(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %r11, %r8
+ adcq %rcx, %r14
+ adcq %rbp, %r13
+ adcq %rbx, %r15
+ adcq -56(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -128(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, -112(%rsp) ## 8-byte Spill
+ adcq -120(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ adcq %rsi, %rax
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ movq %r8, %rcx
+ imulq 40(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq 64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %rbx
+ movq %rcx, %rax
+ mulq 56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq 72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %rbp
+ movq %rcx, %rax
+ mulq 48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ addq %rbp, %rcx
+ adcq %rdi, %rsi
+ adcq %rbx, %r9
+ adcq -88(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -80(%rsp), %r11 ## 8-byte Folded Reload
+ movq -128(%rsp), %rdi ## 8-byte Reload
+ adcq -72(%rsp), %rdi ## 8-byte Folded Reload
+ movq -120(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %r8, %rax
+ adcq %r14, %rcx
+ adcq %r13, %rsi
+ adcq %r15, %r9
+ adcq %r10, %r12
+ adcq -112(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -104(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, -128(%rsp) ## 8-byte Spill
+ adcq -96(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ adcq $0, -56(%rsp) ## 8-byte Folded Spill
+ movq -16(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rbx
+ movq %rbx, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq 32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r8
+ movq %rbx, %rax
+ mulq 16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r15
+ movq %rbx, %rax
+ mulq 24(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r14
+ movq %rdx, %r10
+ addq %r15, %r10
+ adcq %r8, %rdi
+ adcq -64(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -88(%rsp), %r13 ## 8-byte Folded Reload
+ movq -112(%rsp), %rbx ## 8-byte Reload
+ adcq -80(%rsp), %rbx ## 8-byte Folded Reload
+ movq -104(%rsp), %rdx ## 8-byte Reload
+ adcq -72(%rsp), %rdx ## 8-byte Folded Reload
+ movq -96(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %rcx, %r14
+ adcq %rsi, %r10
+ adcq %r9, %rdi
+ adcq %r12, %rbp
+ adcq %r11, %r13
+ adcq -128(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, -112(%rsp) ## 8-byte Spill
+ adcq -120(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ adcq -56(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ movq %r14, %rbx
+ imulq 40(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq 64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq 56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r15
+ movq %rbx, %rax
+ mulq 72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r12
+ movq %rbx, %rax
+ mulq 48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ addq %r12, %r11
+ adcq %r15, %r8
+ adcq -64(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -88(%rsp), %rcx ## 8-byte Folded Reload
+ adcq -80(%rsp), %r9 ## 8-byte Folded Reload
+ movq -128(%rsp), %rbx ## 8-byte Reload
+ adcq -72(%rsp), %rbx ## 8-byte Folded Reload
+ movq -120(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %r14, %rax
+ adcq %r10, %r11
+ adcq %rdi, %r8
+ adcq %rbp, %rsi
+ adcq %r13, %rcx
+ adcq -112(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -104(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, -128(%rsp) ## 8-byte Spill
+ adcq -96(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ adcq $0, -56(%rsp) ## 8-byte Folded Spill
+ movq -16(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rbx
+ movq %rbx, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq 32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r14
+ movq %rbx, %rax
+ mulq 16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r15
+ movq %rbx, %rax
+ mulq 24(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r10
+ movq %rdx, %r13
+ addq %r15, %r13
+ adcq %r14, %rdi
+ adcq -64(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -88(%rsp), %r12 ## 8-byte Folded Reload
+ movq -112(%rsp), %rbx ## 8-byte Reload
+ adcq -80(%rsp), %rbx ## 8-byte Folded Reload
+ movq -104(%rsp), %rdx ## 8-byte Reload
+ adcq -72(%rsp), %rdx ## 8-byte Folded Reload
+ movq -96(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %r11, %r10
+ adcq %r8, %r13
+ adcq %rsi, %rdi
+ adcq %rcx, %rbp
+ adcq %r9, %r12
+ adcq -128(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, -112(%rsp) ## 8-byte Spill
+ adcq -120(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ adcq -56(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ movq %r10, %rbx
+ imulq 40(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq 64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq 56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r14
+ movq %rbx, %rax
+ mulq 72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r15
+ movq %rbx, %rax
+ mulq 48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ addq %r15, %r11
+ adcq %r14, %r8
+ adcq -64(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -88(%rsp), %rcx ## 8-byte Folded Reload
+ adcq -80(%rsp), %r9 ## 8-byte Folded Reload
+ movq -128(%rsp), %rbx ## 8-byte Reload
+ adcq -72(%rsp), %rbx ## 8-byte Folded Reload
+ movq -120(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %r10, %rax
+ adcq %r13, %r11
+ adcq %rdi, %r8
+ adcq %rbp, %rsi
+ adcq %r12, %rcx
+ adcq -112(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -104(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, -128(%rsp) ## 8-byte Spill
+ adcq -96(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ adcq $0, -56(%rsp) ## 8-byte Folded Spill
+ movq -16(%rsp), %rax ## 8-byte Reload
+ movq 32(%rax), %rbx
+ movq %rbx, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq 32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r13
+ movq %rbx, %rax
+ mulq 16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r14
+ movq %rbx, %rax
+ mulq 24(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r10
+ movq %rdx, %r12
+ addq %r14, %r12
+ adcq %r13, %rdi
+ adcq -64(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -88(%rsp), %r15 ## 8-byte Folded Reload
+ movq -112(%rsp), %rbx ## 8-byte Reload
+ adcq -80(%rsp), %rbx ## 8-byte Folded Reload
+ movq -104(%rsp), %rdx ## 8-byte Reload
+ adcq -72(%rsp), %rdx ## 8-byte Folded Reload
+ movq -96(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %r11, %r10
+ adcq %r8, %r12
+ adcq %rsi, %rdi
+ adcq %rcx, %rbp
+ adcq %r9, %r15
+ adcq -128(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, -112(%rsp) ## 8-byte Spill
+ adcq -120(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ adcq -56(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq %r10, %rcx
+ imulq 40(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq 64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq 56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r14
+ movq %rcx, %rax
+ mulq 72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r8
+ movq %rcx, %rax
+ mulq 48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ addq %r8, %r11
+ adcq %r14, %rbx
+ adcq -64(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -88(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -80(%rsp), %r13 ## 8-byte Folded Reload
+ movq -56(%rsp), %rdx ## 8-byte Reload
+ adcq -72(%rsp), %rdx ## 8-byte Folded Reload
+ movq -128(%rsp), %rcx ## 8-byte Reload
+ adcq $0, %rcx
+ addq %r10, %rax
+ adcq %r12, %r11
+ adcq %rdi, %rbx
+ adcq %rbp, %rsi
+ adcq %r15, %r9
+ adcq -112(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -104(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ adcq -96(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -128(%rsp) ## 8-byte Spill
+ movq -120(%rsp), %r15 ## 8-byte Reload
+ adcq $0, %r15
+ movq -16(%rsp), %rax ## 8-byte Reload
+ movq 40(%rax), %rcx
+ movq %rcx, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq 32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq 16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r14
+ movq %rcx, %rax
+ mulq 24(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r10
+ movq %rdx, %r8
+ addq %r14, %r8
+ adcq %r12, %rdi
+ adcq -64(%rsp), %rbp ## 8-byte Folded Reload
+ movq -120(%rsp), %r14 ## 8-byte Reload
+ adcq -88(%rsp), %r14 ## 8-byte Folded Reload
+ movq -112(%rsp), %rdx ## 8-byte Reload
+ adcq -80(%rsp), %rdx ## 8-byte Folded Reload
+ movq -104(%rsp), %rcx ## 8-byte Reload
+ adcq -72(%rsp), %rcx ## 8-byte Folded Reload
+ movq -96(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %r11, %r10
+ adcq %rbx, %r8
+ adcq %rsi, %rdi
+ adcq %r9, %rbp
+ adcq %r13, %r14
+ movq %r14, -120(%rsp) ## 8-byte Spill
+ adcq -56(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ adcq -128(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -104(%rsp) ## 8-byte Spill
+ adcq %r15, %rax
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ movq %r10, %rcx
+ imulq 40(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq 64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq 56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %r13
+ movq %rcx, %rax
+ mulq 72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r9
+ movq %rcx, %rax
+ mulq 48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ addq %r9, %r11
+ adcq %r13, %rbx
+ adcq -64(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -88(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -80(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -72(%rsp), %r14 ## 8-byte Folded Reload
+ movq -128(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %r10, %rax
+ adcq %r8, %r11
+ adcq %rdi, %rbx
+ adcq %rbp, %r15
+ adcq -120(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -112(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, -112(%rsp) ## 8-byte Spill
+ adcq -104(%rsp), %r14 ## 8-byte Folded Reload
+ movq %r14, -104(%rsp) ## 8-byte Spill
+ adcq -96(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq -56(%rsp), %r8 ## 8-byte Reload
+ adcq $0, %r8
+ movq -16(%rsp), %rax ## 8-byte Reload
+ movq 48(%rax), %rcx
+ movq %rcx, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -24(%rsp) ## 8-byte Spill
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq 32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq 16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, %rbp
+ movq %rcx, %rax
+ mulq 24(%rsp) ## 8-byte Folded Reload
+ movq %rax, %rsi
+ movq %rdx, %r10
+ addq %rbp, %r10
+ adcq %rdi, %r14
+ adcq -48(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -40(%rsp), %r9 ## 8-byte Folded Reload
+ movq -32(%rsp), %rcx ## 8-byte Reload
+ adcq -120(%rsp), %rcx ## 8-byte Folded Reload
+ movq -24(%rsp), %rax ## 8-byte Reload
+ adcq -96(%rsp), %rax ## 8-byte Folded Reload
+ movq -16(%rsp), %rdi ## 8-byte Reload
+ adcq $0, %rdi
+ addq %r11, %rsi
+ movq %rsi, -48(%rsp) ## 8-byte Spill
+ adcq %rbx, %r10
+ adcq %r15, %r14
+ adcq %r12, %r13
+ adcq -112(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, -40(%rsp) ## 8-byte Spill
+ adcq -104(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -32(%rsp) ## 8-byte Spill
+ adcq -128(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ adcq %r8, %rdi
+ movq %rdi, -16(%rsp) ## 8-byte Spill
+ sbbq %rcx, %rcx
+ movq 40(%rsp), %r8 ## 8-byte Reload
+ imulq %rsi, %r8
+ movq %r8, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ movq %r8, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ movq %r8, %rax
+ mulq -8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ movq %r8, %rax
+ mulq 64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq %r8, %rax
+ mulq 56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r11
+ movq %r8, %rax
+ movq %r8, %r12
+ mulq 48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %r8
+ movq %r12, %rax
+ movq 72(%rsp), %r12 ## 8-byte Reload
+ mulq %r12
+ andl $1, %ecx
+ addq %r15, %rax
+ adcq %r11, %rdx
+ adcq 16(%rsp), %rbp ## 8-byte Folded Reload
+ adcq 24(%rsp), %rbx ## 8-byte Folded Reload
+ adcq 32(%rsp), %rsi ## 8-byte Folded Reload
+ adcq 40(%rsp), %r9 ## 8-byte Folded Reload
+ adcq $0, %rdi
+ addq -48(%rsp), %r8 ## 8-byte Folded Reload
+ adcq %r10, %rax
+ adcq %r14, %rdx
+ adcq %r13, %rbp
+ adcq -40(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -32(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -24(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -16(%rsp), %rdi ## 8-byte Folded Reload
+ adcq $0, %rcx
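+## Final reduction: subtract the modulus limbs (reloaded from their stack
+## spills) and, if the subtraction borrows, keep the unreduced values via
+## the cmovne selects below.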
+ movq %rax, %r8
+ subq 48(%rsp), %r8 ## 8-byte Folded Reload
+ movq %rdx, %r10
+ sbbq %r12, %r10
+ movq %rbp, %r11
+ sbbq 56(%rsp), %r11 ## 8-byte Folded Reload
+ movq %rbx, %r14
+ sbbq 64(%rsp), %r14 ## 8-byte Folded Reload
+ movq %rsi, %r15
+ sbbq -8(%rsp), %r15 ## 8-byte Folded Reload
+ movq %r9, %r12
+ sbbq (%rsp), %r12 ## 8-byte Folded Reload
+ movq %rdi, %r13
+ sbbq 8(%rsp), %r13 ## 8-byte Folded Reload
+ sbbq $0, %rcx
+ andl $1, %ecx
+ cmovneq %rdi, %r13
+ testb %cl, %cl
+ cmovneq %rax, %r8
+ movq 80(%rsp), %rax ## 8-byte Reload
+ movq %r8, (%rax)
+ cmovneq %rdx, %r10
+ movq %r10, 8(%rax)
+ cmovneq %rbp, %r11
+ movq %r11, 16(%rax)
+ cmovneq %rbx, %r14
+ movq %r14, 24(%rax)
+ cmovneq %rsi, %r15
+ movq %r15, 32(%rax)
+ cmovneq %r9, %r12
+ movq %r12, 40(%rax)
+ movq %r13, 48(%rax)
+ addq $88, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montNF7L
+ .p2align 4, 0x90
+_mcl_fp_montNF7L: ## @mcl_fp_montNF7L
+## BB#0:
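+## 'NF' variant of the 7-limb Montgomery multiplication: same word-by-word
+## multiply-and-reduce structure as mcl_fp_mont7L, but without the explicit
+## carry-mask (sbbq/andl $1) bookkeeping between iterations.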
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $80, %rsp
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ movq %rdi, 72(%rsp) ## 8-byte Spill
+ movq 48(%rsi), %rax
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ movq (%rdx), %rbx
+ mulq %rbx
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ movq %rdx, %r12
+ movq 40(%rsi), %rax
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ mulq %rbx
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ movq %rdx, %r8
+ movq 32(%rsi), %rax
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ mulq %rbx
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ movq %rdx, %rbp
+ movq 24(%rsi), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ mulq %rbx
+ movq %rax, %r10
+ movq %rdx, %r15
+ movq 16(%rsi), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ mulq %rbx
+ movq %rax, %r9
+ movq %rdx, %r14
+ movq (%rsi), %rdi
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, 64(%rsp) ## 8-byte Spill
+ mulq %rbx
+ movq %rdx, %r13
+ movq %rax, %r11
+ movq %rdi, %rax
+ mulq %rbx
+ movq %rdx, %rsi
+ addq %r11, %rsi
+ adcq %r9, %r13
+ adcq %r10, %r14
+ adcq -32(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -24(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, -128(%rsp) ## 8-byte Spill
+ adcq -16(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, -120(%rsp) ## 8-byte Spill
+ adcq $0, %r12
+ movq %r12, -104(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %rdx
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ movq %rax, %r10
+ movq %rax, %r8
+ imulq %rdx, %r10
+ movq 48(%rcx), %rdx
+ movq %rdx, 32(%rsp) ## 8-byte Spill
+ movq %r10, %rax
+ mulq %rdx
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq 40(%rcx), %rdx
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ movq %r10, %rax
+ mulq %rdx
+ movq %rax, %r11
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq 32(%rcx), %rdx
+ movq %rdx, -24(%rsp) ## 8-byte Spill
+ movq %r10, %rax
+ mulq %rdx
+ movq %rax, %rbp
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq 24(%rcx), %rdx
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ movq %r10, %rax
+ mulq %rdx
+ movq %rax, %r12
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq 16(%rcx), %rdx
+ movq %rdx, 56(%rsp) ## 8-byte Spill
+ movq %r10, %rax
+ mulq %rdx
+ movq %rax, %rbx
+ movq %rdx, 24(%rsp) ## 8-byte Spill
+ movq (%rcx), %rdi
+ movq %rdi, 40(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, 48(%rsp) ## 8-byte Spill
+ movq %r10, %rax
+ mulq %rcx
+ movq %rdx, %r9
+ movq %rax, %rcx
+ movq %r10, %rax
+ mulq %rdi
+ addq %r8, %rax
+ adcq %rsi, %rcx
+ adcq %r13, %rbx
+ adcq %r14, %r12
+ adcq %r15, %rbp
+ adcq -128(%rsp), %r11 ## 8-byte Folded Reload
+ movq -112(%rsp), %rdi ## 8-byte Reload
+ adcq -120(%rsp), %rdi ## 8-byte Folded Reload
+ movq -104(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %rdx, %rcx
+ adcq %r9, %rbx
+ adcq 24(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -88(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -80(%rsp), %r11 ## 8-byte Folded Reload
+ movq %r11, -120(%rsp) ## 8-byte Spill
+ adcq -72(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, -112(%rsp) ## 8-byte Spill
+ adcq -96(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq -40(%rsp), %rax ## 8-byte Reload
+ movq 8(%rax), %rsi
+ movq %rsi, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, %rdi
+ movq %rsi, %rax
+ mulq 64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r11
+ movq %rsi, %rax
+ mulq -8(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r10
+ movq %rdx, %r15
+ addq %r11, %r15
+ adcq %rdi, %r8
+ adcq 24(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -88(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -80(%rsp), %r14 ## 8-byte Folded Reload
+ movq -128(%rsp), %rdx ## 8-byte Reload
+ adcq -72(%rsp), %rdx ## 8-byte Folded Reload
+ movq -96(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %rcx, %r10
+ adcq %rbx, %r15
+ adcq %r12, %r8
+ adcq %rbp, %r9
+ adcq -120(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -112(%rsp), %r14 ## 8-byte Folded Reload
+ adcq -104(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ adcq $0, %rax
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %r10, %rsi
+ imulq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, %rax
+ mulq 32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq %rax, %rbx
+ movq %rsi, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rax, %r11
+ movq %rsi, %rax
+ mulq 56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r12
+ movq %rsi, %rax
+ mulq 48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %rbp
+ movq %rsi, %rax
+ mulq 40(%rsp) ## 8-byte Folded Reload
+ addq %r10, %rax
+ adcq %r15, %rbp
+ adcq %r8, %r12
+ adcq %r9, %r11
+ adcq %r13, %rbx
+ movq -120(%rsp), %r8 ## 8-byte Reload
+ adcq %r14, %r8
+ movq -112(%rsp), %rsi ## 8-byte Reload
+ adcq -128(%rsp), %rsi ## 8-byte Folded Reload
+ movq -96(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %rdx, %rbp
+ adcq %rdi, %r12
+ adcq %rcx, %r11
+ adcq -88(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -80(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, -120(%rsp) ## 8-byte Spill
+ adcq -72(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, -112(%rsp) ## 8-byte Spill
+ adcq -104(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq -40(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rdi
+ movq %rdi, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r14
+ movq %rdi, %rax
+ mulq 64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r13
+ movq %rdi, %rax
+ mulq -8(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r10
+ movq %rdx, %r15
+ addq %r13, %r15
+ adcq %r14, %rcx
+ adcq 24(%rsp), %r8 ## 8-byte Folded Reload
+ adcq -88(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -80(%rsp), %r9 ## 8-byte Folded Reload
+ movq -128(%rsp), %rdx ## 8-byte Reload
+ adcq -72(%rsp), %rdx ## 8-byte Folded Reload
+ movq -104(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %rbp, %r10
+ adcq %r12, %r15
+ adcq %r11, %rcx
+ adcq %rbx, %r8
+ adcq -120(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -112(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -96(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ adcq $0, %rax
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %r10, %rdi
+ imulq 16(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, %rax
+ mulq 32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rax, %r11
+ movq %rdi, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, %r12
+ movq %rdi, %rax
+ mulq 56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %rbp
+ movq %rdi, %rax
+ mulq 48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, %rbx
+ movq %rdi, %rax
+ mulq 40(%rsp) ## 8-byte Folded Reload
+ addq %r10, %rax
+ adcq %r15, %rbx
+ adcq %rcx, %rbp
+ adcq %r8, %r12
+ adcq %rsi, %r11
+ movq -112(%rsp), %rcx ## 8-byte Reload
+ adcq %r9, %rcx
+ movq -96(%rsp), %rsi ## 8-byte Reload
+ adcq -128(%rsp), %rsi ## 8-byte Folded Reload
+ movq -104(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %rdx, %rbx
+ adcq %r14, %rbp
+ adcq %r13, %r12
+ adcq -120(%rsp), %r11 ## 8-byte Folded Reload
+ movq %r11, -120(%rsp) ## 8-byte Spill
+ adcq -88(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -112(%rsp) ## 8-byte Spill
+ adcq -80(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, -96(%rsp) ## 8-byte Spill
+ adcq -72(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq -40(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rdi
+ movq %rdi, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r13
+ movq %rdi, %rax
+ mulq 64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r14
+ movq %rdi, %rax
+ mulq -8(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r10
+ movq %rdx, %rdi
+ addq %r14, %rdi
+ adcq %r13, %r8
+ adcq -88(%rsp), %rcx ## 8-byte Folded Reload
+ adcq -80(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -72(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -128(%rsp), %r11 ## 8-byte Folded Reload
+ adcq $0, %r9
+ addq %rbx, %r10
+ adcq %rbp, %rdi
+ adcq %r12, %r8
+ adcq -120(%rsp), %rcx ## 8-byte Folded Reload
+ adcq -112(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -96(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -104(%rsp), %r11 ## 8-byte Folded Reload
+ movq %r11, -112(%rsp) ## 8-byte Spill
+ adcq $0, %r9
+ movq %r10, %rbp
+ imulq 16(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, %rax
+ mulq 32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq %rax, %r14
+ movq %rbp, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq %rax, %r12
+ movq %rbp, %rax
+ mulq 56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rax, %r11
+ movq %rbp, %rax
+ mulq 48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %rbx
+ movq %rbp, %rax
+ mulq 40(%rsp) ## 8-byte Folded Reload
+ addq %r10, %rax
+ adcq %rdi, %rbx
+ adcq %r8, %r11
+ adcq %rcx, %r12
+ adcq %rsi, %r14
+ movq -104(%rsp), %rcx ## 8-byte Reload
+ adcq %r15, %rcx
+ movq -96(%rsp), %rax ## 8-byte Reload
+ adcq -112(%rsp), %rax ## 8-byte Folded Reload
+ adcq $0, %r9
+ addq %rdx, %rbx
+ adcq %r13, %r11
+ adcq -88(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -80(%rsp), %r14 ## 8-byte Folded Reload
+ movq %r14, -112(%rsp) ## 8-byte Spill
+ adcq -72(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -104(%rsp) ## 8-byte Spill
+ adcq -128(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ adcq -120(%rsp), %r9 ## 8-byte Folded Reload
+ movq -40(%rsp), %rax ## 8-byte Reload
+ movq 32(%rax), %rdi
+ movq %rdi, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %rbp
+ movq %rdi, %rax
+ mulq 64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r14
+ movq %rdi, %rax
+ mulq -8(%rsp) ## 8-byte Folded Reload
+ movq %rax, %rdi
+ movq %rdx, %r13
+ addq %r14, %r13
+ adcq %rbp, %r8
+ adcq -88(%rsp), %rcx ## 8-byte Folded Reload
+ adcq -80(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -72(%rsp), %r10 ## 8-byte Folded Reload
+ movq -120(%rsp), %rax ## 8-byte Reload
+ adcq -128(%rsp), %rax ## 8-byte Folded Reload
+ adcq $0, %r15
+ addq %rbx, %rdi
+ adcq %r11, %r13
+ adcq %r12, %r8
+ adcq -112(%rsp), %rcx ## 8-byte Folded Reload
+ adcq -104(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -96(%rsp), %r10 ## 8-byte Folded Reload
+ adcq %r9, %rax
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ adcq $0, %r15
+ movq %rdi, %rbp
+ imulq 16(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, %rax
+ mulq 32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq %rax, %r9
+ movq %rbp, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, %r12
+ movq %rbp, %rax
+ mulq 56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rax, %r14
+ movq %rbp, %rax
+ mulq 48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, %rbx
+ movq %rbp, %rax
+ mulq 40(%rsp) ## 8-byte Folded Reload
+ addq %rdi, %rax
+ adcq %r13, %rbx
+ adcq %r8, %r14
+ adcq %rcx, %r12
+ adcq %rsi, %r9
+ movq -112(%rsp), %rcx ## 8-byte Reload
+ adcq %r10, %rcx
+ movq -104(%rsp), %rax ## 8-byte Reload
+ adcq -120(%rsp), %rax ## 8-byte Folded Reload
+ adcq $0, %r15
+ addq %rdx, %rbx
+ adcq %r11, %r14
+ adcq -88(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -128(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, -128(%rsp) ## 8-byte Spill
+ adcq -80(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -112(%rsp) ## 8-byte Spill
+ adcq -72(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ adcq -96(%rsp), %r15 ## 8-byte Folded Reload
+ movq -40(%rsp), %rax ## 8-byte Reload
+ movq 40(%rax), %rbp
+ movq %rbp, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %rcx
+ movq %rbp, %rax
+ mulq 64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r9
+ movq %rbp, %rax
+ mulq -8(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r11
+ movq %rdx, %r10
+ addq %r9, %r10
+ adcq %rcx, %r8
+ adcq 24(%rsp), %rdi ## 8-byte Folded Reload
+ adcq -88(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -80(%rsp), %r13 ## 8-byte Folded Reload
+ movq -120(%rsp), %rcx ## 8-byte Reload
+ adcq -72(%rsp), %rcx ## 8-byte Folded Reload
+ movq -96(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %rbx, %r11
+ adcq %r14, %r10
+ adcq %r12, %r8
+ adcq -128(%rsp), %rdi ## 8-byte Folded Reload
+ adcq -112(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -104(%rsp), %r13 ## 8-byte Folded Reload
+ adcq %r15, %rcx
+ movq %rcx, -120(%rsp) ## 8-byte Spill
+ adcq $0, %rax
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %r11, %rbx
+ imulq 16(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, %rax
+ mulq 32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq %rax, %r9
+ movq %rbx, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rax, %r15
+ movq %rbx, %rax
+ mulq 56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, %rbp
+ movq %rbx, %rax
+ mulq 48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %rcx
+ movq %rbx, %rax
+ mulq 40(%rsp) ## 8-byte Folded Reload
+ addq %r11, %rax
+ adcq %r10, %rcx
+ adcq %r8, %rbp
+ adcq %rdi, %r15
+ adcq %rsi, %r9
+ movq -112(%rsp), %rbx ## 8-byte Reload
+ adcq %r13, %rbx
+ movq -104(%rsp), %rsi ## 8-byte Reload
+ adcq -120(%rsp), %rsi ## 8-byte Folded Reload
+ movq -96(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %rdx, %rcx
+ adcq %r12, %rbp
+ adcq %r14, %r15
+ adcq -88(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, -120(%rsp) ## 8-byte Spill
+ adcq -80(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, -112(%rsp) ## 8-byte Spill
+ adcq -72(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, -104(%rsp) ## 8-byte Spill
+ adcq -128(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq -40(%rsp), %rax ## 8-byte Reload
+ movq 48(%rax), %rdi
+ movq %rdi, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %r9
+ movq %rdi, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, %rbx
+ movq %rdi, %rax
+ mulq 64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %rsi
+ movq %rdi, %rax
+ mulq -8(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r12
+ movq %rdx, %r8
+ addq %rsi, %r8
+ adcq %rbx, %r10
+ adcq %r9, %r11
+ adcq -64(%rsp), %r13 ## 8-byte Folded Reload
+ movq -48(%rsp), %rdx ## 8-byte Reload
+ adcq -56(%rsp), %rdx ## 8-byte Folded Reload
+ movq -40(%rsp), %rax ## 8-byte Reload
+ adcq -128(%rsp), %rax ## 8-byte Folded Reload
+ adcq $0, %r14
+ addq %rcx, %r12
+ adcq %rbp, %r8
+ adcq %r15, %r10
+ adcq -120(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -112(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -104(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ adcq -96(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ adcq $0, %r14
+ movq 16(%rsp), %rdi ## 8-byte Reload
+ imulq %r12, %rdi
+ movq %rdi, %rax
+ mulq 32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ movq %rax, %r9
+ movq %rdi, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq %rax, %rbp
+ movq %rdi, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ movq %rax, %rsi
+ movq %rdi, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, 8(%rsp) ## 8-byte Spill
+ movq %rax, %rcx
+ movq %rdi, %rax
+ mulq 40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, (%rsp) ## 8-byte Spill
+ movq %rax, %r15
+ movq %rdi, %rax
+ mulq 56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -8(%rsp) ## 8-byte Spill
+ movq %rax, %rbx
+ movq %rdi, %rax
+ mulq 48(%rsp) ## 8-byte Folded Reload
+ addq %r12, %r15
+ adcq %r8, %rax
+ adcq %r10, %rbx
+ adcq %r11, %rcx
+ adcq %r13, %rsi
+ adcq -48(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -40(%rsp), %r9 ## 8-byte Folded Reload
+ adcq $0, %r14
+ addq (%rsp), %rax ## 8-byte Folded Reload
+ adcq %rdx, %rbx
+ adcq -8(%rsp), %rcx ## 8-byte Folded Reload
+ adcq 8(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -64(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -56(%rsp), %r9 ## 8-byte Folded Reload
+ adcq 16(%rsp), %r14 ## 8-byte Folded Reload
+ movq %rax, %r13
+ subq 40(%rsp), %r13 ## 8-byte Folded Reload
+ movq %rbx, %r12
+ sbbq 48(%rsp), %r12 ## 8-byte Folded Reload
+ movq %rcx, %r8
+ sbbq 56(%rsp), %r8 ## 8-byte Folded Reload
+ movq %rsi, %r10
+ sbbq -32(%rsp), %r10 ## 8-byte Folded Reload
+ movq %rbp, %r11
+ sbbq -24(%rsp), %r11 ## 8-byte Folded Reload
+ movq %r9, %r15
+ sbbq -16(%rsp), %r15 ## 8-byte Folded Reload
+ movq %r14, %rdx
+ sbbq 32(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ sarq $63, %rdi
+ cmovsq %rax, %r13
+ movq 72(%rsp), %rax ## 8-byte Reload
+ movq %r13, (%rax)
+ cmovsq %rbx, %r12
+ movq %r12, 8(%rax)
+ cmovsq %rcx, %r8
+ movq %r8, 16(%rax)
+ cmovsq %rsi, %r10
+ movq %r10, 24(%rax)
+ cmovsq %rbp, %r11
+ movq %r11, 32(%rax)
+ cmovsq %r9, %r15
+ movq %r15, 40(%rax)
+ cmovsq %r14, %rdx
+ movq %rdx, 48(%rax)
+ addq $80, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
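+## mcl_fp_montRed7L(z, xy, p): Montgomery reduction of a 14-limb (896-bit)
+## value xy down to 7 limbs modulo p. Each of the seven rounds multiplies by
+## the word loaded from -8 off the modulus pointer (presumably -p^-1 mod 2^64)
+## and folds in one limb; a final conditional subtraction of p normalizes z.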
+ .globl _mcl_fp_montRed7L
+ .p2align 4, 0x90
+_mcl_fp_montRed7L: ## @mcl_fp_montRed7L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $72, %rsp
+ movq %rdx, %rcx
+ movq %rdi, 64(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %rax
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ movq (%rsi), %rbp
+ movq %rbp, -48(%rsp) ## 8-byte Spill
+ imulq %rax, %rbp
+ movq 48(%rcx), %rdx
+ movq %rdx, (%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdx
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq 40(%rcx), %rdx
+ movq %rdx, 8(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdx
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rdx, %r15
+ movq 32(%rcx), %rdx
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdx
+ movq %rax, %r14
+ movq %rdx, %r11
+ movq 24(%rcx), %rdx
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdx
+ movq %rax, %r13
+ movq %rdx, %r10
+ movq 16(%rcx), %rdx
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdx
+ movq %rax, %r9
+ movq %rdx, %r12
+ movq (%rcx), %rdi
+ movq %rdi, 24(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, -24(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq %rcx
+ movq %rdx, %rcx
+ movq %rax, %rbx
+ movq %rbp, %rax
+ mulq %rdi
+ movq %rdx, %r8
+ addq %rbx, %r8
+ adcq %r9, %rcx
+ adcq %r13, %r12
+ adcq %r14, %r10
+ adcq -72(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -104(%rsp), %r15 ## 8-byte Folded Reload
+ movq -128(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq -48(%rsp), %rax ## 8-byte Folded Reload
+ adcq 8(%rsi), %r8
+ adcq 16(%rsi), %rcx
+ adcq 24(%rsi), %r12
+ adcq 32(%rsi), %r10
+ movq %r10, 40(%rsp) ## 8-byte Spill
+ adcq 40(%rsi), %r11
+ movq %r11, -40(%rsp) ## 8-byte Spill
+ adcq 48(%rsi), %r15
+ movq %r15, -96(%rsp) ## 8-byte Spill
+ adcq 56(%rsi), %rdx
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq 104(%rsi), %rax
+ movq 96(%rsi), %rdx
+ movq 88(%rsi), %rdi
+ movq 80(%rsi), %rbp
+ movq 72(%rsi), %rbx
+ movq 64(%rsi), %r9
+ adcq $0, %r9
+ adcq $0, %rbx
+ movq %rbx, -8(%rsp) ## 8-byte Spill
+ adcq $0, %rbp
+ movq %rbp, -80(%rsp) ## 8-byte Spill
+ adcq $0, %rdi
+ movq %rdi, -64(%rsp) ## 8-byte Spill
+ adcq $0, %rdx
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ adcq $0, %rax
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ movq %r8, %rdi
+ imulq -56(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, 48(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ movq 16(%rsp), %r13 ## 8-byte Reload
+ mulq %r13
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rax, 56(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r10
+ movq %rdi, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %rsi
+ movq %rdi, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, %r15
+ movq %rdi, %rax
+ mulq 24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r14
+ addq %r15, %r14
+ adcq %rsi, %r11
+ adcq %r10, %rbp
+ adcq 56(%rsp), %rbx ## 8-byte Folded Reload
+ movq -88(%rsp), %rdi ## 8-byte Reload
+ adcq 48(%rsp), %rdi ## 8-byte Folded Reload
+ movq -120(%rsp), %rsi ## 8-byte Reload
+ adcq 32(%rsp), %rsi ## 8-byte Folded Reload
+ movq -112(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %r8, %rax
+ adcq %rcx, %r14
+ adcq %r12, %r11
+ adcq 40(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -40(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -96(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, -88(%rsp) ## 8-byte Spill
+ adcq -128(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, -120(%rsp) ## 8-byte Spill
+ adcq %r9, %rdx
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ adcq $0, -8(%rsp) ## 8-byte Folded Spill
+ adcq $0, -80(%rsp) ## 8-byte Folded Spill
+ adcq $0, -64(%rsp) ## 8-byte Folded Spill
+ adcq $0, -72(%rsp) ## 8-byte Folded Spill
+ adcq $0, -104(%rsp) ## 8-byte Folded Spill
+ adcq $0, -48(%rsp) ## 8-byte Folded Spill
+ movq %r14, %rcx
+ imulq -56(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ movq 8(%rsp), %r15 ## 8-byte Reload
+ mulq %r15
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq %r13
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ movq %rax, 48(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r8
+ movq %rcx, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, %r13
+ movq %rcx, %rax
+ mulq 24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r10
+ addq %r13, %r10
+ adcq %r12, %r9
+ adcq %r8, %rdi
+ adcq 48(%rsp), %rsi ## 8-byte Folded Reload
+ movq -40(%rsp), %r8 ## 8-byte Reload
+ adcq 32(%rsp), %r8 ## 8-byte Folded Reload
+ movq -96(%rsp), %rdx ## 8-byte Reload
+ adcq 40(%rsp), %rdx ## 8-byte Folded Reload
+ movq -128(%rsp), %rcx ## 8-byte Reload
+ adcq $0, %rcx
+ addq %r14, %rax
+ adcq %r11, %r10
+ adcq %rbp, %r9
+ adcq %rbx, %rdi
+ adcq -88(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -120(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, -40(%rsp) ## 8-byte Spill
+ adcq -112(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ adcq -8(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -128(%rsp) ## 8-byte Spill
+ adcq $0, -80(%rsp) ## 8-byte Folded Spill
+ adcq $0, -64(%rsp) ## 8-byte Folded Spill
+ adcq $0, -72(%rsp) ## 8-byte Folded Spill
+ adcq $0, -104(%rsp) ## 8-byte Folded Spill
+ adcq $0, -48(%rsp) ## 8-byte Folded Spill
+ movq %r10, %rbp
+ imulq -56(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, %rax
+ movq (%rsp), %r8 ## 8-byte Reload
+ mulq %r8
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq %r15
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq 16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %r13
+ movq %rbp, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r14
+ movq %rbp, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r15
+ movq %rbp, %rax
+ mulq 24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ addq %r15, %r11
+ adcq %r14, %rbx
+ adcq %r13, %rcx
+ adcq 32(%rsp), %r12 ## 8-byte Folded Reload
+ movq -88(%rsp), %r14 ## 8-byte Reload
+ adcq 40(%rsp), %r14 ## 8-byte Folded Reload
+ movq -120(%rsp), %rbp ## 8-byte Reload
+ adcq -8(%rsp), %rbp ## 8-byte Folded Reload
+ movq -112(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %r10, %rax
+ adcq %r9, %r11
+ adcq %rdi, %rbx
+ adcq %rsi, %rcx
+ adcq -40(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -96(%rsp), %r14 ## 8-byte Folded Reload
+ movq %r14, -88(%rsp) ## 8-byte Spill
+ adcq -128(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, -120(%rsp) ## 8-byte Spill
+ adcq -80(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ adcq $0, -64(%rsp) ## 8-byte Folded Spill
+ adcq $0, -72(%rsp) ## 8-byte Folded Spill
+ adcq $0, -104(%rsp) ## 8-byte Folded Spill
+ adcq $0, -48(%rsp) ## 8-byte Folded Spill
+ movq %r11, %rdi
+ imulq -56(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, %rax
+ mulq %r8
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq 16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %r14
+ movq %rdi, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r10
+ movq %rdi, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r8
+ movq %rdi, %rax
+ mulq 24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ addq %r8, %r9
+ adcq %r10, %rbp
+ adcq %r14, %rsi
+ adcq -8(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -40(%rsp), %r15 ## 8-byte Folded Reload
+ movq -128(%rsp), %rdi ## 8-byte Reload
+ adcq -96(%rsp), %rdi ## 8-byte Folded Reload
+ movq -80(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %r11, %rax
+ adcq %rbx, %r9
+ adcq %rcx, %rbp
+ adcq %r12, %rsi
+ adcq -88(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -120(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -112(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, -128(%rsp) ## 8-byte Spill
+ adcq -64(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ adcq $0, -72(%rsp) ## 8-byte Folded Spill
+ movq -104(%rsp), %r14 ## 8-byte Reload
+ adcq $0, %r14
+ adcq $0, -48(%rsp) ## 8-byte Folded Spill
+ movq %r9, %rdi
+ imulq -56(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq 16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %rbx
+ movq %rdi, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, %rcx
+ movq %rdi, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %r8
+ movq %rdi, %rax
+ mulq 24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ addq %r8, %rdi
+ adcq %rcx, %r10
+ adcq %rbx, %r11
+ adcq -96(%rsp), %r12 ## 8-byte Folded Reload
+ movq -120(%rsp), %rbx ## 8-byte Reload
+ adcq -88(%rsp), %rbx ## 8-byte Folded Reload
+ movq -112(%rsp), %rdx ## 8-byte Reload
+ adcq -104(%rsp), %rdx ## 8-byte Folded Reload
+ movq -64(%rsp), %rcx ## 8-byte Reload
+ adcq $0, %rcx
+ addq %r9, %rax
+ adcq %rbp, %rdi
+ adcq %rsi, %r10
+ adcq %r13, %r11
+ adcq %r15, %r12
+ adcq -128(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, -120(%rsp) ## 8-byte Spill
+ adcq -80(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ adcq -72(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -64(%rsp) ## 8-byte Spill
+ adcq $0, %r14
+ movq %r14, -104(%rsp) ## 8-byte Spill
+ adcq $0, -48(%rsp) ## 8-byte Folded Spill
+ movq -56(%rsp), %rbp ## 8-byte Reload
+ imulq %rdi, %rbp
+ movq %rbp, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq 16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r13
+ movq %rbp, %rax
+ movq %rbp, %r14
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r8
+ movq %r14, %rax
+ movq 24(%rsp), %r14 ## 8-byte Reload
+ mulq %r14
+ addq %r8, %rdx
+ adcq %r13, %rbp
+ adcq -128(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -80(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -72(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -56(%rsp), %r9 ## 8-byte Folded Reload
+ adcq $0, %rcx
+ addq %rdi, %rax
+ adcq %r10, %rdx
+ adcq %r11, %rbp
+ adcq %r12, %rsi
+ adcq -120(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -112(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -64(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -104(%rsp), %rcx ## 8-byte Folded Reload
+ movq -48(%rsp), %rdi ## 8-byte Reload
+ adcq $0, %rdi
+ movq %rdx, %rax
+ subq %r14, %rax
+ movq %rbp, %r13
+ sbbq -24(%rsp), %r13 ## 8-byte Folded Reload
+ movq %rsi, %r8
+ sbbq -16(%rsp), %r8 ## 8-byte Folded Reload
+ movq %rbx, %r10
+ sbbq -32(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r15, %r11
+ sbbq 16(%rsp), %r11 ## 8-byte Folded Reload
+ movq %r9, %r14
+ sbbq 8(%rsp), %r14 ## 8-byte Folded Reload
+ movq %rcx, %r12
+ sbbq (%rsp), %r12 ## 8-byte Folded Reload
+ sbbq $0, %rdi
+ andl $1, %edi
+ cmovneq %rcx, %r12
+ testb %dil, %dil
+ cmovneq %rdx, %rax
+ movq 64(%rsp), %rcx ## 8-byte Reload
+ movq %rax, (%rcx)
+ cmovneq %rbp, %r13
+ movq %r13, 8(%rcx)
+ cmovneq %rsi, %r8
+ movq %r8, 16(%rcx)
+ cmovneq %rbx, %r10
+ movq %r10, 24(%rcx)
+ cmovneq %r15, %r11
+ movq %r11, 32(%rcx)
+ cmovneq %r9, %r14
+ movq %r14, 40(%rcx)
+ movq %r12, 48(%rcx)
+ addq $72, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
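+## mcl_fp_addPre7L(z, x, y): 7-limb (448-bit) addition without modular
+## reduction; the carry out is returned in %rax.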
+ .globl _mcl_fp_addPre7L
+ .p2align 4, 0x90
+_mcl_fp_addPre7L: ## @mcl_fp_addPre7L
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 48(%rdx), %r8
+ movq 48(%rsi), %r14
+ movq 40(%rdx), %r9
+ movq 40(%rsi), %r15
+ movq 32(%rdx), %r10
+ movq 24(%rdx), %r11
+ movq 16(%rdx), %r12
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ movq 24(%rsi), %rax
+ movq 32(%rsi), %rbx
+ adcq 16(%rsi), %r12
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r12, 16(%rdi)
+ adcq %r11, %rax
+ movq %rax, 24(%rdi)
+ adcq %r10, %rbx
+ movq %rbx, 32(%rdi)
+ adcq %r9, %r15
+ movq %r15, 40(%rdi)
+ adcq %r8, %r14
+ movq %r14, 48(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+
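+## mcl_fp_subPre7L(z, x, y): 7-limb subtraction without modular reduction;
+## the borrow out is returned in %rax.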
+ .globl _mcl_fp_subPre7L
+ .p2align 4, 0x90
+_mcl_fp_subPre7L: ## @mcl_fp_subPre7L
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 48(%rdx), %r8
+ movq 48(%rsi), %r10
+ movq 40(%rdx), %r9
+ movq 40(%rsi), %r15
+ movq 24(%rdx), %r11
+ movq 32(%rdx), %r14
+ movq (%rsi), %rbx
+ movq 8(%rsi), %r12
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %r12
+ movq 16(%rsi), %rcx
+ sbbq 16(%rdx), %rcx
+ movq 32(%rsi), %rdx
+ movq 24(%rsi), %rsi
+ movq %rbx, (%rdi)
+ movq %r12, 8(%rdi)
+ movq %rcx, 16(%rdi)
+ sbbq %r11, %rsi
+ movq %rsi, 24(%rdi)
+ sbbq %r14, %rdx
+ movq %rdx, 32(%rdi)
+ sbbq %r9, %r15
+ movq %r15, 40(%rdi)
+ sbbq %r8, %r10
+ movq %r10, 48(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+
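+## mcl_fp_shr1_7L(z, x): logical right shift of a 7-limb value by one bit.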
+ .globl _mcl_fp_shr1_7L
+ .p2align 4, 0x90
+_mcl_fp_shr1_7L: ## @mcl_fp_shr1_7L
+## BB#0:
+ movq 48(%rsi), %r8
+ movq 40(%rsi), %r9
+ movq 32(%rsi), %r10
+ movq 24(%rsi), %rax
+ movq 16(%rsi), %rcx
+ movq (%rsi), %rdx
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rdx
+ movq %rdx, (%rdi)
+ shrdq $1, %rcx, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rax, %rcx
+ movq %rcx, 16(%rdi)
+ shrdq $1, %r10, %rax
+ movq %rax, 24(%rdi)
+ shrdq $1, %r9, %r10
+ movq %r10, 32(%rdi)
+ shrdq $1, %r8, %r9
+ movq %r9, 40(%rdi)
+ shrq %r8
+ movq %r8, 48(%rdi)
+ retq
+
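+## mcl_fp_add7L(z, x, y, p): modular addition. The raw sum is stored first,
+## then p is subtracted; if that subtraction does not borrow (%nocarry), the
+## reduced sum overwrites the stored value.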
+ .globl _mcl_fp_add7L
+ .p2align 4, 0x90
+_mcl_fp_add7L: ## @mcl_fp_add7L
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 48(%rdx), %r14
+ movq 48(%rsi), %r8
+ movq 40(%rdx), %r15
+ movq 40(%rsi), %r9
+ movq 32(%rdx), %r12
+ movq 24(%rdx), %r13
+ movq 16(%rdx), %r10
+ movq (%rdx), %r11
+ movq 8(%rdx), %rdx
+ addq (%rsi), %r11
+ adcq 8(%rsi), %rdx
+ movq 24(%rsi), %rax
+ movq 32(%rsi), %rbx
+ adcq 16(%rsi), %r10
+ movq %r11, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r10, 16(%rdi)
+ adcq %r13, %rax
+ movq %rax, 24(%rdi)
+ adcq %r12, %rbx
+ movq %rbx, 32(%rdi)
+ adcq %r15, %r9
+ movq %r9, 40(%rdi)
+ adcq %r14, %r8
+ movq %r8, 48(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %r11
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r10
+ sbbq 24(%rcx), %rax
+ sbbq 32(%rcx), %rbx
+ sbbq 40(%rcx), %r9
+ sbbq 48(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne LBB104_2
+## BB#1: ## %nocarry
+ movq %r11, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r10, 16(%rdi)
+ movq %rax, 24(%rdi)
+ movq %rbx, 32(%rdi)
+ movq %r9, 40(%rdi)
+ movq %r8, 48(%rdi)
+LBB104_2: ## %carry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+
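+## mcl_fp_addNF7L(z, x, y, p): branchless modular addition; both x+y and
+## x+y-p are computed and cmovs selects the non-negative one (the NF variant
+## appears to assume the modulus leaves the top bit of the top limb free).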
+ .globl _mcl_fp_addNF7L
+ .p2align 4, 0x90
+_mcl_fp_addNF7L: ## @mcl_fp_addNF7L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 48(%rdx), %r9
+ movq 40(%rdx), %rbp
+ movq 32(%rdx), %r10
+ movq 24(%rdx), %r11
+ movq 16(%rdx), %r14
+ movq (%rdx), %r12
+ movq 8(%rdx), %r15
+ addq (%rsi), %r12
+ adcq 8(%rsi), %r15
+ adcq 16(%rsi), %r14
+ adcq 24(%rsi), %r11
+ adcq 32(%rsi), %r10
+ adcq 40(%rsi), %rbp
+ movq %rbp, -8(%rsp) ## 8-byte Spill
+ adcq 48(%rsi), %r9
+ movq %r12, %rsi
+ subq (%rcx), %rsi
+ movq %r15, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r14, %rax
+ sbbq 16(%rcx), %rax
+ movq %r11, %rbx
+ sbbq 24(%rcx), %rbx
+ movq %r10, %r13
+ sbbq 32(%rcx), %r13
+ sbbq 40(%rcx), %rbp
+ movq %r9, %r8
+ sbbq 48(%rcx), %r8
+ movq %r8, %rcx
+ sarq $63, %rcx
+ cmovsq %r12, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r15, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r14, %rax
+ movq %rax, 16(%rdi)
+ cmovsq %r11, %rbx
+ movq %rbx, 24(%rdi)
+ cmovsq %r10, %r13
+ movq %r13, 32(%rdi)
+ cmovsq -8(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 40(%rdi)
+ cmovsq %r9, %r8
+ movq %r8, 48(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
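+## mcl_fp_sub7L(z, x, y, p): modular subtraction. The raw difference is
+## stored; if it borrowed, p is added back in the %carry branch.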
+ .globl _mcl_fp_sub7L
+ .p2align 4, 0x90
+_mcl_fp_sub7L: ## @mcl_fp_sub7L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 48(%rdx), %r14
+ movq 48(%rsi), %r8
+ movq 40(%rdx), %r15
+ movq 40(%rsi), %r9
+ movq 32(%rdx), %r12
+ movq (%rsi), %rax
+ movq 8(%rsi), %r11
+ xorl %ebx, %ebx
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r11
+ movq 16(%rsi), %r13
+ sbbq 16(%rdx), %r13
+ movq 32(%rsi), %r10
+ movq 24(%rsi), %rsi
+ sbbq 24(%rdx), %rsi
+ movq %rax, (%rdi)
+ movq %r11, 8(%rdi)
+ movq %r13, 16(%rdi)
+ movq %rsi, 24(%rdi)
+ sbbq %r12, %r10
+ movq %r10, 32(%rdi)
+ sbbq %r15, %r9
+ movq %r9, 40(%rdi)
+ sbbq %r14, %r8
+ movq %r8, 48(%rdi)
+ sbbq $0, %rbx
+ testb $1, %bl
+ je LBB106_2
+## BB#1: ## %carry
+ movq 48(%rcx), %r14
+ movq 40(%rcx), %r15
+ movq 32(%rcx), %r12
+ movq 24(%rcx), %rbx
+ movq 8(%rcx), %rdx
+ movq 16(%rcx), %rbp
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %r11, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r13, %rbp
+ movq %rbp, 16(%rdi)
+ adcq %rsi, %rbx
+ movq %rbx, 24(%rdi)
+ adcq %r10, %r12
+ movq %r12, 32(%rdi)
+ adcq %r9, %r15
+ movq %r15, 40(%rdi)
+ adcq %r8, %r14
+ movq %r14, 48(%rdi)
+LBB106_2: ## %nocarry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
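+## mcl_fp_subNF7L(z, x, y, p): branchless modular subtraction; the sign of the
+## raw difference is spread into a mask that selects either 0 or p to add back.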
+ .globl _mcl_fp_subNF7L
+ .p2align 4, 0x90
+_mcl_fp_subNF7L: ## @mcl_fp_subNF7L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq 48(%rsi), %r11
+ movdqu (%rdx), %xmm0
+ movdqu 16(%rdx), %xmm1
+ movdqu 32(%rdx), %xmm2
+ pshufd $78, %xmm2, %xmm3 ## xmm3 = xmm2[2,3,0,1]
+ movd %xmm3, %r14
+ movdqu (%rsi), %xmm3
+ movdqu 16(%rsi), %xmm4
+ movdqu 32(%rsi), %xmm5
+ pshufd $78, %xmm5, %xmm6 ## xmm6 = xmm5[2,3,0,1]
+ movd %xmm6, %rcx
+ movd %xmm2, %r15
+ movd %xmm5, %r9
+ pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1]
+ movd %xmm2, %r12
+ pshufd $78, %xmm4, %xmm2 ## xmm2 = xmm4[2,3,0,1]
+ movd %xmm2, %r10
+ movd %xmm1, %r13
+ pshufd $78, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,0,1]
+ movd %xmm1, %rax
+ pshufd $78, %xmm3, %xmm1 ## xmm1 = xmm3[2,3,0,1]
+ movd %xmm0, %rbx
+ movd %xmm3, %rsi
+ subq %rbx, %rsi
+ movd %xmm1, %rbx
+ sbbq %rax, %rbx
+ movd %xmm4, %rbp
+ sbbq %r13, %rbp
+ sbbq %r12, %r10
+ sbbq %r15, %r9
+ sbbq %r14, %rcx
+ movq %rcx, -8(%rsp) ## 8-byte Spill
+ sbbq 48(%rdx), %r11
+ movq %r11, %rax
+ sarq $63, %rax
+ movq %rax, %rdx
+ shldq $1, %r11, %rdx
+ andq (%r8), %rdx
+ movq 48(%r8), %r14
+ andq %rax, %r14
+ movq 40(%r8), %r15
+ andq %rax, %r15
+ movq 32(%r8), %r12
+ andq %rax, %r12
+ movq 24(%r8), %r13
+ andq %rax, %r13
+ movq 16(%r8), %rcx
+ andq %rax, %rcx
+ andq 8(%r8), %rax
+ addq %rsi, %rdx
+ adcq %rbx, %rax
+ movq %rdx, (%rdi)
+ movq %rax, 8(%rdi)
+ adcq %rbp, %rcx
+ movq %rcx, 16(%rdi)
+ adcq %r10, %r13
+ movq %r13, 24(%rdi)
+ adcq %r9, %r12
+ movq %r12, 32(%rdi)
+ adcq -8(%rsp), %r15 ## 8-byte Folded Reload
+ movq %r15, 40(%rdi)
+ adcq %r11, %r14
+ movq %r14, 48(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
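+## mcl_fpDbl_add7L(z, x, y, p): 14-limb addition. The low 7 limbs are stored
+## as-is; the high half is conditionally reduced by subtracting p.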
+ .globl _mcl_fpDbl_add7L
+ .p2align 4, 0x90
+_mcl_fpDbl_add7L: ## @mcl_fpDbl_add7L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq 104(%rdx), %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq 96(%rdx), %rax
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ movq 88(%rdx), %r11
+ movq 80(%rdx), %r14
+ movq 24(%rsi), %r15
+ movq 32(%rsi), %r12
+ movq 16(%rdx), %r9
+ movq (%rdx), %rax
+ movq 8(%rdx), %rbx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rbx
+ adcq 16(%rsi), %r9
+ adcq 24(%rdx), %r15
+ adcq 32(%rdx), %r12
+ movq 72(%rdx), %r13
+ movq 64(%rdx), %rbp
+ movq %rax, (%rdi)
+ movq 56(%rdx), %r10
+ movq %rbx, 8(%rdi)
+ movq 48(%rdx), %rcx
+ movq 40(%rdx), %rdx
+ movq %r9, 16(%rdi)
+ movq 104(%rsi), %r9
+ movq %r15, 24(%rdi)
+ movq 40(%rsi), %rbx
+ adcq %rdx, %rbx
+ movq 96(%rsi), %r15
+ movq %r12, 32(%rdi)
+ movq 48(%rsi), %rdx
+ adcq %rcx, %rdx
+ movq 88(%rsi), %rax
+ movq %rbx, 40(%rdi)
+ movq 56(%rsi), %rcx
+ adcq %r10, %rcx
+ movq 80(%rsi), %r12
+ movq %rdx, 48(%rdi)
+ movq 72(%rsi), %rdx
+ movq 64(%rsi), %rsi
+ adcq %rbp, %rsi
+ adcq %r13, %rdx
+ adcq %r14, %r12
+ adcq %r11, %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ adcq -24(%rsp), %r15 ## 8-byte Folded Reload
+ movq %r15, -24(%rsp) ## 8-byte Spill
+ adcq -8(%rsp), %r9 ## 8-byte Folded Reload
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ movq %rcx, %rbx
+ subq (%r8), %rbx
+ movq %rsi, %r10
+ sbbq 8(%r8), %r10
+ movq %rdx, %r11
+ sbbq 16(%r8), %r11
+ movq %r12, %r14
+ sbbq 24(%r8), %r14
+ movq -16(%rsp), %r13 ## 8-byte Reload
+ sbbq 32(%r8), %r13
+ sbbq 40(%r8), %r15
+ movq %r9, %rax
+ sbbq 48(%r8), %rax
+ sbbq $0, %rbp
+ andl $1, %ebp
+ cmovneq %rcx, %rbx
+ movq %rbx, 56(%rdi)
+ testb %bpl, %bpl
+ cmovneq %rsi, %r10
+ movq %r10, 64(%rdi)
+ cmovneq %rdx, %r11
+ movq %r11, 72(%rdi)
+ cmovneq %r12, %r14
+ movq %r14, 80(%rdi)
+ cmovneq -16(%rsp), %r13 ## 8-byte Folded Reload
+ movq %r13, 88(%rdi)
+ cmovneq -24(%rsp), %r15 ## 8-byte Folded Reload
+ movq %r15, 96(%rdi)
+ cmovneq %r9, %rax
+ movq %rax, 104(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
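+## mcl_fpDbl_sub7L(z, x, y, p): 14-limb subtraction. The low 7 limbs are
+## stored as-is; if the high half borrowed, p (selected limb-wise by cmove)
+## is added back to it.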
+ .globl _mcl_fpDbl_sub7L
+ .p2align 4, 0x90
+_mcl_fpDbl_sub7L: ## @mcl_fpDbl_sub7L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq 104(%rdx), %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq 96(%rdx), %r10
+ movq 88(%rdx), %r14
+ movq 16(%rsi), %rax
+ movq (%rsi), %r15
+ movq 8(%rsi), %r11
+ xorl %ecx, %ecx
+ subq (%rdx), %r15
+ sbbq 8(%rdx), %r11
+ sbbq 16(%rdx), %rax
+ movq 24(%rsi), %rbx
+ sbbq 24(%rdx), %rbx
+ movq 32(%rsi), %r12
+ sbbq 32(%rdx), %r12
+ movq 80(%rdx), %r13
+ movq 72(%rdx), %rbp
+ movq %r15, (%rdi)
+ movq 64(%rdx), %r9
+ movq %r11, 8(%rdi)
+ movq 56(%rdx), %r15
+ movq %rax, 16(%rdi)
+ movq 48(%rdx), %r11
+ movq 40(%rdx), %rdx
+ movq %rbx, 24(%rdi)
+ movq 40(%rsi), %rbx
+ sbbq %rdx, %rbx
+ movq 104(%rsi), %rax
+ movq %r12, 32(%rdi)
+ movq 48(%rsi), %r12
+ sbbq %r11, %r12
+ movq 96(%rsi), %r11
+ movq %rbx, 40(%rdi)
+ movq 56(%rsi), %rdx
+ sbbq %r15, %rdx
+ movq 88(%rsi), %r15
+ movq %r12, 48(%rdi)
+ movq 64(%rsi), %rbx
+ sbbq %r9, %rbx
+ movq 80(%rsi), %r12
+ movq 72(%rsi), %r9
+ sbbq %rbp, %r9
+ sbbq %r13, %r12
+ sbbq %r14, %r15
+ sbbq %r10, %r11
+ sbbq -8(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movl $0, %ebp
+ sbbq $0, %rbp
+ andl $1, %ebp
+ movq (%r8), %r10
+ cmoveq %rcx, %r10
+ testb %bpl, %bpl
+ movq 16(%r8), %rbp
+ cmoveq %rcx, %rbp
+ movq 8(%r8), %rsi
+ cmoveq %rcx, %rsi
+ movq 48(%r8), %r14
+ cmoveq %rcx, %r14
+ movq 40(%r8), %r13
+ cmoveq %rcx, %r13
+ movq 32(%r8), %rax
+ cmoveq %rcx, %rax
+ cmovneq 24(%r8), %rcx
+ addq %rdx, %r10
+ adcq %rbx, %rsi
+ movq %r10, 56(%rdi)
+ movq %rsi, 64(%rdi)
+ adcq %r9, %rbp
+ movq %rbp, 72(%rdi)
+ adcq %r12, %rcx
+ movq %rcx, 80(%rdi)
+ adcq %r15, %rax
+ movq %rax, 88(%rdi)
+ adcq %r11, %r13
+ movq %r13, 96(%rdi)
+ adcq -8(%rsp), %r14 ## 8-byte Folded Reload
+ movq %r14, 104(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
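+## mulPv512x64: local helper. Multiplies the 8-limb (512-bit) value at (%rsi)
+## by the 64-bit scalar in %rdx and writes the 9-limb product to (%rdi),
+## returning %rdi in %rax.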
+ .p2align 4, 0x90
+l_mulPv512x64: ## @mulPv512x64
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rcx, %rax
+ mulq (%rsi)
+ movq %rdx, -24(%rsp) ## 8-byte Spill
+ movq %rax, (%rdi)
+ movq %rcx, %rax
+ mulq 56(%rsi)
+ movq %rdx, %r10
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq 48(%rsi)
+ movq %rdx, %r11
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq 40(%rsi)
+ movq %rdx, %r12
+ movq %rax, %r15
+ movq %rcx, %rax
+ mulq 32(%rsi)
+ movq %rdx, %rbx
+ movq %rax, %r13
+ movq %rcx, %rax
+ mulq 24(%rsi)
+ movq %rdx, %rbp
+ movq %rax, %r8
+ movq %rcx, %rax
+ mulq 16(%rsi)
+ movq %rdx, %r9
+ movq %rax, %r14
+ movq %rcx, %rax
+ mulq 8(%rsi)
+ addq -24(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 8(%rdi)
+ adcq %r14, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r8, %r9
+ movq %r9, 24(%rdi)
+ adcq %r13, %rbp
+ movq %rbp, 32(%rdi)
+ adcq %r15, %rbx
+ movq %rbx, 40(%rdi)
+ adcq -16(%rsp), %r12 ## 8-byte Folded Reload
+ movq %r12, 48(%rdi)
+ adcq -8(%rsp), %r11 ## 8-byte Folded Reload
+ movq %r11, 56(%rdi)
+ adcq $0, %r10
+ movq %r10, 64(%rdi)
+ movq %rdi, %rax
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
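+## mcl_fp_mulUnitPre8L(z, x, y): 8-limb times 64-bit scalar via l_mulPv512x64;
+## the 9-limb product is copied from the stack buffer to z.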
+ .globl _mcl_fp_mulUnitPre8L
+ .p2align 4, 0x90
+_mcl_fp_mulUnitPre8L: ## @mcl_fp_mulUnitPre8L
+## BB#0:
+ pushq %rbx
+ subq $80, %rsp
+ movq %rdi, %rbx
+ leaq 8(%rsp), %rdi
+ callq l_mulPv512x64
+ movq 72(%rsp), %r8
+ movq 64(%rsp), %r9
+ movq 56(%rsp), %r10
+ movq 48(%rsp), %r11
+ movq 40(%rsp), %rdi
+ movq 32(%rsp), %rax
+ movq 24(%rsp), %rcx
+ movq 8(%rsp), %rdx
+ movq 16(%rsp), %rsi
+ movq %rdx, (%rbx)
+ movq %rsi, 8(%rbx)
+ movq %rcx, 16(%rbx)
+ movq %rax, 24(%rbx)
+ movq %rdi, 32(%rbx)
+ movq %r11, 40(%rbx)
+ movq %r10, 48(%rbx)
+ movq %r9, 56(%rbx)
+ movq %r8, 64(%rbx)
+ addq $80, %rsp
+ popq %rbx
+ retq
+
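+## mcl_fpDbl_mulPre8L(z, x, y): full 8x8-limb multiplication (16-limb result)
+## built from three calls to mcl_fpDbl_mulPre4L, i.e. one level of Karatsuba
+## on the 4-limb halves.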
+ .globl _mcl_fpDbl_mulPre8L
+ .p2align 4, 0x90
+_mcl_fpDbl_mulPre8L: ## @mcl_fpDbl_mulPre8L
+## BB#0:
+ pushq %rbp
+ movq %rsp, %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $200, %rsp
+ movq %rdx, %r15
+ movq %rsi, %rbx
+ movq %rdi, %r14
+ callq _mcl_fpDbl_mulPre4L
+ leaq 64(%r14), %rdi
+ leaq 32(%rbx), %rsi
+ leaq 32(%r15), %rdx
+ callq _mcl_fpDbl_mulPre4L
+ movq 56(%rbx), %r10
+ movq 48(%rbx), %rdx
+ movq (%rbx), %rsi
+ movq 8(%rbx), %rdi
+ addq 32(%rbx), %rsi
+ adcq 40(%rbx), %rdi
+ adcq 16(%rbx), %rdx
+ adcq 24(%rbx), %r10
+ pushfq
+ popq %r8
+ xorl %r9d, %r9d
+ movq 56(%r15), %rcx
+ movq 48(%r15), %r13
+ movq (%r15), %r12
+ movq 8(%r15), %rbx
+ addq 32(%r15), %r12
+ adcq 40(%r15), %rbx
+ adcq 16(%r15), %r13
+ adcq 24(%r15), %rcx
+ movl $0, %eax
+ cmovbq %r10, %rax
+ movq %rax, -88(%rbp) ## 8-byte Spill
+ movl $0, %eax
+ cmovbq %rdx, %rax
+ movq %rax, -80(%rbp) ## 8-byte Spill
+ movl $0, %eax
+ cmovbq %rdi, %rax
+ movq %rax, -72(%rbp) ## 8-byte Spill
+ movl $0, %eax
+ cmovbq %rsi, %rax
+ movq %rax, -64(%rbp) ## 8-byte Spill
+ sbbq %r15, %r15
+ movq %rsi, -168(%rbp)
+ movq %rdi, -160(%rbp)
+ movq %rdx, -152(%rbp)
+ movq %r10, -144(%rbp)
+ movq %r12, -136(%rbp)
+ movq %rbx, -128(%rbp)
+ movq %r13, -120(%rbp)
+ movq %rcx, -112(%rbp)
+ pushq %r8
+ popfq
+ cmovaeq %r9, %rcx
+ movq %rcx, -48(%rbp) ## 8-byte Spill
+ cmovaeq %r9, %r13
+ cmovaeq %r9, %rbx
+ cmovaeq %r9, %r12
+ sbbq %rax, %rax
+ movq %rax, -56(%rbp) ## 8-byte Spill
+ leaq -232(%rbp), %rdi
+ leaq -168(%rbp), %rsi
+ leaq -136(%rbp), %rdx
+ callq _mcl_fpDbl_mulPre4L
+ addq -64(%rbp), %r12 ## 8-byte Folded Reload
+ adcq -72(%rbp), %rbx ## 8-byte Folded Reload
+ adcq -80(%rbp), %r13 ## 8-byte Folded Reload
+ movq -48(%rbp), %r10 ## 8-byte Reload
+ adcq -88(%rbp), %r10 ## 8-byte Folded Reload
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq -56(%rbp), %rdx ## 8-byte Reload
+ andl %edx, %r15d
+ andl $1, %r15d
+ addq -200(%rbp), %r12
+ adcq -192(%rbp), %rbx
+ adcq -184(%rbp), %r13
+ adcq -176(%rbp), %r10
+ adcq %rax, %r15
+ movq -208(%rbp), %rax
+ movq -216(%rbp), %rcx
+ movq -232(%rbp), %rsi
+ movq -224(%rbp), %rdx
+ subq (%r14), %rsi
+ sbbq 8(%r14), %rdx
+ sbbq 16(%r14), %rcx
+ sbbq 24(%r14), %rax
+ movq 32(%r14), %rdi
+ movq %rdi, -80(%rbp) ## 8-byte Spill
+ movq 40(%r14), %r8
+ movq %r8, -88(%rbp) ## 8-byte Spill
+ sbbq %rdi, %r12
+ sbbq %r8, %rbx
+ movq 48(%r14), %rdi
+ movq %rdi, -72(%rbp) ## 8-byte Spill
+ sbbq %rdi, %r13
+ movq 56(%r14), %rdi
+ movq %rdi, -64(%rbp) ## 8-byte Spill
+ sbbq %rdi, %r10
+ sbbq $0, %r15
+ movq 64(%r14), %r11
+ subq %r11, %rsi
+ movq 72(%r14), %rdi
+ movq %rdi, -56(%rbp) ## 8-byte Spill
+ sbbq %rdi, %rdx
+ movq 80(%r14), %rdi
+ movq %rdi, -48(%rbp) ## 8-byte Spill
+ sbbq %rdi, %rcx
+ movq 88(%r14), %rdi
+ movq %rdi, -104(%rbp) ## 8-byte Spill
+ sbbq %rdi, %rax
+ movq 96(%r14), %rdi
+ movq %rdi, -96(%rbp) ## 8-byte Spill
+ sbbq %rdi, %r12
+ movq 104(%r14), %rdi
+ sbbq %rdi, %rbx
+ movq 112(%r14), %r8
+ sbbq %r8, %r13
+ movq 120(%r14), %r9
+ sbbq %r9, %r10
+ sbbq $0, %r15
+ addq -80(%rbp), %rsi ## 8-byte Folded Reload
+ adcq -88(%rbp), %rdx ## 8-byte Folded Reload
+ movq %rsi, 32(%r14)
+ adcq -72(%rbp), %rcx ## 8-byte Folded Reload
+ movq %rdx, 40(%r14)
+ adcq -64(%rbp), %rax ## 8-byte Folded Reload
+ movq %rcx, 48(%r14)
+ adcq %r11, %r12
+ movq %rax, 56(%r14)
+ movq %r12, 64(%r14)
+ adcq -56(%rbp), %rbx ## 8-byte Folded Reload
+ movq %rbx, 72(%r14)
+ adcq -48(%rbp), %r13 ## 8-byte Folded Reload
+ movq %r13, 80(%r14)
+ adcq -104(%rbp), %r10 ## 8-byte Folded Reload
+ movq %r10, 88(%r14)
+ adcq -96(%rbp), %r15 ## 8-byte Folded Reload
+ movq %r15, 96(%r14)
+ adcq $0, %rdi
+ movq %rdi, 104(%r14)
+ adcq $0, %r8
+ movq %r8, 112(%r14)
+ adcq $0, %r9
+ movq %r9, 120(%r14)
+ addq $200, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
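+## mcl_fpDbl_sqrPre8L(z, x): full 8-limb squaring; the same one-level
+## Karatsuba structure as mulPre8L with both operands equal.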
+ .globl _mcl_fpDbl_sqrPre8L
+ .p2align 4, 0x90
+_mcl_fpDbl_sqrPre8L: ## @mcl_fpDbl_sqrPre8L
+## BB#0:
+ pushq %rbp
+ movq %rsp, %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $200, %rsp
+ movq %rsi, %rbx
+ movq %rdi, %r14
+ movq %rbx, %rdx
+ callq _mcl_fpDbl_mulPre4L
+ leaq 64(%r14), %rdi
+ leaq 32(%rbx), %rsi
+ movq %rsi, %rdx
+ callq _mcl_fpDbl_mulPre4L
+ movq 56(%rbx), %r15
+ movq 48(%rbx), %rax
+ movq (%rbx), %rcx
+ movq 8(%rbx), %rdx
+ addq 32(%rbx), %rcx
+ adcq 40(%rbx), %rdx
+ adcq 16(%rbx), %rax
+ adcq 24(%rbx), %r15
+ pushfq
+ popq %r8
+ pushfq
+ popq %r9
+ pushfq
+ popq %r10
+ pushfq
+ popq %rdi
+ pushfq
+ popq %rbx
+ sbbq %rsi, %rsi
+ movq %rsi, -56(%rbp) ## 8-byte Spill
+ leaq (%rcx,%rcx), %rsi
+ xorl %r11d, %r11d
+ pushq %rbx
+ popfq
+ cmovaeq %r11, %rsi
+ movq %rsi, -48(%rbp) ## 8-byte Spill
+ movq %rdx, %r13
+ shldq $1, %rcx, %r13
+ pushq %rdi
+ popfq
+ cmovaeq %r11, %r13
+ movq %rax, %r12
+ shldq $1, %rdx, %r12
+ pushq %r10
+ popfq
+ cmovaeq %r11, %r12
+ movq %r15, %rbx
+ movq %rcx, -168(%rbp)
+ movq %rdx, -160(%rbp)
+ movq %rax, -152(%rbp)
+ movq %r15, -144(%rbp)
+ movq %rcx, -136(%rbp)
+ movq %rdx, -128(%rbp)
+ movq %rax, -120(%rbp)
+ movq %r15, -112(%rbp)
+ shldq $1, %rax, %r15
+ pushq %r9
+ popfq
+ cmovaeq %r11, %r15
+ shrq $63, %rbx
+ pushq %r8
+ popfq
+ cmovaeq %r11, %rbx
+ leaq -232(%rbp), %rdi
+ leaq -168(%rbp), %rsi
+ leaq -136(%rbp), %rdx
+ callq _mcl_fpDbl_mulPre4L
+ movq -56(%rbp), %rax ## 8-byte Reload
+ andl $1, %eax
+ movq -48(%rbp), %r10 ## 8-byte Reload
+ addq -200(%rbp), %r10
+ adcq -192(%rbp), %r13
+ adcq -184(%rbp), %r12
+ adcq -176(%rbp), %r15
+ adcq %rbx, %rax
+ movq %rax, %rbx
+ movq -208(%rbp), %rax
+ movq -216(%rbp), %rcx
+ movq -232(%rbp), %rsi
+ movq -224(%rbp), %rdx
+ subq (%r14), %rsi
+ sbbq 8(%r14), %rdx
+ sbbq 16(%r14), %rcx
+ sbbq 24(%r14), %rax
+ movq 32(%r14), %r9
+ movq %r9, -56(%rbp) ## 8-byte Spill
+ movq 40(%r14), %r8
+ movq %r8, -48(%rbp) ## 8-byte Spill
+ sbbq %r9, %r10
+ sbbq %r8, %r13
+ movq 48(%r14), %rdi
+ movq %rdi, -104(%rbp) ## 8-byte Spill
+ sbbq %rdi, %r12
+ movq 56(%r14), %rdi
+ movq %rdi, -96(%rbp) ## 8-byte Spill
+ sbbq %rdi, %r15
+ sbbq $0, %rbx
+ movq 64(%r14), %r11
+ subq %r11, %rsi
+ movq 72(%r14), %rdi
+ movq %rdi, -88(%rbp) ## 8-byte Spill
+ sbbq %rdi, %rdx
+ movq 80(%r14), %rdi
+ movq %rdi, -80(%rbp) ## 8-byte Spill
+ sbbq %rdi, %rcx
+ movq 88(%r14), %rdi
+ movq %rdi, -72(%rbp) ## 8-byte Spill
+ sbbq %rdi, %rax
+ movq 96(%r14), %rdi
+ movq %rdi, -64(%rbp) ## 8-byte Spill
+ sbbq %rdi, %r10
+ movq 104(%r14), %rdi
+ sbbq %rdi, %r13
+ movq 112(%r14), %r8
+ sbbq %r8, %r12
+ movq 120(%r14), %r9
+ sbbq %r9, %r15
+ sbbq $0, %rbx
+ addq -56(%rbp), %rsi ## 8-byte Folded Reload
+ adcq -48(%rbp), %rdx ## 8-byte Folded Reload
+ movq %rsi, 32(%r14)
+ adcq -104(%rbp), %rcx ## 8-byte Folded Reload
+ movq %rdx, 40(%r14)
+ adcq -96(%rbp), %rax ## 8-byte Folded Reload
+ movq %rcx, 48(%r14)
+ adcq %r11, %r10
+ movq %rax, 56(%r14)
+ movq %r10, 64(%r14)
+ adcq -88(%rbp), %r13 ## 8-byte Folded Reload
+ movq %r13, 72(%r14)
+ adcq -80(%rbp), %r12 ## 8-byte Folded Reload
+ movq %r12, 80(%r14)
+ adcq -72(%rbp), %r15 ## 8-byte Folded Reload
+ movq %r15, 88(%r14)
+ movq %rbx, %rax
+ adcq -64(%rbp), %rax ## 8-byte Folded Reload
+ movq %rax, 96(%r14)
+ adcq $0, %rdi
+ movq %rdi, 104(%r14)
+ adcq $0, %r8
+ movq %r8, 112(%r14)
+ adcq $0, %r9
+ movq %r9, 120(%r14)
+ addq $200, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
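+## mcl_fp_mont8L(z, x, y, p): word-serial Montgomery multiplication for
+## 8 limbs. Each of the eight rounds multiplies by one word of y via
+## l_mulPv512x64 and then by a reduction factor derived from the word at
+## p[-1] (presumably -p^-1 mod 2^64); a final conditional subtraction of p
+## produces the reduced result.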
+ .globl _mcl_fp_mont8L
+ .p2align 4, 0x90
+_mcl_fp_mont8L: ## @mcl_fp_mont8L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $1256, %rsp ## imm = 0x4E8
+ movq %rcx, %r13
+ movq %rdx, 64(%rsp) ## 8-byte Spill
+ movq %rsi, 72(%rsp) ## 8-byte Spill
+ movq %rdi, 96(%rsp) ## 8-byte Spill
+ movq -8(%r13), %rbx
+ movq %rbx, 80(%rsp) ## 8-byte Spill
+ movq %r13, 56(%rsp) ## 8-byte Spill
+ movq (%rdx), %rdx
+ leaq 1184(%rsp), %rdi
+ callq l_mulPv512x64
+ movq 1184(%rsp), %r15
+ movq 1192(%rsp), %r14
+ movq %r15, %rdx
+ imulq %rbx, %rdx
+ movq 1248(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ movq 1240(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ movq 1232(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 1224(%rsp), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq 1216(%rsp), %r12
+ movq 1208(%rsp), %rbx
+ movq 1200(%rsp), %rbp
+ leaq 1112(%rsp), %rdi
+ movq %r13, %rsi
+ callq l_mulPv512x64
+ addq 1112(%rsp), %r15
+ adcq 1120(%rsp), %r14
+ adcq 1128(%rsp), %rbp
+ movq %rbp, 88(%rsp) ## 8-byte Spill
+ adcq 1136(%rsp), %rbx
+ movq %rbx, 32(%rsp) ## 8-byte Spill
+ adcq 1144(%rsp), %r12
+ movq %r12, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %r13 ## 8-byte Reload
+ adcq 1152(%rsp), %r13
+ movq (%rsp), %rbx ## 8-byte Reload
+ adcq 1160(%rsp), %rbx
+ movq 40(%rsp), %rbp ## 8-byte Reload
+ adcq 1168(%rsp), %rbp
+ movq 24(%rsp), %rax ## 8-byte Reload
+ adcq 1176(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ sbbq %r15, %r15
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq 8(%rax), %rdx
+ leaq 1040(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ andl $1, %r15d
+ addq 1040(%rsp), %r14
+ movq 88(%rsp), %rax ## 8-byte Reload
+ adcq 1048(%rsp), %rax
+ movq %rax, 88(%rsp) ## 8-byte Spill
+ movq 32(%rsp), %rax ## 8-byte Reload
+ adcq 1056(%rsp), %rax
+ movq %rax, %r12
+ movq 8(%rsp), %rax ## 8-byte Reload
+ adcq 1064(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ adcq 1072(%rsp), %r13
+ movq %r13, 16(%rsp) ## 8-byte Spill
+ adcq 1080(%rsp), %rbx
+ movq %rbx, (%rsp) ## 8-byte Spill
+ adcq 1088(%rsp), %rbp
+ movq 24(%rsp), %rax ## 8-byte Reload
+ adcq 1096(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ adcq 1104(%rsp), %r15
+ movq %r15, 48(%rsp) ## 8-byte Spill
+ sbbq %r15, %r15
+ movq %r14, %rdx
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 968(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ andl $1, %r15d
+ addq 968(%rsp), %r14
+ movq 88(%rsp), %r13 ## 8-byte Reload
+ adcq 976(%rsp), %r13
+ adcq 984(%rsp), %r12
+ movq %r12, 32(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r14 ## 8-byte Reload
+ adcq 992(%rsp), %r14
+ movq 16(%rsp), %rbx ## 8-byte Reload
+ adcq 1000(%rsp), %rbx
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 1008(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ adcq 1016(%rsp), %rbp
+ movq %rbp, %r12
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq 1024(%rsp), %rbp
+ movq 48(%rsp), %rax ## 8-byte Reload
+ adcq 1032(%rsp), %rax
+ movq %rax, 48(%rsp) ## 8-byte Spill
+ adcq $0, %r15
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rdx
+ leaq 896(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq %r13, %rcx
+ addq 896(%rsp), %rcx
+ movq 32(%rsp), %r13 ## 8-byte Reload
+ adcq 904(%rsp), %r13
+ adcq 912(%rsp), %r14
+ adcq 920(%rsp), %rbx
+ movq %rbx, 16(%rsp) ## 8-byte Spill
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 928(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ adcq 936(%rsp), %r12
+ movq %r12, 40(%rsp) ## 8-byte Spill
+ adcq 944(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %r12 ## 8-byte Reload
+ adcq 952(%rsp), %r12
+ adcq 960(%rsp), %r15
+ sbbq %rbx, %rbx
+ movq %rcx, %rdx
+ movq %rcx, %rbp
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 824(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ andl $1, %ebx
+ addq 824(%rsp), %rbp
+ adcq 832(%rsp), %r13
+ movq %r13, 32(%rsp) ## 8-byte Spill
+ adcq 840(%rsp), %r14
+ movq %r14, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %r13 ## 8-byte Reload
+ adcq 848(%rsp), %r13
+ movq (%rsp), %rbp ## 8-byte Reload
+ adcq 856(%rsp), %rbp
+ movq 40(%rsp), %r14 ## 8-byte Reload
+ adcq 864(%rsp), %r14
+ movq 24(%rsp), %rax ## 8-byte Reload
+ adcq 872(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ adcq 880(%rsp), %r12
+ adcq 888(%rsp), %r15
+ adcq $0, %rbx
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rdx
+ leaq 752(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq 32(%rsp), %rax ## 8-byte Reload
+ addq 752(%rsp), %rax
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 760(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ adcq 768(%rsp), %r13
+ movq %r13, 16(%rsp) ## 8-byte Spill
+ adcq 776(%rsp), %rbp
+ movq %rbp, (%rsp) ## 8-byte Spill
+ adcq 784(%rsp), %r14
+ movq %r14, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq 792(%rsp), %rbp
+ adcq 800(%rsp), %r12
+ adcq 808(%rsp), %r15
+ adcq 816(%rsp), %rbx
+ movq %rbx, 32(%rsp) ## 8-byte Spill
+ sbbq %r13, %r13
+ movq %rax, %rdx
+ movq %rax, %rbx
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 680(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq %r13, %rax
+ andl $1, %eax
+ addq 680(%rsp), %rbx
+ movq 8(%rsp), %r14 ## 8-byte Reload
+ adcq 688(%rsp), %r14
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ adcq 696(%rsp), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ movq (%rsp), %r13 ## 8-byte Reload
+ adcq 704(%rsp), %r13
+ movq 40(%rsp), %rbx ## 8-byte Reload
+ adcq 712(%rsp), %rbx
+ adcq 720(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ movq %r12, %rbp
+ adcq 728(%rsp), %rbp
+ adcq 736(%rsp), %r15
+ movq 32(%rsp), %r12 ## 8-byte Reload
+ adcq 744(%rsp), %r12
+ adcq $0, %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq 32(%rax), %rdx
+ leaq 608(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq %r14, %rax
+ addq 608(%rsp), %rax
+ movq 16(%rsp), %r14 ## 8-byte Reload
+ adcq 616(%rsp), %r14
+ adcq 624(%rsp), %r13
+ movq %r13, (%rsp) ## 8-byte Spill
+ adcq 632(%rsp), %rbx
+ movq %rbx, %r13
+ movq 24(%rsp), %rcx ## 8-byte Reload
+ adcq 640(%rsp), %rcx
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq 648(%rsp), %rbp
+ movq %rbp, 48(%rsp) ## 8-byte Spill
+ adcq 656(%rsp), %r15
+ adcq 664(%rsp), %r12
+ movq %r12, 32(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 672(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ sbbq %rbp, %rbp
+ movq %rax, %rdx
+ movq %rax, %rbx
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 536(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq %rbp, %rax
+ andl $1, %eax
+ addq 536(%rsp), %rbx
+ adcq 544(%rsp), %r14
+ movq %r14, 16(%rsp) ## 8-byte Spill
+ movq (%rsp), %rbx ## 8-byte Reload
+ adcq 552(%rsp), %rbx
+ adcq 560(%rsp), %r13
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq 568(%rsp), %rbp
+ movq 48(%rsp), %r12 ## 8-byte Reload
+ adcq 576(%rsp), %r12
+ adcq 584(%rsp), %r15
+ movq 32(%rsp), %rcx ## 8-byte Reload
+ adcq 592(%rsp), %rcx
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r14 ## 8-byte Reload
+ adcq 600(%rsp), %r14
+ adcq $0, %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq 40(%rax), %rdx
+ leaq 464(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq 16(%rsp), %rax ## 8-byte Reload
+ addq 464(%rsp), %rax
+ adcq 472(%rsp), %rbx
+ adcq 480(%rsp), %r13
+ movq %r13, 40(%rsp) ## 8-byte Spill
+ adcq 488(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ adcq 496(%rsp), %r12
+ adcq 504(%rsp), %r15
+ movq %r15, 16(%rsp) ## 8-byte Spill
+ movq 32(%rsp), %r15 ## 8-byte Reload
+ adcq 512(%rsp), %r15
+ adcq 520(%rsp), %r14
+ movq %r14, 8(%rsp) ## 8-byte Spill
+ movq (%rsp), %r14 ## 8-byte Reload
+ adcq 528(%rsp), %r14
+ sbbq %r13, %r13
+ movq %rax, %rdx
+ movq %rax, %rbp
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 392(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq %r13, %rax
+ andl $1, %eax
+ addq 392(%rsp), %rbp
+ adcq 400(%rsp), %rbx
+ movq %rbx, (%rsp) ## 8-byte Spill
+ movq 40(%rsp), %rbp ## 8-byte Reload
+ adcq 408(%rsp), %rbp
+ movq 24(%rsp), %rbx ## 8-byte Reload
+ adcq 416(%rsp), %rbx
+ adcq 424(%rsp), %r12
+ movq 16(%rsp), %r13 ## 8-byte Reload
+ adcq 432(%rsp), %r13
+ adcq 440(%rsp), %r15
+ movq %r15, 32(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r15 ## 8-byte Reload
+ adcq 448(%rsp), %r15
+ adcq 456(%rsp), %r14
+ adcq $0, %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq 48(%rax), %rdx
+ leaq 320(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq (%rsp), %rax ## 8-byte Reload
+ addq 320(%rsp), %rax
+ adcq 328(%rsp), %rbp
+ movq %rbp, 40(%rsp) ## 8-byte Spill
+ adcq 336(%rsp), %rbx
+ movq %rbx, 24(%rsp) ## 8-byte Spill
+ movq %r12, %rbp
+ adcq 344(%rsp), %rbp
+ adcq 352(%rsp), %r13
+ movq 32(%rsp), %r12 ## 8-byte Reload
+ adcq 360(%rsp), %r12
+ adcq 368(%rsp), %r15
+ movq %r15, 8(%rsp) ## 8-byte Spill
+ adcq 376(%rsp), %r14
+ movq %r14, (%rsp) ## 8-byte Spill
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ adcq 384(%rsp), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ sbbq %r15, %r15
+ movq %rax, %rdx
+ movq %rax, %rbx
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 248(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ andl $1, %r15d
+ addq 248(%rsp), %rbx
+ movq 40(%rsp), %rax ## 8-byte Reload
+ adcq 256(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %r14 ## 8-byte Reload
+ adcq 264(%rsp), %r14
+ adcq 272(%rsp), %rbp
+ movq %rbp, 48(%rsp) ## 8-byte Spill
+ movq %r13, %rbx
+ adcq 280(%rsp), %rbx
+ movq %r12, %rbp
+ adcq 288(%rsp), %rbp
+ movq 8(%rsp), %r13 ## 8-byte Reload
+ adcq 296(%rsp), %r13
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 304(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 16(%rsp), %r12 ## 8-byte Reload
+ adcq 312(%rsp), %r12
+ adcq $0, %r15
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq 56(%rax), %rdx
+ leaq 176(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq 40(%rsp), %rax ## 8-byte Reload
+ addq 176(%rsp), %rax
+ adcq 184(%rsp), %r14
+ movq %r14, 24(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rcx ## 8-byte Reload
+ adcq 192(%rsp), %rcx
+ movq %rcx, 48(%rsp) ## 8-byte Spill
+ adcq 200(%rsp), %rbx
+ movq %rbx, 16(%rsp) ## 8-byte Spill
+ adcq 208(%rsp), %rbp
+ adcq 216(%rsp), %r13
+ movq %r13, 8(%rsp) ## 8-byte Spill
+ movq (%rsp), %r14 ## 8-byte Reload
+ adcq 224(%rsp), %r14
+ adcq 232(%rsp), %r12
+ adcq 240(%rsp), %r15
+ sbbq %rbx, %rbx
+ movq 80(%rsp), %rdx ## 8-byte Reload
+ imulq %rax, %rdx
+ movq %rax, %r13
+ leaq 104(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ andl $1, %ebx
+ addq 104(%rsp), %r13
+ movq 24(%rsp), %rcx ## 8-byte Reload
+ adcq 112(%rsp), %rcx
+ movq 48(%rsp), %rdx ## 8-byte Reload
+ adcq 120(%rsp), %rdx
+ movq 16(%rsp), %rsi ## 8-byte Reload
+ adcq 128(%rsp), %rsi
+ movq %rbp, %rdi
+ adcq 136(%rsp), %rdi
+ movq %rdi, 32(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r8 ## 8-byte Reload
+ adcq 144(%rsp), %r8
+ movq %r8, 8(%rsp) ## 8-byte Spill
+ movq %r14, %r9
+ adcq 152(%rsp), %r9
+ movq %r9, (%rsp) ## 8-byte Spill
+ adcq 160(%rsp), %r12
+ adcq 168(%rsp), %r15
+ adcq $0, %rbx
+ movq %rcx, %rax
+ movq %rcx, %r11
+ movq 56(%rsp), %rbp ## 8-byte Reload
+ subq (%rbp), %rax
+ movq %rdx, %rcx
+ movq %rdx, %r14
+ sbbq 8(%rbp), %rcx
+ movq %rsi, %rdx
+ movq %rsi, %r13
+ sbbq 16(%rbp), %rdx
+ movq %rdi, %rsi
+ sbbq 24(%rbp), %rsi
+ movq %r8, %rdi
+ sbbq 32(%rbp), %rdi
+ movq %r9, %r10
+ sbbq 40(%rbp), %r10
+ movq %r12, %r8
+ sbbq 48(%rbp), %r8
+ movq %r15, %r9
+ sbbq 56(%rbp), %r9
+ sbbq $0, %rbx
+ andl $1, %ebx
+ cmovneq %r15, %r9
+ testb %bl, %bl
+ cmovneq %r11, %rax
+ movq 96(%rsp), %rbx ## 8-byte Reload
+ movq %rax, (%rbx)
+ cmovneq %r14, %rcx
+ movq %rcx, 8(%rbx)
+ cmovneq %r13, %rdx
+ movq %rdx, 16(%rbx)
+ cmovneq 32(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 24(%rbx)
+ cmovneq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 32(%rbx)
+ cmovneq (%rsp), %r10 ## 8-byte Folded Reload
+ movq %r10, 40(%rbx)
+ cmovneq %r12, %r8
+ movq %r8, 48(%rbx)
+ movq %r9, 56(%rbx)
+ addq $1256, %rsp ## imm = 0x4E8
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
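+## mcl_fp_montNF8L(z, x, y, p): Montgomery multiplication variant with
+## simplified carry handling (the NF form, presumably for moduli that leave
+## the top bit of the top limb free).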
+ .globl _mcl_fp_montNF8L
+ .p2align 4, 0x90
+_mcl_fp_montNF8L: ## @mcl_fp_montNF8L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $1240, %rsp ## imm = 0x4D8
+ movq %rcx, 40(%rsp) ## 8-byte Spill
+ movq %rdx, 48(%rsp) ## 8-byte Spill
+ movq %rsi, 56(%rsp) ## 8-byte Spill
+ movq %rdi, 80(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %rbx
+ movq %rbx, 64(%rsp) ## 8-byte Spill
+ movq (%rdx), %rdx
+ leaq 1168(%rsp), %rdi
+ callq l_mulPv512x64
+ movq 1168(%rsp), %r15
+ movq 1176(%rsp), %r12
+ movq %r15, %rdx
+ imulq %rbx, %rdx
+ movq 1232(%rsp), %rax
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ movq 1224(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 1216(%rsp), %r13
+ movq 1208(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq 1200(%rsp), %r14
+ movq 1192(%rsp), %rbp
+ movq 1184(%rsp), %rbx
+ leaq 1096(%rsp), %rdi
+ movq 40(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 1096(%rsp), %r15
+ adcq 1104(%rsp), %r12
+ movq %r12, 16(%rsp) ## 8-byte Spill
+ adcq 1112(%rsp), %rbx
+ adcq 1120(%rsp), %rbp
+ adcq 1128(%rsp), %r14
+ movq %r14, %r12
+ movq 8(%rsp), %r14 ## 8-byte Reload
+ adcq 1136(%rsp), %r14
+ adcq 1144(%rsp), %r13
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 1152(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 32(%rsp), %rax ## 8-byte Reload
+ adcq 1160(%rsp), %rax
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rax ## 8-byte Reload
+ movq 8(%rax), %rdx
+ leaq 1024(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq 1088(%rsp), %r15
+ movq 16(%rsp), %rax ## 8-byte Reload
+ addq 1024(%rsp), %rax
+ adcq 1032(%rsp), %rbx
+ movq %rbx, 72(%rsp) ## 8-byte Spill
+ movq %rbp, %rbx
+ adcq 1040(%rsp), %rbx
+ adcq 1048(%rsp), %r12
+ adcq 1056(%rsp), %r14
+ movq %r14, 8(%rsp) ## 8-byte Spill
+ movq %r13, %rbp
+ adcq 1064(%rsp), %rbp
+ movq (%rsp), %rcx ## 8-byte Reload
+ adcq 1072(%rsp), %rcx
+ movq %rcx, (%rsp) ## 8-byte Spill
+ movq 32(%rsp), %r14 ## 8-byte Reload
+ adcq 1080(%rsp), %r14
+ adcq $0, %r15
+ movq %rax, %rdx
+ movq %rax, %r13
+ imulq 64(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 952(%rsp), %rdi
+ movq 40(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 952(%rsp), %r13
+ movq 72(%rsp), %rax ## 8-byte Reload
+ adcq 960(%rsp), %rax
+ movq %rax, 72(%rsp) ## 8-byte Spill
+ adcq 968(%rsp), %rbx
+ movq %rbx, 16(%rsp) ## 8-byte Spill
+ movq %r12, %rbx
+ adcq 976(%rsp), %rbx
+ movq 8(%rsp), %r12 ## 8-byte Reload
+ adcq 984(%rsp), %r12
+ adcq 992(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ movq (%rsp), %r13 ## 8-byte Reload
+ adcq 1000(%rsp), %r13
+ movq %r14, %rbp
+ adcq 1008(%rsp), %rbp
+ adcq 1016(%rsp), %r15
+ movq 48(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rdx
+ leaq 880(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq 944(%rsp), %r14
+ movq 72(%rsp), %rax ## 8-byte Reload
+ addq 880(%rsp), %rax
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ adcq 888(%rsp), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ adcq 896(%rsp), %rbx
+ adcq 904(%rsp), %r12
+ movq %r12, 8(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rcx ## 8-byte Reload
+ adcq 912(%rsp), %rcx
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq 920(%rsp), %r13
+ movq %r13, (%rsp) ## 8-byte Spill
+ adcq 928(%rsp), %rbp
+ movq %rbp, 32(%rsp) ## 8-byte Spill
+ adcq 936(%rsp), %r15
+ adcq $0, %r14
+ movq %rax, %rdx
+ movq %rax, %rbp
+ imulq 64(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 808(%rsp), %rdi
+ movq 40(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 808(%rsp), %rbp
+ movq 16(%rsp), %r13 ## 8-byte Reload
+ adcq 816(%rsp), %r13
+ movq %rbx, %r12
+ adcq 824(%rsp), %r12
+ movq 8(%rsp), %rbx ## 8-byte Reload
+ adcq 832(%rsp), %rbx
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq 840(%rsp), %rbp
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 848(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 32(%rsp), %rax ## 8-byte Reload
+ adcq 856(%rsp), %rax
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ adcq 864(%rsp), %r15
+ adcq 872(%rsp), %r14
+ movq 48(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rdx
+ leaq 736(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq 800(%rsp), %rax
+ movq %r13, %rcx
+ addq 736(%rsp), %rcx
+ adcq 744(%rsp), %r12
+ movq %r12, 24(%rsp) ## 8-byte Spill
+ adcq 752(%rsp), %rbx
+ movq %rbx, 8(%rsp) ## 8-byte Spill
+ adcq 760(%rsp), %rbp
+ movq %rbp, %r13
+ movq (%rsp), %rbp ## 8-byte Reload
+ adcq 768(%rsp), %rbp
+ movq 32(%rsp), %rbx ## 8-byte Reload
+ adcq 776(%rsp), %rbx
+ adcq 784(%rsp), %r15
+ adcq 792(%rsp), %r14
+ adcq $0, %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq %rcx, %rdx
+ movq %rcx, %r12
+ imulq 64(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 664(%rsp), %rdi
+ movq 40(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 664(%rsp), %r12
+ movq 24(%rsp), %rax ## 8-byte Reload
+ adcq 672(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %rax ## 8-byte Reload
+ adcq 680(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ adcq 688(%rsp), %r13
+ adcq 696(%rsp), %rbp
+ movq %rbp, (%rsp) ## 8-byte Spill
+ adcq 704(%rsp), %rbx
+ adcq 712(%rsp), %r15
+ adcq 720(%rsp), %r14
+ movq 16(%rsp), %r12 ## 8-byte Reload
+ adcq 728(%rsp), %r12
+ movq 48(%rsp), %rax ## 8-byte Reload
+ movq 32(%rax), %rdx
+ leaq 592(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq 656(%rsp), %rcx
+ movq 24(%rsp), %rax ## 8-byte Reload
+ addq 592(%rsp), %rax
+ movq 8(%rsp), %rbp ## 8-byte Reload
+ adcq 600(%rsp), %rbp
+ adcq 608(%rsp), %r13
+ movq %r13, 24(%rsp) ## 8-byte Spill
+ movq (%rsp), %r13 ## 8-byte Reload
+ adcq 616(%rsp), %r13
+ adcq 624(%rsp), %rbx
+ adcq 632(%rsp), %r15
+ adcq 640(%rsp), %r14
+ adcq 648(%rsp), %r12
+ movq %r12, 16(%rsp) ## 8-byte Spill
+ adcq $0, %rcx
+ movq %rcx, (%rsp) ## 8-byte Spill
+ movq %rax, %rdx
+ movq %rax, %r12
+ imulq 64(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 520(%rsp), %rdi
+ movq 40(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 520(%rsp), %r12
+ adcq 528(%rsp), %rbp
+ movq %rbp, 8(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %r12 ## 8-byte Reload
+ adcq 536(%rsp), %r12
+ movq %r13, %rbp
+ adcq 544(%rsp), %rbp
+ adcq 552(%rsp), %rbx
+ adcq 560(%rsp), %r15
+ adcq 568(%rsp), %r14
+ movq 16(%rsp), %r13 ## 8-byte Reload
+ adcq 576(%rsp), %r13
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 584(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rax ## 8-byte Reload
+ movq 40(%rax), %rdx
+ leaq 448(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq 512(%rsp), %rcx
+ movq 8(%rsp), %rax ## 8-byte Reload
+ addq 448(%rsp), %rax
+ adcq 456(%rsp), %r12
+ movq %r12, 24(%rsp) ## 8-byte Spill
+ adcq 464(%rsp), %rbp
+ adcq 472(%rsp), %rbx
+ adcq 480(%rsp), %r15
+ adcq 488(%rsp), %r14
+ adcq 496(%rsp), %r13
+ movq %r13, 16(%rsp) ## 8-byte Spill
+ movq (%rsp), %r13 ## 8-byte Reload
+ adcq 504(%rsp), %r13
+ adcq $0, %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ movq %rax, %rdx
+ movq %rax, %r12
+ imulq 64(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 376(%rsp), %rdi
+ movq 40(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 376(%rsp), %r12
+ movq 24(%rsp), %rax ## 8-byte Reload
+ adcq 384(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ adcq 392(%rsp), %rbp
+ adcq 400(%rsp), %rbx
+ adcq 408(%rsp), %r15
+ adcq 416(%rsp), %r14
+ movq 16(%rsp), %r12 ## 8-byte Reload
+ adcq 424(%rsp), %r12
+ adcq 432(%rsp), %r13
+ movq 8(%rsp), %rax ## 8-byte Reload
+ adcq 440(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rax ## 8-byte Reload
+ movq 48(%rax), %rdx
+ leaq 304(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq 368(%rsp), %rcx
+ movq 24(%rsp), %rax ## 8-byte Reload
+ addq 304(%rsp), %rax
+ adcq 312(%rsp), %rbp
+ movq %rbp, (%rsp) ## 8-byte Spill
+ adcq 320(%rsp), %rbx
+ adcq 328(%rsp), %r15
+ adcq 336(%rsp), %r14
+ adcq 344(%rsp), %r12
+ movq %r12, 16(%rsp) ## 8-byte Spill
+ adcq 352(%rsp), %r13
+ movq 8(%rsp), %rbp ## 8-byte Reload
+ adcq 360(%rsp), %rbp
+ adcq $0, %rcx
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ movq %rax, %rdx
+ movq %rax, %r12
+ imulq 64(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 232(%rsp), %rdi
+ movq 40(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 232(%rsp), %r12
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 240(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ adcq 248(%rsp), %rbx
+ adcq 256(%rsp), %r15
+ adcq 264(%rsp), %r14
+ movq 16(%rsp), %r12 ## 8-byte Reload
+ adcq 272(%rsp), %r12
+ adcq 280(%rsp), %r13
+ adcq 288(%rsp), %rbp
+ movq %rbp, 8(%rsp) ## 8-byte Spill
+ movq 32(%rsp), %rbp ## 8-byte Reload
+ adcq 296(%rsp), %rbp
+ movq 48(%rsp), %rax ## 8-byte Reload
+ movq 56(%rax), %rdx
+ leaq 160(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq 224(%rsp), %rcx
+ movq (%rsp), %rax ## 8-byte Reload
+ addq 160(%rsp), %rax
+ adcq 168(%rsp), %rbx
+ movq %rbx, 32(%rsp) ## 8-byte Spill
+ adcq 176(%rsp), %r15
+ adcq 184(%rsp), %r14
+ adcq 192(%rsp), %r12
+ movq %r12, 16(%rsp) ## 8-byte Spill
+ adcq 200(%rsp), %r13
+ movq 8(%rsp), %rbx ## 8-byte Reload
+ adcq 208(%rsp), %rbx
+ adcq 216(%rsp), %rbp
+ movq %rbp, %r12
+ adcq $0, %rcx
+ movq %rcx, (%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rdx ## 8-byte Reload
+ imulq %rax, %rdx
+ movq %rax, %rbp
+ leaq 88(%rsp), %rdi
+ movq 40(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 88(%rsp), %rbp
+ movq 32(%rsp), %r11 ## 8-byte Reload
+ adcq 96(%rsp), %r11
+ adcq 104(%rsp), %r15
+ adcq 112(%rsp), %r14
+ movq 16(%rsp), %rsi ## 8-byte Reload
+ adcq 120(%rsp), %rsi
+ movq %rsi, 16(%rsp) ## 8-byte Spill
+ adcq 128(%rsp), %r13
+ adcq 136(%rsp), %rbx
+ movq %rbx, 8(%rsp) ## 8-byte Spill
+ adcq 144(%rsp), %r12
+ movq (%rsp), %r8 ## 8-byte Reload
+ adcq 152(%rsp), %r8
+ movq %r11, %rax
+ movq 40(%rsp), %rbp ## 8-byte Reload
+ subq (%rbp), %rax
+ movq %r15, %rcx
+ sbbq 8(%rbp), %rcx
+ movq %r14, %rdx
+ sbbq 16(%rbp), %rdx
+ sbbq 24(%rbp), %rsi
+ movq %r13, %rdi
+ sbbq 32(%rbp), %rdi
+ movq %rbx, %r9
+ sbbq 40(%rbp), %r9
+ movq %r12, %r10
+ sbbq 48(%rbp), %r10
+ movq %rbp, %rbx
+ movq %r8, %rbp
+ sbbq 56(%rbx), %rbp
+ testq %rbp, %rbp
+ cmovsq %r11, %rax
+ movq 80(%rsp), %rbx ## 8-byte Reload
+ movq %rax, (%rbx)
+ cmovsq %r15, %rcx
+ movq %rcx, 8(%rbx)
+ cmovsq %r14, %rdx
+ movq %rdx, 16(%rbx)
+ cmovsq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 24(%rbx)
+ cmovsq %r13, %rdi
+ movq %rdi, 32(%rbx)
+ cmovsq 8(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, 40(%rbx)
+ cmovsq %r12, %r10
+ movq %r10, 48(%rbx)
+ cmovsq %r8, %rbp
+ movq %rbp, 56(%rbx)
+ addq $1240, %rsp ## imm = 0x4D8
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montRed8L
+ .p2align 4, 0x90
+_mcl_fp_montRed8L: ## @mcl_fp_montRed8L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $776, %rsp ## imm = 0x308
+ movq %rdx, %rax
+ movq %rdi, 192(%rsp) ## 8-byte Spill
+ movq -8(%rax), %rcx
+ movq %rcx, 104(%rsp) ## 8-byte Spill
+ movq (%rsi), %r15
+ movq 8(%rsi), %rdx
+ movq %rdx, 8(%rsp) ## 8-byte Spill
+ movq %r15, %rdx
+ imulq %rcx, %rdx
+ movq 120(%rsi), %rcx
+ movq %rcx, 112(%rsp) ## 8-byte Spill
+ movq 112(%rsi), %rcx
+ movq %rcx, 56(%rsp) ## 8-byte Spill
+ movq 104(%rsi), %rcx
+ movq %rcx, 96(%rsp) ## 8-byte Spill
+ movq 96(%rsi), %rcx
+ movq %rcx, 48(%rsp) ## 8-byte Spill
+ movq 88(%rsi), %rcx
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ movq 80(%rsi), %rcx
+ movq %rcx, 40(%rsp) ## 8-byte Spill
+ movq 72(%rsi), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ movq 64(%rsi), %r13
+ movq 56(%rsi), %rcx
+ movq %rcx, 64(%rsp) ## 8-byte Spill
+ movq 48(%rsi), %r14
+ movq 40(%rsi), %rcx
+ movq %rcx, 72(%rsp) ## 8-byte Spill
+ movq 32(%rsi), %r12
+ movq 24(%rsi), %rbx
+ movq 16(%rsi), %rbp
+ movq %rax, %rcx
+ movq (%rcx), %rax
+ movq %rax, 136(%rsp) ## 8-byte Spill
+ movq 56(%rcx), %rax
+ movq %rax, 184(%rsp) ## 8-byte Spill
+ movq 48(%rcx), %rax
+ movq %rax, 176(%rsp) ## 8-byte Spill
+ movq 40(%rcx), %rax
+ movq %rax, 168(%rsp) ## 8-byte Spill
+ movq 32(%rcx), %rax
+ movq %rax, 160(%rsp) ## 8-byte Spill
+ movq 24(%rcx), %rax
+ movq %rax, 152(%rsp) ## 8-byte Spill
+ movq 16(%rcx), %rax
+ movq %rax, 144(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %rax
+ movq %rax, 128(%rsp) ## 8-byte Spill
+ movq %rcx, %rsi
+ movq %rsi, 88(%rsp) ## 8-byte Spill
+ leaq 704(%rsp), %rdi
+ callq l_mulPv512x64
+ addq 704(%rsp), %r15
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 712(%rsp), %rcx
+ adcq 720(%rsp), %rbp
+ movq %rbp, 80(%rsp) ## 8-byte Spill
+ adcq 728(%rsp), %rbx
+ movq %rbx, 32(%rsp) ## 8-byte Spill
+ adcq 736(%rsp), %r12
+ movq %r12, 120(%rsp) ## 8-byte Spill
+ movq 72(%rsp), %rax ## 8-byte Reload
+ adcq 744(%rsp), %rax
+ movq %rax, 72(%rsp) ## 8-byte Spill
+ adcq 752(%rsp), %r14
+ movq %r14, %r12
+ movq 64(%rsp), %rax ## 8-byte Reload
+ adcq 760(%rsp), %rax
+ movq %rax, 64(%rsp) ## 8-byte Spill
+ adcq 768(%rsp), %r13
+ movq %r13, 8(%rsp) ## 8-byte Spill
+ adcq $0, 16(%rsp) ## 8-byte Folded Spill
+ movq 40(%rsp), %r15 ## 8-byte Reload
+ adcq $0, %r15
+ adcq $0, 24(%rsp) ## 8-byte Folded Spill
+ adcq $0, 48(%rsp) ## 8-byte Folded Spill
+ adcq $0, 96(%rsp) ## 8-byte Folded Spill
+ movq 56(%rsp), %r13 ## 8-byte Reload
+ adcq $0, %r13
+ movq 112(%rsp), %r14 ## 8-byte Reload
+ adcq $0, %r14
+ sbbq %rbx, %rbx
+ movq %rcx, %rbp
+ movq %rbp, %rdx
+ imulq 104(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 632(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ andl $1, %ebx
+ movq %rbx, %rax
+ addq 632(%rsp), %rbp
+ movq 80(%rsp), %rsi ## 8-byte Reload
+ adcq 640(%rsp), %rsi
+ movq 32(%rsp), %rcx ## 8-byte Reload
+ adcq 648(%rsp), %rcx
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ movq 120(%rsp), %rcx ## 8-byte Reload
+ adcq 656(%rsp), %rcx
+ movq %rcx, 120(%rsp) ## 8-byte Spill
+ movq 72(%rsp), %rcx ## 8-byte Reload
+ adcq 664(%rsp), %rcx
+ movq %rcx, 72(%rsp) ## 8-byte Spill
+ adcq 672(%rsp), %r12
+ movq 64(%rsp), %rcx ## 8-byte Reload
+ adcq 680(%rsp), %rcx
+ movq %rcx, 64(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 688(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ adcq 696(%rsp), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ adcq $0, %r15
+ movq %r15, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rbx ## 8-byte Reload
+ adcq $0, %rbx
+ movq 48(%rsp), %r15 ## 8-byte Reload
+ adcq $0, %r15
+ adcq $0, 96(%rsp) ## 8-byte Folded Spill
+ adcq $0, %r13
+ movq %r13, 56(%rsp) ## 8-byte Spill
+ adcq $0, %r14
+ movq %r14, 112(%rsp) ## 8-byte Spill
+ movq %rax, %rbp
+ adcq $0, %rbp
+ movq %rsi, %rdx
+ movq %rsi, %r14
+ imulq 104(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 560(%rsp), %rdi
+ movq 88(%rsp), %r13 ## 8-byte Reload
+ movq %r13, %rsi
+ callq l_mulPv512x64
+ addq 560(%rsp), %r14
+ movq 32(%rsp), %rcx ## 8-byte Reload
+ adcq 568(%rsp), %rcx
+ movq 120(%rsp), %rax ## 8-byte Reload
+ adcq 576(%rsp), %rax
+ movq %rax, 120(%rsp) ## 8-byte Spill
+ movq 72(%rsp), %rax ## 8-byte Reload
+ adcq 584(%rsp), %rax
+ movq %rax, 72(%rsp) ## 8-byte Spill
+ adcq 592(%rsp), %r12
+ movq %r12, 32(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %r14 ## 8-byte Reload
+ adcq 600(%rsp), %r14
+ movq 8(%rsp), %rax ## 8-byte Reload
+ adcq 608(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %rax ## 8-byte Reload
+ adcq 616(%rsp), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %rax ## 8-byte Reload
+ adcq 624(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ adcq $0, %rbx
+ movq %rbx, 24(%rsp) ## 8-byte Spill
+ adcq $0, %r15
+ movq %r15, 48(%rsp) ## 8-byte Spill
+ movq 96(%rsp), %rbx ## 8-byte Reload
+ adcq $0, %rbx
+ movq 56(%rsp), %r15 ## 8-byte Reload
+ adcq $0, %r15
+ adcq $0, 112(%rsp) ## 8-byte Folded Spill
+ adcq $0, %rbp
+ movq %rbp, 80(%rsp) ## 8-byte Spill
+ movq %rcx, %rbp
+ movq %rbp, %rdx
+ movq 104(%rsp), %r12 ## 8-byte Reload
+ imulq %r12, %rdx
+ leaq 488(%rsp), %rdi
+ movq %r13, %rsi
+ callq l_mulPv512x64
+ addq 488(%rsp), %rbp
+ movq 120(%rsp), %rax ## 8-byte Reload
+ adcq 496(%rsp), %rax
+ movq 72(%rsp), %rbp ## 8-byte Reload
+ adcq 504(%rsp), %rbp
+ movq 32(%rsp), %rcx ## 8-byte Reload
+ adcq 512(%rsp), %rcx
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ adcq 520(%rsp), %r14
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 528(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ adcq 536(%rsp), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %r13 ## 8-byte Reload
+ adcq 544(%rsp), %r13
+ movq 24(%rsp), %rcx ## 8-byte Reload
+ adcq 552(%rsp), %rcx
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq $0, 48(%rsp) ## 8-byte Folded Spill
+ adcq $0, %rbx
+ movq %rbx, 96(%rsp) ## 8-byte Spill
+ movq %r15, %rbx
+ adcq $0, %rbx
+ adcq $0, 112(%rsp) ## 8-byte Folded Spill
+ adcq $0, 80(%rsp) ## 8-byte Folded Spill
+ movq %rax, %rdx
+ movq %rax, %r15
+ imulq %r12, %rdx
+ leaq 416(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 416(%rsp), %r15
+ adcq 424(%rsp), %rbp
+ movq %rbp, %rax
+ movq 32(%rsp), %rcx ## 8-byte Reload
+ adcq 432(%rsp), %rcx
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ movq %r14, %r12
+ adcq 440(%rsp), %r12
+ movq 8(%rsp), %r14 ## 8-byte Reload
+ adcq 448(%rsp), %r14
+ movq 16(%rsp), %rbp ## 8-byte Reload
+ adcq 456(%rsp), %rbp
+ adcq 464(%rsp), %r13
+ movq 24(%rsp), %rcx ## 8-byte Reload
+ adcq 472(%rsp), %rcx
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rcx ## 8-byte Reload
+ adcq 480(%rsp), %rcx
+ movq %rcx, 48(%rsp) ## 8-byte Spill
+ adcq $0, 96(%rsp) ## 8-byte Folded Spill
+ adcq $0, %rbx
+ movq %rbx, 56(%rsp) ## 8-byte Spill
+ movq 112(%rsp), %r15 ## 8-byte Reload
+ adcq $0, %r15
+ adcq $0, 80(%rsp) ## 8-byte Folded Spill
+ movq %rax, %rbx
+ movq %rbx, %rdx
+ imulq 104(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 344(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 344(%rsp), %rbx
+ movq 32(%rsp), %rax ## 8-byte Reload
+ adcq 352(%rsp), %rax
+ adcq 360(%rsp), %r12
+ movq %r12, 64(%rsp) ## 8-byte Spill
+ adcq 368(%rsp), %r14
+ movq %r14, 8(%rsp) ## 8-byte Spill
+ adcq 376(%rsp), %rbp
+ movq %rbp, 16(%rsp) ## 8-byte Spill
+ adcq 384(%rsp), %r13
+ movq %r13, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %r13 ## 8-byte Reload
+ adcq 392(%rsp), %r13
+ movq 48(%rsp), %r12 ## 8-byte Reload
+ adcq 400(%rsp), %r12
+ movq 96(%rsp), %r14 ## 8-byte Reload
+ adcq 408(%rsp), %r14
+ movq 56(%rsp), %rbp ## 8-byte Reload
+ adcq $0, %rbp
+ movq %r15, %rbx
+ adcq $0, %rbx
+ adcq $0, 80(%rsp) ## 8-byte Folded Spill
+ movq %rax, %rdx
+ movq %rax, %r15
+ imulq 104(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 272(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 272(%rsp), %r15
+ movq 64(%rsp), %rcx ## 8-byte Reload
+ adcq 280(%rsp), %rcx
+ movq 8(%rsp), %rax ## 8-byte Reload
+ adcq 288(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %rax ## 8-byte Reload
+ adcq 296(%rsp), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %rax ## 8-byte Reload
+ adcq 304(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ adcq 312(%rsp), %r13
+ movq %r13, 24(%rsp) ## 8-byte Spill
+ adcq 320(%rsp), %r12
+ movq %r12, 48(%rsp) ## 8-byte Spill
+ adcq 328(%rsp), %r14
+ movq %r14, %r13
+ adcq 336(%rsp), %rbp
+ movq %rbp, %r12
+ adcq $0, %rbx
+ movq %rbx, %r14
+ movq 80(%rsp), %r15 ## 8-byte Reload
+ adcq $0, %r15
+ movq 104(%rsp), %rdx ## 8-byte Reload
+ movq %rcx, %rbx
+ imulq %rbx, %rdx
+ leaq 200(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 200(%rsp), %rbx
+ movq 8(%rsp), %rax ## 8-byte Reload
+ adcq 208(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %r8 ## 8-byte Reload
+ adcq 216(%rsp), %r8
+ movq %r8, 16(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %rdx ## 8-byte Reload
+ adcq 224(%rsp), %rdx
+ movq 24(%rsp), %rsi ## 8-byte Reload
+ adcq 232(%rsp), %rsi
+ movq 48(%rsp), %rdi ## 8-byte Reload
+ adcq 240(%rsp), %rdi
+ movq %r13, %rbp
+ adcq 248(%rsp), %rbp
+ movq %r12, %rbx
+ adcq 256(%rsp), %rbx
+ movq %rbx, 56(%rsp) ## 8-byte Spill
+ movq %r14, %r9
+ adcq 264(%rsp), %r9
+ adcq $0, %r15
+ movq %r15, %r10
+ subq 136(%rsp), %rax ## 8-byte Folded Reload
+ movq %r8, %rcx
+ sbbq 128(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rdx, %r13
+ sbbq 144(%rsp), %r13 ## 8-byte Folded Reload
+ movq %rsi, %r12
+ sbbq 152(%rsp), %r12 ## 8-byte Folded Reload
+ movq %rdi, %r14
+ sbbq 160(%rsp), %r14 ## 8-byte Folded Reload
+ movq %rbp, %r11
+ sbbq 168(%rsp), %r11 ## 8-byte Folded Reload
+ movq %rbx, %r8
+ sbbq 176(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r9, %r15
+ sbbq 184(%rsp), %r9 ## 8-byte Folded Reload
+ sbbq $0, %r10
+ andl $1, %r10d
+ cmovneq %r15, %r9
+ testb %r10b, %r10b
+ cmovneq 8(%rsp), %rax ## 8-byte Folded Reload
+ movq 192(%rsp), %rbx ## 8-byte Reload
+ movq %rax, (%rbx)
+ cmovneq 16(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 8(%rbx)
+ cmovneq %rdx, %r13
+ movq %r13, 16(%rbx)
+ cmovneq %rsi, %r12
+ movq %r12, 24(%rbx)
+ cmovneq %rdi, %r14
+ movq %r14, 32(%rbx)
+ cmovneq %rbp, %r11
+ movq %r11, 40(%rbx)
+ cmovneq 56(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, 48(%rbx)
+ movq %r9, 56(%rbx)
+ addq $776, %rsp ## imm = 0x308
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_addPre8L
+ .p2align 4, 0x90
+_mcl_fp_addPre8L: ## @mcl_fp_addPre8L
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r8
+ movq 56(%rsi), %r15
+ movq 48(%rdx), %r9
+ movq 48(%rsi), %r12
+ movq 40(%rdx), %r10
+ movq 32(%rdx), %r11
+ movq 24(%rdx), %r14
+ movq 16(%rdx), %rbx
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rbx
+ movq 40(%rsi), %r13
+ movq 24(%rsi), %rax
+ movq 32(%rsi), %rsi
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rbx, 16(%rdi)
+ adcq %r14, %rax
+ movq %rax, 24(%rdi)
+ adcq %r11, %rsi
+ movq %rsi, 32(%rdi)
+ adcq %r10, %r13
+ movq %r13, 40(%rdi)
+ adcq %r9, %r12
+ movq %r12, 48(%rdi)
+ adcq %r8, %r15
+ movq %r15, 56(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_subPre8L
+ .p2align 4, 0x90
+_mcl_fp_subPre8L: ## @mcl_fp_subPre8L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r8
+ movq 56(%rsi), %r15
+ movq 48(%rdx), %r9
+ movq 40(%rdx), %r10
+ movq 24(%rdx), %r11
+ movq 32(%rdx), %r14
+ movq (%rsi), %rbx
+ movq 8(%rsi), %r12
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %r12
+ movq 16(%rsi), %rcx
+ sbbq 16(%rdx), %rcx
+ movq 48(%rsi), %r13
+ movq 40(%rsi), %rdx
+ movq 32(%rsi), %rbp
+ movq 24(%rsi), %rsi
+ movq %rbx, (%rdi)
+ movq %r12, 8(%rdi)
+ movq %rcx, 16(%rdi)
+ sbbq %r11, %rsi
+ movq %rsi, 24(%rdi)
+ sbbq %r14, %rbp
+ movq %rbp, 32(%rdi)
+ sbbq %r10, %rdx
+ movq %rdx, 40(%rdi)
+ sbbq %r9, %r13
+ movq %r13, 48(%rdi)
+ sbbq %r8, %r15
+ movq %r15, 56(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_shr1_8L
+ .p2align 4, 0x90
+_mcl_fp_shr1_8L: ## @mcl_fp_shr1_8L
+## BB#0:
+ movq 56(%rsi), %r8
+ movq 48(%rsi), %r9
+ movq 40(%rsi), %r10
+ movq 32(%rsi), %r11
+ movq 24(%rsi), %rcx
+ movq 16(%rsi), %rdx
+ movq (%rsi), %rax
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rax
+ movq %rax, (%rdi)
+ shrdq $1, %rdx, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rcx, %rdx
+ movq %rdx, 16(%rdi)
+ shrdq $1, %r11, %rcx
+ movq %rcx, 24(%rdi)
+ shrdq $1, %r10, %r11
+ movq %r11, 32(%rdi)
+ shrdq $1, %r9, %r10
+ movq %r10, 40(%rdi)
+ shrdq $1, %r8, %r9
+ movq %r9, 48(%rdi)
+ shrq %r8
+ movq %r8, 56(%rdi)
+ retq
+
+ .globl _mcl_fp_add8L
+ .p2align 4, 0x90
+_mcl_fp_add8L: ## @mcl_fp_add8L
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r15
+ movq 56(%rsi), %r8
+ movq 48(%rdx), %r12
+ movq 48(%rsi), %r9
+ movq 40(%rsi), %r13
+ movq 24(%rsi), %r11
+ movq 32(%rsi), %r10
+ movq (%rdx), %r14
+ movq 8(%rdx), %rbx
+ addq (%rsi), %r14
+ adcq 8(%rsi), %rbx
+ movq 16(%rdx), %rax
+ adcq 16(%rsi), %rax
+ adcq 24(%rdx), %r11
+ movq 40(%rdx), %rsi
+ adcq 32(%rdx), %r10
+ movq %r14, (%rdi)
+ movq %rbx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ movq %r11, 24(%rdi)
+ movq %r10, 32(%rdi)
+ adcq %r13, %rsi
+ movq %rsi, 40(%rdi)
+ adcq %r12, %r9
+ movq %r9, 48(%rdi)
+ adcq %r15, %r8
+ movq %r8, 56(%rdi)
+ sbbq %rdx, %rdx
+ andl $1, %edx
+ subq (%rcx), %r14
+ sbbq 8(%rcx), %rbx
+ sbbq 16(%rcx), %rax
+ sbbq 24(%rcx), %r11
+ sbbq 32(%rcx), %r10
+ sbbq 40(%rcx), %rsi
+ sbbq 48(%rcx), %r9
+ sbbq 56(%rcx), %r8
+ sbbq $0, %rdx
+ testb $1, %dl
+ jne LBB120_2
+## BB#1: ## %nocarry
+ movq %r14, (%rdi)
+ movq %rbx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ movq %r11, 24(%rdi)
+ movq %r10, 32(%rdi)
+ movq %rsi, 40(%rdi)
+ movq %r9, 48(%rdi)
+ movq %r8, 56(%rdi)
+LBB120_2: ## %carry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_addNF8L
+ .p2align 4, 0x90
+_mcl_fp_addNF8L: ## @mcl_fp_addNF8L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r8
+ movq 48(%rdx), %rbp
+ movq 40(%rdx), %rbx
+ movq 32(%rdx), %rax
+ movq 24(%rdx), %r11
+ movq 16(%rdx), %r15
+ movq (%rdx), %r13
+ movq 8(%rdx), %r12
+ addq (%rsi), %r13
+ adcq 8(%rsi), %r12
+ adcq 16(%rsi), %r15
+ adcq 24(%rsi), %r11
+ adcq 32(%rsi), %rax
+ movq %rax, %r10
+ movq %r10, -24(%rsp) ## 8-byte Spill
+ adcq 40(%rsi), %rbx
+ movq %rbx, %r9
+ movq %r9, -16(%rsp) ## 8-byte Spill
+ adcq 48(%rsi), %rbp
+ movq %rbp, %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ adcq 56(%rsi), %r8
+ movq %r13, %rsi
+ subq (%rcx), %rsi
+ movq %r12, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r15, %rbx
+ sbbq 16(%rcx), %rbx
+ movq %r11, %r14
+ sbbq 24(%rcx), %r14
+ movq %r10, %rbp
+ sbbq 32(%rcx), %rbp
+ movq %r9, %r10
+ sbbq 40(%rcx), %r10
+ movq %rax, %r9
+ sbbq 48(%rcx), %r9
+ movq %r8, %rax
+ sbbq 56(%rcx), %rax
+ testq %rax, %rax
+ cmovsq %r13, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r12, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r15, %rbx
+ movq %rbx, 16(%rdi)
+ cmovsq %r11, %r14
+ movq %r14, 24(%rdi)
+ cmovsq -24(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 32(%rdi)
+ cmovsq -16(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r10, 40(%rdi)
+ cmovsq -8(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, 48(%rdi)
+ cmovsq %r8, %rax
+ movq %rax, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_sub8L
+ .p2align 4, 0x90
+_mcl_fp_sub8L: ## @mcl_fp_sub8L
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r12
+ movq 56(%rsi), %r8
+ movq 48(%rdx), %r13
+ movq (%rsi), %rax
+ movq 8(%rsi), %r10
+ xorl %ebx, %ebx
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r10
+ movq 16(%rsi), %r11
+ sbbq 16(%rdx), %r11
+ movq 24(%rsi), %r15
+ sbbq 24(%rdx), %r15
+ movq 32(%rsi), %r14
+ sbbq 32(%rdx), %r14
+ movq 48(%rsi), %r9
+ movq 40(%rsi), %rsi
+ sbbq 40(%rdx), %rsi
+ movq %rax, (%rdi)
+ movq %r10, 8(%rdi)
+ movq %r11, 16(%rdi)
+ movq %r15, 24(%rdi)
+ movq %r14, 32(%rdi)
+ movq %rsi, 40(%rdi)
+ sbbq %r13, %r9
+ movq %r9, 48(%rdi)
+ sbbq %r12, %r8
+ movq %r8, 56(%rdi)
+ sbbq $0, %rbx
+ testb $1, %bl
+ je LBB122_2
+## BB#1: ## %carry
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ movq 8(%rcx), %rax
+ adcq %r10, %rax
+ movq %rax, 8(%rdi)
+ movq 16(%rcx), %rax
+ adcq %r11, %rax
+ movq %rax, 16(%rdi)
+ movq 24(%rcx), %rax
+ adcq %r15, %rax
+ movq %rax, 24(%rdi)
+ movq 32(%rcx), %rax
+ adcq %r14, %rax
+ movq %rax, 32(%rdi)
+ movq 40(%rcx), %rax
+ adcq %rsi, %rax
+ movq %rax, 40(%rdi)
+ movq 48(%rcx), %rax
+ adcq %r9, %rax
+ movq %rax, 48(%rdi)
+ movq 56(%rcx), %rax
+ adcq %r8, %rax
+ movq %rax, 56(%rdi)
+LBB122_2: ## %nocarry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_subNF8L
+ .p2align 4, 0x90
+_mcl_fp_subNF8L: ## @mcl_fp_subNF8L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq %rdi, %r9
+ movdqu (%rdx), %xmm0
+ movdqu 16(%rdx), %xmm1
+ movdqu 32(%rdx), %xmm2
+ movdqu 48(%rdx), %xmm3
+ pshufd $78, %xmm3, %xmm4 ## xmm4 = xmm3[2,3,0,1]
+ movd %xmm4, %r12
+ movdqu (%rsi), %xmm4
+ movdqu 16(%rsi), %xmm5
+ movdqu 32(%rsi), %xmm8
+ movdqu 48(%rsi), %xmm7
+ pshufd $78, %xmm7, %xmm6 ## xmm6 = xmm7[2,3,0,1]
+ movd %xmm6, %rcx
+ movd %xmm3, %r13
+ movd %xmm7, %rdi
+ pshufd $78, %xmm2, %xmm3 ## xmm3 = xmm2[2,3,0,1]
+ movd %xmm3, %rbp
+ pshufd $78, %xmm8, %xmm3 ## xmm3 = xmm8[2,3,0,1]
+ movd %xmm3, %rdx
+ movd %xmm2, %rsi
+ pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1]
+ movd %xmm2, %r11
+ pshufd $78, %xmm5, %xmm2 ## xmm2 = xmm5[2,3,0,1]
+ movd %xmm1, %r15
+ pshufd $78, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,0,1]
+ movd %xmm1, %rbx
+ pshufd $78, %xmm4, %xmm1 ## xmm1 = xmm4[2,3,0,1]
+ movd %xmm0, %rax
+ movd %xmm4, %r14
+ subq %rax, %r14
+ movd %xmm1, %r10
+ sbbq %rbx, %r10
+ movd %xmm5, %rbx
+ sbbq %r15, %rbx
+ movd %xmm2, %r15
+ sbbq %r11, %r15
+ movd %xmm8, %r11
+ sbbq %rsi, %r11
+ sbbq %rbp, %rdx
+ movq %rdx, -24(%rsp) ## 8-byte Spill
+ sbbq %r13, %rdi
+ movq %rdi, -16(%rsp) ## 8-byte Spill
+ sbbq %r12, %rcx
+ movq %rcx, -8(%rsp) ## 8-byte Spill
+ movq %rcx, %rbp
+ sarq $63, %rbp
+ movq 56(%r8), %r12
+ andq %rbp, %r12
+ movq 48(%r8), %r13
+ andq %rbp, %r13
+ movq 40(%r8), %rdi
+ andq %rbp, %rdi
+ movq 32(%r8), %rsi
+ andq %rbp, %rsi
+ movq 24(%r8), %rdx
+ andq %rbp, %rdx
+ movq 16(%r8), %rcx
+ andq %rbp, %rcx
+ movq 8(%r8), %rax
+ andq %rbp, %rax
+ andq (%r8), %rbp
+ addq %r14, %rbp
+ adcq %r10, %rax
+ movq %rbp, (%r9)
+ adcq %rbx, %rcx
+ movq %rax, 8(%r9)
+ movq %rcx, 16(%r9)
+ adcq %r15, %rdx
+ movq %rdx, 24(%r9)
+ adcq %r11, %rsi
+ movq %rsi, 32(%r9)
+ adcq -24(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 40(%r9)
+ adcq -16(%rsp), %r13 ## 8-byte Folded Reload
+ movq %r13, 48(%r9)
+ adcq -8(%rsp), %r12 ## 8-byte Folded Reload
+ movq %r12, 56(%r9)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_add8L
+ .p2align 4, 0x90
+_mcl_fpDbl_add8L: ## @mcl_fpDbl_add8L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq 120(%rdx), %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ movq 112(%rdx), %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ movq 104(%rdx), %rax
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ movq 96(%rdx), %r14
+ movq 24(%rsi), %r15
+ movq 32(%rsi), %r11
+ movq 16(%rdx), %r12
+ movq (%rdx), %rbx
+ movq 8(%rdx), %rax
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %rax
+ adcq 16(%rsi), %r12
+ adcq 24(%rdx), %r15
+ adcq 32(%rdx), %r11
+ movq 88(%rdx), %rbp
+ movq 80(%rdx), %r13
+ movq %rbx, (%rdi)
+ movq 72(%rdx), %r10
+ movq %rax, 8(%rdi)
+ movq 64(%rdx), %r9
+ movq %r12, 16(%rdi)
+ movq 40(%rdx), %r12
+ movq %r15, 24(%rdi)
+ movq 40(%rsi), %rbx
+ adcq %r12, %rbx
+ movq 56(%rdx), %r15
+ movq 48(%rdx), %r12
+ movq %r11, 32(%rdi)
+ movq 48(%rsi), %rdx
+ adcq %r12, %rdx
+ movq 120(%rsi), %r12
+ movq %rbx, 40(%rdi)
+ movq 56(%rsi), %rax
+ adcq %r15, %rax
+ movq 112(%rsi), %rcx
+ movq %rdx, 48(%rdi)
+ movq 64(%rsi), %rbx
+ adcq %r9, %rbx
+ movq 104(%rsi), %rdx
+ movq %rax, 56(%rdi)
+ movq 72(%rsi), %r9
+ adcq %r10, %r9
+ movq 80(%rsi), %r11
+ adcq %r13, %r11
+ movq 96(%rsi), %rax
+ movq 88(%rsi), %r15
+ adcq %rbp, %r15
+ adcq %r14, %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq %rdx, %rax
+ adcq -24(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ adcq -16(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -16(%rsp) ## 8-byte Spill
+ adcq -32(%rsp), %r12 ## 8-byte Folded Reload
+ movq %r12, -32(%rsp) ## 8-byte Spill
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ movq %rbx, %rsi
+ subq (%r8), %rsi
+ movq %r9, %rdx
+ sbbq 8(%r8), %rdx
+ movq %r11, %r10
+ sbbq 16(%r8), %r10
+ movq %r15, %r14
+ sbbq 24(%r8), %r14
+ movq -8(%rsp), %r13 ## 8-byte Reload
+ sbbq 32(%r8), %r13
+ movq %rax, %r12
+ sbbq 40(%r8), %r12
+ movq %rcx, %rax
+ sbbq 48(%r8), %rax
+ movq -32(%rsp), %rcx ## 8-byte Reload
+ sbbq 56(%r8), %rcx
+ sbbq $0, %rbp
+ andl $1, %ebp
+ cmovneq %rbx, %rsi
+ movq %rsi, 64(%rdi)
+ testb %bpl, %bpl
+ cmovneq %r9, %rdx
+ movq %rdx, 72(%rdi)
+ cmovneq %r11, %r10
+ movq %r10, 80(%rdi)
+ cmovneq %r15, %r14
+ movq %r14, 88(%rdi)
+ cmovneq -8(%rsp), %r13 ## 8-byte Folded Reload
+ movq %r13, 96(%rdi)
+ cmovneq -24(%rsp), %r12 ## 8-byte Folded Reload
+ movq %r12, 104(%rdi)
+ cmovneq -16(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 112(%rdi)
+ cmovneq -32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 120(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_sub8L
+ .p2align 4, 0x90
+_mcl_fpDbl_sub8L: ## @mcl_fpDbl_sub8L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r15
+ movq 120(%rdx), %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq 112(%rdx), %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ movq 104(%rdx), %rax
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ movq 16(%rsi), %r9
+ movq (%rsi), %r12
+ movq 8(%rsi), %r14
+ xorl %r8d, %r8d
+ subq (%rdx), %r12
+ sbbq 8(%rdx), %r14
+ sbbq 16(%rdx), %r9
+ movq 24(%rsi), %rbx
+ sbbq 24(%rdx), %rbx
+ movq 32(%rsi), %r13
+ sbbq 32(%rdx), %r13
+ movq 96(%rdx), %rbp
+ movq 88(%rdx), %r11
+ movq %r12, (%rdi)
+ movq 80(%rdx), %r12
+ movq %r14, 8(%rdi)
+ movq 72(%rdx), %r10
+ movq %r9, 16(%rdi)
+ movq 40(%rdx), %r9
+ movq %rbx, 24(%rdi)
+ movq 40(%rsi), %rbx
+ sbbq %r9, %rbx
+ movq 48(%rdx), %r9
+ movq %r13, 32(%rdi)
+ movq 48(%rsi), %r14
+ sbbq %r9, %r14
+ movq 64(%rdx), %r13
+ movq 56(%rdx), %r9
+ movq %rbx, 40(%rdi)
+ movq 56(%rsi), %rdx
+ sbbq %r9, %rdx
+ movq 120(%rsi), %rcx
+ movq %r14, 48(%rdi)
+ movq 64(%rsi), %rbx
+ sbbq %r13, %rbx
+ movq 112(%rsi), %rax
+ movq %rdx, 56(%rdi)
+ movq 72(%rsi), %r9
+ sbbq %r10, %r9
+ movq 80(%rsi), %r13
+ sbbq %r12, %r13
+ movq 88(%rsi), %r12
+ sbbq %r11, %r12
+ movq 104(%rsi), %rdx
+ movq 96(%rsi), %r14
+ sbbq %rbp, %r14
+ sbbq -24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -24(%rsp) ## 8-byte Spill
+ sbbq -16(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ sbbq -8(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -8(%rsp) ## 8-byte Spill
+ movl $0, %ebp
+ sbbq $0, %rbp
+ andl $1, %ebp
+ movq (%r15), %r11
+ cmoveq %r8, %r11
+ testb %bpl, %bpl
+ movq 16(%r15), %rbp
+ cmoveq %r8, %rbp
+ movq 8(%r15), %rsi
+ cmoveq %r8, %rsi
+ movq 56(%r15), %r10
+ cmoveq %r8, %r10
+ movq 48(%r15), %rdx
+ cmoveq %r8, %rdx
+ movq 40(%r15), %rcx
+ cmoveq %r8, %rcx
+ movq 32(%r15), %rax
+ cmoveq %r8, %rax
+ cmovneq 24(%r15), %r8
+ addq %rbx, %r11
+ adcq %r9, %rsi
+ movq %r11, 64(%rdi)
+ adcq %r13, %rbp
+ movq %rsi, 72(%rdi)
+ movq %rbp, 80(%rdi)
+ adcq %r12, %r8
+ movq %r8, 88(%rdi)
+ adcq %r14, %rax
+ movq %rax, 96(%rdi)
+ adcq -24(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 104(%rdi)
+ adcq -16(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 112(%rdi)
+ adcq -8(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r10, 120(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .p2align 4, 0x90
+l_mulPv576x64: ## @mulPv576x64
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rbx
+ movq %rbx, %rax
+ mulq (%rsi)
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ movq %rax, (%rdi)
+ movq %rbx, %rax
+ mulq 64(%rsi)
+ movq %rdx, %r10
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq 56(%rsi)
+ movq %rdx, %r14
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq 48(%rsi)
+ movq %rdx, %r12
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq 40(%rsi)
+ movq %rdx, %rcx
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq 32(%rsi)
+ movq %rdx, %rbp
+ movq %rax, %r8
+ movq %rbx, %rax
+ mulq 24(%rsi)
+ movq %rdx, %r9
+ movq %rax, %r11
+ movq %rbx, %rax
+ mulq 16(%rsi)
+ movq %rdx, %r15
+ movq %rax, %r13
+ movq %rbx, %rax
+ mulq 8(%rsi)
+ addq -32(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 8(%rdi)
+ adcq %r13, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r11, %r15
+ movq %r15, 24(%rdi)
+ adcq %r8, %r9
+ movq %r9, 32(%rdi)
+ adcq -40(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 40(%rdi)
+ adcq -24(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 48(%rdi)
+ adcq -16(%rsp), %r12 ## 8-byte Folded Reload
+ movq %r12, 56(%rdi)
+ adcq -8(%rsp), %r14 ## 8-byte Folded Reload
+ movq %r14, 64(%rdi)
+ adcq $0, %r10
+ movq %r10, 72(%rdi)
+ movq %rdi, %rax
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_mulUnitPre9L
+ .p2align 4, 0x90
+_mcl_fp_mulUnitPre9L: ## @mcl_fp_mulUnitPre9L
+## BB#0:
+ pushq %r14
+ pushq %rbx
+ subq $88, %rsp
+ movq %rdi, %rbx
+ leaq 8(%rsp), %rdi
+ callq l_mulPv576x64
+ movq 80(%rsp), %r8
+ movq 72(%rsp), %r9
+ movq 64(%rsp), %r10
+ movq 56(%rsp), %r11
+ movq 48(%rsp), %r14
+ movq 40(%rsp), %rax
+ movq 32(%rsp), %rcx
+ movq 24(%rsp), %rdx
+ movq 8(%rsp), %rsi
+ movq 16(%rsp), %rdi
+ movq %rsi, (%rbx)
+ movq %rdi, 8(%rbx)
+ movq %rdx, 16(%rbx)
+ movq %rcx, 24(%rbx)
+ movq %rax, 32(%rbx)
+ movq %r14, 40(%rbx)
+ movq %r11, 48(%rbx)
+ movq %r10, 56(%rbx)
+ movq %r9, 64(%rbx)
+ movq %r8, 72(%rbx)
+ addq $88, %rsp
+ popq %rbx
+ popq %r14
+ retq
+
+ .globl _mcl_fpDbl_mulPre9L
+ .p2align 4, 0x90
+_mcl_fpDbl_mulPre9L: ## @mcl_fpDbl_mulPre9L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $808, %rsp ## imm = 0x328
+ movq %rdx, %rax
+ movq %rdi, %r12
+ movq (%rax), %rdx
+ movq %rax, %rbx
+ movq %rbx, 80(%rsp) ## 8-byte Spill
+ leaq 728(%rsp), %rdi
+ movq %rsi, %rbp
+ movq %rbp, 72(%rsp) ## 8-byte Spill
+ callq l_mulPv576x64
+ movq 800(%rsp), %r13
+ movq 792(%rsp), %rax
+ movq %rax, 48(%rsp) ## 8-byte Spill
+ movq 784(%rsp), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq 776(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq 768(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 760(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ movq 752(%rsp), %rax
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ movq 744(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ movq 728(%rsp), %rax
+ movq 736(%rsp), %r14
+ movq %rax, (%r12)
+ movq %r12, 64(%rsp) ## 8-byte Spill
+ movq 8(%rbx), %rdx
+ leaq 648(%rsp), %rdi
+ movq %rbp, %rsi
+ callq l_mulPv576x64
+ movq 720(%rsp), %r8
+ movq 712(%rsp), %rcx
+ movq 704(%rsp), %rdx
+ movq 696(%rsp), %rsi
+ movq 688(%rsp), %rdi
+ movq 680(%rsp), %rbp
+ addq 648(%rsp), %r14
+ movq 672(%rsp), %rax
+ movq 656(%rsp), %rbx
+ movq 664(%rsp), %r15
+ movq %r14, 8(%r12)
+ adcq 24(%rsp), %rbx ## 8-byte Folded Reload
+ adcq 32(%rsp), %r15 ## 8-byte Folded Reload
+ adcq 40(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, %r14
+ adcq (%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 32(%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 40(%rsp) ## 8-byte Spill
+ adcq 48(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, (%rsp) ## 8-byte Spill
+ adcq %r13, %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 16(%rsp) ## 8-byte Spill
+ movq 80(%rsp), %r13 ## 8-byte Reload
+ movq 16(%r13), %rdx
+ leaq 568(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 640(%rsp), %r8
+ movq 632(%rsp), %r9
+ movq 624(%rsp), %r10
+ movq 616(%rsp), %rdi
+ movq 608(%rsp), %rbp
+ movq 600(%rsp), %rcx
+ addq 568(%rsp), %rbx
+ movq 592(%rsp), %rdx
+ movq 576(%rsp), %r12
+ movq 584(%rsp), %rsi
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq %rbx, 16(%rax)
+ adcq %r15, %r12
+ adcq %r14, %rsi
+ movq %rsi, 48(%rsp) ## 8-byte Spill
+ adcq 24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 56(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq 40(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 32(%rsp) ## 8-byte Spill
+ adcq (%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 40(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r10, (%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, 8(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 16(%rsp) ## 8-byte Spill
+ movq 24(%r13), %rdx
+ leaq 488(%rsp), %rdi
+ movq 72(%rsp), %r15 ## 8-byte Reload
+ movq %r15, %rsi
+ callq l_mulPv576x64
+ movq 560(%rsp), %r8
+ movq 552(%rsp), %rcx
+ movq 544(%rsp), %rdx
+ movq 536(%rsp), %rsi
+ movq 528(%rsp), %rdi
+ movq 520(%rsp), %rbp
+ addq 488(%rsp), %r12
+ movq 512(%rsp), %rax
+ movq 496(%rsp), %rbx
+ movq 504(%rsp), %r13
+ movq 64(%rsp), %r14 ## 8-byte Reload
+ movq %r12, 24(%r14)
+ adcq 48(%rsp), %rbx ## 8-byte Folded Reload
+ adcq 56(%rsp), %r13 ## 8-byte Folded Reload
+ adcq 24(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 32(%rsp) ## 8-byte Spill
+ adcq 40(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 40(%rsp) ## 8-byte Spill
+ adcq (%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, (%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 8(%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 48(%rsp) ## 8-byte Spill
+ movq 80(%rsp), %r12 ## 8-byte Reload
+ movq 32(%r12), %rdx
+ leaq 408(%rsp), %rdi
+ movq %r15, %rsi
+ callq l_mulPv576x64
+ movq 480(%rsp), %r8
+ movq 472(%rsp), %r9
+ movq 464(%rsp), %rdx
+ movq 456(%rsp), %rsi
+ movq 448(%rsp), %rdi
+ movq 440(%rsp), %rbp
+ addq 408(%rsp), %rbx
+ movq 432(%rsp), %rax
+ movq 416(%rsp), %r15
+ movq 424(%rsp), %rcx
+ movq %rbx, 32(%r14)
+ adcq %r13, %r15
+ adcq 24(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 56(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ adcq 40(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 32(%rsp) ## 8-byte Spill
+ adcq (%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 40(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, (%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 8(%rsp) ## 8-byte Spill
+ adcq 48(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, 16(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 48(%rsp) ## 8-byte Spill
+ movq %r12, %r14
+ movq 40(%r14), %rdx
+ leaq 328(%rsp), %rdi
+ movq 72(%rsp), %r13 ## 8-byte Reload
+ movq %r13, %rsi
+ callq l_mulPv576x64
+ movq 400(%rsp), %r8
+ movq 392(%rsp), %r9
+ movq 384(%rsp), %rsi
+ movq 376(%rsp), %rdi
+ movq 368(%rsp), %rbx
+ movq 360(%rsp), %rbp
+ addq 328(%rsp), %r15
+ movq 352(%rsp), %rcx
+ movq 336(%rsp), %r12
+ movq 344(%rsp), %rdx
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq %r15, 40(%rax)
+ adcq 56(%rsp), %r12 ## 8-byte Folded Reload
+ adcq 24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 56(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq 40(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 32(%rsp) ## 8-byte Spill
+ adcq (%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, 40(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, (%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 8(%rsp) ## 8-byte Spill
+ adcq 48(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, 16(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 48(%rsp) ## 8-byte Spill
+ movq 48(%r14), %rdx
+ leaq 248(%rsp), %rdi
+ movq %r13, %rsi
+ movq %r13, %r15
+ callq l_mulPv576x64
+ movq 320(%rsp), %r8
+ movq 312(%rsp), %r9
+ movq 304(%rsp), %rsi
+ movq 296(%rsp), %rdi
+ movq 288(%rsp), %rbx
+ movq 280(%rsp), %rbp
+ addq 248(%rsp), %r12
+ movq 272(%rsp), %rcx
+ movq 256(%rsp), %r13
+ movq 264(%rsp), %rdx
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq %r12, 48(%rax)
+ adcq 56(%rsp), %r13 ## 8-byte Folded Reload
+ adcq 24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 56(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq 40(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 32(%rsp) ## 8-byte Spill
+ adcq (%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, 40(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, (%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 8(%rsp) ## 8-byte Spill
+ adcq 48(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, 16(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 48(%rsp) ## 8-byte Spill
+ movq 56(%r14), %rdx
+ leaq 168(%rsp), %rdi
+ movq %r15, %rsi
+ callq l_mulPv576x64
+ movq 240(%rsp), %rcx
+ movq 232(%rsp), %rdx
+ movq 224(%rsp), %rsi
+ movq 216(%rsp), %rdi
+ movq 208(%rsp), %rbx
+ addq 168(%rsp), %r13
+ movq 200(%rsp), %r12
+ movq 192(%rsp), %rbp
+ movq 176(%rsp), %r14
+ movq 184(%rsp), %r15
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq %r13, 56(%rax)
+ adcq 56(%rsp), %r14 ## 8-byte Folded Reload
+ adcq 24(%rsp), %r15 ## 8-byte Folded Reload
+ adcq 32(%rsp), %rbp ## 8-byte Folded Reload
+ adcq 40(%rsp), %r12 ## 8-byte Folded Reload
+ adcq (%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, %r13
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, (%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 8(%rsp) ## 8-byte Spill
+ adcq 48(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ adcq $0, %rcx
+ movq %rcx, 48(%rsp) ## 8-byte Spill
+ movq 80(%rsp), %rax ## 8-byte Reload
+ movq 64(%rax), %rdx
+ leaq 88(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 88(%rsp), %r14
+ adcq 96(%rsp), %r15
+ movq 160(%rsp), %r8
+ adcq 104(%rsp), %rbp
+ movq 152(%rsp), %r9
+ movq 144(%rsp), %rdx
+ movq 136(%rsp), %rsi
+ movq 128(%rsp), %rdi
+ movq 120(%rsp), %rbx
+ movq 112(%rsp), %rax
+ movq 64(%rsp), %rcx ## 8-byte Reload
+ movq %r14, 64(%rcx)
+ movq %r15, 72(%rcx)
+ adcq %r12, %rax
+ movq %rbp, 80(%rcx)
+ movq %rax, 88(%rcx)
+ adcq %r13, %rbx
+ movq %rbx, 96(%rcx)
+ adcq (%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 104(%rcx)
+ adcq 8(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 112(%rcx)
+ adcq 16(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 120(%rcx)
+ adcq 48(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, 128(%rcx)
+ adcq $0, %r8
+ movq %r8, 136(%rcx)
+ addq $808, %rsp ## imm = 0x328
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_sqrPre9L
+ .p2align 4, 0x90
+_mcl_fpDbl_sqrPre9L: ## @mcl_fpDbl_sqrPre9L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $808, %rsp ## imm = 0x328
+ movq %rsi, %r15
+ movq %rdi, %r14
+ movq (%r15), %rdx
+ leaq 728(%rsp), %rdi
+ callq l_mulPv576x64
+ movq 800(%rsp), %rax
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ movq 792(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ movq 784(%rsp), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq 776(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq 768(%rsp), %rax
+ movq %rax, 56(%rsp) ## 8-byte Spill
+ movq 760(%rsp), %rax
+ movq %rax, 48(%rsp) ## 8-byte Spill
+ movq 752(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ movq 744(%rsp), %rax
+ movq %rax, 80(%rsp) ## 8-byte Spill
+ movq 728(%rsp), %rax
+ movq 736(%rsp), %r12
+ movq %rax, (%r14)
+ movq %r14, 72(%rsp) ## 8-byte Spill
+ movq 8(%r15), %rdx
+ leaq 648(%rsp), %rdi
+ movq %r15, %rsi
+ callq l_mulPv576x64
+ movq 720(%rsp), %r8
+ movq 712(%rsp), %rcx
+ movq 704(%rsp), %rdx
+ movq 696(%rsp), %rsi
+ movq 688(%rsp), %rdi
+ movq 680(%rsp), %rbp
+ addq 648(%rsp), %r12
+ movq 672(%rsp), %rax
+ movq 656(%rsp), %rbx
+ movq 664(%rsp), %r13
+ movq %r12, 8(%r14)
+ adcq 80(%rsp), %rbx ## 8-byte Folded Reload
+ adcq 40(%rsp), %r13 ## 8-byte Folded Reload
+ adcq 48(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ adcq 56(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 48(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 56(%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 8(%rsp) ## 8-byte Spill
+ adcq 24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 32(%rsp) ## 8-byte Spill
+ movq %r15, 64(%rsp) ## 8-byte Spill
+ movq 16(%r15), %rdx
+ leaq 568(%rsp), %rdi
+ movq %r15, %rsi
+ callq l_mulPv576x64
+ movq 640(%rsp), %r8
+ movq 632(%rsp), %rcx
+ movq 624(%rsp), %rdx
+ movq 616(%rsp), %rsi
+ movq 608(%rsp), %rdi
+ movq 600(%rsp), %rbp
+ addq 568(%rsp), %rbx
+ movq 592(%rsp), %rax
+ movq 576(%rsp), %r14
+ movq 584(%rsp), %r12
+ movq 72(%rsp), %r15 ## 8-byte Reload
+ movq %rbx, 16(%r15)
+ adcq %r13, %r14
+ adcq 40(%rsp), %r12 ## 8-byte Folded Reload
+ adcq 48(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ adcq 56(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 48(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 56(%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 8(%rsp) ## 8-byte Spill
+ adcq 24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 32(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rsi ## 8-byte Reload
+ movq 24(%rsi), %rdx
+ leaq 488(%rsp), %rdi
+ callq l_mulPv576x64
+ movq 560(%rsp), %r8
+ movq 552(%rsp), %rcx
+ movq 544(%rsp), %rdx
+ movq 536(%rsp), %rsi
+ movq 528(%rsp), %rdi
+ movq 520(%rsp), %rbp
+ addq 488(%rsp), %r14
+ movq 512(%rsp), %rax
+ movq 496(%rsp), %rbx
+ movq 504(%rsp), %r13
+ movq %r14, 24(%r15)
+ adcq %r12, %rbx
+ adcq 40(%rsp), %r13 ## 8-byte Folded Reload
+ adcq 48(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ adcq 56(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 48(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 56(%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 8(%rsp) ## 8-byte Spill
+ adcq 24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 32(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rsi ## 8-byte Reload
+ movq 32(%rsi), %rdx
+ leaq 408(%rsp), %rdi
+ callq l_mulPv576x64
+ movq 480(%rsp), %r8
+ movq 472(%rsp), %rcx
+ movq 464(%rsp), %rdx
+ movq 456(%rsp), %rsi
+ movq 448(%rsp), %rdi
+ movq 440(%rsp), %rbp
+ addq 408(%rsp), %rbx
+ movq 432(%rsp), %rax
+ movq 416(%rsp), %r14
+ movq 424(%rsp), %r12
+ movq %rbx, 32(%r15)
+ adcq %r13, %r14
+ adcq 40(%rsp), %r12 ## 8-byte Folded Reload
+ adcq 48(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ adcq 56(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 48(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 56(%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 8(%rsp) ## 8-byte Spill
+ adcq 24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 32(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rsi ## 8-byte Reload
+ movq 40(%rsi), %rdx
+ leaq 328(%rsp), %rdi
+ callq l_mulPv576x64
+ movq 400(%rsp), %r8
+ movq 392(%rsp), %rcx
+ movq 384(%rsp), %rdx
+ movq 376(%rsp), %rsi
+ movq 368(%rsp), %rdi
+ movq 360(%rsp), %rbp
+ addq 328(%rsp), %r14
+ movq 352(%rsp), %rax
+ movq 336(%rsp), %rbx
+ movq 344(%rsp), %r13
+ movq %r14, 40(%r15)
+ adcq %r12, %rbx
+ adcq 40(%rsp), %r13 ## 8-byte Folded Reload
+ adcq 48(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ adcq 56(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 48(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 56(%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 8(%rsp) ## 8-byte Spill
+ adcq 24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 32(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rsi ## 8-byte Reload
+ movq 48(%rsi), %rdx
+ leaq 248(%rsp), %rdi
+ callq l_mulPv576x64
+ movq 320(%rsp), %r8
+ movq 312(%rsp), %rcx
+ movq 304(%rsp), %rdx
+ movq 296(%rsp), %rsi
+ movq 288(%rsp), %rdi
+ movq 280(%rsp), %rbp
+ addq 248(%rsp), %rbx
+ movq 272(%rsp), %rax
+ movq 256(%rsp), %r12
+ movq 264(%rsp), %r14
+ movq %rbx, 48(%r15)
+ adcq %r13, %r12
+ adcq 40(%rsp), %r14 ## 8-byte Folded Reload
+ adcq 48(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ adcq 56(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 48(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 56(%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 8(%rsp) ## 8-byte Spill
+ adcq 24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 32(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rsi ## 8-byte Reload
+ movq 56(%rsi), %rdx
+ leaq 168(%rsp), %rdi
+ callq l_mulPv576x64
+ movq 240(%rsp), %r8
+ movq 232(%rsp), %rdx
+ movq 224(%rsp), %rsi
+ movq 216(%rsp), %rdi
+ movq 208(%rsp), %rbx
+ movq 200(%rsp), %rcx
+ addq 168(%rsp), %r12
+ movq 192(%rsp), %r15
+ movq 176(%rsp), %r13
+ movq 184(%rsp), %rbp
+ movq 72(%rsp), %rax ## 8-byte Reload
+ movq %r12, 56(%rax)
+ adcq %r14, %r13
+ adcq 40(%rsp), %rbp ## 8-byte Folded Reload
+ adcq 48(%rsp), %r15 ## 8-byte Folded Reload
+ adcq 56(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, %r12
+ adcq 8(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, %r14
+ adcq 16(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 8(%rsp) ## 8-byte Spill
+ adcq 24(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 16(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 24(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 32(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rsi ## 8-byte Reload
+ movq 64(%rsi), %rdx
+ leaq 88(%rsp), %rdi
+ callq l_mulPv576x64
+ addq 88(%rsp), %r13
+ adcq 96(%rsp), %rbp
+ movq 160(%rsp), %r8
+ adcq 104(%rsp), %r15
+ movq 152(%rsp), %r9
+ movq 144(%rsp), %rdx
+ movq 136(%rsp), %rsi
+ movq 128(%rsp), %rdi
+ movq 120(%rsp), %rbx
+ movq 112(%rsp), %rax
+ movq 72(%rsp), %rcx ## 8-byte Reload
+ movq %r13, 64(%rcx)
+ movq %rbp, 72(%rcx)
+ adcq %r12, %rax
+ movq %r15, 80(%rcx)
+ movq %rax, 88(%rcx)
+ adcq %r14, %rbx
+ movq %rbx, 96(%rcx)
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 104(%rcx)
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 112(%rcx)
+ adcq 24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 120(%rcx)
+ adcq 32(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, 128(%rcx)
+ adcq $0, %r8
+ movq %r8, 136(%rcx)
+ addq $808, %rsp ## imm = 0x328
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_mont9L
+ .p2align 4, 0x90
+_mcl_fp_mont9L: ## @mcl_fp_mont9L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $1560, %rsp ## imm = 0x618
+ movq %rcx, 72(%rsp) ## 8-byte Spill
+ movq %rdx, 96(%rsp) ## 8-byte Spill
+ movq %rsi, 88(%rsp) ## 8-byte Spill
+ movq %rdi, 112(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %rbx
+ movq %rbx, 80(%rsp) ## 8-byte Spill
+ movq (%rdx), %rdx
+ leaq 1480(%rsp), %rdi
+ callq l_mulPv576x64
+ movq 1480(%rsp), %r14
+ movq 1488(%rsp), %r15
+ movq %r14, %rdx
+ imulq %rbx, %rdx
+ movq 1552(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ movq 1544(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ movq 1536(%rsp), %rax
+ movq %rax, 56(%rsp) ## 8-byte Spill
+ movq 1528(%rsp), %r12
+ movq 1520(%rsp), %r13
+ movq 1512(%rsp), %rbx
+ movq 1504(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 1496(%rsp), %rbp
+ leaq 1400(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 1400(%rsp), %r14
+ adcq 1408(%rsp), %r15
+ adcq 1416(%rsp), %rbp
+ movq %rbp, 8(%rsp) ## 8-byte Spill
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 1424(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ adcq 1432(%rsp), %rbx
+ movq %rbx, 32(%rsp) ## 8-byte Spill
+ adcq 1440(%rsp), %r13
+ movq %r13, 16(%rsp) ## 8-byte Spill
+ adcq 1448(%rsp), %r12
+ movq %r12, 48(%rsp) ## 8-byte Spill
+ movq 56(%rsp), %rbx ## 8-byte Reload
+ adcq 1456(%rsp), %rbx
+ movq 40(%rsp), %r14 ## 8-byte Reload
+ adcq 1464(%rsp), %r14
+ movq 24(%rsp), %r13 ## 8-byte Reload
+ adcq 1472(%rsp), %r13
+ sbbq %rbp, %rbp
+ movq 96(%rsp), %rax ## 8-byte Reload
+ movq 8(%rax), %rdx
+ leaq 1320(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ andl $1, %ebp
+ addq 1320(%rsp), %r15
+ movq 8(%rsp), %rax ## 8-byte Reload
+ adcq 1328(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 1336(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 32(%rsp), %r12 ## 8-byte Reload
+ adcq 1344(%rsp), %r12
+ movq 16(%rsp), %rax ## 8-byte Reload
+ adcq 1352(%rsp), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rax ## 8-byte Reload
+ adcq 1360(%rsp), %rax
+ movq %rax, 48(%rsp) ## 8-byte Spill
+ adcq 1368(%rsp), %rbx
+ adcq 1376(%rsp), %r14
+ movq %r14, 40(%rsp) ## 8-byte Spill
+ adcq 1384(%rsp), %r13
+ movq %r13, 24(%rsp) ## 8-byte Spill
+ adcq 1392(%rsp), %rbp
+ sbbq %r14, %r14
+ movq %r15, %rdx
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 1240(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq %r14, %rax
+ andl $1, %eax
+ addq 1240(%rsp), %r15
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 1248(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ movq (%rsp), %r14 ## 8-byte Reload
+ adcq 1256(%rsp), %r14
+ adcq 1264(%rsp), %r12
+ movq %r12, 32(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %r12 ## 8-byte Reload
+ adcq 1272(%rsp), %r12
+ movq 48(%rsp), %r13 ## 8-byte Reload
+ adcq 1280(%rsp), %r13
+ adcq 1288(%rsp), %rbx
+ movq %rbx, 56(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %r15 ## 8-byte Reload
+ adcq 1296(%rsp), %r15
+ movq 24(%rsp), %rbx ## 8-byte Reload
+ adcq 1304(%rsp), %rbx
+ adcq 1312(%rsp), %rbp
+ adcq $0, %rax
+ movq %rax, 64(%rsp) ## 8-byte Spill
+ movq 96(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rdx
+ leaq 1160(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 8(%rsp), %rax ## 8-byte Reload
+ addq 1160(%rsp), %rax
+ adcq 1168(%rsp), %r14
+ movq %r14, (%rsp) ## 8-byte Spill
+ movq 32(%rsp), %r14 ## 8-byte Reload
+ adcq 1176(%rsp), %r14
+ adcq 1184(%rsp), %r12
+ movq %r12, 16(%rsp) ## 8-byte Spill
+ movq %r13, %r12
+ adcq 1192(%rsp), %r12
+ movq 56(%rsp), %rcx ## 8-byte Reload
+ adcq 1200(%rsp), %rcx
+ movq %rcx, 56(%rsp) ## 8-byte Spill
+ adcq 1208(%rsp), %r15
+ movq %r15, %r13
+ adcq 1216(%rsp), %rbx
+ movq %rbx, 24(%rsp) ## 8-byte Spill
+ adcq 1224(%rsp), %rbp
+ movq 64(%rsp), %rcx ## 8-byte Reload
+ adcq 1232(%rsp), %rcx
+ movq %rcx, 64(%rsp) ## 8-byte Spill
+ sbbq %r15, %r15
+ movq %rax, %rdx
+ movq %rax, %rbx
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 1080(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq %r15, %rax
+ andl $1, %eax
+ addq 1080(%rsp), %rbx
+ movq (%rsp), %rcx ## 8-byte Reload
+ adcq 1088(%rsp), %rcx
+ movq %rcx, (%rsp) ## 8-byte Spill
+ movq %r14, %r15
+ adcq 1096(%rsp), %r15
+ movq 16(%rsp), %r14 ## 8-byte Reload
+ adcq 1104(%rsp), %r14
+ movq %r12, %rbx
+ adcq 1112(%rsp), %rbx
+ movq 56(%rsp), %rcx ## 8-byte Reload
+ adcq 1120(%rsp), %rcx
+ movq %rcx, 56(%rsp) ## 8-byte Spill
+ adcq 1128(%rsp), %r13
+ movq %r13, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %r13 ## 8-byte Reload
+ adcq 1136(%rsp), %r13
+ adcq 1144(%rsp), %rbp
+ movq 64(%rsp), %r12 ## 8-byte Reload
+ adcq 1152(%rsp), %r12
+ adcq $0, %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq 96(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rdx
+ leaq 1000(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq (%rsp), %rax ## 8-byte Reload
+ addq 1000(%rsp), %rax
+ adcq 1008(%rsp), %r15
+ movq %r15, 32(%rsp) ## 8-byte Spill
+ adcq 1016(%rsp), %r14
+ movq %r14, %r15
+ adcq 1024(%rsp), %rbx
+ movq %rbx, 48(%rsp) ## 8-byte Spill
+ movq 56(%rsp), %r14 ## 8-byte Reload
+ adcq 1032(%rsp), %r14
+ movq 40(%rsp), %rcx ## 8-byte Reload
+ adcq 1040(%rsp), %rcx
+ movq %rcx, 40(%rsp) ## 8-byte Spill
+ adcq 1048(%rsp), %r13
+ movq %r13, 24(%rsp) ## 8-byte Spill
+ adcq 1056(%rsp), %rbp
+ adcq 1064(%rsp), %r12
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 1072(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ sbbq %rbx, %rbx
+ movq %rax, %rdx
+ movq %rax, %r13
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 920(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ andl $1, %ebx
+ movq %rbx, %rax
+ addq 920(%rsp), %r13
+ movq 32(%rsp), %rcx ## 8-byte Reload
+ adcq 928(%rsp), %rcx
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ adcq 936(%rsp), %r15
+ movq %r15, 16(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %r15 ## 8-byte Reload
+ adcq 944(%rsp), %r15
+ movq %r14, %r13
+ adcq 952(%rsp), %r13
+ movq 40(%rsp), %r14 ## 8-byte Reload
+ adcq 960(%rsp), %r14
+ movq 24(%rsp), %rbx ## 8-byte Reload
+ adcq 968(%rsp), %rbx
+ adcq 976(%rsp), %rbp
+ adcq 984(%rsp), %r12
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 992(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ adcq $0, %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 96(%rsp), %rax ## 8-byte Reload
+ movq 32(%rax), %rdx
+ leaq 840(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 32(%rsp), %rax ## 8-byte Reload
+ addq 840(%rsp), %rax
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ adcq 848(%rsp), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ adcq 856(%rsp), %r15
+ adcq 864(%rsp), %r13
+ movq %r13, 56(%rsp) ## 8-byte Spill
+ adcq 872(%rsp), %r14
+ movq %r14, 40(%rsp) ## 8-byte Spill
+ adcq 880(%rsp), %rbx
+ movq %rbx, 24(%rsp) ## 8-byte Spill
+ adcq 888(%rsp), %rbp
+ adcq 896(%rsp), %r12
+ movq 8(%rsp), %r13 ## 8-byte Reload
+ adcq 904(%rsp), %r13
+ movq (%rsp), %rcx ## 8-byte Reload
+ adcq 912(%rsp), %rcx
+ movq %rcx, (%rsp) ## 8-byte Spill
+ sbbq %rbx, %rbx
+ movq %rax, %rdx
+ movq %rax, %r14
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 760(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ andl $1, %ebx
+ movq %rbx, %rax
+ addq 760(%rsp), %r14
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ adcq 768(%rsp), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ adcq 776(%rsp), %r15
+ movq 56(%rsp), %r14 ## 8-byte Reload
+ adcq 784(%rsp), %r14
+ movq 40(%rsp), %rcx ## 8-byte Reload
+ adcq 792(%rsp), %rcx
+ movq %rcx, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rcx ## 8-byte Reload
+ adcq 800(%rsp), %rcx
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq 808(%rsp), %rbp
+ movq %r12, %rbx
+ adcq 816(%rsp), %rbx
+ movq %r13, %r12
+ adcq 824(%rsp), %r12
+ movq (%rsp), %r13 ## 8-byte Reload
+ adcq 832(%rsp), %r13
+ adcq $0, %rax
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ movq 96(%rsp), %rax ## 8-byte Reload
+ movq 40(%rax), %rdx
+ leaq 680(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 16(%rsp), %rax ## 8-byte Reload
+ addq 680(%rsp), %rax
+ adcq 688(%rsp), %r15
+ movq %r15, 48(%rsp) ## 8-byte Spill
+ adcq 696(%rsp), %r14
+ movq %r14, 56(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %rcx ## 8-byte Reload
+ adcq 704(%rsp), %rcx
+ movq %rcx, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %r15 ## 8-byte Reload
+ adcq 712(%rsp), %r15
+ adcq 720(%rsp), %rbp
+ adcq 728(%rsp), %rbx
+ movq %rbx, 64(%rsp) ## 8-byte Spill
+ adcq 736(%rsp), %r12
+ movq %r12, 8(%rsp) ## 8-byte Spill
+ adcq 744(%rsp), %r13
+ movq %r13, (%rsp) ## 8-byte Spill
+ movq 32(%rsp), %r13 ## 8-byte Reload
+ adcq 752(%rsp), %r13
+ sbbq %r14, %r14
+ movq %rax, %rdx
+ movq %rax, %rbx
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 600(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ andl $1, %r14d
+ addq 600(%rsp), %rbx
+ movq 48(%rsp), %rax ## 8-byte Reload
+ adcq 608(%rsp), %rax
+ movq %rax, 48(%rsp) ## 8-byte Spill
+ movq 56(%rsp), %rax ## 8-byte Reload
+ adcq 616(%rsp), %rax
+ movq %rax, 56(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %rbx ## 8-byte Reload
+ adcq 624(%rsp), %rbx
+ adcq 632(%rsp), %r15
+ movq %r15, 24(%rsp) ## 8-byte Spill
+ adcq 640(%rsp), %rbp
+ movq 64(%rsp), %r12 ## 8-byte Reload
+ adcq 648(%rsp), %r12
+ movq 8(%rsp), %rax ## 8-byte Reload
+ adcq 656(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq (%rsp), %r15 ## 8-byte Reload
+ adcq 664(%rsp), %r15
+ adcq 672(%rsp), %r13
+ adcq $0, %r14
+ movq %r14, 16(%rsp) ## 8-byte Spill
+ movq 96(%rsp), %rax ## 8-byte Reload
+ movq 48(%rax), %rdx
+ leaq 520(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 48(%rsp), %rax ## 8-byte Reload
+ addq 520(%rsp), %rax
+ movq 56(%rsp), %r14 ## 8-byte Reload
+ adcq 528(%rsp), %r14
+ adcq 536(%rsp), %rbx
+ movq %rbx, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rcx ## 8-byte Reload
+ adcq 544(%rsp), %rcx
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq 552(%rsp), %rbp
+ adcq 560(%rsp), %r12
+ movq %r12, 64(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r12 ## 8-byte Reload
+ adcq 568(%rsp), %r12
+ adcq 576(%rsp), %r15
+ movq %r15, (%rsp) ## 8-byte Spill
+ adcq 584(%rsp), %r13
+ movq %r13, 32(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %r15 ## 8-byte Reload
+ adcq 592(%rsp), %r15
+ sbbq %rbx, %rbx
+ movq %rax, %rdx
+ movq %rax, %r13
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 440(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ andl $1, %ebx
+ movq %rbx, %rax
+ addq 440(%rsp), %r13
+ adcq 448(%rsp), %r14
+ movq %r14, 56(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %r14 ## 8-byte Reload
+ adcq 456(%rsp), %r14
+ movq 24(%rsp), %rbx ## 8-byte Reload
+ adcq 464(%rsp), %rbx
+ adcq 472(%rsp), %rbp
+ movq %rbp, 104(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rcx ## 8-byte Reload
+ adcq 480(%rsp), %rcx
+ movq %rcx, 64(%rsp) ## 8-byte Spill
+ adcq 488(%rsp), %r12
+ movq %r12, 8(%rsp) ## 8-byte Spill
+ movq (%rsp), %rbp ## 8-byte Reload
+ adcq 496(%rsp), %rbp
+ movq 32(%rsp), %r12 ## 8-byte Reload
+ adcq 504(%rsp), %r12
+ adcq 512(%rsp), %r15
+ movq %r15, %r13
+ adcq $0, %rax
+ movq %rax, 48(%rsp) ## 8-byte Spill
+ movq 96(%rsp), %rax ## 8-byte Reload
+ movq 56(%rax), %rdx
+ leaq 360(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 56(%rsp), %rax ## 8-byte Reload
+ addq 360(%rsp), %rax
+ adcq 368(%rsp), %r14
+ adcq 376(%rsp), %rbx
+ movq %rbx, 24(%rsp) ## 8-byte Spill
+ movq 104(%rsp), %rcx ## 8-byte Reload
+ adcq 384(%rsp), %rcx
+ movq %rcx, 104(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rbx ## 8-byte Reload
+ adcq 392(%rsp), %rbx
+ movq 8(%rsp), %r15 ## 8-byte Reload
+ adcq 400(%rsp), %r15
+ adcq 408(%rsp), %rbp
+ movq %rbp, (%rsp) ## 8-byte Spill
+ adcq 416(%rsp), %r12
+ movq %r12, %rbp
+ adcq 424(%rsp), %r13
+ movq %r13, 16(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rcx ## 8-byte Reload
+ adcq 432(%rsp), %rcx
+ movq %rcx, 48(%rsp) ## 8-byte Spill
+ sbbq %r13, %r13
+ movq %rax, %rdx
+ movq %rax, %r12
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 280(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ andl $1, %r13d
+ addq 280(%rsp), %r12
+ adcq 288(%rsp), %r14
+ movq %r14, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rax ## 8-byte Reload
+ adcq 296(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ movq 104(%rsp), %r14 ## 8-byte Reload
+ adcq 304(%rsp), %r14
+ adcq 312(%rsp), %rbx
+ movq %rbx, 64(%rsp) ## 8-byte Spill
+ adcq 320(%rsp), %r15
+ movq %r15, 8(%rsp) ## 8-byte Spill
+ movq (%rsp), %rbx ## 8-byte Reload
+ adcq 328(%rsp), %rbx
+ adcq 336(%rsp), %rbp
+ movq %rbp, 32(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %r12 ## 8-byte Reload
+ adcq 344(%rsp), %r12
+ movq 48(%rsp), %rbp ## 8-byte Reload
+ adcq 352(%rsp), %rbp
+ adcq $0, %r13
+ movq 96(%rsp), %rax ## 8-byte Reload
+ movq 64(%rax), %rdx
+ leaq 200(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 40(%rsp), %rax ## 8-byte Reload
+ addq 200(%rsp), %rax
+ movq 24(%rsp), %r15 ## 8-byte Reload
+ adcq 208(%rsp), %r15
+ adcq 216(%rsp), %r14
+ movq %r14, 104(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %r14 ## 8-byte Reload
+ adcq 224(%rsp), %r14
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 232(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ adcq 240(%rsp), %rbx
+ movq %rbx, (%rsp) ## 8-byte Spill
+ movq 32(%rsp), %rcx ## 8-byte Reload
+ adcq 248(%rsp), %rcx
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ adcq 256(%rsp), %r12
+ movq %r12, 16(%rsp) ## 8-byte Spill
+ adcq 264(%rsp), %rbp
+ movq %rbp, 48(%rsp) ## 8-byte Spill
+ adcq 272(%rsp), %r13
+ sbbq %rbx, %rbx
+ movq 80(%rsp), %rdx ## 8-byte Reload
+ imulq %rax, %rdx
+ movq %rax, %r12
+ leaq 120(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ andl $1, %ebx
+ addq 120(%rsp), %r12
+ adcq 128(%rsp), %r15
+ movq 104(%rsp), %rbp ## 8-byte Reload
+ adcq 136(%rsp), %rbp
+ movq %r14, %rcx
+ adcq 144(%rsp), %rcx
+ movq %rcx, 64(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r8 ## 8-byte Reload
+ adcq 152(%rsp), %r8
+ movq %r8, 8(%rsp) ## 8-byte Spill
+ movq (%rsp), %r9 ## 8-byte Reload
+ adcq 160(%rsp), %r9
+ movq %r9, (%rsp) ## 8-byte Spill
+ movq 32(%rsp), %r10 ## 8-byte Reload
+ adcq 168(%rsp), %r10
+ movq %r10, 32(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %rdi ## 8-byte Reload
+ adcq 176(%rsp), %rdi
+ movq %rdi, 16(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %r14 ## 8-byte Reload
+ adcq 184(%rsp), %r14
+ adcq 192(%rsp), %r13
+ adcq $0, %rbx
+ movq %r15, %rsi
+ movq %r15, %r12
+ movq 72(%rsp), %rdx ## 8-byte Reload
+ subq (%rdx), %rsi
+ movq %rbp, %rax
+ movq %rbp, %r15
+ sbbq 8(%rdx), %rax
+ movq %rcx, %rbp
+ sbbq 16(%rdx), %rbp
+ movq %r8, %rcx
+ sbbq 24(%rdx), %rcx
+ movq %r9, %r8
+ sbbq 32(%rdx), %r8
+ movq %r10, %r11
+ sbbq 40(%rdx), %r11
+ movq %rdi, %r10
+ sbbq 48(%rdx), %r10
+ movq %r14, %rdi
+ sbbq 56(%rdx), %rdi
+ movq %r13, %r9
+ sbbq 64(%rdx), %r9
+ sbbq $0, %rbx
+ andl $1, %ebx
+ cmovneq %r13, %r9
+ testb %bl, %bl
+ cmovneq %r12, %rsi
+ movq 112(%rsp), %rbx ## 8-byte Reload
+ movq %rsi, (%rbx)
+ cmovneq %r15, %rax
+ movq %rax, 8(%rbx)
+ cmovneq 64(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 16(%rbx)
+ cmovneq 8(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 24(%rbx)
+ cmovneq (%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, 32(%rbx)
+ cmovneq 32(%rsp), %r11 ## 8-byte Folded Reload
+ movq %r11, 40(%rbx)
+ cmovneq 16(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r10, 48(%rbx)
+ cmovneq %r14, %rdi
+ movq %rdi, 56(%rbx)
+ movq %r9, 64(%rbx)
+ addq $1560, %rsp ## imm = 0x618
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montNF9L
+ .p2align 4, 0x90
+_mcl_fp_montNF9L: ## @mcl_fp_montNF9L
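+## 9-limb (576-bit) Montgomery multiplication, mcl's "NF" variant: each word of y is
+## folded in with l_mulPv576x64, a quotient word derived from the constant stored at
+## -8(%rcx) drives the reduction pass, and the final correction picks the reduced or
+## unreduced result by the sign of the trial subtraction (cmovs) rather than a carry test.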
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $1560, %rsp ## imm = 0x618
+ movq %rcx, 72(%rsp) ## 8-byte Spill
+ movq %rdx, 80(%rsp) ## 8-byte Spill
+ movq %rsi, 88(%rsp) ## 8-byte Spill
+ movq %rdi, 112(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %rbx
+ movq %rbx, 96(%rsp) ## 8-byte Spill
+ movq (%rdx), %rdx
+ leaq 1480(%rsp), %rdi
+ callq l_mulPv576x64
+ movq 1480(%rsp), %r12
+ movq 1488(%rsp), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq %r12, %rdx
+ imulq %rbx, %rdx
+ movq 1552(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ movq 1544(%rsp), %r13
+ movq 1536(%rsp), %rax
+ movq %rax, 48(%rsp) ## 8-byte Spill
+ movq 1528(%rsp), %rax
+ movq %rax, 64(%rsp) ## 8-byte Spill
+ movq 1520(%rsp), %r14
+ movq 1512(%rsp), %r15
+ movq 1504(%rsp), %rbx
+ movq 1496(%rsp), %rbp
+ leaq 1400(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 1400(%rsp), %r12
+ movq 16(%rsp), %rax ## 8-byte Reload
+ adcq 1408(%rsp), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ adcq 1416(%rsp), %rbp
+ movq %rbp, 104(%rsp) ## 8-byte Spill
+ adcq 1424(%rsp), %rbx
+ movq %rbx, (%rsp) ## 8-byte Spill
+ adcq 1432(%rsp), %r15
+ movq %r15, 8(%rsp) ## 8-byte Spill
+ adcq 1440(%rsp), %r14
+ movq %r14, 32(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rbx ## 8-byte Reload
+ adcq 1448(%rsp), %rbx
+ movq 48(%rsp), %r12 ## 8-byte Reload
+ adcq 1456(%rsp), %r12
+ adcq 1464(%rsp), %r13
+ movq %r13, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq 1472(%rsp), %rbp
+ movq 80(%rsp), %rax ## 8-byte Reload
+ movq 8(%rax), %rdx
+ leaq 1320(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 1392(%rsp), %rax
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ addq 1320(%rsp), %rcx
+ movq 104(%rsp), %r15 ## 8-byte Reload
+ adcq 1328(%rsp), %r15
+ movq (%rsp), %r14 ## 8-byte Reload
+ adcq 1336(%rsp), %r14
+ movq 8(%rsp), %rdx ## 8-byte Reload
+ adcq 1344(%rsp), %rdx
+ movq %rdx, 8(%rsp) ## 8-byte Spill
+ movq 32(%rsp), %r13 ## 8-byte Reload
+ adcq 1352(%rsp), %r13
+ adcq 1360(%rsp), %rbx
+ movq %rbx, 64(%rsp) ## 8-byte Spill
+ adcq 1368(%rsp), %r12
+ movq %r12, 48(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %rdx ## 8-byte Reload
+ adcq 1376(%rsp), %rdx
+ movq %rdx, 40(%rsp) ## 8-byte Spill
+ adcq 1384(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ adcq $0, %rax
+ movq %rax, %rbp
+ movq %rcx, %rdx
+ movq %rcx, %rbx
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 1240(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 1240(%rsp), %rbx
+ adcq 1248(%rsp), %r15
+ movq %r15, 104(%rsp) ## 8-byte Spill
+ adcq 1256(%rsp), %r14
+ movq %r14, (%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r12 ## 8-byte Reload
+ adcq 1264(%rsp), %r12
+ adcq 1272(%rsp), %r13
+ movq %r13, %r14
+ movq 64(%rsp), %r13 ## 8-byte Reload
+ adcq 1280(%rsp), %r13
+ movq 48(%rsp), %rbx ## 8-byte Reload
+ adcq 1288(%rsp), %rbx
+ movq 40(%rsp), %r15 ## 8-byte Reload
+ adcq 1296(%rsp), %r15
+ movq 24(%rsp), %rax ## 8-byte Reload
+ adcq 1304(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ adcq 1312(%rsp), %rbp
+ movq %rbp, 56(%rsp) ## 8-byte Spill
+ movq 80(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rdx
+ leaq 1160(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 1232(%rsp), %rax
+ movq 104(%rsp), %rcx ## 8-byte Reload
+ addq 1160(%rsp), %rcx
+ movq (%rsp), %rbp ## 8-byte Reload
+ adcq 1168(%rsp), %rbp
+ adcq 1176(%rsp), %r12
+ movq %r12, 8(%rsp) ## 8-byte Spill
+ adcq 1184(%rsp), %r14
+ adcq 1192(%rsp), %r13
+ movq %r13, %r12
+ adcq 1200(%rsp), %rbx
+ movq %rbx, 48(%rsp) ## 8-byte Spill
+ adcq 1208(%rsp), %r15
+ movq %r15, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rbx ## 8-byte Reload
+ adcq 1216(%rsp), %rbx
+ movq 56(%rsp), %rdx ## 8-byte Reload
+ adcq 1224(%rsp), %rdx
+ movq %rdx, 56(%rsp) ## 8-byte Spill
+ movq %rax, %r15
+ adcq $0, %r15
+ movq %rcx, %rdx
+ movq %rcx, %r13
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 1080(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 1080(%rsp), %r13
+ adcq 1088(%rsp), %rbp
+ movq %rbp, (%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r13 ## 8-byte Reload
+ adcq 1096(%rsp), %r13
+ adcq 1104(%rsp), %r14
+ adcq 1112(%rsp), %r12
+ movq %r12, 64(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %r12 ## 8-byte Reload
+ adcq 1120(%rsp), %r12
+ movq 40(%rsp), %rbp ## 8-byte Reload
+ adcq 1128(%rsp), %rbp
+ adcq 1136(%rsp), %rbx
+ movq %rbx, 24(%rsp) ## 8-byte Spill
+ movq 56(%rsp), %rbx ## 8-byte Reload
+ adcq 1144(%rsp), %rbx
+ adcq 1152(%rsp), %r15
+ movq 80(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rdx
+ leaq 1000(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 1072(%rsp), %rax
+ movq (%rsp), %rcx ## 8-byte Reload
+ addq 1000(%rsp), %rcx
+ adcq 1008(%rsp), %r13
+ movq %r13, 8(%rsp) ## 8-byte Spill
+ adcq 1016(%rsp), %r14
+ movq %r14, 32(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %r14 ## 8-byte Reload
+ adcq 1024(%rsp), %r14
+ adcq 1032(%rsp), %r12
+ adcq 1040(%rsp), %rbp
+ movq %rbp, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %r13 ## 8-byte Reload
+ adcq 1048(%rsp), %r13
+ adcq 1056(%rsp), %rbx
+ movq %rbx, 56(%rsp) ## 8-byte Spill
+ adcq 1064(%rsp), %r15
+ movq %r15, 16(%rsp) ## 8-byte Spill
+ adcq $0, %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq %rcx, %rdx
+ movq %rcx, %rbx
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 920(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 920(%rsp), %rbx
+ movq 8(%rsp), %rax ## 8-byte Reload
+ adcq 928(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq 32(%rsp), %rbp ## 8-byte Reload
+ adcq 936(%rsp), %rbp
+ movq %r14, %rbx
+ adcq 944(%rsp), %rbx
+ adcq 952(%rsp), %r12
+ movq 40(%rsp), %rax ## 8-byte Reload
+ adcq 960(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ adcq 968(%rsp), %r13
+ movq %r13, %r15
+ movq 56(%rsp), %r13 ## 8-byte Reload
+ adcq 976(%rsp), %r13
+ movq 16(%rsp), %r14 ## 8-byte Reload
+ adcq 984(%rsp), %r14
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 992(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 80(%rsp), %rax ## 8-byte Reload
+ movq 32(%rax), %rdx
+ leaq 840(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 912(%rsp), %rax
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ addq 840(%rsp), %rcx
+ adcq 848(%rsp), %rbp
+ movq %rbp, 32(%rsp) ## 8-byte Spill
+ adcq 856(%rsp), %rbx
+ movq %rbx, 64(%rsp) ## 8-byte Spill
+ adcq 864(%rsp), %r12
+ movq 40(%rsp), %rbp ## 8-byte Reload
+ adcq 872(%rsp), %rbp
+ adcq 880(%rsp), %r15
+ movq %r15, 24(%rsp) ## 8-byte Spill
+ adcq 888(%rsp), %r13
+ adcq 896(%rsp), %r14
+ movq %r14, 16(%rsp) ## 8-byte Spill
+ movq (%rsp), %rdx ## 8-byte Reload
+ adcq 904(%rsp), %rdx
+ movq %rdx, (%rsp) ## 8-byte Spill
+ adcq $0, %rax
+ movq %rax, %r14
+ movq %rcx, %rdx
+ movq %rcx, %rbx
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 760(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 760(%rsp), %rbx
+ movq 32(%rsp), %rax ## 8-byte Reload
+ adcq 768(%rsp), %rax
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %r15 ## 8-byte Reload
+ adcq 776(%rsp), %r15
+ adcq 784(%rsp), %r12
+ movq %r12, 48(%rsp) ## 8-byte Spill
+ movq %rbp, %rbx
+ adcq 792(%rsp), %rbx
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq 800(%rsp), %rbp
+ adcq 808(%rsp), %r13
+ movq 16(%rsp), %rax ## 8-byte Reload
+ adcq 816(%rsp), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq (%rsp), %r12 ## 8-byte Reload
+ adcq 824(%rsp), %r12
+ adcq 832(%rsp), %r14
+ movq 80(%rsp), %rax ## 8-byte Reload
+ movq 40(%rax), %rdx
+ leaq 680(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 752(%rsp), %rcx
+ movq 32(%rsp), %rax ## 8-byte Reload
+ addq 680(%rsp), %rax
+ adcq 688(%rsp), %r15
+ movq %r15, 64(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rdx ## 8-byte Reload
+ adcq 696(%rsp), %rdx
+ movq %rdx, 48(%rsp) ## 8-byte Spill
+ adcq 704(%rsp), %rbx
+ movq %rbx, 40(%rsp) ## 8-byte Spill
+ adcq 712(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ adcq 720(%rsp), %r13
+ movq %r13, %r15
+ movq 16(%rsp), %rbx ## 8-byte Reload
+ adcq 728(%rsp), %rbx
+ adcq 736(%rsp), %r12
+ movq %r12, (%rsp) ## 8-byte Spill
+ adcq 744(%rsp), %r14
+ movq %r14, 32(%rsp) ## 8-byte Spill
+ adcq $0, %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ movq %rax, %rdx
+ movq %rax, %r13
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 600(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 600(%rsp), %r13
+ movq 64(%rsp), %r13 ## 8-byte Reload
+ adcq 608(%rsp), %r13
+ movq 48(%rsp), %r12 ## 8-byte Reload
+ adcq 616(%rsp), %r12
+ movq 40(%rsp), %rbp ## 8-byte Reload
+ adcq 624(%rsp), %rbp
+ movq 24(%rsp), %rax ## 8-byte Reload
+ adcq 632(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ adcq 640(%rsp), %r15
+ movq %r15, 56(%rsp) ## 8-byte Spill
+ adcq 648(%rsp), %rbx
+ movq %rbx, 16(%rsp) ## 8-byte Spill
+ movq (%rsp), %r14 ## 8-byte Reload
+ adcq 656(%rsp), %r14
+ movq 32(%rsp), %rbx ## 8-byte Reload
+ adcq 664(%rsp), %rbx
+ movq 8(%rsp), %r15 ## 8-byte Reload
+ adcq 672(%rsp), %r15
+ movq 80(%rsp), %rax ## 8-byte Reload
+ movq 48(%rax), %rdx
+ leaq 520(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 592(%rsp), %rcx
+ movq %r13, %rax
+ addq 520(%rsp), %rax
+ adcq 528(%rsp), %r12
+ movq %r12, 48(%rsp) ## 8-byte Spill
+ movq %rbp, %r12
+ adcq 536(%rsp), %r12
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq 544(%rsp), %rbp
+ movq 56(%rsp), %rdx ## 8-byte Reload
+ adcq 552(%rsp), %rdx
+ movq %rdx, 56(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %rdx ## 8-byte Reload
+ adcq 560(%rsp), %rdx
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ adcq 568(%rsp), %r14
+ movq %r14, (%rsp) ## 8-byte Spill
+ adcq 576(%rsp), %rbx
+ movq %rbx, 32(%rsp) ## 8-byte Spill
+ adcq 584(%rsp), %r15
+ movq %r15, 8(%rsp) ## 8-byte Spill
+ adcq $0, %rcx
+ movq %rcx, %r13
+ movq %rax, %rdx
+ movq %rax, %r14
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 440(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 440(%rsp), %r14
+ movq 48(%rsp), %rax ## 8-byte Reload
+ adcq 448(%rsp), %rax
+ movq %rax, 48(%rsp) ## 8-byte Spill
+ adcq 456(%rsp), %r12
+ adcq 464(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ movq 56(%rsp), %r14 ## 8-byte Reload
+ adcq 472(%rsp), %r14
+ movq 16(%rsp), %r15 ## 8-byte Reload
+ adcq 480(%rsp), %r15
+ movq (%rsp), %rbp ## 8-byte Reload
+ adcq 488(%rsp), %rbp
+ movq 32(%rsp), %rbx ## 8-byte Reload
+ adcq 496(%rsp), %rbx
+ movq 8(%rsp), %rax ## 8-byte Reload
+ adcq 504(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ adcq 512(%rsp), %r13
+ movq 80(%rsp), %rax ## 8-byte Reload
+ movq 56(%rax), %rdx
+ leaq 360(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 432(%rsp), %rcx
+ movq 48(%rsp), %rax ## 8-byte Reload
+ addq 360(%rsp), %rax
+ adcq 368(%rsp), %r12
+ movq %r12, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rdx ## 8-byte Reload
+ adcq 376(%rsp), %rdx
+ movq %rdx, 24(%rsp) ## 8-byte Spill
+ adcq 384(%rsp), %r14
+ movq %r14, 56(%rsp) ## 8-byte Spill
+ adcq 392(%rsp), %r15
+ movq %r15, 16(%rsp) ## 8-byte Spill
+ adcq 400(%rsp), %rbp
+ movq %rbp, (%rsp) ## 8-byte Spill
+ adcq 408(%rsp), %rbx
+ movq %rbx, 32(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r14 ## 8-byte Reload
+ adcq 416(%rsp), %r14
+ adcq 424(%rsp), %r13
+ movq %r13, %r15
+ adcq $0, %rcx
+ movq %rcx, 48(%rsp) ## 8-byte Spill
+ movq %rax, %rdx
+ movq %rax, %r12
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 280(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 280(%rsp), %r12
+ movq 40(%rsp), %rax ## 8-byte Reload
+ adcq 288(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq 296(%rsp), %rbp
+ movq 56(%rsp), %rax ## 8-byte Reload
+ adcq 304(%rsp), %rax
+ movq %rax, 56(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %r13 ## 8-byte Reload
+ adcq 312(%rsp), %r13
+ movq (%rsp), %r12 ## 8-byte Reload
+ adcq 320(%rsp), %r12
+ movq 32(%rsp), %rbx ## 8-byte Reload
+ adcq 328(%rsp), %rbx
+ adcq 336(%rsp), %r14
+ movq %r14, 8(%rsp) ## 8-byte Spill
+ adcq 344(%rsp), %r15
+ movq %r15, 64(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %r14 ## 8-byte Reload
+ adcq 352(%rsp), %r14
+ movq 80(%rsp), %rax ## 8-byte Reload
+ movq 64(%rax), %rdx
+ leaq 200(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 272(%rsp), %rcx
+ movq 40(%rsp), %rax ## 8-byte Reload
+ addq 200(%rsp), %rax
+ adcq 208(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ movq 56(%rsp), %rbp ## 8-byte Reload
+ adcq 216(%rsp), %rbp
+ adcq 224(%rsp), %r13
+ movq %r13, 16(%rsp) ## 8-byte Spill
+ adcq 232(%rsp), %r12
+ movq %r12, (%rsp) ## 8-byte Spill
+ adcq 240(%rsp), %rbx
+ movq %rbx, 32(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r15 ## 8-byte Reload
+ adcq 248(%rsp), %r15
+ movq 64(%rsp), %r12 ## 8-byte Reload
+ adcq 256(%rsp), %r12
+ adcq 264(%rsp), %r14
+ adcq $0, %rcx
+ movq %rcx, 40(%rsp) ## 8-byte Spill
+ movq 96(%rsp), %rdx ## 8-byte Reload
+ imulq %rax, %rdx
+ movq %rax, %rbx
+ leaq 120(%rsp), %rdi
+ movq 72(%rsp), %r13 ## 8-byte Reload
+ movq %r13, %rsi
+ callq l_mulPv576x64
+ addq 120(%rsp), %rbx
+ movq 24(%rsp), %rcx ## 8-byte Reload
+ adcq 128(%rsp), %rcx
+ movq %rbp, %rdx
+ adcq 136(%rsp), %rdx
+ movq 16(%rsp), %rsi ## 8-byte Reload
+ adcq 144(%rsp), %rsi
+ movq %rsi, 16(%rsp) ## 8-byte Spill
+ movq (%rsp), %rdi ## 8-byte Reload
+ adcq 152(%rsp), %rdi
+ movq %rdi, (%rsp) ## 8-byte Spill
+ movq 32(%rsp), %rbx ## 8-byte Reload
+ adcq 160(%rsp), %rbx
+ movq %rbx, 32(%rsp) ## 8-byte Spill
+ movq %r15, %r8
+ adcq 168(%rsp), %r8
+ movq %r8, 8(%rsp) ## 8-byte Spill
+ movq %r12, %r15
+ adcq 176(%rsp), %r15
+ adcq 184(%rsp), %r14
+ movq 40(%rsp), %r9 ## 8-byte Reload
+ adcq 192(%rsp), %r9
+ movq %rcx, %rax
+ movq %rcx, %r11
+ movq %r13, %rbp
+ subq (%rbp), %rax
+ movq %rdx, %rcx
+ movq %rdx, %r12
+ sbbq 8(%rbp), %rcx
+ movq %rsi, %rdx
+ sbbq 16(%rbp), %rdx
+ movq %rdi, %rsi
+ sbbq 24(%rbp), %rsi
+ movq %rbx, %rdi
+ sbbq 32(%rbp), %rdi
+ movq %r8, %r10
+ sbbq 40(%rbp), %r10
+ movq %r15, %r13
+ sbbq 48(%rbp), %r13
+ movq %r14, %r8
+ sbbq 56(%rbp), %r8
+ movq %rbp, %rbx
+ movq %r9, %rbp
+ sbbq 64(%rbx), %rbp
+ movq %rbp, %rbx
+ sarq $63, %rbx
+ cmovsq %r11, %rax
+ movq 112(%rsp), %rbx ## 8-byte Reload
+ movq %rax, (%rbx)
+ cmovsq %r12, %rcx
+ movq %rcx, 8(%rbx)
+ cmovsq 16(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 16(%rbx)
+ cmovsq (%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 24(%rbx)
+ cmovsq 32(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 32(%rbx)
+ cmovsq 8(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r10, 40(%rbx)
+ cmovsq %r15, %r13
+ movq %r13, 48(%rbx)
+ cmovsq %r14, %r8
+ movq %r8, 56(%rbx)
+ cmovsq %r9, %rbp
+ movq %rbp, 64(%rbx)
+ addq $1560, %rsp ## imm = 0x618
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montRed9L
+ .p2align 4, 0x90
+_mcl_fp_montRed9L: ## @mcl_fp_montRed9L
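+## Montgomery reduction: folds the 18-limb (1152-bit) value at %rsi down to 9 limbs at
+## %rdi, one quotient word per pass (derived from the constant at -8(%rdx)), finishing
+## with a conditional subtraction of the modulus.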
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $936, %rsp ## imm = 0x3A8
+ movq %rdx, %rax
+ movq %rdi, 208(%rsp) ## 8-byte Spill
+ movq -8(%rax), %rcx
+ movq %rcx, 96(%rsp) ## 8-byte Spill
+ movq (%rsi), %r14
+ movq 8(%rsi), %rdx
+ movq %rdx, (%rsp) ## 8-byte Spill
+ movq %r14, %rdx
+ imulq %rcx, %rdx
+ movq 136(%rsi), %rcx
+ movq %rcx, 88(%rsp) ## 8-byte Spill
+ movq 128(%rsi), %rcx
+ movq %rcx, 56(%rsp) ## 8-byte Spill
+ movq 120(%rsi), %rcx
+ movq %rcx, 80(%rsp) ## 8-byte Spill
+ movq 112(%rsi), %rcx
+ movq %rcx, 72(%rsp) ## 8-byte Spill
+ movq 104(%rsi), %rcx
+ movq %rcx, 48(%rsp) ## 8-byte Spill
+ movq 96(%rsi), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ movq 88(%rsi), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ movq 80(%rsi), %rcx
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ movq 72(%rsi), %r12
+ movq 64(%rsi), %rcx
+ movq %rcx, 40(%rsp) ## 8-byte Spill
+ movq 56(%rsi), %rcx
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ movq 48(%rsi), %rcx
+ movq %rcx, 64(%rsp) ## 8-byte Spill
+ movq 40(%rsi), %rbp
+ movq 32(%rsi), %rbx
+ movq 24(%rsi), %r13
+ movq 16(%rsi), %r15
+ movq %rax, %rcx
+ movq (%rcx), %rax
+ movq %rax, 144(%rsp) ## 8-byte Spill
+ movq 64(%rcx), %rax
+ movq %rax, 200(%rsp) ## 8-byte Spill
+ movq 56(%rcx), %rax
+ movq %rax, 192(%rsp) ## 8-byte Spill
+ movq 48(%rcx), %rax
+ movq %rax, 184(%rsp) ## 8-byte Spill
+ movq 40(%rcx), %rax
+ movq %rax, 176(%rsp) ## 8-byte Spill
+ movq 32(%rcx), %rax
+ movq %rax, 168(%rsp) ## 8-byte Spill
+ movq 24(%rcx), %rax
+ movq %rax, 160(%rsp) ## 8-byte Spill
+ movq 16(%rcx), %rax
+ movq %rax, 152(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %rax
+ movq %rax, 136(%rsp) ## 8-byte Spill
+ movq %rcx, %rsi
+ movq %rsi, 104(%rsp) ## 8-byte Spill
+ leaq 856(%rsp), %rdi
+ callq l_mulPv576x64
+ addq 856(%rsp), %r14
+ movq (%rsp), %rcx ## 8-byte Reload
+ adcq 864(%rsp), %rcx
+ adcq 872(%rsp), %r15
+ adcq 880(%rsp), %r13
+ adcq 888(%rsp), %rbx
+ movq %rbx, 120(%rsp) ## 8-byte Spill
+ adcq 896(%rsp), %rbp
+ movq %rbp, 112(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rax ## 8-byte Reload
+ adcq 904(%rsp), %rax
+ movq %rax, 64(%rsp) ## 8-byte Spill
+ movq 32(%rsp), %rax ## 8-byte Reload
+ adcq 912(%rsp), %rax
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %rax ## 8-byte Reload
+ adcq 920(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ adcq 928(%rsp), %r12
+ movq %r12, (%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq $0, %rbp
+ adcq $0, 8(%rsp) ## 8-byte Folded Spill
+ adcq $0, 16(%rsp) ## 8-byte Folded Spill
+ adcq $0, 48(%rsp) ## 8-byte Folded Spill
+ adcq $0, 72(%rsp) ## 8-byte Folded Spill
+ adcq $0, 80(%rsp) ## 8-byte Folded Spill
+ adcq $0, 56(%rsp) ## 8-byte Folded Spill
+ movq 88(%rsp), %r14 ## 8-byte Reload
+ adcq $0, %r14
+ sbbq %r12, %r12
+ movq %rcx, %rdx
+ movq %rcx, %rbx
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 776(%rsp), %rdi
+ movq 104(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ andl $1, %r12d
+ addq 776(%rsp), %rbx
+ adcq 784(%rsp), %r15
+ adcq 792(%rsp), %r13
+ movq %r13, 128(%rsp) ## 8-byte Spill
+ movq 120(%rsp), %rax ## 8-byte Reload
+ adcq 800(%rsp), %rax
+ movq %rax, 120(%rsp) ## 8-byte Spill
+ movq 112(%rsp), %rax ## 8-byte Reload
+ adcq 808(%rsp), %rax
+ movq %rax, 112(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rax ## 8-byte Reload
+ adcq 816(%rsp), %rax
+ movq %rax, 64(%rsp) ## 8-byte Spill
+ movq 32(%rsp), %rax ## 8-byte Reload
+ adcq 824(%rsp), %rax
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %rax ## 8-byte Reload
+ adcq 832(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 840(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ adcq 848(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r13 ## 8-byte Reload
+ adcq $0, %r13
+ adcq $0, 16(%rsp) ## 8-byte Folded Spill
+ adcq $0, 48(%rsp) ## 8-byte Folded Spill
+ adcq $0, 72(%rsp) ## 8-byte Folded Spill
+ adcq $0, 80(%rsp) ## 8-byte Folded Spill
+ movq 56(%rsp), %rbx ## 8-byte Reload
+ adcq $0, %rbx
+ adcq $0, %r14
+ movq %r14, 88(%rsp) ## 8-byte Spill
+ adcq $0, %r12
+ movq %r15, %rdx
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 696(%rsp), %rdi
+ movq 104(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 696(%rsp), %r15
+ movq 128(%rsp), %rcx ## 8-byte Reload
+ adcq 704(%rsp), %rcx
+ movq 120(%rsp), %rax ## 8-byte Reload
+ adcq 712(%rsp), %rax
+ movq %rax, 120(%rsp) ## 8-byte Spill
+ movq 112(%rsp), %rax ## 8-byte Reload
+ adcq 720(%rsp), %rax
+ movq %rax, 112(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rbp ## 8-byte Reload
+ adcq 728(%rsp), %rbp
+ movq 32(%rsp), %r14 ## 8-byte Reload
+ adcq 736(%rsp), %r14
+ movq 40(%rsp), %r15 ## 8-byte Reload
+ adcq 744(%rsp), %r15
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 752(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rax ## 8-byte Reload
+ adcq 760(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ adcq 768(%rsp), %r13
+ movq %r13, 8(%rsp) ## 8-byte Spill
+ adcq $0, 16(%rsp) ## 8-byte Folded Spill
+ movq 48(%rsp), %r13 ## 8-byte Reload
+ adcq $0, %r13
+ adcq $0, 72(%rsp) ## 8-byte Folded Spill
+ adcq $0, 80(%rsp) ## 8-byte Folded Spill
+ adcq $0, %rbx
+ movq %rbx, 56(%rsp) ## 8-byte Spill
+ adcq $0, 88(%rsp) ## 8-byte Folded Spill
+ adcq $0, %r12
+ movq %rcx, %rbx
+ movq %rbx, %rdx
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 616(%rsp), %rdi
+ movq 104(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 616(%rsp), %rbx
+ movq 120(%rsp), %rax ## 8-byte Reload
+ adcq 624(%rsp), %rax
+ movq 112(%rsp), %rcx ## 8-byte Reload
+ adcq 632(%rsp), %rcx
+ movq %rcx, 112(%rsp) ## 8-byte Spill
+ adcq 640(%rsp), %rbp
+ movq %rbp, 64(%rsp) ## 8-byte Spill
+ adcq 648(%rsp), %r14
+ movq %r14, 32(%rsp) ## 8-byte Spill
+ adcq 656(%rsp), %r15
+ movq (%rsp), %r14 ## 8-byte Reload
+ adcq 664(%rsp), %r14
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq 672(%rsp), %rbp
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 680(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ adcq 688(%rsp), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ adcq $0, %r13
+ movq %r13, 48(%rsp) ## 8-byte Spill
+ adcq $0, 72(%rsp) ## 8-byte Folded Spill
+ adcq $0, 80(%rsp) ## 8-byte Folded Spill
+ adcq $0, 56(%rsp) ## 8-byte Folded Spill
+ adcq $0, 88(%rsp) ## 8-byte Folded Spill
+ adcq $0, %r12
+ movq %rax, %rbx
+ movq %rbx, %rdx
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 536(%rsp), %rdi
+ movq 104(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 536(%rsp), %rbx
+ movq 112(%rsp), %rax ## 8-byte Reload
+ adcq 544(%rsp), %rax
+ movq 64(%rsp), %rcx ## 8-byte Reload
+ adcq 552(%rsp), %rcx
+ movq %rcx, 64(%rsp) ## 8-byte Spill
+ movq 32(%rsp), %rcx ## 8-byte Reload
+ adcq 560(%rsp), %rcx
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ adcq 568(%rsp), %r15
+ movq %r15, 40(%rsp) ## 8-byte Spill
+ adcq 576(%rsp), %r14
+ movq %r14, (%rsp) ## 8-byte Spill
+ adcq 584(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r13 ## 8-byte Reload
+ adcq 592(%rsp), %r13
+ movq 16(%rsp), %r15 ## 8-byte Reload
+ adcq 600(%rsp), %r15
+ movq 48(%rsp), %rbp ## 8-byte Reload
+ adcq 608(%rsp), %rbp
+ movq 72(%rsp), %rbx ## 8-byte Reload
+ adcq $0, %rbx
+ adcq $0, 80(%rsp) ## 8-byte Folded Spill
+ adcq $0, 56(%rsp) ## 8-byte Folded Spill
+ adcq $0, 88(%rsp) ## 8-byte Folded Spill
+ adcq $0, %r12
+ movq %rax, %rdx
+ movq %rax, %r14
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 456(%rsp), %rdi
+ movq 104(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 456(%rsp), %r14
+ movq 64(%rsp), %rax ## 8-byte Reload
+ adcq 464(%rsp), %rax
+ movq 32(%rsp), %rcx ## 8-byte Reload
+ adcq 472(%rsp), %rcx
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %rcx ## 8-byte Reload
+ adcq 480(%rsp), %rcx
+ movq %rcx, 40(%rsp) ## 8-byte Spill
+ movq (%rsp), %rcx ## 8-byte Reload
+ adcq 488(%rsp), %rcx
+ movq %rcx, (%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rcx ## 8-byte Reload
+ adcq 496(%rsp), %rcx
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq 504(%rsp), %r13
+ movq %r13, 8(%rsp) ## 8-byte Spill
+ adcq 512(%rsp), %r15
+ movq %r15, 16(%rsp) ## 8-byte Spill
+ adcq 520(%rsp), %rbp
+ movq %rbp, 48(%rsp) ## 8-byte Spill
+ adcq 528(%rsp), %rbx
+ movq %rbx, 72(%rsp) ## 8-byte Spill
+ movq 80(%rsp), %r14 ## 8-byte Reload
+ adcq $0, %r14
+ movq 56(%rsp), %r13 ## 8-byte Reload
+ adcq $0, %r13
+ movq 88(%rsp), %rbx ## 8-byte Reload
+ adcq $0, %rbx
+ adcq $0, %r12
+ movq %rax, %rdx
+ movq %rax, %r15
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 376(%rsp), %rdi
+ movq 104(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 376(%rsp), %r15
+ movq 32(%rsp), %rax ## 8-byte Reload
+ adcq 384(%rsp), %rax
+ movq 40(%rsp), %rcx ## 8-byte Reload
+ adcq 392(%rsp), %rcx
+ movq %rcx, 40(%rsp) ## 8-byte Spill
+ movq (%rsp), %rcx ## 8-byte Reload
+ adcq 400(%rsp), %rcx
+ movq %rcx, (%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq 408(%rsp), %rbp
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 416(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ adcq 424(%rsp), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rcx ## 8-byte Reload
+ adcq 432(%rsp), %rcx
+ movq %rcx, 48(%rsp) ## 8-byte Spill
+ movq 72(%rsp), %r15 ## 8-byte Reload
+ adcq 440(%rsp), %r15
+ adcq 448(%rsp), %r14
+ movq %r14, 80(%rsp) ## 8-byte Spill
+ adcq $0, %r13
+ movq %r13, %r14
+ adcq $0, %rbx
+ movq %rbx, 88(%rsp) ## 8-byte Spill
+ adcq $0, %r12
+ movq %rax, %rbx
+ movq %rbx, %rdx
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 296(%rsp), %rdi
+ movq 104(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 296(%rsp), %rbx
+ movq 40(%rsp), %rax ## 8-byte Reload
+ adcq 304(%rsp), %rax
+ movq (%rsp), %r13 ## 8-byte Reload
+ adcq 312(%rsp), %r13
+ adcq 320(%rsp), %rbp
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 328(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ adcq 336(%rsp), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rcx ## 8-byte Reload
+ adcq 344(%rsp), %rcx
+ movq %rcx, 48(%rsp) ## 8-byte Spill
+ adcq 352(%rsp), %r15
+ movq %r15, 72(%rsp) ## 8-byte Spill
+ movq 80(%rsp), %r15 ## 8-byte Reload
+ adcq 360(%rsp), %r15
+ adcq 368(%rsp), %r14
+ movq %r14, 56(%rsp) ## 8-byte Spill
+ movq 88(%rsp), %r14 ## 8-byte Reload
+ adcq $0, %r14
+ adcq $0, %r12
+ movq 96(%rsp), %rdx ## 8-byte Reload
+ imulq %rax, %rdx
+ movq %rax, %rbx
+ leaq 216(%rsp), %rdi
+ movq 104(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 216(%rsp), %rbx
+ movq %r13, %rsi
+ adcq 224(%rsp), %rsi
+ movq %rsi, (%rsp) ## 8-byte Spill
+ adcq 232(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r9 ## 8-byte Reload
+ adcq 240(%rsp), %r9
+ movq %r9, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %r8 ## 8-byte Reload
+ adcq 248(%rsp), %r8
+ movq %r8, 16(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rbx ## 8-byte Reload
+ adcq 256(%rsp), %rbx
+ movq 72(%rsp), %rax ## 8-byte Reload
+ adcq 264(%rsp), %rax
+ movq %r15, %rcx
+ adcq 272(%rsp), %rcx
+ movq 56(%rsp), %rdx ## 8-byte Reload
+ adcq 280(%rsp), %rdx
+ movq %rdx, 56(%rsp) ## 8-byte Spill
+ adcq 288(%rsp), %r14
+ movq %r14, %r11
+ adcq $0, %r12
+ subq 144(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rbp, %rdi
+ sbbq 136(%rsp), %rdi ## 8-byte Folded Reload
+ movq %r9, %rbp
+ sbbq 152(%rsp), %rbp ## 8-byte Folded Reload
+ movq %r8, %r13
+ sbbq 160(%rsp), %r13 ## 8-byte Folded Reload
+ movq %rbx, %r15
+ sbbq 168(%rsp), %r15 ## 8-byte Folded Reload
+ movq %rax, %r14
+ sbbq 176(%rsp), %r14 ## 8-byte Folded Reload
+ movq %rcx, %r10
+ sbbq 184(%rsp), %r10 ## 8-byte Folded Reload
+ movq %rdx, %r8
+ sbbq 192(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r11, %r9
+ sbbq 200(%rsp), %r9 ## 8-byte Folded Reload
+ sbbq $0, %r12
+ andl $1, %r12d
+ cmovneq %r11, %r9
+ testb %r12b, %r12b
+ cmovneq (%rsp), %rsi ## 8-byte Folded Reload
+ movq 208(%rsp), %rdx ## 8-byte Reload
+ movq %rsi, (%rdx)
+ cmovneq 24(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 8(%rdx)
+ cmovneq 8(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 16(%rdx)
+ cmovneq 16(%rsp), %r13 ## 8-byte Folded Reload
+ movq %r13, 24(%rdx)
+ cmovneq %rbx, %r15
+ movq %r15, 32(%rdx)
+ cmovneq %rax, %r14
+ movq %r14, 40(%rdx)
+ cmovneq %rcx, %r10
+ movq %r10, 48(%rdx)
+ cmovneq 56(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, 56(%rdx)
+ movq %r9, 64(%rdx)
+ addq $936, %rsp ## imm = 0x3A8
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_addPre9L
+ .p2align 4, 0x90
+_mcl_fp_addPre9L: ## @mcl_fp_addPre9L
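+## Plain 9-limb addition (no modular reduction); the final carry is returned in %rax.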
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 64(%rdx), %r8
+ movq 64(%rsi), %r15
+ movq 56(%rsi), %r9
+ movq 48(%rsi), %r10
+ movq 40(%rsi), %r11
+ movq 24(%rsi), %r12
+ movq 32(%rsi), %r14
+ movq (%rdx), %rbx
+ movq 8(%rdx), %rcx
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %rcx
+ movq 16(%rdx), %rax
+ adcq 16(%rsi), %rax
+ adcq 24(%rdx), %r12
+ movq 56(%rdx), %r13
+ movq 48(%rdx), %rsi
+ movq 40(%rdx), %rbp
+ movq 32(%rdx), %rdx
+ movq %rbx, (%rdi)
+ movq %rcx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ movq %r12, 24(%rdi)
+ adcq %r14, %rdx
+ movq %rdx, 32(%rdi)
+ adcq %r11, %rbp
+ movq %rbp, 40(%rdi)
+ adcq %r10, %rsi
+ movq %rsi, 48(%rdi)
+ adcq %r9, %r13
+ movq %r13, 56(%rdi)
+ adcq %r8, %r15
+ movq %r15, 64(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_subPre9L
+ .p2align 4, 0x90
+_mcl_fp_subPre9L: ## @mcl_fp_subPre9L
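+## Plain 9-limb subtraction (no modular reduction); the final borrow is returned in %rax.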
+## BB#0:
+ movq 32(%rdx), %r8
+ movq (%rsi), %rcx
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ movq %rcx, (%rdi)
+ movq 8(%rsi), %rcx
+ sbbq 8(%rdx), %rcx
+ movq %rcx, 8(%rdi)
+ movq 16(%rsi), %rcx
+ sbbq 16(%rdx), %rcx
+ movq %rcx, 16(%rdi)
+ movq 24(%rsi), %rcx
+ sbbq 24(%rdx), %rcx
+ movq %rcx, 24(%rdi)
+ movq 32(%rsi), %rcx
+ sbbq %r8, %rcx
+ movq 40(%rdx), %r8
+ movq %rcx, 32(%rdi)
+ movq 40(%rsi), %rcx
+ sbbq %r8, %rcx
+ movq 48(%rdx), %r8
+ movq %rcx, 40(%rdi)
+ movq 48(%rsi), %rcx
+ sbbq %r8, %rcx
+ movq 56(%rdx), %r8
+ movq %rcx, 48(%rdi)
+ movq 56(%rsi), %rcx
+ sbbq %r8, %rcx
+ movq %rcx, 56(%rdi)
+ movq 64(%rdx), %rcx
+ movq 64(%rsi), %rdx
+ sbbq %rcx, %rdx
+ movq %rdx, 64(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+
+ .globl _mcl_fp_shr1_9L
+ .p2align 4, 0x90
+_mcl_fp_shr1_9L: ## @mcl_fp_shr1_9L
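+## Logical right shift of a 9-limb value by one bit, implemented as a shrdq chain.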
+## BB#0:
+ pushq %rbx
+ movq 64(%rsi), %r8
+ movq 56(%rsi), %r9
+ movq 48(%rsi), %r10
+ movq 40(%rsi), %r11
+ movq 32(%rsi), %rcx
+ movq 24(%rsi), %rdx
+ movq 16(%rsi), %rax
+ movq (%rsi), %rbx
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rbx
+ movq %rbx, (%rdi)
+ shrdq $1, %rax, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rdx, %rax
+ movq %rax, 16(%rdi)
+ shrdq $1, %rcx, %rdx
+ movq %rdx, 24(%rdi)
+ shrdq $1, %r11, %rcx
+ movq %rcx, 32(%rdi)
+ shrdq $1, %r10, %r11
+ movq %r11, 40(%rdi)
+ shrdq $1, %r9, %r10
+ movq %r10, 48(%rdi)
+ shrdq $1, %r8, %r9
+ movq %r9, 56(%rdi)
+ shrq %r8
+ movq %r8, 64(%rdi)
+ popq %rbx
+ retq
+
+ .globl _mcl_fp_add9L
+ .p2align 4, 0x90
+_mcl_fp_add9L: ## @mcl_fp_add9L
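+## Modular addition: the raw sum is stored first, then the modulus at %rcx is
+## subtracted; if that subtraction does not borrow (the %nocarry path), the reduced
+## result overwrites it.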
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 64(%rdx), %r12
+ movq 64(%rsi), %r8
+ movq 56(%rsi), %r13
+ movq 48(%rsi), %r9
+ movq 40(%rsi), %r10
+ movq 24(%rsi), %r14
+ movq 32(%rsi), %r11
+ movq (%rdx), %rbx
+ movq 8(%rdx), %r15
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %r15
+ movq 16(%rdx), %rax
+ adcq 16(%rsi), %rax
+ adcq 24(%rdx), %r14
+ adcq 32(%rdx), %r11
+ adcq 40(%rdx), %r10
+ movq 56(%rdx), %rsi
+ adcq 48(%rdx), %r9
+ movq %rbx, (%rdi)
+ movq %r15, 8(%rdi)
+ movq %rax, 16(%rdi)
+ movq %r14, 24(%rdi)
+ movq %r11, 32(%rdi)
+ movq %r10, 40(%rdi)
+ movq %r9, 48(%rdi)
+ adcq %r13, %rsi
+ movq %rsi, 56(%rdi)
+ adcq %r12, %r8
+ movq %r8, 64(%rdi)
+ sbbq %rdx, %rdx
+ andl $1, %edx
+ subq (%rcx), %rbx
+ sbbq 8(%rcx), %r15
+ sbbq 16(%rcx), %rax
+ sbbq 24(%rcx), %r14
+ sbbq 32(%rcx), %r11
+ sbbq 40(%rcx), %r10
+ sbbq 48(%rcx), %r9
+ sbbq 56(%rcx), %rsi
+ sbbq 64(%rcx), %r8
+ sbbq $0, %rdx
+ testb $1, %dl
+ jne LBB136_2
+## BB#1: ## %nocarry
+ movq %rbx, (%rdi)
+ movq %r15, 8(%rdi)
+ movq %rax, 16(%rdi)
+ movq %r14, 24(%rdi)
+ movq %r11, 32(%rdi)
+ movq %r10, 40(%rdi)
+ movq %r9, 48(%rdi)
+ movq %rsi, 56(%rdi)
+ movq %r8, 64(%rdi)
+LBB136_2: ## %carry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_addNF9L
+ .p2align 4, 0x90
+_mcl_fp_addNF9L: ## @mcl_fp_addNF9L
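+## Branch-free modular addition ("NF"): add, trial-subtract the modulus, then use the
+## sign of the top limb (cmovs) to select the reduced or unreduced result.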
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, %r8
+ movq 64(%rdx), %r10
+ movq 56(%rdx), %r11
+ movq 48(%rdx), %r9
+ movq 40(%rdx), %rax
+ movq 32(%rdx), %rdi
+ movq 24(%rdx), %rbp
+ movq 16(%rdx), %r15
+ movq (%rdx), %rbx
+ movq 8(%rdx), %r13
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %r13
+ adcq 16(%rsi), %r15
+ adcq 24(%rsi), %rbp
+ movq %rbp, -24(%rsp) ## 8-byte Spill
+ adcq 32(%rsi), %rdi
+ movq %rdi, -40(%rsp) ## 8-byte Spill
+ adcq 40(%rsi), %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ adcq 48(%rsi), %r9
+ movq %r9, %rdi
+ movq %rdi, -16(%rsp) ## 8-byte Spill
+ adcq 56(%rsi), %r11
+ movq %r11, %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ adcq 64(%rsi), %r10
+ movq %r10, %r9
+ movq %rbx, %rsi
+ subq (%rcx), %rsi
+ movq %r13, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r15, %r12
+ sbbq 16(%rcx), %r12
+ sbbq 24(%rcx), %rbp
+ movq -40(%rsp), %r14 ## 8-byte Reload
+ sbbq 32(%rcx), %r14
+ movq -32(%rsp), %r11 ## 8-byte Reload
+ sbbq 40(%rcx), %r11
+ movq %rdi, %r10
+ sbbq 48(%rcx), %r10
+ movq %rax, %rdi
+ sbbq 56(%rcx), %rdi
+ movq %r9, %rax
+ sbbq 64(%rcx), %rax
+ movq %rax, %rcx
+ sarq $63, %rcx
+ cmovsq %rbx, %rsi
+ movq %rsi, (%r8)
+ cmovsq %r13, %rdx
+ movq %rdx, 8(%r8)
+ cmovsq %r15, %r12
+ movq %r12, 16(%r8)
+ cmovsq -24(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 24(%r8)
+ cmovsq -40(%rsp), %r14 ## 8-byte Folded Reload
+ movq %r14, 32(%r8)
+ cmovsq -32(%rsp), %r11 ## 8-byte Folded Reload
+ movq %r11, 40(%r8)
+ cmovsq -16(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r10, 48(%r8)
+ cmovsq -8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 56(%r8)
+ cmovsq %r9, %rax
+ movq %rax, 64(%r8)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_sub9L
+ .p2align 4, 0x90
+_mcl_fp_sub9L: ## @mcl_fp_sub9L
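+## Modular subtraction: the raw difference is stored, and if a borrow occurred the
+## modulus at %rcx is added back in (the %carry path).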
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 64(%rdx), %r13
+ movq (%rsi), %rax
+ movq 8(%rsi), %r9
+ xorl %ebx, %ebx
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r9
+ movq 16(%rsi), %r10
+ sbbq 16(%rdx), %r10
+ movq 24(%rsi), %r11
+ sbbq 24(%rdx), %r11
+ movq 32(%rsi), %r12
+ sbbq 32(%rdx), %r12
+ movq 40(%rsi), %r14
+ sbbq 40(%rdx), %r14
+ movq 48(%rsi), %r15
+ sbbq 48(%rdx), %r15
+ movq 64(%rsi), %r8
+ movq 56(%rsi), %rsi
+ sbbq 56(%rdx), %rsi
+ movq %rax, (%rdi)
+ movq %r9, 8(%rdi)
+ movq %r10, 16(%rdi)
+ movq %r11, 24(%rdi)
+ movq %r12, 32(%rdi)
+ movq %r14, 40(%rdi)
+ movq %r15, 48(%rdi)
+ movq %rsi, 56(%rdi)
+ sbbq %r13, %r8
+ movq %r8, 64(%rdi)
+ sbbq $0, %rbx
+ testb $1, %bl
+ je LBB138_2
+## BB#1: ## %carry
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ movq 8(%rcx), %rax
+ adcq %r9, %rax
+ movq %rax, 8(%rdi)
+ movq 16(%rcx), %rax
+ adcq %r10, %rax
+ movq %rax, 16(%rdi)
+ movq 24(%rcx), %rax
+ adcq %r11, %rax
+ movq %rax, 24(%rdi)
+ movq 32(%rcx), %rax
+ adcq %r12, %rax
+ movq %rax, 32(%rdi)
+ movq 40(%rcx), %rax
+ adcq %r14, %rax
+ movq %rax, 40(%rdi)
+ movq 48(%rcx), %rax
+ adcq %r15, %rax
+ movq %rax, 48(%rdi)
+ movq 56(%rcx), %rax
+ adcq %rsi, %rax
+ movq %rax, 56(%rdi)
+ movq 64(%rcx), %rax
+ adcq %r8, %rax
+ movq %rax, 64(%rdi)
+LBB138_2: ## %nocarry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_subNF9L
+ .p2align 4, 0x90
+_mcl_fp_subNF9L: ## @mcl_fp_subNF9L
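+## Branch-free modular subtraction ("NF"): operands are loaded via SSE2 lane
+## extraction, subtracted, and the modulus ANDed with the sign mask of the result is
+## added back.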
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r11
+ movq %rdi, %rbx
+ movq 64(%rsi), %rax
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ movdqu (%rdx), %xmm1
+ movdqu 16(%rdx), %xmm2
+ movdqu 32(%rdx), %xmm3
+ movdqu 48(%rdx), %xmm4
+ pshufd $78, %xmm4, %xmm0 ## xmm0 = xmm4[2,3,0,1]
+ movd %xmm0, %r12
+ movdqu (%rsi), %xmm5
+ movdqu 16(%rsi), %xmm6
+ movdqu 32(%rsi), %xmm7
+ movdqu 48(%rsi), %xmm8
+ pshufd $78, %xmm8, %xmm0 ## xmm0 = xmm8[2,3,0,1]
+ movd %xmm0, %rax
+ movd %xmm4, %r10
+ pshufd $78, %xmm3, %xmm0 ## xmm0 = xmm3[2,3,0,1]
+ movd %xmm0, %r9
+ pshufd $78, %xmm7, %xmm0 ## xmm0 = xmm7[2,3,0,1]
+ movd %xmm3, %r8
+ pshufd $78, %xmm2, %xmm3 ## xmm3 = xmm2[2,3,0,1]
+ movd %xmm3, %rcx
+ pshufd $78, %xmm6, %xmm3 ## xmm3 = xmm6[2,3,0,1]
+ movd %xmm2, %rbp
+ pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1]
+ movd %xmm2, %rsi
+ pshufd $78, %xmm5, %xmm2 ## xmm2 = xmm5[2,3,0,1]
+ movd %xmm1, %rdi
+ movd %xmm5, %r15
+ subq %rdi, %r15
+ movd %xmm2, %r14
+ sbbq %rsi, %r14
+ movd %xmm6, %r13
+ sbbq %rbp, %r13
+ movd %xmm3, %rbp
+ sbbq %rcx, %rbp
+ movd %xmm7, %rcx
+ sbbq %r8, %rcx
+ movq %rcx, -16(%rsp) ## 8-byte Spill
+ movd %xmm0, %rcx
+ sbbq %r9, %rcx
+ movq %rcx, -24(%rsp) ## 8-byte Spill
+ movd %xmm8, %rcx
+ sbbq %r10, %rcx
+ movq %rcx, -32(%rsp) ## 8-byte Spill
+ sbbq %r12, %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq -40(%rsp), %rsi ## 8-byte Reload
+ sbbq 64(%rdx), %rsi
+ movq %rsi, -40(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ sarq $63, %rax
+ movq %rax, %rcx
+ shldq $1, %rsi, %rcx
+ movq 24(%r11), %r9
+ andq %rcx, %r9
+ movq 8(%r11), %rdi
+ andq %rcx, %rdi
+ andq (%r11), %rcx
+ movq 64(%r11), %r12
+ andq %rax, %r12
+ movq 56(%r11), %r10
+ andq %rax, %r10
+ rolq %rax
+ movq 48(%r11), %r8
+ andq %rax, %r8
+ movq 40(%r11), %rsi
+ andq %rax, %rsi
+ movq 32(%r11), %rdx
+ andq %rax, %rdx
+ andq 16(%r11), %rax
+ addq %r15, %rcx
+ adcq %r14, %rdi
+ movq %rcx, (%rbx)
+ adcq %r13, %rax
+ movq %rdi, 8(%rbx)
+ adcq %rbp, %r9
+ movq %rax, 16(%rbx)
+ movq %r9, 24(%rbx)
+ adcq -16(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 32(%rbx)
+ adcq -24(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 40(%rbx)
+ adcq -32(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, 48(%rbx)
+ adcq -8(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r10, 56(%rbx)
+ adcq -40(%rsp), %r12 ## 8-byte Folded Reload
+ movq %r12, 64(%rbx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_add9L
+ .p2align 4, 0x90
+_mcl_fpDbl_add9L: ## @mcl_fpDbl_add9L
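+## Double-width (18-limb) addition: the low nine limbs are stored as-is, the high nine
+## are conditionally reduced modulo the prime at %rcx.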
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r15
+ movq 136(%rdx), %rax
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ movq 128(%rdx), %rax
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ movq 120(%rdx), %r10
+ movq 112(%rdx), %r11
+ movq 24(%rsi), %rcx
+ movq 32(%rsi), %r14
+ movq 16(%rdx), %rbp
+ movq (%rdx), %rax
+ movq 8(%rdx), %rbx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rbx
+ adcq 16(%rsi), %rbp
+ adcq 24(%rdx), %rcx
+ adcq 32(%rdx), %r14
+ movq 104(%rdx), %r9
+ movq 96(%rdx), %r13
+ movq %rax, (%rdi)
+ movq 88(%rdx), %r8
+ movq %rbx, 8(%rdi)
+ movq 80(%rdx), %r12
+ movq %rbp, 16(%rdi)
+ movq 40(%rdx), %rax
+ movq %rcx, 24(%rdi)
+ movq 40(%rsi), %rbp
+ adcq %rax, %rbp
+ movq 48(%rdx), %rcx
+ movq %r14, 32(%rdi)
+ movq 48(%rsi), %rax
+ adcq %rcx, %rax
+ movq 56(%rdx), %r14
+ movq %rbp, 40(%rdi)
+ movq 56(%rsi), %rbp
+ adcq %r14, %rbp
+ movq 72(%rdx), %rcx
+ movq 64(%rdx), %rdx
+ movq %rax, 48(%rdi)
+ movq 64(%rsi), %rax
+ adcq %rdx, %rax
+ movq 136(%rsi), %rbx
+ movq %rbp, 56(%rdi)
+ movq 72(%rsi), %rbp
+ adcq %rcx, %rbp
+ movq 128(%rsi), %rcx
+ movq %rax, 64(%rdi)
+ movq 80(%rsi), %rdx
+ adcq %r12, %rdx
+ movq 88(%rsi), %r12
+ adcq %r8, %r12
+ movq 96(%rsi), %r14
+ adcq %r13, %r14
+ movq %r14, -8(%rsp) ## 8-byte Spill
+ movq 104(%rsi), %rax
+ adcq %r9, %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ movq 120(%rsi), %rax
+ movq 112(%rsi), %rsi
+ adcq %r11, %rsi
+ movq %rsi, -24(%rsp) ## 8-byte Spill
+ adcq %r10, %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ adcq -40(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -40(%rsp) ## 8-byte Spill
+ adcq -48(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, -48(%rsp) ## 8-byte Spill
+ sbbq %r9, %r9
+ andl $1, %r9d
+ movq %rbp, %r10
+ subq (%r15), %r10
+ movq %rdx, %r11
+ sbbq 8(%r15), %r11
+ movq %r12, %rbx
+ sbbq 16(%r15), %rbx
+ sbbq 24(%r15), %r14
+ movq -32(%rsp), %r13 ## 8-byte Reload
+ sbbq 32(%r15), %r13
+ movq -24(%rsp), %rsi ## 8-byte Reload
+ sbbq 40(%r15), %rsi
+ movq -16(%rsp), %rax ## 8-byte Reload
+ sbbq 48(%r15), %rax
+ sbbq 56(%r15), %rcx
+ movq -48(%rsp), %r8 ## 8-byte Reload
+ sbbq 64(%r15), %r8
+ sbbq $0, %r9
+ andl $1, %r9d
+ cmovneq %rbp, %r10
+ movq %r10, 72(%rdi)
+ testb %r9b, %r9b
+ cmovneq %rdx, %r11
+ movq %r11, 80(%rdi)
+ cmovneq %r12, %rbx
+ movq %rbx, 88(%rdi)
+ cmovneq -8(%rsp), %r14 ## 8-byte Folded Reload
+ movq %r14, 96(%rdi)
+ cmovneq -32(%rsp), %r13 ## 8-byte Folded Reload
+ movq %r13, 104(%rdi)
+ cmovneq -24(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 112(%rdi)
+ cmovneq -16(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 120(%rdi)
+ cmovneq -40(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 128(%rdi)
+ cmovneq -48(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, 136(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_sub9L
+ .p2align 4, 0x90
+_mcl_fpDbl_sub9L: ## @mcl_fpDbl_sub9L
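+## Double-width (18-limb) subtraction: the low nine limbs are stored as-is; if the
+## subtraction borrows, the prime at %rcx is added back into the high nine limbs.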
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r14
+ movq 136(%rdx), %rax
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ movq 128(%rdx), %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ movq 120(%rdx), %rax
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ movq 16(%rsi), %r11
+ movq (%rsi), %r12
+ movq 8(%rsi), %r13
+ xorl %r9d, %r9d
+ subq (%rdx), %r12
+ sbbq 8(%rdx), %r13
+ sbbq 16(%rdx), %r11
+ movq 24(%rsi), %rbx
+ sbbq 24(%rdx), %rbx
+ movq 32(%rsi), %rbp
+ sbbq 32(%rdx), %rbp
+ movq 112(%rdx), %r10
+ movq 104(%rdx), %rcx
+ movq %r12, (%rdi)
+ movq 96(%rdx), %rax
+ movq %r13, 8(%rdi)
+ movq 88(%rdx), %r13
+ movq %r11, 16(%rdi)
+ movq 40(%rdx), %r11
+ movq %rbx, 24(%rdi)
+ movq 40(%rsi), %rbx
+ sbbq %r11, %rbx
+ movq 48(%rdx), %r11
+ movq %rbp, 32(%rdi)
+ movq 48(%rsi), %rbp
+ sbbq %r11, %rbp
+ movq 56(%rdx), %r11
+ movq %rbx, 40(%rdi)
+ movq 56(%rsi), %rbx
+ sbbq %r11, %rbx
+ movq 64(%rdx), %r11
+ movq %rbp, 48(%rdi)
+ movq 64(%rsi), %rbp
+ sbbq %r11, %rbp
+ movq 80(%rdx), %r8
+ movq 72(%rdx), %r11
+ movq %rbx, 56(%rdi)
+ movq 72(%rsi), %r15
+ sbbq %r11, %r15
+ movq 136(%rsi), %rdx
+ movq %rbp, 64(%rdi)
+ movq 80(%rsi), %rbp
+ sbbq %r8, %rbp
+ movq 88(%rsi), %r12
+ sbbq %r13, %r12
+ movq 96(%rsi), %r13
+ sbbq %rax, %r13
+ movq 104(%rsi), %rax
+ sbbq %rcx, %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ movq 112(%rsi), %rax
+ sbbq %r10, %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq 128(%rsi), %rax
+ movq 120(%rsi), %rcx
+ sbbq -40(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -40(%rsp) ## 8-byte Spill
+ sbbq -32(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ sbbq -24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -24(%rsp) ## 8-byte Spill
+ movl $0, %r8d
+ sbbq $0, %r8
+ andl $1, %r8d
+ movq (%r14), %r10
+ cmoveq %r9, %r10
+ testb %r8b, %r8b
+ movq 16(%r14), %r8
+ cmoveq %r9, %r8
+ movq 8(%r14), %rdx
+ cmoveq %r9, %rdx
+ movq 64(%r14), %rbx
+ cmoveq %r9, %rbx
+ movq 56(%r14), %r11
+ cmoveq %r9, %r11
+ movq 48(%r14), %rsi
+ cmoveq %r9, %rsi
+ movq 40(%r14), %rcx
+ cmoveq %r9, %rcx
+ movq 32(%r14), %rax
+ cmoveq %r9, %rax
+ cmovneq 24(%r14), %r9
+ addq %r15, %r10
+ adcq %rbp, %rdx
+ movq %r10, 72(%rdi)
+ adcq %r12, %r8
+ movq %rdx, 80(%rdi)
+ adcq %r13, %r9
+ movq %r8, 88(%rdi)
+ movq %r9, 96(%rdi)
+ adcq -16(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 104(%rdi)
+ adcq -8(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 112(%rdi)
+ adcq -40(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 120(%rdi)
+ adcq -32(%rsp), %r11 ## 8-byte Folded Reload
+ movq %r11, 128(%rdi)
+ adcq -24(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, 136(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+
+.subsections_via_symbols
diff --git a/vendor/github.com/tangerine-network/mcl/src/asm/x86.bmi2.s b/vendor/github.com/tangerine-network/mcl/src/asm/x86.bmi2.s
new file mode 100644
index 000000000..77729c530
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/src/asm/x86.bmi2.s
@@ -0,0 +1,71547 @@
+ .text
+ .file "<stdin>"
+ .globl makeNIST_P192Lbmi2
+ .align 16, 0x90
+ .type makeNIST_P192Lbmi2,@function
+makeNIST_P192Lbmi2: # @makeNIST_P192Lbmi2
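+# Stores the NIST P-192 prime 2^192 - 2^64 - 1 as six 32-bit little-endian words.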
+# BB#0:
+ movl 4(%esp), %eax
+ movl $-1, 20(%eax)
+ movl $-1, 16(%eax)
+ movl $-1, 12(%eax)
+ movl $-2, 8(%eax)
+ movl $-1, 4(%eax)
+ movl $-1, (%eax)
+ retl $4
+.Lfunc_end0:
+ .size makeNIST_P192Lbmi2, .Lfunc_end0-makeNIST_P192Lbmi2
+
+ .globl mcl_fpDbl_mod_NIST_P192Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mod_NIST_P192Lbmi2,@function
+mcl_fpDbl_mod_NIST_P192Lbmi2: # @mcl_fpDbl_mod_NIST_P192Lbmi2
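+# Reduces a 384-bit value modulo P-192 by folding the high words back into the low
+# words (using 2^192 = 2^64 + 1 mod p), followed by conditional final corrections.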
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $32, %esp
+ movl 56(%esp), %eax
+ movl 32(%eax), %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ movl 24(%eax), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 28(%eax), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ xorl %edx, %edx
+ movl (%eax), %ebx
+ addl %ecx, %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 4(%eax), %ecx
+ adcl %edi, %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 8(%eax), %ebp
+ adcl %esi, %ebp
+ movl 36(%eax), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 12(%eax), %esi
+ adcl %ecx, %esi
+ movl 40(%eax), %ebx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 16(%eax), %ecx
+ adcl %ebx, %ecx
+ movl 44(%eax), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ movl 20(%eax), %eax
+ adcl %edi, %eax
+ adcl $0, %edx
+ sbbl %edi, %edi
+ andl $1, %edi
+ addl %ebx, 24(%esp) # 4-byte Folded Spill
+ movl (%esp), %ebx # 4-byte Reload
+ adcl %ebx, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ adcl $0, %edx
+ adcl $0, %edi
+ addl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl %ebx, %esi
+ adcl $0, %ecx
+ adcl $0, %eax
+ adcl $0, %edx
+ adcl $0, %edi
+ addl %edx, 24(%esp) # 4-byte Folded Spill
+ adcl %edi, 28(%esp) # 4-byte Folded Spill
+ adcl %ebp, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl %esi, %edi
+ adcl $0, %ecx
+ adcl $0, %eax
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 24(%esp), %esi # 4-byte Reload
+ addl $1, %esi
+ movl 28(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $1, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %edi, %edx
+ adcl $0, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ adcl $0, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, %edx
+ adcl $0, %edx
+ adcl $-1, %ebx
+ andl $1, %ebx
+ jne .LBB1_2
+# BB#1:
+ movl %edx, %eax
+.LBB1_2:
+ testb %bl, %bl
+ movl 24(%esp), %edx # 4-byte Reload
+ jne .LBB1_4
+# BB#3:
+ movl %esi, %edx
+.LBB1_4:
+ movl 52(%esp), %esi
+ movl %edx, (%esi)
+ movl 20(%esp), %edx # 4-byte Reload
+ movl 28(%esp), %ebx # 4-byte Reload
+ jne .LBB1_6
+# BB#5:
+ movl %ebp, %ebx
+.LBB1_6:
+ movl %ebx, 4(%esi)
+ jne .LBB1_8
+# BB#7:
+ movl 8(%esp), %edx # 4-byte Reload
+.LBB1_8:
+ movl %edx, 8(%esi)
+ jne .LBB1_10
+# BB#9:
+ movl 12(%esp), %edi # 4-byte Reload
+.LBB1_10:
+ movl %edi, 12(%esi)
+ jne .LBB1_12
+# BB#11:
+ movl 16(%esp), %ecx # 4-byte Reload
+.LBB1_12:
+ movl %ecx, 16(%esi)
+ movl %eax, 20(%esi)
+ addl $32, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end1:
+ .size mcl_fpDbl_mod_NIST_P192Lbmi2, .Lfunc_end1-mcl_fpDbl_mod_NIST_P192Lbmi2
+
+ .globl mcl_fp_sqr_NIST_P192Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sqr_NIST_P192Lbmi2,@function
+mcl_fp_sqr_NIST_P192Lbmi2: # @mcl_fp_sqr_NIST_P192Lbmi2
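+# Squares a 192-bit element: mcl_fpDbl_sqrPre6Lbmi2 produces the 384-bit square on the
+# stack, which is then reduced modulo P-192 inline.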
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $92, %esp
+ calll .L2$pb
+.L2$pb:
+ popl %ebx
+.Ltmp0:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp0-.L2$pb), %ebx
+ movl 116(%esp), %eax
+ movl %eax, 4(%esp)
+ leal 44(%esp), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_sqrPre6Lbmi2@PLT
+ xorl %edi, %edi
+ movl 76(%esp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 68(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 72(%esp), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %esi
+ addl %eax, %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax
+ adcl %edx, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp
+ adcl %ecx, %ebp
+ movl 80(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %esi
+ adcl %eax, %esi
+ movl 84(%esp), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx
+ adcl %ebx, %ecx
+ movl 88(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %edx
+ adcl %eax, %edx
+ adcl $0, %edi
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl %ebx, 36(%esp) # 4-byte Folded Spill
+ movl 12(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 40(%esp) # 4-byte Folded Spill
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %edi
+ adcl $0, %eax
+ addl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl %ebx, %esi
+ adcl $0, %ecx
+ adcl $0, %edx
+ adcl $0, %edi
+ adcl $0, %eax
+ addl %edi, 36(%esp) # 4-byte Folded Spill
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ adcl %ebp, %edi
+ adcl %esi, %eax
+ adcl $0, %ecx
+ adcl $0, %edx
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 36(%esp), %esi # 4-byte Reload
+ addl $1, %esi
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl %edi, %ebp
+ adcl $1, %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ adcl $0, %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ adcl $0, %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ adcl $0, %ebp
+ adcl $-1, %ebx
+ andl $1, %ebx
+ jne .LBB2_2
+# BB#1:
+ movl %ebp, %edx
+.LBB2_2:
+ testb %bl, %bl
+ movl 36(%esp), %ebx # 4-byte Reload
+ jne .LBB2_4
+# BB#3:
+ movl %esi, %ebx
+.LBB2_4:
+ movl 112(%esp), %esi
+ movl %ebx, (%esi)
+ movl 40(%esp), %ebx # 4-byte Reload
+ jne .LBB2_6
+# BB#5:
+ movl 20(%esp), %ebx # 4-byte Reload
+.LBB2_6:
+ movl %ebx, 4(%esi)
+ jne .LBB2_8
+# BB#7:
+ movl 24(%esp), %edi # 4-byte Reload
+.LBB2_8:
+ movl %edi, 8(%esi)
+ jne .LBB2_10
+# BB#9:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB2_10:
+ movl %eax, 12(%esi)
+ jne .LBB2_12
+# BB#11:
+ movl 32(%esp), %ecx # 4-byte Reload
+.LBB2_12:
+ movl %ecx, 16(%esi)
+ movl %edx, 20(%esi)
+ addl $92, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end2:
+ .size mcl_fp_sqr_NIST_P192Lbmi2, .Lfunc_end2-mcl_fp_sqr_NIST_P192Lbmi2
+
+ .globl mcl_fp_mulNIST_P192Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulNIST_P192Lbmi2,@function
+mcl_fp_mulNIST_P192Lbmi2: # @mcl_fp_mulNIST_P192Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $92, %esp
+ calll .L3$pb
+.L3$pb:
+ popl %ebx
+.Ltmp1:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp1-.L3$pb), %ebx
+ movl 120(%esp), %eax
+ movl %eax, 8(%esp)
+ movl 116(%esp), %eax
+ movl %eax, 4(%esp)
+ leal 44(%esp), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre6Lbmi2@PLT
+ xorl %edi, %edi
+ movl 76(%esp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 68(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 72(%esp), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %esi
+ addl %eax, %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax
+ adcl %edx, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp
+ adcl %ecx, %ebp
+ movl 80(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %esi
+ adcl %eax, %esi
+ movl 84(%esp), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx
+ adcl %ebx, %ecx
+ movl 88(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %edx
+ adcl %eax, %edx
+ adcl $0, %edi
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl %ebx, 36(%esp) # 4-byte Folded Spill
+ movl 12(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 40(%esp) # 4-byte Folded Spill
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %edi
+ adcl $0, %eax
+ addl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl %ebx, %esi
+ adcl $0, %ecx
+ adcl $0, %edx
+ adcl $0, %edi
+ adcl $0, %eax
+ addl %edi, 36(%esp) # 4-byte Folded Spill
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ adcl %ebp, %edi
+ adcl %esi, %eax
+ adcl $0, %ecx
+ adcl $0, %edx
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 36(%esp), %esi # 4-byte Reload
+ addl $1, %esi
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl %edi, %ebp
+ adcl $1, %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ adcl $0, %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ adcl $0, %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ adcl $0, %ebp
+ adcl $-1, %ebx
+ andl $1, %ebx
+ jne .LBB3_2
+# BB#1:
+ movl %ebp, %edx
+.LBB3_2:
+ testb %bl, %bl
+ movl 36(%esp), %ebx # 4-byte Reload
+ jne .LBB3_4
+# BB#3:
+ movl %esi, %ebx
+.LBB3_4:
+ movl 112(%esp), %esi
+ movl %ebx, (%esi)
+ movl 40(%esp), %ebx # 4-byte Reload
+ jne .LBB3_6
+# BB#5:
+ movl 20(%esp), %ebx # 4-byte Reload
+.LBB3_6:
+ movl %ebx, 4(%esi)
+ jne .LBB3_8
+# BB#7:
+ movl 24(%esp), %edi # 4-byte Reload
+.LBB3_8:
+ movl %edi, 8(%esi)
+ jne .LBB3_10
+# BB#9:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB3_10:
+ movl %eax, 12(%esi)
+ jne .LBB3_12
+# BB#11:
+ movl 32(%esp), %ecx # 4-byte Reload
+.LBB3_12:
+ movl %ecx, 16(%esi)
+ movl %edx, 20(%esi)
+ addl $92, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end3:
+ .size mcl_fp_mulNIST_P192Lbmi2, .Lfunc_end3-mcl_fp_mulNIST_P192Lbmi2
+
+ .globl mcl_fpDbl_mod_NIST_P521Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mod_NIST_P521Lbmi2,@function
+mcl_fpDbl_mod_NIST_P521Lbmi2: # @mcl_fpDbl_mod_NIST_P521Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $60, %esp
+ movl 84(%esp), %ecx
+ movl 124(%ecx), %edx
+ movl 128(%ecx), %esi
+ movl %esi, %eax
+ shldl $23, %edx, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 120(%ecx), %eax
+ shldl $23, %eax, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 116(%ecx), %edx
+ shldl $23, %edx, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 112(%ecx), %eax
+ shldl $23, %eax, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 108(%ecx), %edx
+ shldl $23, %edx, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 104(%ecx), %eax
+ shldl $23, %eax, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 100(%ecx), %edx
+ shldl $23, %edx, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 96(%ecx), %eax
+ shldl $23, %eax, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 92(%ecx), %edx
+ shldl $23, %edx, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 88(%ecx), %eax
+ shldl $23, %eax, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 84(%ecx), %edi
+ shldl $23, %edi, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 80(%ecx), %edx
+ shldl $23, %edx, %edi
+ movl 76(%ecx), %eax
+ shldl $23, %eax, %edx
+ movl 72(%ecx), %ebx
+ shldl $23, %ebx, %eax
+ movl 68(%ecx), %ebp
+ shldl $23, %ebp, %ebx
+ shrl $9, %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ movl 64(%ecx), %esi
+ shldl $23, %esi, %ebp
+ andl $511, %esi # imm = 0x1FF
+ addl (%ecx), %ebp
+ adcl 4(%ecx), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ adcl 8(%ecx), %eax
+ adcl 12(%ecx), %edx
+ adcl 16(%ecx), %edi
+ movl 28(%esp), %ebx # 4-byte Reload
+ adcl 20(%ecx), %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %ebx # 4-byte Reload
+ adcl 24(%ecx), %ebx
+ movl %ebx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ebx # 4-byte Reload
+ adcl 28(%ecx), %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %ebx # 4-byte Reload
+ adcl 32(%ecx), %ebx
+ movl %ebx, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %ebx # 4-byte Reload
+ adcl 36(%ecx), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %ebx # 4-byte Reload
+ adcl 40(%ecx), %ebx
+ movl %ebx, 48(%esp) # 4-byte Spill
+ movl 24(%esp), %ebx # 4-byte Reload
+ adcl 44(%ecx), %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 52(%esp), %ebx # 4-byte Reload
+ adcl 48(%ecx), %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 20(%esp), %ebx # 4-byte Reload
+ adcl 52(%ecx), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %ebx # 4-byte Reload
+ adcl 56(%ecx), %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 12(%esp), %ebx # 4-byte Reload
+ adcl 60(%ecx), %ebx
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ movl %esi, %ecx
+ shrl $9, %ecx
+ andl $1, %ecx
+ addl %ebp, %ecx
+ adcl $0, 16(%esp) # 4-byte Folded Spill
+ adcl $0, %eax
+ movl %eax, (%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl %edi, %esi
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ adcl $0, 48(%esp) # 4-byte Folded Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ adcl $0, 52(%esp) # 4-byte Folded Spill
+ adcl $0, 20(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl %ebx, %ebp
+ adcl $0, %ebp
+ movl 12(%esp), %ebx # 4-byte Reload
+ adcl $0, %ebx
+ movl %ecx, %edi
+ andl %eax, %edi
+ andl %edx, %edi
+ andl %esi, %edi
+ andl 28(%esp), %edi # 4-byte Folded Reload
+ andl 32(%esp), %edi # 4-byte Folded Reload
+ andl 36(%esp), %edi # 4-byte Folded Reload
+ andl 40(%esp), %edi # 4-byte Folded Reload
+ andl 44(%esp), %edi # 4-byte Folded Reload
+ andl 48(%esp), %edi # 4-byte Folded Reload
+ andl 24(%esp), %edi # 4-byte Folded Reload
+ andl 52(%esp), %edi # 4-byte Folded Reload
+ movl 20(%esp), %esi # 4-byte Reload
+ andl %esi, %edi
+ andl 56(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, %edx
+ movl 16(%esp), %ebx # 4-byte Reload
+ andl %ebp, %edi
+ movl %ebp, %eax
+ movl %edx, %ebp
+ orl $-512, %ebp # imm = 0xFFFFFFFFFFFFFE00
+ andl %edi, %ebp
+ andl %ebx, %ebp
+ cmpl $-1, %ebp
+ movl 80(%esp), %edi
+ je .LBB4_1
+# BB#3: # %nonzero
+ movl %ecx, (%edi)
+ movl %ebx, 4(%edi)
+ movl (%esp), %ecx # 4-byte Reload
+ movl %ecx, 8(%edi)
+ movl 4(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%edi)
+ movl 8(%esp), %ecx # 4-byte Reload
+ movl %ecx, 16(%edi)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 20(%edi)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%edi)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%edi)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%edi)
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%edi)
+ movl 48(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%edi)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%edi)
+ movl 52(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%edi)
+ movl %esi, 52(%edi)
+ movl 56(%esp), %ecx # 4-byte Reload
+ movl %ecx, 56(%edi)
+ movl %eax, 60(%edi)
+ andl $511, %edx # imm = 0x1FF
+ movl %edx, 64(%edi)
+ jmp .LBB4_2
+.LBB4_1: # %zero
+ xorl %eax, %eax
+ movl $17, %ecx
+ rep;stosl
+.LBB4_2: # %zero
+ addl $60, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end4:
+ .size mcl_fpDbl_mod_NIST_P521Lbmi2, .Lfunc_end4-mcl_fpDbl_mod_NIST_P521Lbmi2
+
+ .globl mcl_fp_mulUnitPre1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre1Lbmi2,@function
+mcl_fp_mulUnitPre1Lbmi2: # @mcl_fp_mulUnitPre1Lbmi2
+# BB#0:
+ movl 8(%esp), %eax
+ movl (%eax), %edx
+ mulxl 12(%esp), %ecx, %eax
+ movl 4(%esp), %edx
+ movl %ecx, (%edx)
+ movl %eax, 4(%edx)
+ retl
+.Lfunc_end5:
+ .size mcl_fp_mulUnitPre1Lbmi2, .Lfunc_end5-mcl_fp_mulUnitPre1Lbmi2
+
+ .globl mcl_fpDbl_mulPre1Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre1Lbmi2,@function
+mcl_fpDbl_mulPre1Lbmi2: # @mcl_fpDbl_mulPre1Lbmi2
+# BB#0:
+ movl 12(%esp), %eax
+ movl (%eax), %edx
+ movl 8(%esp), %eax
+ mulxl (%eax), %ecx, %eax
+ movl 4(%esp), %edx
+ movl %ecx, (%edx)
+ movl %eax, 4(%edx)
+ retl
+.Lfunc_end6:
+ .size mcl_fpDbl_mulPre1Lbmi2, .Lfunc_end6-mcl_fpDbl_mulPre1Lbmi2
+
+ .globl mcl_fpDbl_sqrPre1Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre1Lbmi2,@function
+mcl_fpDbl_sqrPre1Lbmi2: # @mcl_fpDbl_sqrPre1Lbmi2
+# BB#0:
+ movl 8(%esp), %eax
+ movl (%eax), %edx
+ mulxl %edx, %ecx, %eax
+ movl 4(%esp), %edx
+ movl %ecx, (%edx)
+ movl %eax, 4(%edx)
+ retl
+.Lfunc_end7:
+ .size mcl_fpDbl_sqrPre1Lbmi2, .Lfunc_end7-mcl_fpDbl_sqrPre1Lbmi2
+
+ .globl mcl_fp_mont1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont1Lbmi2,@function
+mcl_fp_mont1Lbmi2: # @mcl_fp_mont1Lbmi2
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %eax
+ movl (%eax), %edx
+ movl 20(%esp), %eax
+ mulxl (%eax), %esi, %ecx
+ movl 24(%esp), %eax
+ movl -4(%eax), %edx
+ imull %esi, %edx
+ movl (%eax), %edi
+ mulxl %edi, %edx, %eax
+ addl %esi, %edx
+ adcl %ecx, %eax
+ sbbl %edx, %edx
+ andl $1, %edx
+ movl %eax, %ecx
+ subl %edi, %ecx
+ sbbl $0, %edx
+ testb $1, %dl
+ jne .LBB8_2
+# BB#1:
+ movl %ecx, %eax
+.LBB8_2:
+ movl 12(%esp), %ecx
+ movl %eax, (%ecx)
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end8:
+ .size mcl_fp_mont1Lbmi2, .Lfunc_end8-mcl_fp_mont1Lbmi2
+
+ .globl mcl_fp_montNF1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF1Lbmi2,@function
+mcl_fp_montNF1Lbmi2: # @mcl_fp_montNF1Lbmi2
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %eax
+ movl (%eax), %edx
+ movl 20(%esp), %eax
+ mulxl (%eax), %esi, %ecx
+ movl 24(%esp), %eax
+ movl -4(%eax), %edx
+ imull %esi, %edx
+ movl (%eax), %edi
+ mulxl %edi, %edx, %eax
+ addl %esi, %edx
+ adcl %ecx, %eax
+ movl %eax, %ecx
+ subl %edi, %ecx
+ js .LBB9_2
+# BB#1:
+ movl %ecx, %eax
+.LBB9_2:
+ movl 12(%esp), %ecx
+ movl %eax, (%ecx)
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end9:
+ .size mcl_fp_montNF1Lbmi2, .Lfunc_end9-mcl_fp_montNF1Lbmi2
+
+ .globl mcl_fp_montRed1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed1Lbmi2,@function
+mcl_fp_montRed1Lbmi2: # @mcl_fp_montRed1Lbmi2
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %ecx
+ movl (%ecx), %esi
+ movl 20(%esp), %eax
+ movl -4(%eax), %edx
+ imull %esi, %edx
+ movl (%eax), %edi
+ mulxl %edi, %edx, %eax
+ addl %esi, %edx
+ adcl 4(%ecx), %eax
+ sbbl %edx, %edx
+ andl $1, %edx
+ movl %eax, %ecx
+ subl %edi, %ecx
+ sbbl $0, %edx
+ testb $1, %dl
+ jne .LBB10_2
+# BB#1:
+ movl %ecx, %eax
+.LBB10_2:
+ movl 12(%esp), %ecx
+ movl %eax, (%ecx)
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end10:
+ .size mcl_fp_montRed1Lbmi2, .Lfunc_end10-mcl_fp_montRed1Lbmi2
+
+ .globl mcl_fp_addPre1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre1Lbmi2,@function
+mcl_fp_addPre1Lbmi2: # @mcl_fp_addPre1Lbmi2
+# BB#0:
+ movl 12(%esp), %eax
+ movl (%eax), %eax
+ movl 4(%esp), %ecx
+ movl 8(%esp), %edx
+ addl (%edx), %eax
+ movl %eax, (%ecx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ retl
+.Lfunc_end11:
+ .size mcl_fp_addPre1Lbmi2, .Lfunc_end11-mcl_fp_addPre1Lbmi2
+
+ .globl mcl_fp_subPre1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre1Lbmi2,@function
+mcl_fp_subPre1Lbmi2: # @mcl_fp_subPre1Lbmi2
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %ecx
+ xorl %eax, %eax
+ movl 8(%esp), %edx
+ movl 16(%esp), %esi
+ subl (%esi), %ecx
+ movl %ecx, (%edx)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ retl
+.Lfunc_end12:
+ .size mcl_fp_subPre1Lbmi2, .Lfunc_end12-mcl_fp_subPre1Lbmi2
+
+ .globl mcl_fp_shr1_1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_1Lbmi2,@function
+mcl_fp_shr1_1Lbmi2: # @mcl_fp_shr1_1Lbmi2
+# BB#0:
+ movl 8(%esp), %eax
+ movl (%eax), %eax
+ shrl %eax
+ movl 4(%esp), %ecx
+ movl %eax, (%ecx)
+ retl
+.Lfunc_end13:
+ .size mcl_fp_shr1_1Lbmi2, .Lfunc_end13-mcl_fp_shr1_1Lbmi2
+
+ .globl mcl_fp_add1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add1Lbmi2,@function
+mcl_fp_add1Lbmi2: # @mcl_fp_add1Lbmi2
+# BB#0:
+ pushl %esi
+ movl 16(%esp), %eax
+ movl (%eax), %eax
+ movl 8(%esp), %ecx
+ movl 12(%esp), %edx
+ addl (%edx), %eax
+ movl %eax, (%ecx)
+ sbbl %edx, %edx
+ andl $1, %edx
+ movl 20(%esp), %esi
+ subl (%esi), %eax
+ sbbl $0, %edx
+ testb $1, %dl
+ jne .LBB14_2
+# BB#1: # %nocarry
+ movl %eax, (%ecx)
+.LBB14_2: # %carry
+ popl %esi
+ retl
+.Lfunc_end14:
+ .size mcl_fp_add1Lbmi2, .Lfunc_end14-mcl_fp_add1Lbmi2
+
+ .globl mcl_fp_addNF1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF1Lbmi2,@function
+mcl_fp_addNF1Lbmi2: # @mcl_fp_addNF1Lbmi2
+# BB#0:
+ movl 12(%esp), %eax
+ movl (%eax), %eax
+ movl 8(%esp), %ecx
+ addl (%ecx), %eax
+ movl 16(%esp), %edx
+ movl %eax, %ecx
+ subl (%edx), %ecx
+ js .LBB15_2
+# BB#1:
+ movl %ecx, %eax
+.LBB15_2:
+ movl 4(%esp), %ecx
+ movl %eax, (%ecx)
+ retl
+.Lfunc_end15:
+ .size mcl_fp_addNF1Lbmi2, .Lfunc_end15-mcl_fp_addNF1Lbmi2
+
+ .globl mcl_fp_sub1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub1Lbmi2,@function
+mcl_fp_sub1Lbmi2: # @mcl_fp_sub1Lbmi2
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %eax
+ xorl %edx, %edx
+ movl 8(%esp), %ecx
+ movl 16(%esp), %esi
+ subl (%esi), %eax
+ movl %eax, (%ecx)
+ sbbl $0, %edx
+ testb $1, %dl
+ jne .LBB16_2
+# BB#1: # %nocarry
+ popl %esi
+ retl
+.LBB16_2: # %carry
+ movl 20(%esp), %edx
+ addl (%edx), %eax
+ movl %eax, (%ecx)
+ popl %esi
+ retl
+.Lfunc_end16:
+ .size mcl_fp_sub1Lbmi2, .Lfunc_end16-mcl_fp_sub1Lbmi2
+
+ .globl mcl_fp_subNF1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF1Lbmi2,@function
+mcl_fp_subNF1Lbmi2: # @mcl_fp_subNF1Lbmi2
+# BB#0:
+ movl 8(%esp), %eax
+ movl (%eax), %eax
+ movl 12(%esp), %ecx
+ subl (%ecx), %eax
+ movl %eax, %ecx
+ sarl $31, %ecx
+ movl 16(%esp), %edx
+ andl (%edx), %ecx
+ addl %eax, %ecx
+ movl 4(%esp), %eax
+ movl %ecx, (%eax)
+ retl
+.Lfunc_end17:
+ .size mcl_fp_subNF1Lbmi2, .Lfunc_end17-mcl_fp_subNF1Lbmi2
+
+ .globl mcl_fpDbl_add1Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add1Lbmi2,@function
+mcl_fpDbl_add1Lbmi2: # @mcl_fpDbl_add1Lbmi2
+# BB#0:
+ pushl %ebx
+ pushl %esi
+ movl 20(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %eax
+ movl 16(%esp), %esi
+ addl (%esi), %edx
+ movl 12(%esp), %ecx
+ adcl 4(%esi), %eax
+ movl %edx, (%ecx)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 24(%esp), %esi
+ movl %eax, %edx
+ subl (%esi), %edx
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB18_2
+# BB#1:
+ movl %edx, %eax
+.LBB18_2:
+ movl %eax, 4(%ecx)
+ popl %esi
+ popl %ebx
+ retl
+.Lfunc_end18:
+ .size mcl_fpDbl_add1Lbmi2, .Lfunc_end18-mcl_fpDbl_add1Lbmi2
+
+ .globl mcl_fpDbl_sub1Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub1Lbmi2,@function
+mcl_fpDbl_sub1Lbmi2: # @mcl_fpDbl_sub1Lbmi2
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %esi
+ movl 4(%eax), %eax
+ xorl %ecx, %ecx
+ movl 16(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %eax
+ movl 8(%esp), %edx
+ movl %esi, (%edx)
+ sbbl $0, %ecx
+ andl $1, %ecx
+ je .LBB19_2
+# BB#1:
+ movl 20(%esp), %ecx
+ movl (%ecx), %ecx
+.LBB19_2:
+ addl %eax, %ecx
+ movl %ecx, 4(%edx)
+ popl %esi
+ retl
+.Lfunc_end19:
+ .size mcl_fpDbl_sub1Lbmi2, .Lfunc_end19-mcl_fpDbl_sub1Lbmi2
+
+ .globl mcl_fp_mulUnitPre2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre2Lbmi2,@function
+mcl_fp_mulUnitPre2Lbmi2: # @mcl_fp_mulUnitPre2Lbmi2
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 20(%esp), %edx
+ movl 16(%esp), %eax
+ mulxl 4(%eax), %ecx, %esi
+ mulxl (%eax), %eax, %edx
+ movl 12(%esp), %edi
+ movl %eax, (%edi)
+ addl %ecx, %edx
+ movl %edx, 4(%edi)
+ adcl $0, %esi
+ movl %esi, 8(%edi)
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end20:
+ .size mcl_fp_mulUnitPre2Lbmi2, .Lfunc_end20-mcl_fp_mulUnitPre2Lbmi2
+
+ .globl mcl_fpDbl_mulPre2Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre2Lbmi2,@function
+mcl_fpDbl_mulPre2Lbmi2: # @mcl_fpDbl_mulPre2Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %eax
+ movl 4(%ecx), %ecx
+ movl 28(%esp), %esi
+ movl (%esi), %edi
+ movl %ecx, %edx
+ mulxl %edi, %ebx, %ebp
+ movl %eax, %edx
+ mulxl %edi, %edx, %edi
+ addl %ebx, %edi
+ movl 20(%esp), %ebx
+ movl %edx, (%ebx)
+ adcl $0, %ebp
+ movl 4(%esi), %esi
+ movl %eax, %edx
+ mulxl %esi, %eax, %ebx
+ addl %edi, %eax
+ movl %ecx, %edx
+ mulxl %esi, %edx, %ecx
+ adcl %ebp, %edx
+ sbbl %esi, %esi
+ andl $1, %esi
+ addl %ebx, %edx
+ movl 20(%esp), %edi
+ movl %eax, 4(%edi)
+ movl %edx, 8(%edi)
+ adcl %ecx, %esi
+ movl %esi, 12(%edi)
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end21:
+ .size mcl_fpDbl_mulPre2Lbmi2, .Lfunc_end21-mcl_fpDbl_mulPre2Lbmi2
+
+ .globl mcl_fpDbl_sqrPre2Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre2Lbmi2,@function
+mcl_fpDbl_sqrPre2Lbmi2: # @mcl_fpDbl_sqrPre2Lbmi2
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 20(%esp), %ecx
+ movl (%ecx), %eax
+ movl 4(%ecx), %ecx
+ movl 16(%esp), %esi
+ movl %eax, %edx
+ mulxl %eax, %edx, %edi
+ movl %edx, (%esi)
+ movl %ecx, %edx
+ mulxl %eax, %edx, %eax
+ addl %edx, %edi
+ movl %eax, %ebx
+ adcl $0, %ebx
+ addl %edx, %edi
+ movl %ecx, %edx
+ mulxl %ecx, %edx, %ecx
+ adcl %ebx, %edx
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ addl %eax, %edx
+ movl %edi, 4(%esi)
+ movl %edx, 8(%esi)
+ adcl %ecx, %ebx
+ movl %ebx, 12(%esi)
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end22:
+ .size mcl_fpDbl_sqrPre2Lbmi2, .Lfunc_end22-mcl_fpDbl_sqrPre2Lbmi2
+
+ .globl mcl_fp_mont2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont2Lbmi2,@function
+mcl_fp_mont2Lbmi2: # @mcl_fp_mont2Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $24, %esp
+ movl 48(%esp), %eax
+ movl (%eax), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 4(%eax), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl 52(%esp), %eax
+ movl (%eax), %eax
+ mulxl %eax, %ecx, %esi
+ movl %edi, %edx
+ mulxl %eax, %edx, %edi
+ movl %edx, (%esp) # 4-byte Spill
+ addl %ecx, %edi
+ adcl $0, %esi
+ movl 56(%esp), %eax
+ movl -4(%eax), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ imull %ecx, %edx
+ movl (%eax), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 4(%eax), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ mulxl %eax, %ebp, %ecx
+ mulxl %ebx, %edx, %eax
+ addl %ebp, %eax
+ adcl $0, %ecx
+ addl (%esp), %edx # 4-byte Folded Reload
+ adcl %edi, %eax
+ adcl %esi, %ecx
+ movl 52(%esp), %edx
+ movl 4(%edx), %edx
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ mulxl 4(%esp), %esi, %ebp # 4-byte Folded Reload
+ movl %esi, 4(%esp) # 4-byte Spill
+ mulxl 8(%esp), %edi, %esi # 4-byte Folded Reload
+ addl 4(%esp), %esi # 4-byte Folded Reload
+ adcl $0, %ebp
+ addl %eax, %edi
+ adcl %ecx, %esi
+ adcl %ebx, %ebp
+ sbbl %ecx, %ecx
+ movl 12(%esp), %edx # 4-byte Reload
+ imull %edi, %edx
+ movl %edx, %eax
+ mulxl 16(%esp), %ebx, %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, %edx
+ mulxl 20(%esp), %edx, %eax # 4-byte Folded Reload
+ addl 12(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %eax
+ andl $1, %ecx
+ addl %edi, %ebx
+ adcl %esi, %edx
+ adcl %ebp, %eax
+ adcl $0, %ecx
+ movl %edx, %ebp
+ subl 16(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, %esi
+ sbbl 20(%esp), %esi # 4-byte Folded Reload
+ sbbl $0, %ecx
+ andl $1, %ecx
+ jne .LBB23_2
+# BB#1:
+ movl %ebp, %edx
+.LBB23_2:
+ movl 44(%esp), %edi
+ movl %edx, (%edi)
+ testb %cl, %cl
+ jne .LBB23_4
+# BB#3:
+ movl %esi, %eax
+.LBB23_4:
+ movl %eax, 4(%edi)
+ addl $24, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end23:
+ .size mcl_fp_mont2Lbmi2, .Lfunc_end23-mcl_fp_mont2Lbmi2
+
+ .globl mcl_fp_montNF2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF2Lbmi2,@function
+mcl_fp_montNF2Lbmi2: # @mcl_fp_montNF2Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $20, %esp
+ movl 44(%esp), %eax
+ movl (%eax), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 4(%eax), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl 48(%esp), %eax
+ movl (%eax), %eax
+ mulxl %eax, %edi, %ebp
+ movl %ecx, %edx
+ mulxl %eax, %ecx, %esi
+ addl %edi, %esi
+ adcl $0, %ebp
+ movl 52(%esp), %eax
+ movl -4(%eax), %ebx
+ movl %ecx, %edx
+ imull %ebx, %edx
+ movl (%eax), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ mulxl %eax, %edi, %eax
+ movl %eax, (%esp) # 4-byte Spill
+ addl %ecx, %edi
+ movl 52(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ mulxl %eax, %edi, %edx
+ adcl %esi, %edi
+ adcl $0, %ebp
+ addl (%esp), %edi # 4-byte Folded Reload
+ adcl %edx, %ebp
+ movl 48(%esp), %eax
+ movl 4(%eax), %edx
+ mulxl 4(%esp), %eax, %esi # 4-byte Folded Reload
+ movl %eax, 4(%esp) # 4-byte Spill
+ mulxl 8(%esp), %eax, %ecx # 4-byte Folded Reload
+ addl 4(%esp), %ecx # 4-byte Folded Reload
+ adcl $0, %esi
+ addl %edi, %eax
+ adcl %ebp, %ecx
+ adcl $0, %esi
+ imull %eax, %ebx
+ movl %ebx, %edx
+ movl 16(%esp), %ebp # 4-byte Reload
+ mulxl %ebp, %edx, %edi
+ addl %eax, %edx
+ movl %ebx, %edx
+ movl 12(%esp), %ebx # 4-byte Reload
+ mulxl %ebx, %eax, %edx
+ adcl %ecx, %eax
+ adcl $0, %esi
+ addl %edi, %eax
+ adcl %edx, %esi
+ movl %eax, %edx
+ subl %ebp, %edx
+ movl %esi, %ecx
+ sbbl %ebx, %ecx
+ testl %ecx, %ecx
+ js .LBB24_2
+# BB#1:
+ movl %edx, %eax
+.LBB24_2:
+ movl 40(%esp), %edx
+ movl %eax, (%edx)
+ js .LBB24_4
+# BB#3:
+ movl %ecx, %esi
+.LBB24_4:
+ movl %esi, 4(%edx)
+ addl $20, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end24:
+ .size mcl_fp_montNF2Lbmi2, .Lfunc_end24-mcl_fp_montNF2Lbmi2
+
+ .globl mcl_fp_montRed2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed2Lbmi2,@function
+mcl_fp_montRed2Lbmi2: # @mcl_fp_montRed2Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $16, %esp
+ movl 44(%esp), %esi
+ movl -4(%esi), %ecx
+ movl (%esi), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 40(%esp), %eax
+ movl (%eax), %ebx
+ movl %ebx, %edx
+ imull %ecx, %edx
+ movl 4(%esi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ mulxl %eax, %ebp, %esi
+ mulxl %edi, %edx, %eax
+ addl %ebp, %eax
+ adcl $0, %esi
+ addl %ebx, %edx
+ movl 40(%esp), %edi
+ movl 12(%edi), %edx
+ adcl 4(%edi), %eax
+ adcl 8(%edi), %esi
+ adcl $0, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ sbbl %ebx, %ebx
+ imull %eax, %ecx
+ movl %ecx, %edx
+ mulxl 8(%esp), %edi, %edx # 4-byte Folded Reload
+ movl %edx, (%esp) # 4-byte Spill
+ movl %ecx, %edx
+ mulxl 12(%esp), %edx, %ebp # 4-byte Folded Reload
+ addl (%esp), %edx # 4-byte Folded Reload
+ adcl $0, %ebp
+ andl $1, %ebx
+ addl %eax, %edi
+ adcl %esi, %edx
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %ebx
+ movl %edx, %edi
+ subl 8(%esp), %edi # 4-byte Folded Reload
+ movl %ebp, %ecx
+ sbbl 12(%esp), %ecx # 4-byte Folded Reload
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB25_2
+# BB#1:
+ movl %edi, %edx
+.LBB25_2:
+ movl 36(%esp), %esi
+ movl %edx, (%esi)
+ testb %bl, %bl
+ jne .LBB25_4
+# BB#3:
+ movl %ecx, %ebp
+.LBB25_4:
+ movl %ebp, 4(%esi)
+ addl $16, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end25:
+ .size mcl_fp_montRed2Lbmi2, .Lfunc_end25-mcl_fp_montRed2Lbmi2
+
+ .globl mcl_fp_addPre2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre2Lbmi2,@function
+mcl_fp_addPre2Lbmi2: # @mcl_fp_addPre2Lbmi2
+# BB#0:
+ pushl %esi
+ movl 16(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %eax
+ movl 12(%esp), %edx
+ addl (%edx), %ecx
+ movl 8(%esp), %esi
+ adcl 4(%edx), %eax
+ movl %ecx, (%esi)
+ movl %eax, 4(%esi)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ retl
+.Lfunc_end26:
+ .size mcl_fp_addPre2Lbmi2, .Lfunc_end26-mcl_fp_addPre2Lbmi2
+
+ .globl mcl_fp_subPre2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre2Lbmi2,@function
+mcl_fp_subPre2Lbmi2: # @mcl_fp_subPre2Lbmi2
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ xorl %eax, %eax
+ movl 16(%esp), %esi
+ subl (%esi), %ecx
+ sbbl 4(%esi), %edx
+ movl 8(%esp), %esi
+ movl %ecx, (%esi)
+ movl %edx, 4(%esi)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ retl
+.Lfunc_end27:
+ .size mcl_fp_subPre2Lbmi2, .Lfunc_end27-mcl_fp_subPre2Lbmi2
+
+ .globl mcl_fp_shr1_2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_2Lbmi2,@function
+mcl_fp_shr1_2Lbmi2: # @mcl_fp_shr1_2Lbmi2
+# BB#0:
+ movl 8(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %eax
+ shrdl $1, %eax, %ecx
+ movl 4(%esp), %edx
+ movl %ecx, (%edx)
+ shrl %eax
+ movl %eax, 4(%edx)
+ retl
+.Lfunc_end28:
+ .size mcl_fp_shr1_2Lbmi2, .Lfunc_end28-mcl_fp_shr1_2Lbmi2
+
+ .globl mcl_fp_add2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add2Lbmi2,@function
+mcl_fp_add2Lbmi2: # @mcl_fp_add2Lbmi2
+# BB#0:
+ pushl %ebx
+ pushl %esi
+ movl 20(%esp), %ecx
+ movl (%ecx), %eax
+ movl 4(%ecx), %ecx
+ movl 16(%esp), %esi
+ addl (%esi), %eax
+ movl 12(%esp), %edx
+ adcl 4(%esi), %ecx
+ movl %eax, (%edx)
+ movl %ecx, 4(%edx)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 24(%esp), %esi
+ subl (%esi), %eax
+ sbbl 4(%esi), %ecx
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB29_2
+# BB#1: # %nocarry
+ movl %eax, (%edx)
+ movl %ecx, 4(%edx)
+.LBB29_2: # %carry
+ popl %esi
+ popl %ebx
+ retl
+.Lfunc_end29:
+ .size mcl_fp_add2Lbmi2, .Lfunc_end29-mcl_fp_add2Lbmi2
+
+ .globl mcl_fp_addNF2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF2Lbmi2,@function
+mcl_fp_addNF2Lbmi2: # @mcl_fp_addNF2Lbmi2
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 20(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %eax
+ movl 16(%esp), %edx
+ addl (%edx), %ecx
+ adcl 4(%edx), %eax
+ movl 24(%esp), %edi
+ movl %ecx, %esi
+ subl (%edi), %esi
+ movl %eax, %edx
+ sbbl 4(%edi), %edx
+ testl %edx, %edx
+ js .LBB30_2
+# BB#1:
+ movl %esi, %ecx
+.LBB30_2:
+ movl 12(%esp), %esi
+ movl %ecx, (%esi)
+ js .LBB30_4
+# BB#3:
+ movl %edx, %eax
+.LBB30_4:
+ movl %eax, 4(%esi)
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end30:
+ .size mcl_fp_addNF2Lbmi2, .Lfunc_end30-mcl_fp_addNF2Lbmi2
+
+ .globl mcl_fp_sub2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub2Lbmi2,@function
+mcl_fp_sub2Lbmi2: # @mcl_fp_sub2Lbmi2
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 20(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %eax
+ xorl %ebx, %ebx
+ movl 24(%esp), %edx
+ subl (%edx), %ecx
+ sbbl 4(%edx), %eax
+ movl 16(%esp), %edx
+ movl %ecx, (%edx)
+ movl %eax, 4(%edx)
+ sbbl $0, %ebx
+ testb $1, %bl
+ je .LBB31_2
+# BB#1: # %carry
+ movl 28(%esp), %esi
+ movl 4(%esi), %edi
+ addl (%esi), %ecx
+ movl %ecx, (%edx)
+ adcl %eax, %edi
+ movl %edi, 4(%edx)
+.LBB31_2: # %nocarry
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end31:
+ .size mcl_fp_sub2Lbmi2, .Lfunc_end31-mcl_fp_sub2Lbmi2
+
+ .globl mcl_fp_subNF2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF2Lbmi2,@function
+mcl_fp_subNF2Lbmi2: # @mcl_fp_subNF2Lbmi2
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %eax
+ movl 20(%esp), %edx
+ subl (%edx), %ecx
+ sbbl 4(%edx), %eax
+ movl %eax, %edx
+ sarl $31, %edx
+ movl 24(%esp), %esi
+ movl 4(%esi), %edi
+ andl %edx, %edi
+ andl (%esi), %edx
+ addl %ecx, %edx
+ movl 12(%esp), %ecx
+ movl %edx, (%ecx)
+ adcl %eax, %edi
+ movl %edi, 4(%ecx)
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end32:
+ .size mcl_fp_subNF2Lbmi2, .Lfunc_end32-mcl_fp_subNF2Lbmi2
+
+ .globl mcl_fpDbl_add2Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add2Lbmi2,@function
+mcl_fpDbl_add2Lbmi2: # @mcl_fpDbl_add2Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 28(%esp), %edx
+ movl 12(%edx), %esi
+ movl 24(%esp), %edi
+ movl 12(%edi), %eax
+ movl 8(%edx), %ecx
+ movl (%edx), %ebx
+ movl 4(%edx), %ebp
+ addl (%edi), %ebx
+ adcl 4(%edi), %ebp
+ movl 20(%esp), %edx
+ adcl 8(%edi), %ecx
+ movl %ebx, (%edx)
+ movl %ebp, 4(%edx)
+ adcl %esi, %eax
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 32(%esp), %ebp
+ movl %ecx, %esi
+ subl (%ebp), %esi
+ movl %eax, %edi
+ sbbl 4(%ebp), %edi
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB33_2
+# BB#1:
+ movl %edi, %eax
+.LBB33_2:
+ testb %bl, %bl
+ jne .LBB33_4
+# BB#3:
+ movl %esi, %ecx
+.LBB33_4:
+ movl %ecx, 8(%edx)
+ movl %eax, 12(%edx)
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end33:
+ .size mcl_fpDbl_add2Lbmi2, .Lfunc_end33-mcl_fpDbl_add2Lbmi2
+
+ .globl mcl_fpDbl_sub2Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub2Lbmi2,@function
+mcl_fpDbl_sub2Lbmi2: # @mcl_fpDbl_sub2Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %ebx, %ebx
+ movl 28(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %eax
+ sbbl 8(%edx), %eax
+ movl 12(%edx), %ebp
+ movl 12(%ecx), %edx
+ movl 20(%esp), %ecx
+ movl %esi, (%ecx)
+ movl %edi, 4(%ecx)
+ sbbl %ebp, %edx
+ movl 32(%esp), %edi
+ movl (%edi), %esi
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB34_1
+# BB#2:
+ xorl %edi, %edi
+ jmp .LBB34_3
+.LBB34_1:
+ movl 4(%edi), %edi
+.LBB34_3:
+ testb %bl, %bl
+ jne .LBB34_5
+# BB#4:
+ xorl %esi, %esi
+.LBB34_5:
+ addl %eax, %esi
+ movl %esi, 8(%ecx)
+ adcl %edx, %edi
+ movl %edi, 12(%ecx)
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end34:
+ .size mcl_fpDbl_sub2Lbmi2, .Lfunc_end34-mcl_fpDbl_sub2Lbmi2
+
+ .globl mcl_fp_mulUnitPre3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre3Lbmi2,@function
+mcl_fp_mulUnitPre3Lbmi2: # @mcl_fp_mulUnitPre3Lbmi2
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %edx
+ movl 20(%esp), %eax
+ mulxl 4(%eax), %esi, %ecx
+ mulxl (%eax), %edi, %ebx
+ addl %esi, %ebx
+ mulxl 8(%eax), %eax, %edx
+ movl 16(%esp), %esi
+ movl %edi, (%esi)
+ movl %ebx, 4(%esi)
+ adcl %ecx, %eax
+ movl %eax, 8(%esi)
+ adcl $0, %edx
+ movl %edx, 12(%esi)
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end35:
+ .size mcl_fp_mulUnitPre3Lbmi2, .Lfunc_end35-mcl_fp_mulUnitPre3Lbmi2
+
+ .globl mcl_fpDbl_mulPre3Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre3Lbmi2,@function
+mcl_fpDbl_mulPre3Lbmi2: # @mcl_fpDbl_mulPre3Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $16, %esp
+ movl 40(%esp), %ecx
+ movl (%ecx), %eax
+ movl 4(%ecx), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %esi
+ movl (%esi), %edi
+ mulxl %edi, %ebx, %ebp
+ movl %eax, %edx
+ movl %eax, %esi
+ mulxl %edi, %edx, %eax
+ movl %edx, 4(%esp) # 4-byte Spill
+ addl %ebx, %eax
+ movl 8(%ecx), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ mulxl %edi, %ebx, %edi
+ adcl %ebp, %ebx
+ movl 36(%esp), %ecx
+ movl 4(%esp), %edx # 4-byte Reload
+ movl %edx, (%ecx)
+ adcl $0, %edi
+ movl 44(%esp), %ecx
+ movl 4(%ecx), %ebp
+ movl %esi, %edx
+ mulxl %ebp, %ecx, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ addl %eax, %ecx
+ movl 12(%esp), %edx # 4-byte Reload
+ mulxl %ebp, %eax, %edx
+ movl %edx, (%esp) # 4-byte Spill
+ adcl %ebx, %eax
+ movl 8(%esp), %edx # 4-byte Reload
+ mulxl %ebp, %ebx, %edx
+ adcl %edi, %ebx
+ sbbl %edi, %edi
+ andl $1, %edi
+ addl 4(%esp), %eax # 4-byte Folded Reload
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ adcl %edx, %edi
+ movl 36(%esp), %edx
+ movl %ecx, 4(%edx)
+ movl 44(%esp), %ecx
+ movl 8(%ecx), %ecx
+ movl %esi, %edx
+ mulxl %ecx, %ebp, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ addl %eax, %ebp
+ movl 12(%esp), %edx # 4-byte Reload
+ mulxl %ecx, %eax, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl %ebx, %eax
+ movl 8(%esp), %edx # 4-byte Reload
+ mulxl %ecx, %edx, %ecx
+ adcl %edi, %edx
+ sbbl %esi, %esi
+ andl $1, %esi
+ addl 4(%esp), %eax # 4-byte Folded Reload
+ adcl 12(%esp), %edx # 4-byte Folded Reload
+ movl 36(%esp), %edi
+ movl %ebp, 8(%edi)
+ movl %eax, 12(%edi)
+ movl %edx, 16(%edi)
+ adcl %ecx, %esi
+ movl %esi, 20(%edi)
+ addl $16, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end36:
+ .size mcl_fpDbl_mulPre3Lbmi2, .Lfunc_end36-mcl_fpDbl_mulPre3Lbmi2
+
+ .globl mcl_fpDbl_sqrPre3Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre3Lbmi2,@function
+mcl_fpDbl_sqrPre3Lbmi2: # @mcl_fpDbl_sqrPre3Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $20, %esp
+ movl 44(%esp), %edx
+ movl 8(%edx), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ movl (%edx), %ecx
+ movl 4(%edx), %esi
+ movl 40(%esp), %eax
+ movl %ecx, %edx
+ mulxl %ecx, %edx, %ebx
+ movl %edx, (%eax)
+ movl %esi, %edx
+ mulxl %ecx, %ebp, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ addl %ebp, %ebx
+ movl %edi, %edx
+ mulxl %ecx, %edx, %ecx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl %eax, %edi
+ adcl %edx, %edi
+ adcl $0, %ecx
+ addl %ebp, %ebx
+ movl %esi, %edx
+ mulxl %esi, %ebp, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ adcl %edi, %ebp
+ movl (%esp), %eax # 4-byte Reload
+ movl %eax, %edx
+ mulxl %esi, %edx, %esi
+ adcl %edx, %ecx
+ sbbl %edi, %edi
+ andl $1, %edi
+ addl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ adcl %esi, %edi
+ addl 12(%esp), %ebp # 4-byte Folded Reload
+ adcl %edx, %ecx
+ movl %eax, %edx
+ mulxl %eax, %edx, %eax
+ adcl %edi, %edx
+ sbbl %edi, %edi
+ andl $1, %edi
+ addl 16(%esp), %ecx # 4-byte Folded Reload
+ adcl %esi, %edx
+ movl 40(%esp), %esi
+ movl %ebx, 4(%esi)
+ movl %ebp, 8(%esi)
+ movl %ecx, 12(%esi)
+ movl %edx, 16(%esi)
+ adcl %eax, %edi
+ movl %edi, 20(%esi)
+ addl $20, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end37:
+ .size mcl_fpDbl_sqrPre3Lbmi2, .Lfunc_end37-mcl_fpDbl_sqrPre3Lbmi2
+
+ .globl mcl_fp_mont3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont3Lbmi2,@function
+mcl_fp_mont3Lbmi2: # @mcl_fp_mont3Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $44, %esp
+ movl 68(%esp), %eax
+ movl 8(%eax), %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx
+ movl (%ecx), %ecx
+ mulxl %ecx, %edx, %edi
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl (%eax), %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ movl 4(%eax), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ mulxl %ecx, %eax, %ebp
+ movl %esi, %edx
+ mulxl %ecx, %edx, %ebx
+ movl %edx, 4(%esp) # 4-byte Spill
+ addl %eax, %ebx
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 24(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 76(%esp), %esi
+ movl -4(%esi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ imull %eax, %edx
+ movl (%esi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 4(%esi), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ mulxl %ecx, %ecx, %edi
+ movl %edi, (%esp) # 4-byte Spill
+ mulxl %eax, %ebp, %edi
+ addl %ecx, %edi
+ movl 8(%esi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ mulxl %eax, %ecx, %esi
+ adcl (%esp), %ecx # 4-byte Folded Reload
+ adcl $0, %esi
+ addl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl %ebx, %edi
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 72(%esp), %eax
+ movl 4(%eax), %edx
+ mulxl 16(%esp), %ebx, %eax # 4-byte Folded Reload
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ mulxl 8(%esp), %ebx, %eax # 4-byte Folded Reload
+ movl %ebx, (%esp) # 4-byte Spill
+ mulxl 12(%esp), %ebx, %ebp # 4-byte Folded Reload
+ addl (%esp), %ebp # 4-byte Folded Reload
+ movl %eax, %edx
+ adcl 4(%esp), %edx # 4-byte Folded Reload
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %edi, %ebx
+ adcl %ecx, %ebp
+ adcl %esi, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ movl %eax, %edx
+ imull 20(%esp), %edx # 4-byte Folded Reload
+ mulxl 40(%esp), %ecx, %esi # 4-byte Folded Reload
+ movl %esi, (%esp) # 4-byte Spill
+ mulxl 36(%esp), %esi, %ebx # 4-byte Folded Reload
+ addl %ecx, %ebx
+ mulxl 32(%esp), %ecx, %edi # 4-byte Folded Reload
+ adcl (%esp), %ecx # 4-byte Folded Reload
+ adcl $0, %edi
+ movl 24(%esp), %edx # 4-byte Reload
+ andl $1, %edx
+ addl %eax, %esi
+ adcl %ebp, %ebx
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ adcl $0, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 72(%esp), %edx
+ movl 8(%edx), %edx
+ mulxl 16(%esp), %esi, %eax # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ mulxl 8(%esp), %eax, %ebp # 4-byte Folded Reload
+ movl %eax, 8(%esp) # 4-byte Spill
+ mulxl 12(%esp), %eax, %esi # 4-byte Folded Reload
+ addl 8(%esp), %esi # 4-byte Folded Reload
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl %ebx, %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ adcl %ecx, %esi
+ adcl %edi, %ebp
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ sbbl %ebx, %ebx
+ movl 20(%esp), %edx # 4-byte Reload
+ imull %eax, %edx
+ mulxl 36(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %edx, %eax
+ mulxl 40(%esp), %edi, %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ addl 20(%esp), %edi # 4-byte Folded Reload
+ movl %eax, %edx
+ mulxl 32(%esp), %edx, %eax # 4-byte Folded Reload
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %eax
+ andl $1, %ebx
+ addl 16(%esp), %ecx # 4-byte Folded Reload
+ adcl %esi, %edi
+ adcl %ebp, %edx
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ adcl $0, %ebx
+ movl %edi, %ebp
+ subl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %edx, %esi
+ sbbl 40(%esp), %esi # 4-byte Folded Reload
+ movl %eax, %ecx
+ sbbl 32(%esp), %ecx # 4-byte Folded Reload
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB38_2
+# BB#1:
+ movl %ebp, %edi
+.LBB38_2:
+ movl 64(%esp), %ebp
+ movl %edi, (%ebp)
+ testb %bl, %bl
+ jne .LBB38_4
+# BB#3:
+ movl %esi, %edx
+.LBB38_4:
+ movl %edx, 4(%ebp)
+ jne .LBB38_6
+# BB#5:
+ movl %ecx, %eax
+.LBB38_6:
+ movl %eax, 8(%ebp)
+ addl $44, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end38:
+ .size mcl_fp_mont3Lbmi2, .Lfunc_end38-mcl_fp_mont3Lbmi2
+
+ .globl mcl_fp_montNF3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF3Lbmi2,@function
+mcl_fp_montNF3Lbmi2: # @mcl_fp_montNF3Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $36, %esp
+ movl 60(%esp), %eax
+ movl (%eax), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl 4(%eax), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx
+ movl (%ecx), %ecx
+ mulxl %ecx, %esi, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl %ecx, %edi, %ebp
+ addl %esi, %ebp
+ movl 8(%eax), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ mulxl %ecx, %eax, %ebx
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ adcl $0, %ebx
+ movl 68(%esp), %esi
+ movl -4(%esi), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl %edi, %edx
+ imull %ecx, %edx
+ movl (%esi), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ mulxl %ecx, %esi, %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ addl %edi, %esi
+ movl 68(%esp), %esi
+ movl 4(%esi), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ mulxl %ecx, %edi, %ecx
+ adcl %ebp, %edi
+ movl 8(%esi), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ mulxl %esi, %ebp, %edx
+ adcl %eax, %ebp
+ adcl $0, %ebx
+ addl 4(%esp), %edi # 4-byte Folded Reload
+ adcl %ecx, %ebp
+ adcl %edx, %ebx
+ movl 64(%esp), %eax
+ movl 4(%eax), %edx
+ mulxl 12(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %ecx, (%esp) # 4-byte Spill
+ mulxl 16(%esp), %esi, %ecx # 4-byte Folded Reload
+ movl %esi, 4(%esp) # 4-byte Spill
+ addl %eax, %ecx
+ mulxl 8(%esp), %esi, %eax # 4-byte Folded Reload
+ adcl (%esp), %esi # 4-byte Folded Reload
+ adcl $0, %eax
+ movl 4(%esp), %edx # 4-byte Reload
+ addl %edi, %edx
+ adcl %ebp, %ecx
+ adcl %ebx, %esi
+ adcl $0, %eax
+ movl %edx, %ebp
+ imull 20(%esp), %edx # 4-byte Folded Reload
+ mulxl 32(%esp), %ebx, %edi # 4-byte Folded Reload
+ addl %ebp, %ebx
+ mulxl 28(%esp), %ebp, %ebx # 4-byte Folded Reload
+ adcl %ecx, %ebp
+ mulxl 24(%esp), %ecx, %edx # 4-byte Folded Reload
+ adcl %esi, %ecx
+ adcl $0, %eax
+ addl %edi, %ebp
+ adcl %ebx, %ecx
+ adcl %edx, %eax
+ movl 64(%esp), %edx
+ movl 8(%edx), %edx
+ mulxl 12(%esp), %esi, %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ mulxl 16(%esp), %ebx, %edi # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ addl %esi, %edi
+ mulxl 8(%esp), %ebx, %esi # 4-byte Folded Reload
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ adcl $0, %esi
+ addl %ebp, 16(%esp) # 4-byte Folded Spill
+ adcl %ecx, %edi
+ adcl %eax, %ebx
+ adcl $0, %esi
+ movl 20(%esp), %edx # 4-byte Reload
+ movl 16(%esp), %ecx # 4-byte Reload
+ imull %ecx, %edx
+ mulxl 32(%esp), %eax, %ebp # 4-byte Folded Reload
+ movl %ebp, 20(%esp) # 4-byte Spill
+ addl %ecx, %eax
+ movl %edx, %eax
+ mulxl 28(%esp), %ecx, %ebp # 4-byte Folded Reload
+ adcl %edi, %ecx
+ mulxl 24(%esp), %eax, %edx # 4-byte Folded Reload
+ adcl %ebx, %eax
+ adcl $0, %esi
+ addl 20(%esp), %ecx # 4-byte Folded Reload
+ adcl %ebp, %eax
+ adcl %edx, %esi
+ movl %ecx, %ebp
+ subl 32(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, %edi
+ sbbl 28(%esp), %edi # 4-byte Folded Reload
+ movl %esi, %edx
+ sbbl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %ebx
+ sarl $31, %ebx
+ testl %ebx, %ebx
+ js .LBB39_2
+# BB#1:
+ movl %ebp, %ecx
+.LBB39_2:
+ movl 56(%esp), %ebx
+ movl %ecx, (%ebx)
+ js .LBB39_4
+# BB#3:
+ movl %edi, %eax
+.LBB39_4:
+ movl %eax, 4(%ebx)
+ js .LBB39_6
+# BB#5:
+ movl %edx, %esi
+.LBB39_6:
+ movl %esi, 8(%ebx)
+ addl $36, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end39:
+ .size mcl_fp_montNF3Lbmi2, .Lfunc_end39-mcl_fp_montNF3Lbmi2
+
+ .globl mcl_fp_montRed3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed3Lbmi2,@function
+mcl_fp_montRed3Lbmi2: # @mcl_fp_montRed3Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $36, %esp
+ movl 64(%esp), %ecx
+ movl -4(%ecx), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl (%ecx), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %eax
+ movl (%eax), %ebx
+ movl %ebx, %edx
+ imull %edi, %edx
+ movl 8(%ecx), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 4(%ecx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ mulxl %edi, %edi, %eax
+ movl %edi, 16(%esp) # 4-byte Spill
+ mulxl %ecx, %ebp, %edi
+ mulxl %esi, %edx, %ecx
+ addl %ebp, %ecx
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ adcl $0, %eax
+ addl %ebx, %edx
+ movl 60(%esp), %edx
+ adcl 4(%edx), %ecx
+ adcl 8(%edx), %edi
+ adcl 12(%edx), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 20(%edx), %eax
+ movl 16(%edx), %edx
+ adcl $0, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl %ecx, %edx
+ imull 20(%esp), %edx # 4-byte Folded Reload
+ mulxl 28(%esp), %esi, %eax # 4-byte Folded Reload
+ movl %eax, (%esp) # 4-byte Spill
+ mulxl 24(%esp), %ebp, %eax # 4-byte Folded Reload
+ movl %ebp, 4(%esp) # 4-byte Spill
+ addl %esi, %eax
+ mulxl 32(%esp), %esi, %ebp # 4-byte Folded Reload
+ adcl (%esp), %esi # 4-byte Folded Reload
+ adcl $0, %ebp
+ addl %ecx, 4(%esp) # 4-byte Folded Spill
+ adcl %edi, %eax
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, 16(%esp) # 4-byte Folded Spill
+ adcl $0, %ebx
+ movl 20(%esp), %edx # 4-byte Reload
+ imull %eax, %edx
+ mulxl 24(%esp), %ecx, %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl %edx, %ecx
+ mulxl 28(%esp), %edi, %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ addl 8(%esp), %edi # 4-byte Folded Reload
+ movl %ecx, %edx
+ mulxl 32(%esp), %ecx, %edx # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ adcl $0, %edx
+ addl %eax, 20(%esp) # 4-byte Folded Spill
+ adcl %esi, %edi
+ adcl %ebp, %ecx
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %ebx
+ movl %edi, %ebp
+ subl 24(%esp), %ebp # 4-byte Folded Reload
+ movl %ecx, %esi
+ sbbl 28(%esp), %esi # 4-byte Folded Reload
+ movl %edx, %eax
+ sbbl 32(%esp), %eax # 4-byte Folded Reload
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB40_2
+# BB#1:
+ movl %ebp, %edi
+.LBB40_2:
+ movl 56(%esp), %ebp
+ movl %edi, (%ebp)
+ testb %bl, %bl
+ jne .LBB40_4
+# BB#3:
+ movl %esi, %ecx
+.LBB40_4:
+ movl %ecx, 4(%ebp)
+ jne .LBB40_6
+# BB#5:
+ movl %eax, %edx
+.LBB40_6:
+ movl %edx, 8(%ebp)
+ addl $36, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end40:
+ .size mcl_fp_montRed3Lbmi2, .Lfunc_end40-mcl_fp_montRed3Lbmi2
+
+ .globl mcl_fp_addPre3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre3Lbmi2,@function
+mcl_fp_addPre3Lbmi2: # @mcl_fp_addPre3Lbmi2
+# BB#0:
+ pushl %esi
+ movl 16(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ movl 12(%esp), %esi
+ addl (%esi), %ecx
+ adcl 4(%esi), %edx
+ movl 8(%eax), %eax
+ adcl 8(%esi), %eax
+ movl 8(%esp), %esi
+ movl %ecx, (%esi)
+ movl %edx, 4(%esi)
+ movl %eax, 8(%esi)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ retl
+.Lfunc_end41:
+ .size mcl_fp_addPre3Lbmi2, .Lfunc_end41-mcl_fp_addPre3Lbmi2
+
+ .globl mcl_fp_subPre3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre3Lbmi2,@function
+mcl_fp_subPre3Lbmi2: # @mcl_fp_subPre3Lbmi2
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %ecx
+ movl (%ecx), %edx
+ movl 4(%ecx), %esi
+ xorl %eax, %eax
+ movl 20(%esp), %edi
+ subl (%edi), %edx
+ sbbl 4(%edi), %esi
+ movl 8(%ecx), %ecx
+ sbbl 8(%edi), %ecx
+ movl 12(%esp), %edi
+ movl %edx, (%edi)
+ movl %esi, 4(%edi)
+ movl %ecx, 8(%edi)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end42:
+ .size mcl_fp_subPre3Lbmi2, .Lfunc_end42-mcl_fp_subPre3Lbmi2
+
+ .globl mcl_fp_shr1_3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_3Lbmi2,@function
+mcl_fp_shr1_3Lbmi2: # @mcl_fp_shr1_3Lbmi2
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl 8(%eax), %ecx
+ movl (%eax), %edx
+ movl 4(%eax), %eax
+ shrdl $1, %eax, %edx
+ movl 8(%esp), %esi
+ movl %edx, (%esi)
+ shrdl $1, %ecx, %eax
+ movl %eax, 4(%esi)
+ shrl %ecx
+ movl %ecx, 8(%esi)
+ popl %esi
+ retl
+.Lfunc_end43:
+ .size mcl_fp_shr1_3Lbmi2, .Lfunc_end43-mcl_fp_shr1_3Lbmi2
+
+ .globl mcl_fp_add3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add3Lbmi2,@function
+mcl_fp_add3Lbmi2: # @mcl_fp_add3Lbmi2
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %edx
+ movl (%edx), %eax
+ movl 4(%edx), %ecx
+ movl 20(%esp), %esi
+ addl (%esi), %eax
+ adcl 4(%esi), %ecx
+ movl 8(%edx), %edx
+ adcl 8(%esi), %edx
+ movl 16(%esp), %esi
+ movl %eax, (%esi)
+ movl %ecx, 4(%esi)
+ movl %edx, 8(%esi)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 28(%esp), %edi
+ subl (%edi), %eax
+ sbbl 4(%edi), %ecx
+ sbbl 8(%edi), %edx
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB44_2
+# BB#1: # %nocarry
+ movl %eax, (%esi)
+ movl %ecx, 4(%esi)
+ movl %edx, 8(%esi)
+.LBB44_2: # %carry
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end44:
+ .size mcl_fp_add3Lbmi2, .Lfunc_end44-mcl_fp_add3Lbmi2
+
+ .globl mcl_fp_addNF3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF3Lbmi2,@function
+mcl_fp_addNF3Lbmi2: # @mcl_fp_addNF3Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 28(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %ecx
+ movl 24(%esp), %esi
+ addl (%esi), %edx
+ adcl 4(%esi), %ecx
+ movl 8(%eax), %eax
+ adcl 8(%esi), %eax
+ movl 32(%esp), %ebp
+ movl %edx, %ebx
+ subl (%ebp), %ebx
+ movl %ecx, %edi
+ sbbl 4(%ebp), %edi
+ movl %eax, %esi
+ sbbl 8(%ebp), %esi
+ movl %esi, %ebp
+ sarl $31, %ebp
+ testl %ebp, %ebp
+ js .LBB45_2
+# BB#1:
+ movl %ebx, %edx
+.LBB45_2:
+ movl 20(%esp), %ebx
+ movl %edx, (%ebx)
+ js .LBB45_4
+# BB#3:
+ movl %edi, %ecx
+.LBB45_4:
+ movl %ecx, 4(%ebx)
+ js .LBB45_6
+# BB#5:
+ movl %esi, %eax
+.LBB45_6:
+ movl %eax, 8(%ebx)
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end45:
+ .size mcl_fp_addNF3Lbmi2, .Lfunc_end45-mcl_fp_addNF3Lbmi2
+
+ .globl mcl_fp_sub3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub3Lbmi2,@function
+mcl_fp_sub3Lbmi2: # @mcl_fp_sub3Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %edx
+ movl (%edx), %ecx
+ movl 4(%edx), %eax
+ xorl %ebx, %ebx
+ movl 28(%esp), %esi
+ subl (%esi), %ecx
+ sbbl 4(%esi), %eax
+ movl 8(%edx), %edx
+ sbbl 8(%esi), %edx
+ movl 20(%esp), %esi
+ movl %ecx, (%esi)
+ movl %eax, 4(%esi)
+ movl %edx, 8(%esi)
+ sbbl $0, %ebx
+ testb $1, %bl
+ je .LBB46_2
+# BB#1: # %carry
+ movl 32(%esp), %edi
+ movl 4(%edi), %ebx
+ movl 8(%edi), %ebp
+ addl (%edi), %ecx
+ movl %ecx, (%esi)
+ adcl %eax, %ebx
+ movl %ebx, 4(%esi)
+ adcl %edx, %ebp
+ movl %ebp, 8(%esi)
+.LBB46_2: # %nocarry
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end46:
+ .size mcl_fp_sub3Lbmi2, .Lfunc_end46-mcl_fp_sub3Lbmi2
+
+ .globl mcl_fp_subNF3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF3Lbmi2,@function
+mcl_fp_subNF3Lbmi2: # @mcl_fp_subNF3Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ movl 28(%esp), %esi
+ subl (%esi), %ecx
+ sbbl 4(%esi), %edx
+ movl 8(%eax), %eax
+ sbbl 8(%esi), %eax
+ movl %eax, %esi
+ sarl $31, %esi
+ movl %esi, %edi
+ shldl $1, %eax, %edi
+ movl 32(%esp), %ebx
+ andl (%ebx), %edi
+ movl 8(%ebx), %ebp
+ andl %esi, %ebp
+ andl 4(%ebx), %esi
+ addl %ecx, %edi
+ adcl %edx, %esi
+ movl 20(%esp), %ecx
+ movl %edi, (%ecx)
+ movl %esi, 4(%ecx)
+ adcl %eax, %ebp
+ movl %ebp, 8(%ecx)
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end47:
+ .size mcl_fp_subNF3Lbmi2, .Lfunc_end47-mcl_fp_subNF3Lbmi2
+
+ .globl mcl_fpDbl_add3Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add3Lbmi2,@function
+mcl_fpDbl_add3Lbmi2: # @mcl_fpDbl_add3Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ pushl %eax
+ movl 32(%esp), %esi
+ movl 20(%esi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 16(%esi), %edi
+ movl 12(%esi), %ebx
+ movl (%esi), %edx
+ movl 28(%esp), %eax
+ addl (%eax), %edx
+ movl 24(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 8(%esi), %edx
+ movl 4(%esi), %esi
+ adcl 4(%eax), %esi
+ adcl 8(%eax), %edx
+ movl %esi, 4(%ecx)
+ movl 20(%eax), %ebp
+ movl %edx, 8(%ecx)
+ movl 12(%eax), %esi
+ movl 16(%eax), %edx
+ adcl %ebx, %esi
+ adcl %edi, %edx
+ adcl (%esp), %ebp # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl 36(%esp), %ecx
+ movl %esi, %ebx
+ subl (%ecx), %ebx
+ movl %edx, %edi
+ sbbl 4(%ecx), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ movl %ebp, %ecx
+ movl 36(%esp), %edi
+ sbbl 8(%edi), %ecx
+ sbbl $0, %eax
+ andl $1, %eax
+ jne .LBB48_2
+# BB#1:
+ movl %ecx, %ebp
+.LBB48_2:
+ testb %al, %al
+ jne .LBB48_4
+# BB#3:
+ movl %ebx, %esi
+.LBB48_4:
+ movl 24(%esp), %eax
+ movl %esi, 12(%eax)
+ jne .LBB48_6
+# BB#5:
+ movl (%esp), %edx # 4-byte Reload
+.LBB48_6:
+ movl %edx, 16(%eax)
+ movl %ebp, 20(%eax)
+ addl $4, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end48:
+ .size mcl_fpDbl_add3Lbmi2, .Lfunc_end48-mcl_fpDbl_add3Lbmi2
+
+ .globl mcl_fpDbl_sub3Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub3Lbmi2,@function
+mcl_fpDbl_sub3Lbmi2: # @mcl_fpDbl_sub3Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %edx
+ movl 4(%ecx), %esi
+ movl 28(%esp), %ebx
+ subl (%ebx), %edx
+ sbbl 4(%ebx), %esi
+ movl 8(%ecx), %ebp
+ sbbl 8(%ebx), %ebp
+ movl 20(%esp), %eax
+ movl %edx, (%eax)
+ movl 12(%ecx), %edi
+ sbbl 12(%ebx), %edi
+ movl %esi, 4(%eax)
+ movl 16(%ecx), %esi
+ sbbl 16(%ebx), %esi
+ movl 20(%ebx), %ebx
+ movl 20(%ecx), %edx
+ movl %ebp, 8(%eax)
+ sbbl %ebx, %edx
+ movl $0, %ecx
+ sbbl $0, %ecx
+ andl $1, %ecx
+ movl 32(%esp), %ebp
+ jne .LBB49_1
+# BB#2:
+ xorl %ebx, %ebx
+ jmp .LBB49_3
+.LBB49_1:
+ movl 8(%ebp), %ebx
+.LBB49_3:
+ testb %cl, %cl
+ movl $0, %eax
+ jne .LBB49_4
+# BB#5:
+ xorl %ecx, %ecx
+ jmp .LBB49_6
+.LBB49_4:
+ movl (%ebp), %ecx
+ movl 4(%ebp), %eax
+.LBB49_6:
+ addl %edi, %ecx
+ adcl %esi, %eax
+ movl 20(%esp), %esi
+ movl %ecx, 12(%esi)
+ movl %eax, 16(%esi)
+ adcl %edx, %ebx
+ movl %ebx, 20(%esi)
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end49:
+ .size mcl_fpDbl_sub3Lbmi2, .Lfunc_end49-mcl_fpDbl_sub3Lbmi2
+
+ .globl mcl_fp_mulUnitPre4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre4Lbmi2,@function
+mcl_fp_mulUnitPre4Lbmi2: # @mcl_fp_mulUnitPre4Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 28(%esp), %edx
+ movl 24(%esp), %eax
+ mulxl 4(%eax), %esi, %ecx
+ mulxl (%eax), %edi, %ebx
+ addl %esi, %ebx
+ mulxl 8(%eax), %ebp, %esi
+ adcl %ecx, %ebp
+ mulxl 12(%eax), %eax, %ecx
+ movl 20(%esp), %edx
+ movl %edi, (%edx)
+ movl %ebx, 4(%edx)
+ movl %ebp, 8(%edx)
+ adcl %esi, %eax
+ movl %eax, 12(%edx)
+ adcl $0, %ecx
+ movl %ecx, 16(%edx)
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end50:
+ .size mcl_fp_mulUnitPre4Lbmi2, .Lfunc_end50-mcl_fp_mulUnitPre4Lbmi2
+
+ .globl mcl_fpDbl_mulPre4Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre4Lbmi2,@function
+mcl_fpDbl_mulPre4Lbmi2: # @mcl_fpDbl_mulPre4Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $28, %esp
+ movl 52(%esp), %eax
+ movl (%eax), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 4(%eax), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx
+ movl (%ecx), %ebp
+ mulxl %ebp, %esi, %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ movl %ebx, %ecx
+ mulxl %ebp, %edx, %ebx
+ movl %edx, 8(%esp) # 4-byte Spill
+ addl %esi, %ebx
+ movl 8(%eax), %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, %esi
+ mulxl %ebp, %eax, %edi
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl 12(%esi), %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ mulxl %ebp, %ebp, %esi
+ adcl %edi, %ebp
+ movl 48(%esp), %edx
+ movl 8(%esp), %edi # 4-byte Reload
+ movl %edi, (%edx)
+ adcl $0, %esi
+ movl 56(%esp), %edx
+ movl 4(%edx), %edi
+ movl %ecx, %edx
+ mulxl %edi, %ecx, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ addl %ebx, %ecx
+ movl 24(%esp), %edx # 4-byte Reload
+ mulxl %edi, %ebx, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl %eax, %ebx
+ movl 20(%esp), %edx # 4-byte Reload
+ mulxl %edi, %eax, %edx
+ movl %edx, (%esp) # 4-byte Spill
+ adcl %ebp, %eax
+ movl 16(%esp), %edx # 4-byte Reload
+ mulxl %edi, %edi, %edx
+ adcl %esi, %edi
+ sbbl %ebp, %ebp
+ andl $1, %ebp
+ addl 8(%esp), %ebx # 4-byte Folded Reload
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ adcl (%esp), %edi # 4-byte Folded Reload
+ adcl %edx, %ebp
+ movl 48(%esp), %edx
+ movl %ecx, 4(%edx)
+ movl 56(%esp), %ecx
+ movl 8(%ecx), %ecx
+ movl 12(%esp), %edx # 4-byte Reload
+ mulxl %ecx, %edx, %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ addl %ebx, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 24(%esp), %edx # 4-byte Reload
+ mulxl %ecx, %ebx, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl %eax, %ebx
+ movl 20(%esp), %edx # 4-byte Reload
+ mulxl %ecx, %esi, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ adcl %edi, %esi
+ movl 16(%esp), %edx # 4-byte Reload
+ mulxl %ecx, %edi, %eax
+ adcl %ebp, %edi
+ sbbl %ebp, %ebp
+ andl $1, %ebp
+ addl 8(%esp), %ebx # 4-byte Folded Reload
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ adcl %eax, %ebp
+ movl 48(%esp), %eax
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 8(%eax)
+ movl 56(%esp), %eax
+ movl 12(%eax), %edx
+ movl 52(%esp), %eax
+ mulxl (%eax), %ecx, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ addl %ebx, %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 52(%esp), %ebx
+ mulxl 4(%ebx), %ecx, %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ adcl %esi, %ecx
+ mulxl 8(%ebx), %eax, %esi
+ adcl %edi, %eax
+ mulxl 12(%ebx), %edi, %edx
+ adcl %ebp, %edi
+ sbbl %ebp, %ebp
+ andl $1, %ebp
+ addl 20(%esp), %ecx # 4-byte Folded Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ adcl %esi, %edi
+ movl 48(%esp), %esi
+ movl 24(%esp), %ebx # 4-byte Reload
+ movl %ebx, 12(%esi)
+ movl %ecx, 16(%esi)
+ movl %eax, 20(%esi)
+ movl %edi, 24(%esi)
+ adcl %edx, %ebp
+ movl %ebp, 28(%esi)
+ addl $28, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end51:
+ .size mcl_fpDbl_mulPre4Lbmi2, .Lfunc_end51-mcl_fpDbl_mulPre4Lbmi2
+
+ .globl mcl_fpDbl_sqrPre4Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre4Lbmi2,@function
+mcl_fpDbl_sqrPre4Lbmi2: # @mcl_fpDbl_sqrPre4Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $36, %esp
+ movl 60(%esp), %eax
+ movl (%eax), %esi
+ movl 4(%eax), %edi
+ movl 56(%esp), %ebx
+ movl %esi, %edx
+ mulxl %esi, %eax, %ebp
+ movl %eax, (%ebx)
+ movl %edi, %edx
+ mulxl %esi, %edx, %ecx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ addl %edx, %eax
+ movl 60(%esp), %edx
+ movl 8(%edx), %edx
+ movl %edx, (%esp) # 4-byte Spill
+ mulxl %esi, %edx, %ebx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ adcl %edx, %ebp
+ movl 60(%esp), %ecx
+ movl 12(%ecx), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ mulxl %esi, %esi, %ecx
+ adcl %ebx, %esi
+ adcl $0, %ecx
+ addl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl %edi, %ebx, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ adcl %ebp, %ebx
+ movl 32(%esp), %edx # 4-byte Reload
+ mulxl %edi, %ebp, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl (%esp), %edx # 4-byte Reload
+ mulxl %edi, %edi, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl %edi, %esi
+ adcl %ecx, %ebp
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ addl 16(%esp), %ebx # 4-byte Folded Reload
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ adcl %eax, %ebp
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ addl 20(%esp), %ebx # 4-byte Folded Reload
+ adcl %edi, %esi
+ mulxl %edx, %edi, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %edx, %eax
+ adcl %ebp, %edi
+ movl 32(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ebp, %edx
+ adcl %ecx, %ebp
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl 24(%esp), %esi # 4-byte Folded Reload
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ adcl %edx, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %eax
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%eax)
+ movl %ebx, 8(%eax)
+ movl 60(%esp), %eax
+ movl 12(%eax), %edx
+ mulxl (%eax), %ebx, %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ addl %esi, %ebx
+ mulxl 4(%eax), %esi, %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl %edi, %esi
+ mulxl 8(%eax), %ecx, %edi
+ adcl %ebp, %ecx
+ mulxl %edx, %ebp, %edx
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl 32(%esp), %esi # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ adcl %edi, %ebp
+ movl 56(%esp), %edi
+ movl %ebx, 12(%edi)
+ movl %esi, 16(%edi)
+ movl %ecx, 20(%edi)
+ movl %ebp, 24(%edi)
+ adcl %edx, %eax
+ movl %eax, 28(%edi)
+ addl $36, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end52:
+ .size mcl_fpDbl_sqrPre4Lbmi2, .Lfunc_end52-mcl_fpDbl_sqrPre4Lbmi2
+
+ .globl mcl_fp_mont4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont4Lbmi2,@function
+mcl_fp_mont4Lbmi2: # @mcl_fp_mont4Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $64, %esp
+ movl 88(%esp), %eax
+ movl 12(%eax), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx
+ movl (%ecx), %ecx
+ movl 8(%eax), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl (%eax), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 4(%eax), %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ mulxl %ecx, %eax, %ebp
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %esi, %edx
+ mulxl %ecx, %edx, %eax
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl %ecx, %ebx, %esi
+ movl %edi, %edx
+ mulxl %ecx, %edx, %ecx
+ movl %edx, 8(%esp) # 4-byte Spill
+ addl %ebx, %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 96(%esp), %ebx
+ movl -4(%ebx), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ imull %eax, %edx
+ movl (%ebx), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 4(%ebx), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ mulxl %ecx, %esi, %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ mulxl %eax, %ecx, %eax
+ addl %esi, %eax
+ movl %eax, %ebp
+ movl 8(%ebx), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ mulxl %eax, %edi, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ movl 12(%ebx), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ mulxl %eax, %esi, %ebx
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ adcl $0, %ebx
+ addl 8(%esp), %ecx # 4-byte Folded Reload
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl 4(%eax), %edx
+ mulxl 40(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ mulxl 36(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ mulxl 28(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, (%esp) # 4-byte Spill
+ mulxl 32(%esp), %ecx, %ebp # 4-byte Folded Reload
+ addl (%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl 4(%esp), %edx # 4-byte Folded Reload
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl %edi, %ebp
+ adcl %esi, 12(%esp) # 4-byte Folded Spill
+ adcl %ebx, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ imull 44(%esp), %edx # 4-byte Folded Reload
+ mulxl 60(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 56(%esp), %edi, %esi # 4-byte Folded Reload
+ movl %edi, 4(%esp) # 4-byte Spill
+ addl %ecx, %esi
+ mulxl 52(%esp), %ebx, %ecx # 4-byte Folded Reload
+ adcl %eax, %ebx
+ mulxl 48(%esp), %edi, %eax # 4-byte Folded Reload
+ adcl %ecx, %edi
+ adcl $0, %eax
+ movl 16(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ movl 4(%esp), %edx # 4-byte Reload
+ addl 8(%esp), %edx # 4-byte Folded Reload
+ adcl %ebp, %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 4(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 92(%esp), %edx
+ movl 8(%edx), %edx
+ mulxl 40(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ mulxl 36(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, (%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ mulxl 28(%esp), %esi, %eax # 4-byte Folded Reload
+ mulxl 32(%esp), %ebp, %ecx # 4-byte Folded Reload
+ addl %esi, %ecx
+ movl %ecx, %esi
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ecx
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ movl 24(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl %ebx, %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl %edi, %ecx
+ movl %ecx, %edi
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebp, %edx
+ imull 44(%esp), %edx # 4-byte Folded Reload
+ mulxl 60(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 56(%esp), %esi, %ebx # 4-byte Folded Reload
+ movl %esi, 8(%esp) # 4-byte Spill
+ addl %ecx, %ebx
+ mulxl 52(%esp), %ecx, %esi # 4-byte Folded Reload
+ movl %esi, 4(%esp) # 4-byte Spill
+ adcl %eax, %ecx
+ mulxl 48(%esp), %eax, %esi # 4-byte Folded Reload
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ adcl $0, %esi
+ movl 16(%esp), %edx # 4-byte Reload
+ andl $1, %edx
+ addl %ebp, 8(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ adcl %edi, %ecx
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ adcl $0, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 92(%esp), %edx
+ movl 12(%edx), %edx
+ mulxl 28(%esp), %ebp, %edi # 4-byte Folded Reload
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl %edi, 24(%esp) # 4-byte Spill
+ mulxl 32(%esp), %edi, %ebp # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ addl 28(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 28(%esp) # 4-byte Spill
+ mulxl 40(%esp), %ebp, %edi # 4-byte Folded Reload
+ movl %edi, 40(%esp) # 4-byte Spill
+ mulxl 36(%esp), %edi, %edx # 4-byte Folded Reload
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ adcl %ebp, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ movl 32(%esp), %ebp # 4-byte Reload
+ addl %ebx, %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ adcl %ecx, 28(%esp) # 4-byte Folded Spill
+ adcl %eax, %edi
+ adcl %esi, 36(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ sbbl %ecx, %ecx
+ movl 44(%esp), %edx # 4-byte Reload
+ imull %ebp, %edx
+ mulxl 56(%esp), %eax, %esi # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %edx, %ebx
+ mulxl 60(%esp), %ebp, %eax # 4-byte Folded Reload
+ addl %esi, %ebp
+ mulxl 52(%esp), %esi, %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl %eax, %esi
+ movl %ebx, %edx
+ mulxl 48(%esp), %edx, %eax # 4-byte Folded Reload
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %eax
+ andl $1, %ecx
+ movl 44(%esp), %ebx # 4-byte Reload
+ addl 32(%esp), %ebx # 4-byte Folded Reload
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ adcl %edi, %esi
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ adcl $0, %ecx
+ movl %ebp, %edi
+ subl 56(%esp), %edi # 4-byte Folded Reload
+ movl %esi, %ebx
+ sbbl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl %edx, %ebx
+ sbbl 52(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ sbbl 48(%esp), %ebx # 4-byte Folded Reload
+ sbbl $0, %ecx
+ andl $1, %ecx
+ jne .LBB53_2
+# BB#1:
+ movl %edi, %ebp
+.LBB53_2:
+ movl 84(%esp), %edi
+ movl %ebp, (%edi)
+ testb %cl, %cl
+ jne .LBB53_4
+# BB#3:
+ movl 56(%esp), %esi # 4-byte Reload
+.LBB53_4:
+ movl %esi, 4(%edi)
+ jne .LBB53_6
+# BB#5:
+ movl 60(%esp), %edx # 4-byte Reload
+.LBB53_6:
+ movl %edx, 8(%edi)
+ jne .LBB53_8
+# BB#7:
+ movl %ebx, %eax
+.LBB53_8:
+ movl %eax, 12(%edi)
+ addl $64, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end53:
+ .size mcl_fp_mont4Lbmi2, .Lfunc_end53-mcl_fp_mont4Lbmi2
+
+ .globl mcl_fp_montNF4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF4Lbmi2,@function
+mcl_fp_montNF4Lbmi2: # @mcl_fp_montNF4Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $52, %esp
+ movl 76(%esp), %esi
+ movl (%esi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 4(%esi), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx
+ movl (%ecx), %ecx
+ mulxl %ecx, %edi, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, %edx
+ mulxl %ecx, %ebp, %eax
+ movl %ebp, 40(%esp) # 4-byte Spill
+ addl %edi, %eax
+ movl 8(%esi), %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ mulxl %ecx, %ebx, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl 12(%esi), %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ mulxl %ecx, %esi, %edi
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ adcl $0, %edi
+ movl 84(%esp), %ecx
+ movl -4(%ecx), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl %ebp, %edx
+ imull %ecx, %edx
+ movl 84(%esp), %ecx
+ movl (%ecx), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ mulxl %ecx, %ecx, %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ addl 40(%esp), %ecx # 4-byte Folded Reload
+ movl 84(%esp), %ecx
+ movl 4(%ecx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ mulxl %ecx, %ecx, %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ adcl %eax, %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ mulxl %eax, %ecx, %eax
+ adcl %ebx, %ecx
+ movl %ecx, %ebp
+ movl 84(%esp), %ecx
+ movl 12(%ecx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ mulxl %ecx, %ebx, %edx
+ adcl %esi, %ebx
+ adcl $0, %edi
+ movl 8(%esp), %ecx # 4-byte Reload
+ addl %ecx, 12(%esp) # 4-byte Folded Spill
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 4(%esp) # 4-byte Spill
+ adcl %eax, %ebx
+ adcl %edx, %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 80(%esp), %eax
+ movl 4(%eax), %edx
+ mulxl 24(%esp), %esi, %edi # 4-byte Folded Reload
+ mulxl 28(%esp), %ecx, %eax # 4-byte Folded Reload
+ addl %esi, %eax
+ mulxl 16(%esp), %ebp, %esi # 4-byte Folded Reload
+ movl %esi, (%esp) # 4-byte Spill
+ adcl %edi, %ebp
+ mulxl 20(%esp), %edi, %esi # 4-byte Folded Reload
+ adcl (%esp), %edi # 4-byte Folded Reload
+ adcl $0, %esi
+ addl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, (%esp) # 4-byte Spill
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ adcl %ebx, %ebp
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ adcl $0, %esi
+ movl %ecx, %edx
+ imull 48(%esp), %edx # 4-byte Folded Reload
+ mulxl 44(%esp), %ebx, %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ addl (%esp), %ebx # 4-byte Folded Reload
+ mulxl 40(%esp), %ebx, %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl %eax, %ebx
+ movl %ebx, %eax
+ mulxl 36(%esp), %ebx, %ecx # 4-byte Folded Reload
+ adcl %ebp, %ebx
+ movl %ebx, %ebp
+ mulxl 32(%esp), %ebx, %edx # 4-byte Folded Reload
+ adcl %edi, %ebx
+ adcl $0, %esi
+ addl 12(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 4(%esp) # 4-byte Spill
+ adcl %ecx, %ebx
+ movl %ebx, 8(%esp) # 4-byte Spill
+ adcl %edx, %esi
+ movl 80(%esp), %ecx
+ movl 8(%ecx), %edx
+ mulxl 24(%esp), %ecx, %ebx # 4-byte Folded Reload
+ mulxl 28(%esp), %eax, %ebp # 4-byte Folded Reload
+ addl %ecx, %ebp
+ mulxl 16(%esp), %edi, %ecx # 4-byte Folded Reload
+ movl %ecx, (%esp) # 4-byte Spill
+ adcl %ebx, %edi
+ mulxl 20(%esp), %ebx, %ecx # 4-byte Folded Reload
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ adcl $0, %ecx
+ addl 12(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp) # 4-byte Spill
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ adcl %esi, %ebx
+ adcl $0, %ecx
+ movl %eax, %edx
+ imull 48(%esp), %edx # 4-byte Folded Reload
+ mulxl 44(%esp), %eax, %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ addl (%esp), %eax # 4-byte Folded Reload
+ mulxl 40(%esp), %eax, %esi # 4-byte Folded Reload
+ movl %esi, 4(%esp) # 4-byte Spill
+ adcl %ebp, %eax
+ mulxl 36(%esp), %ebp, %esi # 4-byte Folded Reload
+ movl %esi, (%esp) # 4-byte Spill
+ adcl %edi, %ebp
+ mulxl 32(%esp), %esi, %edx # 4-byte Folded Reload
+ adcl %ebx, %esi
+ adcl $0, %ecx
+ addl 12(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 8(%esp) # 4-byte Spill
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%esp) # 4-byte Spill
+ adcl (%esp), %esi # 4-byte Folded Reload
+ adcl %edx, %ecx
+ movl 80(%esp), %eax
+ movl 12(%eax), %edx
+ mulxl 24(%esp), %ebx, %ebp # 4-byte Folded Reload
+ mulxl 28(%esp), %edi, %eax # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ addl %ebx, %eax
+ mulxl 16(%esp), %edi, %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ adcl %ebp, %edi
+ mulxl 20(%esp), %ebp, %ebx # 4-byte Folded Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %ebx
+ movl 28(%esp), %edx # 4-byte Reload
+ addl 8(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ adcl %esi, %edi
+ adcl %ecx, %ebp
+ adcl $0, %ebx
+ movl 48(%esp), %edx # 4-byte Reload
+ imull 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ mulxl 44(%esp), %ecx, %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ addl 28(%esp), %ecx # 4-byte Folded Reload
+ mulxl 40(%esp), %esi, %ecx # 4-byte Folded Reload
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl %eax, %esi
+ movl 48(%esp), %edx # 4-byte Reload
+ mulxl 36(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %eax, 20(%esp) # 4-byte Spill
+ adcl %edi, %ecx
+ movl 48(%esp), %edx # 4-byte Reload
+ mulxl 32(%esp), %eax, %edx # 4-byte Folded Reload
+ adcl %ebp, %eax
+ adcl $0, %ebx
+ addl 24(%esp), %esi # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ adcl %edx, %ebx
+ movl %esi, %edi
+ subl 44(%esp), %edi # 4-byte Folded Reload
+ movl %ecx, %ebp
+ sbbl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, %edx
+ sbbl 36(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ sbbl 32(%esp), %edx # 4-byte Folded Reload
+ testl %edx, %edx
+ js .LBB54_2
+# BB#1:
+ movl %edi, %esi
+.LBB54_2:
+ movl 72(%esp), %edi
+ movl %esi, (%edi)
+ js .LBB54_4
+# BB#3:
+ movl %ebp, %ecx
+.LBB54_4:
+ movl %ecx, 4(%edi)
+ js .LBB54_6
+# BB#5:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB54_6:
+ movl %eax, 8(%edi)
+ js .LBB54_8
+# BB#7:
+ movl %edx, %ebx
+.LBB54_8:
+ movl %ebx, 12(%edi)
+ addl $52, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end54:
+ .size mcl_fp_montNF4Lbmi2, .Lfunc_end54-mcl_fp_montNF4Lbmi2
+
+ .globl mcl_fp_montRed4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed4Lbmi2,@function
+mcl_fp_montRed4Lbmi2: # @mcl_fp_montRed4Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $52, %esp
+ movl 80(%esp), %ecx
+ movl -4(%ecx), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl (%ecx), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp
+ movl (%ebp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ imull %eax, %edx
+ movl 12(%ecx), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 8(%ecx), %ebx
+ movl %ebx, 48(%esp) # 4-byte Spill
+ movl 4(%ecx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ mulxl %esi, %esi, %ecx
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl %ecx, 24(%esp) # 4-byte Spill
+ mulxl %ebx, %esi, %ecx
+ movl %esi, 12(%esp) # 4-byte Spill
+ movl %ecx, 20(%esp) # 4-byte Spill
+ mulxl %eax, %ebx, %ecx
+ mulxl %edi, %edx, %esi
+ addl %ebx, %esi
+ movl %ecx, %edi
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl 20(%esp), %ebx # 4-byte Reload
+ adcl 16(%esp), %ebx # 4-byte Folded Reload
+ movl 24(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 36(%esp), %edx # 4-byte Folded Reload
+ adcl 4(%ebp), %esi
+ adcl 8(%ebp), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ adcl 12(%ebp), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ adcl 16(%ebp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 28(%ebp), %ecx
+ movl 24(%ebp), %edx
+ movl 20(%ebp), %edi
+ adcl $0, %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl %esi, %edx
+ imull 40(%esp), %edx # 4-byte Folded Reload
+ mulxl %eax, %ebp, %edi
+ mulxl 44(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ addl %ebp, %eax
+ mulxl 48(%esp), %ebp, %ecx # 4-byte Folded Reload
+ movl %ecx, (%esp) # 4-byte Spill
+ adcl %edi, %ebp
+ movl 28(%esp), %ecx # 4-byte Reload
+ mulxl %ecx, %edi, %edx
+ adcl (%esp), %edi # 4-byte Folded Reload
+ adcl $0, %edx
+ addl %esi, 4(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ adcl 8(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl $0, 16(%esp) # 4-byte Folded Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ adcl $0, %ebx
+ movl %eax, %edx
+ imull 40(%esp), %edx # 4-byte Folded Reload
+ mulxl %ecx, %esi, %ecx
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl %ecx, 24(%esp) # 4-byte Spill
+ mulxl 32(%esp), %esi, %ecx # 4-byte Folded Reload
+ movl %esi, (%esp) # 4-byte Spill
+ movl %ecx, 4(%esp) # 4-byte Spill
+ mulxl 44(%esp), %esi, %ecx # 4-byte Folded Reload
+ movl %esi, 8(%esp) # 4-byte Spill
+ addl (%esp), %ecx # 4-byte Folded Reload
+ mulxl 48(%esp), %esi, %edx # 4-byte Folded Reload
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 24(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl %eax, 8(%esp) # 4-byte Folded Spill
+ adcl %ebp, %ecx
+ adcl %edi, %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 20(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ adcl $0, %ebx
+ movl 40(%esp), %edx # 4-byte Reload
+ imull %ecx, %edx
+ mulxl 44(%esp), %esi, %eax # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ mulxl 32(%esp), %ebp, %esi # 4-byte Folded Reload
+ addl %eax, %ebp
+ movl %edx, %eax
+ mulxl 48(%esp), %edi, %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl %esi, %edi
+ movl %eax, %edx
+ mulxl 28(%esp), %edx, %esi # 4-byte Folded Reload
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %esi
+ addl %ecx, 40(%esp) # 4-byte Folded Spill
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ adcl $0, %ebx
+ movl %ebp, %ecx
+ subl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %edi, %eax
+ sbbl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %edx, %eax
+ sbbl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %esi, %eax
+ sbbl 28(%esp), %eax # 4-byte Folded Reload
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB55_2
+# BB#1:
+ movl %ecx, %ebp
+.LBB55_2:
+ movl 72(%esp), %ecx
+ movl %ebp, (%ecx)
+ testb %bl, %bl
+ jne .LBB55_4
+# BB#3:
+ movl 44(%esp), %edi # 4-byte Reload
+.LBB55_4:
+ movl %edi, 4(%ecx)
+ jne .LBB55_6
+# BB#5:
+ movl 48(%esp), %edx # 4-byte Reload
+.LBB55_6:
+ movl %edx, 8(%ecx)
+ jne .LBB55_8
+# BB#7:
+ movl %eax, %esi
+.LBB55_8:
+ movl %esi, 12(%ecx)
+ addl $52, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end55:
+ .size mcl_fp_montRed4Lbmi2, .Lfunc_end55-mcl_fp_montRed4Lbmi2
+
+ .globl mcl_fp_addPre4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre4Lbmi2,@function
+mcl_fp_addPre4Lbmi2: # @mcl_fp_addPre4Lbmi2
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ movl 20(%esp), %esi
+ addl (%esi), %ecx
+ adcl 4(%esi), %edx
+ movl 12(%eax), %edi
+ movl 8(%eax), %eax
+ adcl 8(%esi), %eax
+ movl 12(%esi), %esi
+ movl 16(%esp), %ebx
+ movl %ecx, (%ebx)
+ movl %edx, 4(%ebx)
+ movl %eax, 8(%ebx)
+ adcl %edi, %esi
+ movl %esi, 12(%ebx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end56:
+ .size mcl_fp_addPre4Lbmi2, .Lfunc_end56-mcl_fp_addPre4Lbmi2
+
+ .globl mcl_fp_subPre4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre4Lbmi2,@function
+mcl_fp_subPre4Lbmi2: # @mcl_fp_subPre4Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %edx
+ movl 4(%ecx), %esi
+ xorl %eax, %eax
+ movl 28(%esp), %edi
+ subl (%edi), %edx
+ sbbl 4(%edi), %esi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edi), %ebx
+ movl 12(%edi), %edi
+ movl 12(%ecx), %ecx
+ movl 20(%esp), %ebp
+ movl %edx, (%ebp)
+ movl %esi, 4(%ebp)
+ movl %ebx, 8(%ebp)
+ sbbl %edi, %ecx
+ movl %ecx, 12(%ebp)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end57:
+ .size mcl_fp_subPre4Lbmi2, .Lfunc_end57-mcl_fp_subPre4Lbmi2
+
+ .globl mcl_fp_shr1_4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_4Lbmi2,@function
+mcl_fp_shr1_4Lbmi2: # @mcl_fp_shr1_4Lbmi2
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %eax
+ movl 12(%eax), %ecx
+ movl 8(%eax), %edx
+ movl (%eax), %esi
+ movl 4(%eax), %eax
+ shrdl $1, %eax, %esi
+ movl 12(%esp), %edi
+ movl %esi, (%edi)
+ shrdl $1, %edx, %eax
+ movl %eax, 4(%edi)
+ shrdl $1, %ecx, %edx
+ movl %edx, 8(%edi)
+ shrl %ecx
+ movl %ecx, 12(%edi)
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end58:
+ .size mcl_fp_shr1_4Lbmi2, .Lfunc_end58-mcl_fp_shr1_4Lbmi2
+
+ .globl mcl_fp_add4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add4Lbmi2,@function
+mcl_fp_add4Lbmi2: # @mcl_fp_add4Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 28(%esp), %edi
+ movl (%edi), %eax
+ movl 4(%edi), %ecx
+ movl 24(%esp), %esi
+ addl (%esi), %eax
+ adcl 4(%esi), %ecx
+ movl 8(%edi), %edx
+ adcl 8(%esi), %edx
+ movl 12(%esi), %esi
+ adcl 12(%edi), %esi
+ movl 20(%esp), %edi
+ movl %eax, (%edi)
+ movl %ecx, 4(%edi)
+ movl %edx, 8(%edi)
+ movl %esi, 12(%edi)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 32(%esp), %ebp
+ subl (%ebp), %eax
+ sbbl 4(%ebp), %ecx
+ sbbl 8(%ebp), %edx
+ sbbl 12(%ebp), %esi
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB59_2
+# BB#1: # %nocarry
+ movl %eax, (%edi)
+ movl %ecx, 4(%edi)
+ movl %edx, 8(%edi)
+ movl %esi, 12(%edi)
+.LBB59_2: # %carry
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end59:
+ .size mcl_fp_add4Lbmi2, .Lfunc_end59-mcl_fp_add4Lbmi2
+
+ .globl mcl_fp_addNF4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF4Lbmi2,@function
+mcl_fp_addNF4Lbmi2: # @mcl_fp_addNF4Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $8, %esp
+ movl 36(%esp), %edx
+ movl (%edx), %esi
+ movl 4(%edx), %ecx
+ movl 32(%esp), %edi
+ addl (%edi), %esi
+ adcl 4(%edi), %ecx
+ movl 12(%edx), %ebp
+ movl 8(%edx), %edx
+ adcl 8(%edi), %edx
+ adcl 12(%edi), %ebp
+ movl 40(%esp), %eax
+ movl %esi, %ebx
+ subl (%eax), %ebx
+ movl %ecx, %edi
+ sbbl 4(%eax), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl 40(%esp), %eax
+ sbbl 8(%eax), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl %ebp, %edi
+ movl 40(%esp), %eax
+ sbbl 12(%eax), %edi
+ testl %edi, %edi
+ js .LBB60_2
+# BB#1:
+ movl %ebx, %esi
+.LBB60_2:
+ movl 28(%esp), %ebx
+ movl %esi, (%ebx)
+ js .LBB60_4
+# BB#3:
+ movl (%esp), %ecx # 4-byte Reload
+.LBB60_4:
+ movl %ecx, 4(%ebx)
+ js .LBB60_6
+# BB#5:
+ movl 4(%esp), %edx # 4-byte Reload
+.LBB60_6:
+ movl %edx, 8(%ebx)
+ js .LBB60_8
+# BB#7:
+ movl %edi, %ebp
+.LBB60_8:
+ movl %ebp, 12(%ebx)
+ addl $8, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end60:
+ .size mcl_fp_addNF4Lbmi2, .Lfunc_end60-mcl_fp_addNF4Lbmi2
+
+ .globl mcl_fp_sub4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub4Lbmi2,@function
+mcl_fp_sub4Lbmi2: # @mcl_fp_sub4Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+ xorl %ebx, %ebx
+ movl 28(%esp), %edi
+ subl (%edi), %eax
+ sbbl 4(%edi), %ecx
+ movl 8(%esi), %edx
+ sbbl 8(%edi), %edx
+ movl 12(%esi), %esi
+ sbbl 12(%edi), %esi
+ movl 20(%esp), %edi
+ movl %eax, (%edi)
+ movl %ecx, 4(%edi)
+ movl %edx, 8(%edi)
+ movl %esi, 12(%edi)
+ sbbl $0, %ebx
+ testb $1, %bl
+ je .LBB61_2
+# BB#1: # %carry
+ movl 32(%esp), %ebx
+ addl (%ebx), %eax
+ movl 8(%ebx), %ebp
+ adcl 4(%ebx), %ecx
+ movl 12(%ebx), %ebx
+ movl %eax, (%edi)
+ movl %ecx, 4(%edi)
+ adcl %edx, %ebp
+ movl %ebp, 8(%edi)
+ adcl %esi, %ebx
+ movl %ebx, 12(%edi)
+.LBB61_2: # %nocarry
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end61:
+ .size mcl_fp_sub4Lbmi2, .Lfunc_end61-mcl_fp_sub4Lbmi2
+
+ .globl mcl_fp_subNF4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF4Lbmi2,@function
+mcl_fp_subNF4Lbmi2: # @mcl_fp_subNF4Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $8, %esp
+ movl 32(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %ecx
+ movl 36(%esp), %esi
+ subl (%esi), %edx
+ movl %edx, (%esp) # 4-byte Spill
+ sbbl 4(%esi), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 12(%eax), %edi
+ movl 8(%eax), %edx
+ sbbl 8(%esi), %edx
+ sbbl 12(%esi), %edi
+ movl %edi, %esi
+ sarl $31, %esi
+ movl 40(%esp), %eax
+ movl 12(%eax), %ebp
+ andl %esi, %ebp
+ movl 8(%eax), %ecx
+ andl %esi, %ecx
+ movl 40(%esp), %eax
+ movl 4(%eax), %eax
+ andl %esi, %eax
+ movl 40(%esp), %ebx
+ andl (%ebx), %esi
+ addl (%esp), %esi # 4-byte Folded Reload
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl 28(%esp), %ebx
+ movl %esi, (%ebx)
+ adcl %edx, %ecx
+ movl %eax, 4(%ebx)
+ movl %ecx, 8(%ebx)
+ adcl %edi, %ebp
+ movl %ebp, 12(%ebx)
+ addl $8, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end62:
+ .size mcl_fp_subNF4Lbmi2, .Lfunc_end62-mcl_fp_subNF4Lbmi2
+
+ .globl mcl_fpDbl_add4Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add4Lbmi2,@function
+mcl_fpDbl_add4Lbmi2: # @mcl_fpDbl_add4Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $12, %esp
+ movl 40(%esp), %eax
+ movl (%eax), %edi
+ movl 4(%eax), %edx
+ movl 36(%esp), %esi
+ addl (%esi), %edi
+ adcl 4(%esi), %edx
+ movl 8(%eax), %ebx
+ adcl 8(%esi), %ebx
+ movl 12(%esi), %ebp
+ movl 32(%esp), %ecx
+ movl %edi, (%ecx)
+ movl 16(%esi), %edi
+ adcl 12(%eax), %ebp
+ adcl 16(%eax), %edi
+ movl %edx, 4(%ecx)
+ movl 28(%eax), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %ebx, 8(%ecx)
+ movl 24(%eax), %ebx
+ movl 20(%eax), %eax
+ movl %ebp, 12(%ecx)
+ movl 20(%esi), %edx
+ adcl %eax, %edx
+ movl 28(%esi), %ecx
+ movl 24(%esi), %ebp
+ adcl %ebx, %ebp
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 44(%esp), %eax
+ movl %edi, %esi
+ subl (%eax), %esi
+ movl %esi, (%esp) # 4-byte Spill
+ movl %edx, %esi
+ sbbl 4(%eax), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ sbbl 8(%eax), %esi
+ sbbl 12(%eax), %ecx
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB63_2
+# BB#1:
+ movl %esi, %ebp
+.LBB63_2:
+ testb %bl, %bl
+ jne .LBB63_4
+# BB#3:
+ movl (%esp), %edi # 4-byte Reload
+.LBB63_4:
+ movl 32(%esp), %eax
+ movl %edi, 16(%eax)
+ jne .LBB63_6
+# BB#5:
+ movl 4(%esp), %edx # 4-byte Reload
+.LBB63_6:
+ movl %edx, 20(%eax)
+ movl %ebp, 24(%eax)
+ movl 8(%esp), %edx # 4-byte Reload
+ jne .LBB63_8
+# BB#7:
+ movl %ecx, %edx
+.LBB63_8:
+ movl %edx, 28(%eax)
+ addl $12, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end63:
+ .size mcl_fpDbl_add4Lbmi2, .Lfunc_end63-mcl_fpDbl_add4Lbmi2
+
+ .globl mcl_fpDbl_sub4Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub4Lbmi2,@function
+mcl_fpDbl_sub4Lbmi2: # @mcl_fpDbl_sub4Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ pushl %eax
+ movl 28(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 32(%esp), %ebp
+ subl (%ebp), %edx
+ sbbl 4(%ebp), %esi
+ movl 8(%eax), %ebx
+ sbbl 8(%ebp), %ebx
+ movl 24(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 12(%eax), %edx
+ sbbl 12(%ebp), %edx
+ movl %esi, 4(%ecx)
+ movl 16(%eax), %edi
+ sbbl 16(%ebp), %edi
+ movl %ebx, 8(%ecx)
+ movl 20(%ebp), %esi
+ movl %edx, 12(%ecx)
+ movl 20(%eax), %ebx
+ sbbl %esi, %ebx
+ movl 24(%ebp), %edx
+ movl 24(%eax), %esi
+ sbbl %edx, %esi
+ movl 28(%ebp), %edx
+ movl 28(%eax), %eax
+ sbbl %edx, %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl $0, %edx
+ sbbl $0, %edx
+ andl $1, %edx
+ movl 36(%esp), %ecx
+ movl (%ecx), %eax
+ jne .LBB64_1
+# BB#2:
+ xorl %ebp, %ebp
+ jmp .LBB64_3
+.LBB64_1:
+ movl 4(%ecx), %ebp
+.LBB64_3:
+ testb %dl, %dl
+ jne .LBB64_5
+# BB#4:
+ movl $0, %eax
+.LBB64_5:
+ jne .LBB64_6
+# BB#7:
+ movl $0, %edx
+ jmp .LBB64_8
+.LBB64_6:
+ movl 12(%ecx), %edx
+.LBB64_8:
+ jne .LBB64_9
+# BB#10:
+ xorl %ecx, %ecx
+ jmp .LBB64_11
+.LBB64_9:
+ movl 8(%ecx), %ecx
+.LBB64_11:
+ addl %edi, %eax
+ adcl %ebx, %ebp
+ movl 24(%esp), %edi
+ movl %eax, 16(%edi)
+ adcl %esi, %ecx
+ movl %ebp, 20(%edi)
+ movl %ecx, 24(%edi)
+ adcl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%edi)
+ addl $4, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end64:
+ .size mcl_fpDbl_sub4Lbmi2, .Lfunc_end64-mcl_fpDbl_sub4Lbmi2
+
+ .globl mcl_fp_mulUnitPre5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre5Lbmi2,@function
+mcl_fp_mulUnitPre5Lbmi2: # @mcl_fp_mulUnitPre5Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $8, %esp
+ movl 36(%esp), %edx
+ movl 32(%esp), %ecx
+ mulxl 4(%ecx), %esi, %eax
+ mulxl (%ecx), %edi, %ebx
+ movl %edi, 4(%esp) # 4-byte Spill
+ addl %esi, %ebx
+ mulxl 8(%ecx), %ebp, %esi
+ adcl %eax, %ebp
+ mulxl 12(%ecx), %eax, %edi
+ movl %edi, (%esp) # 4-byte Spill
+ adcl %esi, %eax
+ mulxl 16(%ecx), %ecx, %edx
+ movl 28(%esp), %esi
+ movl 4(%esp), %edi # 4-byte Reload
+ movl %edi, (%esi)
+ movl %ebx, 4(%esi)
+ movl %ebp, 8(%esi)
+ movl %eax, 12(%esi)
+ adcl (%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esi)
+ adcl $0, %edx
+ movl %edx, 20(%esi)
+ addl $8, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end65:
+ .size mcl_fp_mulUnitPre5Lbmi2, .Lfunc_end65-mcl_fp_mulUnitPre5Lbmi2
+
+ .globl mcl_fpDbl_mulPre5Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre5Lbmi2,@function
+mcl_fpDbl_mulPre5Lbmi2: # @mcl_fpDbl_mulPre5Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $44, %esp
+ movl 68(%esp), %eax
+ movl (%eax), %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 4(%eax), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl 72(%esp), %eax
+ movl (%eax), %ebp
+ mulxl %ebp, %esi, %edi
+ movl %ebx, %edx
+ mulxl %ebp, %edx, %eax
+ movl %edx, 20(%esp) # 4-byte Spill
+ addl %esi, %eax
+ movl 8(%ecx), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ mulxl %ebp, %esi, %ebx
+ adcl %edi, %esi
+ movl 12(%ecx), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ mulxl %ebp, %edi, %ecx
+ adcl %ebx, %edi
+ movl 68(%esp), %edx
+ movl 16(%edx), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ mulxl %ebp, %ebp, %edx
+ adcl %ecx, %ebp
+ movl 64(%esp), %ecx
+ movl 20(%esp), %ebx # 4-byte Reload
+ movl %ebx, (%ecx)
+ adcl $0, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx
+ movl 4(%ecx), %ebx
+ movl 36(%esp), %edx # 4-byte Reload
+ mulxl %ebx, %ecx, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ addl %eax, %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ mulxl %ebx, %ecx, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl %esi, %ecx
+ movl 32(%esp), %edx # 4-byte Reload
+ mulxl %ebx, %esi, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl %edi, %esi
+ movl 28(%esp), %edx # 4-byte Reload
+ mulxl %ebx, %edi, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ adcl %ebp, %edi
+ movl 24(%esp), %edx # 4-byte Reload
+ mulxl %ebx, %eax, %edx
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ebx
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl 20(%esp), %ecx # 4-byte Folded Reload
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ adcl %edx, %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 64(%esp), %eax
+ movl 36(%esp), %edx # 4-byte Reload
+ movl %edx, 4(%eax)
+ movl 68(%esp), %ebx
+ movl (%ebx), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 72(%esp), %eax
+ movl 8(%eax), %eax
+ mulxl %eax, %edx, %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ addl %ecx, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 4(%ebx), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ mulxl %eax, %edx, %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl %esi, %edx
+ movl %edx, %ebp
+ movl 8(%ebx), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ mulxl %eax, %ecx, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl %edi, %ecx
+ movl 12(%ebx), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ mulxl %eax, %esi, %edx
+ movl %edx, (%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl 16(%ebx), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ mulxl %eax, %edi, %edx
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ addl 12(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 16(%esp) # 4-byte Spill
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ adcl (%esp), %edi # 4-byte Folded Reload
+ adcl %edx, %ebx
+ movl 64(%esp), %eax
+ movl 20(%esp), %edx # 4-byte Reload
+ movl %edx, 8(%eax)
+ movl 72(%esp), %eax
+ movl 12(%eax), %eax
+ movl 40(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ addl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ebp, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl %ecx, %ebp
+ movl 32(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ecx, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl %esi, %ecx
+ movl 28(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl %edi, %edx
+ movl %edx, %esi
+ movl 24(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edi, %edx
+ adcl %ebx, %edi
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl 20(%esp), %ebp # 4-byte Folded Reload
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ adcl %edx, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 64(%esp), %eax
+ movl 40(%esp), %edx # 4-byte Reload
+ movl %edx, 12(%eax)
+ movl 72(%esp), %eax
+ movl 16(%eax), %edx
+ movl 68(%esp), %eax
+ mulxl (%eax), %esi, %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ addl %ebp, %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ mulxl 4(%eax), %ebx, %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl %ecx, %ebx
+ mulxl 8(%eax), %esi, %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ mulxl 12(%eax), %ecx, %ebp
+ adcl %edi, %ecx
+ mulxl 16(%eax), %edi, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl 36(%esp), %ebx # 4-byte Folded Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ adcl %ebp, %edi
+ movl 64(%esp), %ebp
+ movl 40(%esp), %edx # 4-byte Reload
+ movl %edx, 16(%ebp)
+ movl %ebx, 20(%ebp)
+ movl %esi, 24(%ebp)
+ movl %ecx, 28(%ebp)
+ movl %edi, 32(%ebp)
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%ebp)
+ addl $44, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end66:
+ .size mcl_fpDbl_mulPre5Lbmi2, .Lfunc_end66-mcl_fpDbl_mulPre5Lbmi2
+
+ .globl mcl_fpDbl_sqrPre5Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre5Lbmi2,@function
+mcl_fpDbl_sqrPre5Lbmi2: # @mcl_fpDbl_sqrPre5Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $40, %esp
+ movl 64(%esp), %ecx
+ movl (%ecx), %edi
+ movl 4(%ecx), %esi
+ movl %esi, %edx
+ mulxl %edi, %ebp, %ebx
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl %edi, %edx, %eax
+ movl %edx, 16(%esp) # 4-byte Spill
+ addl %ebp, %eax
+ movl 8(%ecx), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ mulxl %edi, %ebp, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl %ebx, %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 12(%ecx), %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ mulxl %edi, %ecx, %ebx
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl 64(%esp), %edx
+ movl 16(%edx), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ mulxl %edi, %edi, %edx
+ adcl %ebx, %edi
+ movl 16(%esp), %ebx # 4-byte Reload
+ movl 60(%esp), %ebp
+ movl %ebx, (%ebp)
+ adcl $0, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ addl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %edx
+ mulxl %esi, %ebx, %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ movl 36(%esp), %edx # 4-byte Reload
+ mulxl %esi, %ebp, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl %ecx, %ebp
+ movl 20(%esp), %edx # 4-byte Reload
+ mulxl %esi, %ecx, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ adcl %edi, %ecx
+ movl 32(%esp), %edx # 4-byte Reload
+ mulxl %esi, %edi, %edx
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl 28(%esp), %ebx # 4-byte Folded Reload
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ adcl %edx, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 60(%esp), %eax
+ movl 24(%esp), %edx # 4-byte Reload
+ movl %edx, 4(%eax)
+ movl 64(%esp), %eax
+ movl (%eax), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ mulxl %esi, %edx, %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ addl %ebx, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 64(%esp), %eax
+ movl 4(%eax), %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ mulxl %esi, %ebx, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ adcl %ebp, %ebx
+ movl %esi, %edx
+ mulxl %esi, %ebp, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ adcl %ecx, %ebp
+ movl 64(%esp), %ecx
+ movl 12(%ecx), %esi
+ movl %esi, %edx
+ mulxl %eax, %eax, %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ adcl %edi, %eax
+ movl 32(%esp), %edx # 4-byte Reload
+ mulxl 36(%esp), %ecx, %edx # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ sbbl %edi, %edi
+ andl $1, %edi
+ addl 16(%esp), %ebx # 4-byte Folded Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl (%esp), %ecx # 4-byte Folded Reload
+ adcl %edx, %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl 24(%esp), %edx # 4-byte Reload
+ mulxl %esi, %edx, %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ addl %ebx, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 20(%esp), %edx # 4-byte Reload
+ mulxl %esi, %edx, %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl %ebp, %edx
+ movl %edx, %edi
+ movl 60(%esp), %eax
+ movl 28(%esp), %edx # 4-byte Reload
+ movl %edx, 8(%eax)
+ movl 64(%esp), %eax
+ movl 8(%eax), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ mulxl %esi, %ebx, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, %edx
+ mulxl %esi, %ebp, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl %ecx, %ebp
+ movl 16(%eax), %ecx
+ movl %ecx, %edx
+ mulxl %esi, %esi, %edx
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl 24(%esp), %edi # 4-byte Folded Reload
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ adcl %edx, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 60(%esp), %edx
+ movl 32(%esp), %eax # 4-byte Reload
+ movl %eax, 12(%edx)
+ movl %ecx, %edx
+ movl 64(%esp), %eax
+ mulxl (%eax), %edx, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ addl %edi, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ movl 64(%esp), %eax
+ mulxl 4(%eax), %edi, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl %ebx, %edi
+ movl 36(%esp), %edx # 4-byte Reload
+ mulxl %ecx, %ebx, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl %ebp, %ebx
+ movl %ecx, %edx
+ mulxl 12(%eax), %ebp, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl %esi, %ebp
+ movl %ecx, %edx
+ mulxl %ecx, %edx, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl 28(%esp), %edi # 4-byte Folded Reload
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %edx # 4-byte Folded Reload
+ movl 60(%esp), %esi
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 16(%esi)
+ movl %edi, 20(%esi)
+ movl %ebx, 24(%esi)
+ movl %ebp, 28(%esi)
+ movl %edx, 32(%esi)
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esi)
+ addl $40, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end67:
+ .size mcl_fpDbl_sqrPre5Lbmi2, .Lfunc_end67-mcl_fpDbl_sqrPre5Lbmi2
+
+ .globl mcl_fp_mont5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont5Lbmi2,@function
+mcl_fp_mont5Lbmi2: # @mcl_fp_mont5Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $84, %esp
+ movl 108(%esp), %eax
+ movl 16(%eax), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx
+ movl (%ecx), %ecx
+ movl 12(%eax), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 8(%eax), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl (%eax), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 4(%eax), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ mulxl %ecx, %edx, %eax
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %esi, %edx
+ mulxl %ecx, %edx, %esi
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl %ecx, %edx, %edi
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %ebp, %edx
+ mulxl %ecx, %edx, %ebp
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl %ecx, %edx, %ecx
+ movl %edx, 16(%esp) # 4-byte Spill
+ addl 68(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 72(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 24(%esp) # 4-byte Spill
+ adcl 76(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 80(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 116(%esp), %ebp
+ movl -4(%ebp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ imull %eax, %edx
+ movl (%ebp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 4(%ebp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ mulxl %ecx, %ecx, %ebx
+ mulxl %eax, %esi, %edi
+ movl %esi, 12(%esp) # 4-byte Spill
+ addl %ecx, %edi
+ movl 8(%ebp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ mulxl %eax, %esi, %ecx
+ adcl %ebx, %esi
+ movl 12(%ebp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ mulxl %eax, %eax, %ebx
+ movl %ebx, 8(%esp) # 4-byte Spill
+ adcl %ecx, %eax
+ movl %eax, %ecx
+ movl 16(%ebp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ mulxl %eax, %ebx, %eax
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ adcl $0, %eax
+ movl 12(%esp), %edx # 4-byte Reload
+ addl 16(%esp), %edx # 4-byte Folded Reload
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ andl $1, %edi
+ movl 112(%esp), %edx
+ movl 4(%edx), %edx
+ mulxl 48(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ mulxl 52(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ mulxl 40(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ mulxl 44(%esp), %esi, %ebp # 4-byte Folded Reload
+ addl %eax, %ebp
+ mulxl 56(%esp), %ecx, %eax # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ecx
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 8(%esp) # 4-byte Spill
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 24(%esp), %ebp # 4-byte Reload
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 24(%esp) # 4-byte Spill
+ adcl %ebx, %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl %edi, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %esi, %edx
+ imull 60(%esp), %edx # 4-byte Folded Reload
+ mulxl 80(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 76(%esp), %edi, %esi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ addl %ecx, %esi
+ movl %esi, %edi
+ mulxl 72(%esp), %esi, %ebx # 4-byte Folded Reload
+ adcl %eax, %esi
+ mulxl 68(%esp), %ecx, %ebp # 4-byte Folded Reload
+ adcl %ebx, %ecx
+ mulxl 64(%esp), %edx, %eax # 4-byte Folded Reload
+ adcl %ebp, %edx
+ movl %edx, %ebx
+ adcl $0, %eax
+ movl 28(%esp), %edx # 4-byte Reload
+ andl $1, %edx
+ movl 12(%esp), %ebp # 4-byte Reload
+ addl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %esi
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 8(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 112(%esp), %edx
+ movl 8(%edx), %edx
+ mulxl 48(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ mulxl 52(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, (%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ mulxl 40(%esp), %edi, %ebx # 4-byte Folded Reload
+ mulxl 44(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ addl %edi, %ecx
+ mulxl 56(%esp), %eax, %edx # 4-byte Folded Reload
+ adcl %ebx, %eax
+ movl %eax, %edi
+ adcl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, %eax
+ movl 32(%esp), %ebx # 4-byte Reload
+ adcl 4(%esp), %ebx # 4-byte Folded Reload
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ movl 24(%esp), %ebp # 4-byte Reload
+ addl 20(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl %esi, %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%esp) # 4-byte Spill
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 32(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebp, %edx
+ imull 60(%esp), %edx # 4-byte Folded Reload
+ mulxl 80(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 76(%esp), %edi, %esi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ addl %ecx, %esi
+ mulxl 72(%esp), %ecx, %ebp # 4-byte Folded Reload
+ adcl %eax, %ecx
+ mulxl 68(%esp), %eax, %edi # 4-byte Folded Reload
+ adcl %ebp, %eax
+ mulxl 64(%esp), %ebx, %ebp # 4-byte Folded Reload
+ adcl %edi, %ebx
+ adcl $0, %ebp
+ movl 28(%esp), %edx # 4-byte Reload
+ andl $1, %edx
+ movl 12(%esp), %edi # 4-byte Reload
+ addl 24(%esp), %edi # 4-byte Folded Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 112(%esp), %edx
+ movl 12(%edx), %edx
+ mulxl 48(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ mulxl 52(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ mulxl 40(%esp), %eax, %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ mulxl 44(%esp), %esi, %edi # 4-byte Folded Reload
+ addl %eax, %edi
+ mulxl 56(%esp), %eax, %edx # 4-byte Folded Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ adcl %ecx, %edx
+ movl %edx, %ecx
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 8(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 24(%esp), %edi # 4-byte Reload
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl %ebx, %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl %ebp, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %esi, %edx
+ imull 60(%esp), %edx # 4-byte Folded Reload
+ mulxl 80(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 76(%esp), %edi, %esi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ addl %ecx, %esi
+ movl %esi, %ebp
+ mulxl 72(%esp), %esi, %ecx # 4-byte Folded Reload
+ adcl %eax, %esi
+ movl %esi, %eax
+ mulxl 68(%esp), %ebx, %edi # 4-byte Folded Reload
+ adcl %ecx, %ebx
+ mulxl 64(%esp), %ecx, %esi # 4-byte Folded Reload
+ adcl %edi, %ecx
+ adcl $0, %esi
+ movl 28(%esp), %edx # 4-byte Reload
+ andl $1, %edx
+ movl 12(%esp), %edi # 4-byte Reload
+ addl 8(%esp), %edi # 4-byte Folded Reload
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ adcl 16(%esp), %ebx # 4-byte Folded Reload
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ adcl $0, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 112(%esp), %edx
+ movl 16(%edx), %edx
+ mulxl 40(%esp), %ebp, %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ mulxl 44(%esp), %eax, %edi # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ addl %ebp, %edi
+ mulxl 48(%esp), %ebp, %eax # 4-byte Folded Reload
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ mulxl 52(%esp), %ebp, %eax # 4-byte Folded Reload
+ movl %ebp, 32(%esp) # 4-byte Spill
+ mulxl 56(%esp), %ebp, %edx # 4-byte Folded Reload
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ebp
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ movl 44(%esp), %eax # 4-byte Reload
+ addl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 40(%esp) # 4-byte Spill
+ adcl %ebx, 52(%esp) # 4-byte Folded Spill
+ adcl %ecx, 56(%esp) # 4-byte Folded Spill
+ adcl %esi, %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ sbbl %ebx, %ebx
+ movl 60(%esp), %edx # 4-byte Reload
+ imull %eax, %edx
+ mulxl 76(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 60(%esp) # 4-byte Spill
+ mulxl 80(%esp), %ebp, %ecx # 4-byte Folded Reload
+ addl %eax, %ebp
+ mulxl 72(%esp), %edi, %eax # 4-byte Folded Reload
+ adcl %ecx, %edi
+ movl %edx, %ecx
+ mulxl 68(%esp), %esi, %edx # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl %eax, %esi
+ movl %ecx, %edx
+ mulxl 64(%esp), %edx, %ecx # 4-byte Folded Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %ecx
+ andl $1, %ebx
+ movl 60(%esp), %eax # 4-byte Reload
+ addl 44(%esp), %eax # 4-byte Folded Reload
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 44(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ adcl $0, %ebx
+ subl 76(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl %edi, %eax
+ sbbl 80(%esp), %eax # 4-byte Folded Reload
+ movl %esi, %ebp
+ sbbl 72(%esp), %ebp # 4-byte Folded Reload
+ sbbl 68(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ sbbl 64(%esp), %edx # 4-byte Folded Reload
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB68_2
+# BB#1:
+ movl %eax, %edi
+.LBB68_2:
+ testb %bl, %bl
+ movl 44(%esp), %ebx # 4-byte Reload
+ jne .LBB68_4
+# BB#3:
+ movl 76(%esp), %ebx # 4-byte Reload
+.LBB68_4:
+ movl 104(%esp), %eax
+ movl %ebx, (%eax)
+ movl %edi, 4(%eax)
+ jne .LBB68_6
+# BB#5:
+ movl %ebp, %esi
+.LBB68_6:
+ movl %esi, 8(%eax)
+ movl 60(%esp), %esi # 4-byte Reload
+ jne .LBB68_8
+# BB#7:
+ movl 80(%esp), %esi # 4-byte Reload
+.LBB68_8:
+ movl %esi, 12(%eax)
+ jne .LBB68_10
+# BB#9:
+ movl %edx, %ecx
+.LBB68_10:
+ movl %ecx, 16(%eax)
+ addl $84, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end68:
+ .size mcl_fp_mont5Lbmi2, .Lfunc_end68-mcl_fp_mont5Lbmi2
+
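+# mcl_fp_montNF5Lbmi2: by its name, the "NF" (presumably non-full-bit modulus)
+# variant of the 5-limb (160-bit) Montgomery multiplication; the final
+# reduction below picks between the raw sum and the p-subtracted value by the
+# sign of the borrow (js branches) instead of an explicit carry mask.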
+ .globl mcl_fp_montNF5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF5Lbmi2,@function
+mcl_fp_montNF5Lbmi2: # @mcl_fp_montNF5Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $68, %esp
+ movl 92(%esp), %edi
+ movl (%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 4(%edi), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx
+ movl (%ecx), %ebx
+ mulxl %ebx, %ecx, %esi
+ movl %eax, %edx
+ mulxl %ebx, %edx, %eax
+ movl %edx, 60(%esp) # 4-byte Spill
+ addl %ecx, %eax
+ movl %eax, %ecx
+ movl 8(%edi), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ mulxl %ebx, %eax, %ebp
+ adcl %esi, %eax
+ movl %eax, %esi
+ movl 12(%edi), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ mulxl %ebx, %eax, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl %ebp, %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 16(%edi), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ mulxl %ebx, %edx, %eax
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 100(%esp), %ebx
+ movl -4(%ebx), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ movl %edi, %edx
+ imull %eax, %edx
+ movl (%ebx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ mulxl %eax, %ebp, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ addl %edi, %ebp
+ movl 4(%ebx), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ mulxl %eax, %eax, %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ adcl %ecx, %eax
+ movl %eax, %edi
+ movl 8(%ebx), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ mulxl %eax, %eax, %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl %esi, %eax
+ movl %eax, %esi
+ movl 12(%ebx), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ mulxl %eax, %ecx, %ebp
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl 16(%ebx), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ mulxl %eax, %ebx, %edx
+ adcl 16(%esp), %ebx # 4-byte Folded Reload
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl %ebp, %ebx
+ adcl %edx, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl 4(%eax), %edx
+ mulxl 36(%esp), %ecx, %esi # 4-byte Folded Reload
+ mulxl 40(%esp), %edi, %eax # 4-byte Folded Reload
+ addl %ecx, %eax
+ mulxl 32(%esp), %ebp, %ecx # 4-byte Folded Reload
+ movl %ecx, (%esp) # 4-byte Spill
+ adcl %esi, %ebp
+ mulxl 28(%esp), %esi, %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl (%esp), %esi # 4-byte Folded Reload
+ mulxl 24(%esp), %edx, %ecx # 4-byte Folded Reload
+ adcl 4(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %ecx
+ addl 16(%esp), %edi # 4-byte Folded Reload
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl %ebx, %esi
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl %edi, %edx
+ imull 44(%esp), %edx # 4-byte Folded Reload
+ mulxl 64(%esp), %ebx, %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ addl %edi, %ebx
+ mulxl 60(%esp), %edi, %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl %eax, %edi
+ mulxl 56(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %eax, 4(%esp) # 4-byte Spill
+ adcl %ebp, %ecx
+ mulxl 52(%esp), %eax, %ebp # 4-byte Folded Reload
+ adcl %esi, %eax
+ mulxl 48(%esp), %ebx, %edx # 4-byte Folded Reload
+ adcl 16(%esp), %ebx # 4-byte Folded Reload
+ movl 20(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ addl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 4(%esp) # 4-byte Spill
+ adcl %ebp, %ebx
+ movl %ebx, 8(%esp) # 4-byte Spill
+ adcl %edx, %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl 8(%eax), %edx
+ mulxl 36(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 40(%esp), %ebp, %esi # 4-byte Folded Reload
+ addl %ecx, %esi
+ mulxl 32(%esp), %edi, %ecx # 4-byte Folded Reload
+ adcl %eax, %edi
+ mulxl 28(%esp), %ebx, %eax # 4-byte Folded Reload
+ movl %eax, (%esp) # 4-byte Spill
+ adcl %ecx, %ebx
+ mulxl 24(%esp), %ecx, %eax # 4-byte Folded Reload
+ adcl (%esp), %ecx # 4-byte Folded Reload
+ adcl $0, %eax
+ addl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ movl %eax, %edx
+ imull 44(%esp), %edx # 4-byte Folded Reload
+ mulxl 64(%esp), %ebp, %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ addl %eax, %ebp
+ mulxl 60(%esp), %ebp, %eax # 4-byte Folded Reload
+ movl %eax, 8(%esp) # 4-byte Spill
+ adcl %esi, %ebp
+ movl %ebp, %esi
+ mulxl 56(%esp), %ebp, %eax # 4-byte Folded Reload
+ movl %eax, 4(%esp) # 4-byte Spill
+ adcl %edi, %ebp
+ movl %ebp, %eax
+ mulxl 52(%esp), %ebp, %edi # 4-byte Folded Reload
+ adcl %ebx, %ebp
+ movl %ebp, %ebx
+ mulxl 48(%esp), %ebp, %edx # 4-byte Folded Reload
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ movl 20(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl 4(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 4(%esp) # 4-byte Spill
+ adcl %edi, %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ adcl %edx, %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl 12(%eax), %edx
+ mulxl 36(%esp), %ecx, %esi # 4-byte Folded Reload
+ mulxl 40(%esp), %ebx, %ebp # 4-byte Folded Reload
+ addl %ecx, %ebp
+ mulxl 32(%esp), %ecx, %edi # 4-byte Folded Reload
+ adcl %esi, %ecx
+ mulxl 28(%esp), %esi, %eax # 4-byte Folded Reload
+ movl %eax, (%esp) # 4-byte Spill
+ adcl %edi, %esi
+ mulxl 24(%esp), %edi, %eax # 4-byte Folded Reload
+ adcl (%esp), %edi # 4-byte Folded Reload
+ adcl $0, %eax
+ addl 16(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ adcl $0, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ imull 44(%esp), %edx # 4-byte Folded Reload
+ mulxl 64(%esp), %ebx, %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ addl 16(%esp), %ebx # 4-byte Folded Reload
+ mulxl 60(%esp), %ebx, %eax # 4-byte Folded Reload
+ movl %eax, 8(%esp) # 4-byte Spill
+ adcl %ebp, %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ mulxl 56(%esp), %eax, %ebx # 4-byte Folded Reload
+ adcl %ecx, %eax
+ mulxl 52(%esp), %ecx, %ebp # 4-byte Folded Reload
+ movl %ebp, 4(%esp) # 4-byte Spill
+ adcl %esi, %ecx
+ movl %ecx, %esi
+ mulxl 48(%esp), %ecx, %edx # 4-byte Folded Reload
+ adcl %edi, %ecx
+ movl 20(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl 12(%esp), %edi # 4-byte Reload
+ addl %edi, 16(%esp) # 4-byte Folded Spill
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl %ebx, %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ adcl %edx, %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl 16(%eax), %edx
+ mulxl 36(%esp), %eax, %ebp # 4-byte Folded Reload
+ mulxl 40(%esp), %edi, %ebx # 4-byte Folded Reload
+ addl %eax, %ebx
+ mulxl 32(%esp), %eax, %esi # 4-byte Folded Reload
+ movl %esi, 36(%esp) # 4-byte Spill
+ adcl %ebp, %eax
+ mulxl 28(%esp), %ebp, %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ mulxl 24(%esp), %edx, %esi # 4-byte Folded Reload
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %esi
+ addl 16(%esp), %edi # 4-byte Folded Reload
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl %ecx, %ebp
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl 44(%esp), %edx # 4-byte Reload
+ imull %edi, %edx
+ mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ addl %edi, %ecx
+ mulxl 60(%esp), %edi, %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl %ebx, %edi
+ movl %edx, %eax
+ mulxl 56(%esp), %ebx, %ecx # 4-byte Folded Reload
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ mulxl 52(%esp), %ecx, %edx # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl %ebp, %ecx
+ movl %eax, %edx
+ mulxl 48(%esp), %ebp, %edx # 4-byte Folded Reload
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %esi
+ addl 44(%esp), %edi # 4-byte Folded Reload
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl %edx, %esi
+ movl %edi, %eax
+ subl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ sbbl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ sbbl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ sbbl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %esi, %edx
+ sbbl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ sarl $31, %edx
+ testl %edx, %edx
+ js .LBB69_2
+# BB#1:
+ movl 40(%esp), %edi # 4-byte Reload
+.LBB69_2:
+ movl 88(%esp), %edx
+ movl %edi, (%edx)
+ js .LBB69_4
+# BB#3:
+ movl 44(%esp), %ebx # 4-byte Reload
+.LBB69_4:
+ movl %ebx, 4(%edx)
+ js .LBB69_6
+# BB#5:
+ movl 56(%esp), %ecx # 4-byte Reload
+.LBB69_6:
+ movl %ecx, 8(%edx)
+ js .LBB69_8
+# BB#7:
+ movl 60(%esp), %ebp # 4-byte Reload
+.LBB69_8:
+ movl %ebp, 12(%edx)
+ js .LBB69_10
+# BB#9:
+ movl 64(%esp), %esi # 4-byte Reload
+.LBB69_10:
+ movl %esi, 16(%edx)
+ addl $68, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end69:
+ .size mcl_fp_montNF5Lbmi2, .Lfunc_end69-mcl_fp_montNF5Lbmi2
+
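+# mcl_fp_montRed5Lbmi2: presumably Montgomery reduction of a double-width
+# (10-limb) input back to 5 limbs, folding in one word per round using the
+# inverse word stored at p[-1] and finishing with a conditional subtraction
+# of p.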
+ .globl mcl_fp_montRed5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed5Lbmi2,@function
+mcl_fp_montRed5Lbmi2: # @mcl_fp_montRed5Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $68, %esp
+ movl 96(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl (%eax), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx
+ movl (%ecx), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ imull %esi, %edx
+ movl 16(%eax), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 12(%eax), %ebx
+ movl %ebx, 64(%esp) # 4-byte Spill
+ movl 8(%eax), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 4(%eax), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ mulxl %esi, %esi, %eax
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ mulxl %ebx, %esi, %eax
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ mulxl %ebp, %ebp, %eax
+ mulxl %ecx, %esi, %ecx
+ mulxl %edi, %edx, %ebx
+ addl %esi, %ebx
+ adcl %ebp, %ecx
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl 28(%esp), %edi # 4-byte Reload
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ addl 44(%esp), %edx # 4-byte Folded Reload
+ movl 92(%esp), %ebp
+ adcl 4(%ebp), %ebx
+ adcl 8(%ebp), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl 12(%ebp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ adcl 16(%ebp), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 20(%ebp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 36(%ebp), %edx
+ movl 32(%ebp), %esi
+ movl 28(%ebp), %edi
+ movl 24(%ebp), %eax
+ adcl $0, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, %esi
+ movl %ebx, %edx
+ imull 48(%esp), %edx # 4-byte Folded Reload
+ mulxl 60(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, (%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ mulxl 56(%esp), %ebp, %eax # 4-byte Folded Reload
+ mulxl 52(%esp), %edi, %ecx # 4-byte Folded Reload
+ movl %edi, 4(%esp) # 4-byte Spill
+ addl %ebp, %ecx
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ebp
+ mulxl 64(%esp), %eax, %edi # 4-byte Folded Reload
+ movl %edi, (%esp) # 4-byte Spill
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ mulxl %eax, %edi, %edx
+ adcl (%esp), %edi # 4-byte Folded Reload
+ adcl $0, %edx
+ addl %ebx, 4(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 28(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 20(%esp) # 4-byte Folded Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %ecx, %edx
+ imull 48(%esp), %edx # 4-byte Folded Reload
+ mulxl %eax, %edi, %eax
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ mulxl 56(%esp), %edi, %eax # 4-byte Folded Reload
+ movl %eax, (%esp) # 4-byte Spill
+ mulxl 52(%esp), %eax, %ebp # 4-byte Folded Reload
+ movl %eax, 8(%esp) # 4-byte Spill
+ addl %edi, %ebp
+ mulxl 60(%esp), %ebx, %eax # 4-byte Folded Reload
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ mulxl 64(%esp), %edi, %edx # 4-byte Folded Reload
+ adcl %eax, %edi
+ adcl 4(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %eax
+ movl 32(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl %ecx, 8(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl %ebp, %edx
+ imull 48(%esp), %edx # 4-byte Folded Reload
+ mulxl 40(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ mulxl 56(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ mulxl 52(%esp), %eax, %esi # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ addl %ecx, %esi
+ mulxl 60(%esp), %ecx, %eax # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ mulxl 64(%esp), %edx, %ecx # 4-byte Folded Reload
+ adcl %eax, %edx
+ movl %edx, %eax
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl %ebp, 12(%esp) # 4-byte Folded Spill
+ adcl %ebx, %esi
+ adcl %edi, 24(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%esp) # 4-byte Spill
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 32(%esp) # 4-byte Spill
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 16(%esp), %ebx # 4-byte Reload
+ adcl $0, %ebx
+ movl 48(%esp), %edx # 4-byte Reload
+ imull %esi, %edx
+ mulxl 52(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ mulxl 56(%esp), %edi, %eax # 4-byte Folded Reload
+ addl %ecx, %edi
+ movl %edx, %ebp
+ mulxl 60(%esp), %ecx, %edx # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl %eax, %ecx
+ movl %ebp, %edx
+ mulxl 64(%esp), %eax, %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %ebp, %edx
+ mulxl 40(%esp), %ebp, %edx # 4-byte Folded Reload
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %edx
+ addl %esi, 48(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 48(%esp) # 4-byte Spill
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ movl %ebx, %esi
+ adcl $0, %esi
+ movl %edi, %ebx
+ subl 52(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl %ecx, %ebx
+ sbbl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+ sbbl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ebx
+ sbbl 64(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ sbbl 40(%esp), %ebp # 4-byte Folded Reload
+ sbbl $0, %esi
+ andl $1, %esi
+ jne .LBB70_2
+# BB#1:
+ movl 56(%esp), %ecx # 4-byte Reload
+.LBB70_2:
+ movl %esi, %eax
+ testb %al, %al
+ jne .LBB70_4
+# BB#3:
+ movl 52(%esp), %edi # 4-byte Reload
+.LBB70_4:
+ movl 88(%esp), %esi
+ movl %edi, (%esi)
+ movl %ecx, 4(%esi)
+ movl 48(%esp), %eax # 4-byte Reload
+ movl 36(%esp), %ecx # 4-byte Reload
+ jne .LBB70_6
+# BB#5:
+ movl %ebx, %ecx
+.LBB70_6:
+ movl %ecx, 8(%esi)
+ jne .LBB70_8
+# BB#7:
+ movl 64(%esp), %eax # 4-byte Reload
+.LBB70_8:
+ movl %eax, 12(%esi)
+ jne .LBB70_10
+# BB#9:
+ movl %ebp, %edx
+.LBB70_10:
+ movl %edx, 16(%esi)
+ addl $68, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end70:
+ .size mcl_fp_montRed5Lbmi2, .Lfunc_end70-mcl_fp_montRed5Lbmi2
+
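+# mcl_fp_addPre5Lbmi2: plain 5-limb addition z = x + y with no modular
+# reduction; the final carry appears to be returned in %eax.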
+ .globl mcl_fp_addPre5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre5Lbmi2,@function
+mcl_fp_addPre5Lbmi2: # @mcl_fp_addPre5Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 28(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ movl 24(%esp), %esi
+ addl (%esi), %ecx
+ adcl 4(%esi), %edx
+ movl 8(%eax), %edi
+ adcl 8(%esi), %edi
+ movl 12(%esi), %ebx
+ movl 16(%esi), %esi
+ adcl 12(%eax), %ebx
+ movl 16(%eax), %eax
+ movl 20(%esp), %ebp
+ movl %ecx, (%ebp)
+ movl %edx, 4(%ebp)
+ movl %edi, 8(%ebp)
+ movl %ebx, 12(%ebp)
+ adcl %esi, %eax
+ movl %eax, 16(%ebp)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end71:
+ .size mcl_fp_addPre5Lbmi2, .Lfunc_end71-mcl_fp_addPre5Lbmi2
+
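+# mcl_fp_subPre5Lbmi2: plain 5-limb subtraction z = x - y with no modular
+# reduction; the borrow is returned in %eax.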
+ .globl mcl_fp_subPre5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre5Lbmi2,@function
+mcl_fp_subPre5Lbmi2: # @mcl_fp_subPre5Lbmi2
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %ecx
+ movl (%ecx), %edx
+ xorl %eax, %eax
+ movl 20(%esp), %esi
+ subl (%esi), %edx
+ movl 12(%esp), %edi
+ movl %edx, (%edi)
+ movl 4(%ecx), %edx
+ sbbl 4(%esi), %edx
+ movl %edx, 4(%edi)
+ movl 8(%ecx), %edx
+ sbbl 8(%esi), %edx
+ movl %edx, 8(%edi)
+ movl 12(%ecx), %edx
+ sbbl 12(%esi), %edx
+ movl %edx, 12(%edi)
+ movl 16(%esi), %edx
+ movl 16(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 16(%edi)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end72:
+ .size mcl_fp_subPre5Lbmi2, .Lfunc_end72-mcl_fp_subPre5Lbmi2
+
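+# mcl_fp_shr1_5Lbmi2: logical right shift of a 5-limb value by one bit,
+# using shrdl to carry bits across limb boundaries.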
+ .globl mcl_fp_shr1_5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_5Lbmi2,@function
+mcl_fp_shr1_5Lbmi2: # @mcl_fp_shr1_5Lbmi2
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 20(%esp), %eax
+ movl 16(%eax), %ecx
+ movl 12(%eax), %edx
+ movl 8(%eax), %esi
+ movl (%eax), %edi
+ movl 4(%eax), %eax
+ shrdl $1, %eax, %edi
+ movl 16(%esp), %ebx
+ movl %edi, (%ebx)
+ shrdl $1, %esi, %eax
+ movl %eax, 4(%ebx)
+ shrdl $1, %edx, %esi
+ movl %esi, 8(%ebx)
+ shrdl $1, %ecx, %edx
+ movl %edx, 12(%ebx)
+ shrl %ecx
+ movl %ecx, 16(%ebx)
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end73:
+ .size mcl_fp_shr1_5Lbmi2, .Lfunc_end73-mcl_fp_shr1_5Lbmi2
+
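+# mcl_fp_add5Lbmi2: modular addition; the raw sum is stored first, then p is
+# subtracted and the reduced value overwrites it only when no borrow occurred
+# (the %nocarry path).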
+ .globl mcl_fp_add5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add5Lbmi2,@function
+mcl_fp_add5Lbmi2: # @mcl_fp_add5Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 28(%esp), %ebx
+ movl (%ebx), %eax
+ movl 4(%ebx), %ecx
+ movl 24(%esp), %edi
+ addl (%edi), %eax
+ adcl 4(%edi), %ecx
+ movl 8(%ebx), %edx
+ adcl 8(%edi), %edx
+ movl 12(%edi), %esi
+ movl 16(%edi), %edi
+ adcl 12(%ebx), %esi
+ adcl 16(%ebx), %edi
+ movl 20(%esp), %ebx
+ movl %eax, (%ebx)
+ movl %ecx, 4(%ebx)
+ movl %edx, 8(%ebx)
+ movl %esi, 12(%ebx)
+ movl %edi, 16(%ebx)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 32(%esp), %ebp
+ subl (%ebp), %eax
+ sbbl 4(%ebp), %ecx
+ sbbl 8(%ebp), %edx
+ sbbl 12(%ebp), %esi
+ sbbl 16(%ebp), %edi
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB74_2
+# BB#1: # %nocarry
+ movl 20(%esp), %ebx
+ movl %eax, (%ebx)
+ movl %ecx, 4(%ebx)
+ movl %edx, 8(%ebx)
+ movl %esi, 12(%ebx)
+ movl %edi, 16(%ebx)
+.LBB74_2: # %carry
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end74:
+ .size mcl_fp_add5Lbmi2, .Lfunc_end74-mcl_fp_add5Lbmi2
+
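+# mcl_fp_addNF5Lbmi2: sign-select variant of modular addition, presumably for
+# non-full-bit moduli: both x + y and x + y - p are computed and the
+# non-negative one is chosen limb by limb via js branches.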
+ .globl mcl_fp_addNF5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF5Lbmi2,@function
+mcl_fp_addNF5Lbmi2: # @mcl_fp_addNF5Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $20, %esp
+ movl 48(%esp), %esi
+ movl (%esi), %ebx
+ movl 4(%esi), %eax
+ movl 44(%esp), %edi
+ addl (%edi), %ebx
+ adcl 4(%edi), %eax
+ movl 16(%esi), %ecx
+ movl 12(%esi), %edx
+ movl 8(%esi), %ebp
+ adcl 8(%edi), %ebp
+ adcl 12(%edi), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 16(%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 52(%esp), %edi
+ movl %ebx, %esi
+ subl (%edi), %esi
+ movl %esi, (%esp) # 4-byte Spill
+ movl %eax, %esi
+ sbbl 4(%edi), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ sbbl 8(%edi), %esi
+ sbbl 12(%edi), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ sbbl 16(%edi), %edx
+ movl %edx, %edi
+ sarl $31, %edi
+ testl %edi, %edi
+ js .LBB75_2
+# BB#1:
+ movl (%esp), %ebx # 4-byte Reload
+.LBB75_2:
+ movl 40(%esp), %edi
+ movl %ebx, (%edi)
+ js .LBB75_4
+# BB#3:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB75_4:
+ movl %eax, 4(%edi)
+ movl 12(%esp), %ecx # 4-byte Reload
+ js .LBB75_6
+# BB#5:
+ movl %esi, %ebp
+.LBB75_6:
+ movl %ebp, 8(%edi)
+ movl 16(%esp), %eax # 4-byte Reload
+ js .LBB75_8
+# BB#7:
+ movl 8(%esp), %ecx # 4-byte Reload
+.LBB75_8:
+ movl %ecx, 12(%edi)
+ js .LBB75_10
+# BB#9:
+ movl %edx, %eax
+.LBB75_10:
+ movl %eax, 16(%edi)
+ addl $20, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end75:
+ .size mcl_fp_addNF5Lbmi2, .Lfunc_end75-mcl_fp_addNF5Lbmi2
+
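+# mcl_fp_sub5Lbmi2: modular subtraction; if x - y borrows, p is added back in
+# the %carry block before returning.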
+ .globl mcl_fp_sub5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub5Lbmi2,@function
+mcl_fp_sub5Lbmi2: # @mcl_fp_sub5Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %edi
+ movl (%edi), %eax
+ movl 4(%edi), %ecx
+ xorl %ebx, %ebx
+ movl 28(%esp), %ebp
+ subl (%ebp), %eax
+ sbbl 4(%ebp), %ecx
+ movl 8(%edi), %edx
+ sbbl 8(%ebp), %edx
+ movl 12(%edi), %esi
+ sbbl 12(%ebp), %esi
+ movl 16(%edi), %edi
+ sbbl 16(%ebp), %edi
+ movl 20(%esp), %ebp
+ movl %eax, (%ebp)
+ movl %ecx, 4(%ebp)
+ movl %edx, 8(%ebp)
+ movl %esi, 12(%ebp)
+ movl %edi, 16(%ebp)
+ sbbl $0, %ebx
+ testb $1, %bl
+ je .LBB76_2
+# BB#1: # %carry
+ movl 32(%esp), %ebx
+ addl (%ebx), %eax
+ movl %eax, (%ebp)
+ adcl 4(%ebx), %ecx
+ movl %ecx, 4(%ebp)
+ adcl 8(%ebx), %edx
+ movl %edx, 8(%ebp)
+ movl 12(%ebx), %eax
+ adcl %esi, %eax
+ movl %eax, 12(%ebp)
+ movl 16(%ebx), %eax
+ adcl %edi, %eax
+ movl %eax, 16(%ebp)
+.LBB76_2: # %nocarry
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end76:
+ .size mcl_fp_sub5Lbmi2, .Lfunc_end76-mcl_fp_sub5Lbmi2
+
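+# mcl_fp_subNF5Lbmi2: branchless variant of modular subtraction; the sign of
+# the top-limb borrow is smeared into a mask that selects either 0 or the
+# words of p to add back.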
+ .globl mcl_fp_subNF5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF5Lbmi2,@function
+mcl_fp_subNF5Lbmi2: # @mcl_fp_subNF5Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $16, %esp
+ movl 40(%esp), %edi
+ movl (%edi), %ecx
+ movl 4(%edi), %eax
+ movl 44(%esp), %ebx
+ subl (%ebx), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ sbbl 4(%ebx), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 16(%edi), %esi
+ movl 12(%edi), %eax
+ movl 8(%edi), %ecx
+ sbbl 8(%ebx), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ sbbl 12(%ebx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ sbbl 16(%ebx), %esi
+ movl %esi, %ebx
+ sarl $31, %ebx
+ movl %ebx, %ebp
+ shldl $1, %esi, %ebp
+ movl 48(%esp), %edi
+ movl 4(%edi), %ecx
+ andl %ebp, %ecx
+ andl (%edi), %ebp
+ movl 16(%edi), %edx
+ andl %ebx, %edx
+ rorxl $31, %ebx, %eax
+ andl 12(%edi), %ebx
+ andl 8(%edi), %eax
+ addl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl 36(%esp), %edi
+ movl %ebp, (%edi)
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 4(%edi)
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ movl %eax, 8(%edi)
+ movl %ebx, 12(%edi)
+ adcl %esi, %edx
+ movl %edx, 16(%edi)
+ addl $16, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end77:
+ .size mcl_fp_subNF5Lbmi2, .Lfunc_end77-mcl_fp_subNF5Lbmi2
+
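+# mcl_fpDbl_add5Lbmi2: addition of two double-width (10-limb) values; the low
+# half is written out directly and the high half appears to be conditionally
+# reduced modulo p.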
+ .globl mcl_fpDbl_add5Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add5Lbmi2,@function
+mcl_fpDbl_add5Lbmi2: # @mcl_fpDbl_add5Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $28, %esp
+ movl 56(%esp), %edx
+ movl 52(%esp), %ecx
+ movl 12(%ecx), %ebx
+ movl 16(%ecx), %ebp
+ movl 8(%edx), %esi
+ movl (%edx), %edi
+ addl (%ecx), %edi
+ movl 48(%esp), %eax
+ movl %edi, (%eax)
+ movl 4(%edx), %edi
+ adcl 4(%ecx), %edi
+ adcl 8(%ecx), %esi
+ adcl 12(%edx), %ebx
+ adcl 16(%edx), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl %edi, 4(%eax)
+ movl 28(%edx), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl %esi, 8(%eax)
+ movl 20(%edx), %esi
+ movl %ebx, 12(%eax)
+ movl 20(%ecx), %ebp
+ adcl %esi, %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 24(%edx), %esi
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%eax)
+ movl 24(%ecx), %ebx
+ adcl %esi, %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 28(%ecx), %edi
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 32(%edx), %eax
+ movl 32(%ecx), %esi
+ adcl %eax, %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 36(%edx), %eax
+ movl 36(%ecx), %edx
+ adcl %eax, %edx
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %ebp, %ecx
+ movl 60(%esp), %ebp
+ subl (%ebp), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ sbbl 4(%ebp), %ebx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ sbbl 8(%ebp), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl %esi, %ebx
+ movl %edx, %esi
+ sbbl 12(%ebp), %ebx
+ sbbl 16(%ebp), %edx
+ sbbl $0, %eax
+ andl $1, %eax
+ jne .LBB78_2
+# BB#1:
+ movl %edx, %esi
+.LBB78_2:
+ testb %al, %al
+ movl 12(%esp), %ebp # 4-byte Reload
+ jne .LBB78_4
+# BB#3:
+ movl (%esp), %ebp # 4-byte Reload
+.LBB78_4:
+ movl 48(%esp), %eax
+ movl %ebp, 20(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl 20(%esp), %edx # 4-byte Reload
+ movl 16(%esp), %edi # 4-byte Reload
+ jne .LBB78_6
+# BB#5:
+ movl 4(%esp), %edi # 4-byte Reload
+.LBB78_6:
+ movl %edi, 24(%eax)
+ jne .LBB78_8
+# BB#7:
+ movl 8(%esp), %edx # 4-byte Reload
+.LBB78_8:
+ movl %edx, 28(%eax)
+ jne .LBB78_10
+# BB#9:
+ movl %ebx, %ecx
+.LBB78_10:
+ movl %ecx, 32(%eax)
+ movl %esi, 36(%eax)
+ addl $28, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end78:
+ .size mcl_fpDbl_add5Lbmi2, .Lfunc_end78-mcl_fpDbl_add5Lbmi2
+
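+# mcl_fpDbl_sub5Lbmi2: subtraction of two double-width (10-limb) values; when
+# the subtraction borrows, the corresponding words of p (otherwise zeros) are
+# added back into the high half.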
+ .globl mcl_fpDbl_sub5Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub5Lbmi2,@function
+mcl_fpDbl_sub5Lbmi2: # @mcl_fpDbl_sub5Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $16, %esp
+ movl 40(%esp), %eax
+ movl (%eax), %esi
+ movl 4(%eax), %edi
+ movl 44(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%eax), %ebx
+ sbbl 8(%edx), %ebx
+ movl 36(%esp), %ecx
+ movl %esi, (%ecx)
+ movl 12(%eax), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ecx)
+ movl 16(%eax), %edi
+ sbbl 16(%edx), %edi
+ movl %ebx, 8(%ecx)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%ecx)
+ movl 20(%eax), %esi
+ sbbl %ebx, %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl 24(%edx), %esi
+ movl %edi, 16(%ecx)
+ movl 24(%eax), %ebp
+ sbbl %esi, %ebp
+ movl 28(%edx), %esi
+ movl 28(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, (%esp) # 4-byte Spill
+ movl 32(%edx), %esi
+ movl 32(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 36(%edx), %edx
+ movl 36(%eax), %eax
+ sbbl %edx, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl $0, %edx
+ sbbl $0, %edx
+ andl $1, %edx
+ movl 48(%esp), %ebx
+ jne .LBB79_1
+# BB#2:
+ xorl %eax, %eax
+ jmp .LBB79_3
+.LBB79_1:
+ movl 16(%ebx), %eax
+.LBB79_3:
+ testb %dl, %dl
+ jne .LBB79_4
+# BB#5:
+ movl $0, %edx
+ movl $0, %esi
+ jmp .LBB79_6
+.LBB79_4:
+ movl (%ebx), %esi
+ movl 4(%ebx), %edx
+.LBB79_6:
+ jne .LBB79_7
+# BB#8:
+ movl $0, %edi
+ jmp .LBB79_9
+.LBB79_7:
+ movl 12(%ebx), %edi
+.LBB79_9:
+ jne .LBB79_10
+# BB#11:
+ xorl %ebx, %ebx
+ jmp .LBB79_12
+.LBB79_10:
+ movl 8(%ebx), %ebx
+.LBB79_12:
+ addl 4(%esp), %esi # 4-byte Folded Reload
+ adcl %ebp, %edx
+ movl %esi, 20(%ecx)
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ movl %edx, 24(%ecx)
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 28(%ecx)
+ movl %edi, 32(%ecx)
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%ecx)
+ addl $16, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end79:
+ .size mcl_fpDbl_sub5Lbmi2, .Lfunc_end79-mcl_fpDbl_sub5Lbmi2
+
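+# mcl_fp_mulUnitPre6Lbmi2: multiply a 6-limb value by a single 32-bit word
+# using mulx, producing a 7-limb result with no reduction.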
+ .globl mcl_fp_mulUnitPre6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre6Lbmi2,@function
+mcl_fp_mulUnitPre6Lbmi2: # @mcl_fp_mulUnitPre6Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $12, %esp
+ movl 40(%esp), %edx
+ movl 36(%esp), %esi
+ mulxl 4(%esi), %ecx, %edi
+ mulxl (%esi), %eax, %ebx
+ movl %eax, 8(%esp) # 4-byte Spill
+ addl %ecx, %ebx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ mulxl 8(%esi), %ebp, %eax
+ adcl %edi, %ebp
+ mulxl 12(%esi), %ecx, %edi
+ adcl %eax, %ecx
+ mulxl 16(%esi), %eax, %ebx
+ movl %ebx, (%esp) # 4-byte Spill
+ adcl %edi, %eax
+ mulxl 20(%esi), %edx, %esi
+ movl 32(%esp), %edi
+ movl 8(%esp), %ebx # 4-byte Reload
+ movl %ebx, (%edi)
+ movl 4(%esp), %ebx # 4-byte Reload
+ movl %ebx, 4(%edi)
+ movl %ebp, 8(%edi)
+ movl %ecx, 12(%edi)
+ movl %eax, 16(%edi)
+ adcl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%edi)
+ adcl $0, %esi
+ movl %esi, 24(%edi)
+ addl $12, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end80:
+ .size mcl_fp_mulUnitPre6Lbmi2, .Lfunc_end80-mcl_fp_mulUnitPre6Lbmi2
+
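+# mcl_fpDbl_mulPre6Lbmi2: schoolbook 6x6-limb multiplication producing the
+# full 12-limb product with no modular reduction, one mulx/adc column per
+# word of the multiplier.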
+ .globl mcl_fpDbl_mulPre6Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre6Lbmi2,@function
+mcl_fpDbl_mulPre6Lbmi2: # @mcl_fpDbl_mulPre6Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $56, %esp
+ movl 80(%esp), %ebp
+ movl (%ebp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 4(%ebp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl (%eax), %eax
+ mulxl %eax, %esi, %edi
+ movl %ecx, %edx
+ mulxl %eax, %edx, %ecx
+ movl %edx, 28(%esp) # 4-byte Spill
+ addl %esi, %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 8(%ebp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ mulxl %eax, %esi, %ebx
+ adcl %edi, %esi
+ movl 12(%ebp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %ebp, %ecx
+ mulxl %eax, %edi, %ebp
+ adcl %ebx, %edi
+ movl 16(%ecx), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ mulxl %eax, %ebx, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl %ebp, %ebx
+ movl %ecx, %edx
+ movl 20(%edx), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ mulxl %eax, %eax, %ecx
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl 76(%esp), %edx
+ movl 28(%esp), %ebp # 4-byte Reload
+ movl %ebp, (%edx)
+ adcl $0, %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 84(%esp), %edx
+ movl 4(%edx), %ebp
+ movl 52(%esp), %edx # 4-byte Reload
+ mulxl %ebp, %edx, %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ addl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ mulxl %ebp, %ecx, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl %esi, %ecx
+ movl 44(%esp), %edx # 4-byte Reload
+ mulxl %ebp, %esi, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl %edi, %esi
+ movl 40(%esp), %edx # 4-byte Reload
+ mulxl %ebp, %edi, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl %ebx, %edi
+ movl 36(%esp), %edx # 4-byte Reload
+ mulxl %ebp, %ebx, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl %eax, %ebx
+ movl 32(%esp), %edx # 4-byte Reload
+ mulxl %ebp, %eax, %edx
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ebp
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl 28(%esp), %ecx # 4-byte Folded Reload
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 28(%esp) # 4-byte Spill
+ adcl %edx, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 76(%esp), %eax
+ movl 52(%esp), %edx # 4-byte Reload
+ movl %edx, 4(%eax)
+ movl 80(%esp), %eax
+ movl (%eax), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl 8(%eax), %eax
+ mulxl %eax, %edx, %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ addl %ecx, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp
+ movl 4(%ebp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ mulxl %eax, %ecx, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl %esi, %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 8(%ebp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ mulxl %eax, %ecx, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ adcl %edi, %ecx
+ movl %ecx, %esi
+ movl 12(%ebp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ mulxl %eax, %edi, %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl %ebx, %edi
+ movl 16(%ebp), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ mulxl %eax, %ebx, %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl 20(%ebp), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ mulxl %eax, %ebp, %edx
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl 16(%esp), %eax # 4-byte Reload
+ addl %eax, 52(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ adcl 4(%esp), %ebx # 4-byte Folded Reload
+ adcl (%esp), %ebp # 4-byte Folded Reload
+ adcl %edx, %ecx
+ movl 76(%esp), %eax
+ movl 24(%esp), %edx # 4-byte Reload
+ movl %edx, 8(%eax)
+ movl 84(%esp), %eax
+ movl 12(%eax), %eax
+ movl 48(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ addl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ mulxl %eax, %esi, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl %edi, %esi
+ movl 36(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edi, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl %ebx, %edi
+ movl 32(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ebx, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl %ebp, %ebx
+ movl 28(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ebp, %edx
+ adcl %ecx, %ebp
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl 24(%esp), %ecx # 4-byte Reload
+ addl %ecx, 52(%esp) # 4-byte Folded Spill
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl %edx, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 76(%esp), %eax
+ movl 48(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%eax)
+ movl 80(%esp), %ecx
+ movl (%ecx), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl 16(%eax), %eax
+ mulxl %eax, %edx, %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ addl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx
+ movl 4(%ecx), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ mulxl %eax, %ecx, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl %esi, %ecx
+ movl 80(%esp), %edx
+ movl 8(%edx), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ mulxl %eax, %esi, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl %edi, %esi
+ movl %esi, %edi
+ movl 80(%esp), %esi
+ movl %esi, %edx
+ movl 12(%edx), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ mulxl %eax, %esi, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ adcl %ebx, %esi
+ movl 80(%esp), %edx
+ movl 16(%edx), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ mulxl %eax, %ebx, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl %ebp, %ebx
+ movl 80(%esp), %edx
+ movl 20(%edx), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ mulxl %eax, %ebp, %edx
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl 20(%esp), %ecx # 4-byte Folded Reload
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl %edx, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 76(%esp), %eax
+ movl 28(%esp), %edx # 4-byte Reload
+ movl %edx, 16(%eax)
+ movl 84(%esp), %eax
+ movl 20(%eax), %eax
+ movl 48(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ addl %ecx, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ecx, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %edi
+ movl 40(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ecx, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl %esi, %ecx
+ movl 44(%esp), %edx # 4-byte Reload
+ mulxl %eax, %esi, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl %ebx, %esi
+ movl 36(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ebx, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl %ebp, %ebx
+ movl 32(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ sbbl %ebp, %ebp
+ andl $1, %ebp
+ addl 28(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl 76(%esp), %eax
+ movl 48(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%eax)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%eax)
+ movl %ecx, 28(%eax)
+ movl %esi, 32(%eax)
+ movl %ebx, 36(%eax)
+ movl %edx, 40(%eax)
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 44(%eax)
+ addl $56, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end81:
+ .size mcl_fpDbl_mulPre6Lbmi2, .Lfunc_end81-mcl_fpDbl_mulPre6Lbmi2
+
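+# mcl_fpDbl_sqrPre6Lbmi2: squaring of a 6-limb value into a 12-limb result,
+# structured like the multiplication above but with both operands read from
+# the same input.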
+ .globl mcl_fpDbl_sqrPre6Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre6Lbmi2,@function
+mcl_fpDbl_sqrPre6Lbmi2: # @mcl_fpDbl_sqrPre6Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $60, %esp
+ movl 84(%esp), %ebp
+ movl (%ebp), %ecx
+ movl 4(%ebp), %eax
+ movl %eax, %edx
+ mulxl %ecx, %edi, %esi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ mulxl %ecx, %ebx, %edx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ addl %edi, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 8(%ebp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ mulxl %ecx, %edi, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl %esi, %edi
+ movl 12(%ebp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ mulxl %ecx, %ebx, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl 16(%ebp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ mulxl %ecx, %edx, %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 20(%ebp), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ mulxl %ecx, %ebp, %edx
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ movl 80(%esp), %ecx
+ movl 28(%esp), %esi # 4-byte Reload
+ movl %esi, (%ecx)
+ adcl $0, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl %ecx, 56(%esp) # 4-byte Folded Spill
+ movl %eax, %edx
+ mulxl %eax, %esi, %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl %edi, %esi
+ movl 48(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ecx, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl %ebx, %ecx
+ movl 44(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ebx, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl 40(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edi, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl %ebp, %edi
+ movl 32(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %eax
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %ebp
+ sbbl %edx, %edx
+ andl $1, %edx
+ addl 52(%esp), %esi # 4-byte Folded Reload
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 32(%esp) # 4-byte Spill
+ adcl %eax, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 80(%esp), %eax
+ movl 56(%esp), %edx # 4-byte Reload
+ movl %edx, 4(%eax)
+ movl 84(%esp), %eax
+ movl (%eax), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 8(%eax), %ebp
+ mulxl %ebp, %edx, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ addl %esi, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl 4(%eax), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ mulxl %ebp, %edx, %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl %ecx, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %ebp, %edx
+ mulxl %ebp, %ecx, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl %ebx, %ecx
+ movl %eax, %esi
+ movl 12(%esi), %eax
+ movl %eax, %edx
+ mulxl %ebp, %ebx, %edx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl %ebx, %edi
+ movl 16(%esi), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ mulxl %ebp, %ebx, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl 20(%esi), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ mulxl %ebp, %esi, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ sbbl %ebp, %ebp
+ andl $1, %ebp
+ movl 20(%esp), %edx # 4-byte Reload
+ addl %edx, 56(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ addl 56(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl %eax, %edx
+ mulxl %eax, %ecx, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl %ebx, %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ecx, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl %esi, %ecx
+ movl %ecx, %esi
+ movl 32(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ebx, %edx
+ adcl %ebp, %ebx
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl 20(%esp), %eax # 4-byte Reload
+ addl %eax, 48(%esp) # 4-byte Folded Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ adcl %edx, %ecx
+ movl 80(%esp), %eax
+ movl 44(%esp), %edx # 4-byte Reload
+ movl %edx, 8(%eax)
+ movl 24(%esp), %edx # 4-byte Reload
+ movl %edx, 12(%eax)
+ movl 84(%esp), %esi
+ movl (%esi), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 16(%esi), %ebp
+ mulxl %ebp, %edx, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ addl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 4(%esi), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ mulxl %ebp, %eax, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl %edi, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 8(%esi), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ mulxl %ebp, %eax, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 12(%esi), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ mulxl %ebp, %edi, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %ebp, %edx
+ mulxl %ebp, %eax, %edx
+ movl %edx, (%esp) # 4-byte Spill
+ adcl %ebx, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 20(%esi), %ebx
+ movl %ebx, %edx
+ mulxl %ebp, %edx, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl %edx, %ecx
+ sbbl %ebp, %ebp
+ andl $1, %ebp
+ movl 8(%esp), %esi # 4-byte Reload
+ addl 24(%esp), %esi # 4-byte Folded Reload
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl %edx, 56(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl 4(%esp), %edx # 4-byte Reload
+ adcl %edx, 52(%esp) # 4-byte Folded Spill
+ adcl (%esp), %ecx # 4-byte Folded Reload
+ adcl %eax, %ebp
+ movl 44(%esp), %edx # 4-byte Reload
+ mulxl %ebx, %edx, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ addl %esi, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ mulxl %ebx, %edx, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %eax
+ movl 36(%esp), %edx # 4-byte Reload
+ mulxl %ebx, %esi, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl %edi, %esi
+ movl 32(%esp), %edx # 4-byte Reload
+ mulxl %ebx, %edi, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ebx, %edx
+ mulxl %ebx, %ebx, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl %ebp, %ebx
+ sbbl %ebp, %ebp
+ andl $1, %ebp
+ addl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ movl 80(%esp), %eax
+ movl 48(%esp), %edx # 4-byte Reload
+ movl %edx, 16(%eax)
+ movl 44(%esp), %edx # 4-byte Reload
+ movl %edx, 20(%eax)
+ movl 52(%esp), %edx # 4-byte Reload
+ movl %edx, 24(%eax)
+ movl %esi, 28(%eax)
+ movl %edi, 32(%eax)
+ movl %ecx, 36(%eax)
+ movl %ebx, 40(%eax)
+ adcl 56(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 44(%eax)
+ addl $60, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end82:
+ .size mcl_fpDbl_sqrPre6Lbmi2, .Lfunc_end82-mcl_fpDbl_sqrPre6Lbmi2
+
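+# mcl_fp_mont6Lbmi2: 6-limb (192-bit) Montgomery multiplication, the wider
+# counterpart of mcl_fp_mont5Lbmi2 above, presumably ending with the usual
+# conditional subtraction of p.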
+ .globl mcl_fp_mont6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont6Lbmi2,@function
+mcl_fp_mont6Lbmi2: # @mcl_fp_mont6Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $100, %esp
+ movl 124(%esp), %eax
+ movl 20(%eax), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx
+ movl (%ecx), %ecx
+ mulxl %ecx, %edx, %ebp
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 16(%eax), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ mulxl %ecx, %edx, %edi
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 12(%eax), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ mulxl %ecx, %edx, %esi
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl (%eax), %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 4(%eax), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ mulxl %ecx, %eax, %edx
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl %ecx, %ebx, %edx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ addl 80(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 124(%esp), %eax
+ movl 8(%eax), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ mulxl %ecx, %ecx, %eax
+ adcl 84(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ adcl 92(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 96(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 132(%esp), %edi
+ movl -4(%edi), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ imull %eax, %edx
+ movl (%edi), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 4(%edi), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ mulxl %esi, %esi, %ebp
+ mulxl %eax, %ecx, %eax
+ movl %ecx, 12(%esp) # 4-byte Spill
+ addl %esi, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 8(%edi), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ mulxl %eax, %ecx, %esi
+ adcl %ebp, %ecx
+ movl 12(%edi), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ mulxl %eax, %ebx, %eax
+ adcl %esi, %ebx
+ movl 16(%edi), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ mulxl %esi, %esi, %ebp
+ adcl %eax, %esi
+ movl 20(%edi), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ mulxl %eax, %edi, %eax
+ adcl %ebp, %edi
+ adcl $0, %eax
+ movl 12(%esp), %edx # 4-byte Reload
+ addl 16(%esp), %edx # 4-byte Folded Reload
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl %edx, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 128(%esp), %edx
+ movl 4(%edx), %edx
+ mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, (%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ mulxl 48(%esp), %eax, %ecx # 4-byte Folded Reload
+ mulxl 52(%esp), %ebx, %ebp # 4-byte Folded Reload
+ addl %eax, %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ mulxl 56(%esp), %esi, %ebp # 4-byte Folded Reload
+ adcl %ecx, %esi
+ mulxl 68(%esp), %ecx, %eax # 4-byte Folded Reload
+ adcl %ebp, %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ecx
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 20(%esp), %ebp # 4-byte Reload
+ adcl %ebp, 28(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl %edi, %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 8(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ imull 60(%esp), %edx # 4-byte Folded Reload
+ mulxl 96(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 92(%esp), %edi, %esi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ addl %ecx, %esi
+ movl %esi, %edi
+ mulxl 88(%esp), %esi, %ecx # 4-byte Folded Reload
+ adcl %eax, %esi
+ movl %esi, %eax
+ mulxl 84(%esp), %esi, %ebx # 4-byte Folded Reload
+ adcl %ecx, %esi
+ mulxl 80(%esp), %ecx, %ebp # 4-byte Folded Reload
+ adcl %ebx, %ecx
+ movl %ecx, %ebx
+ mulxl 76(%esp), %ecx, %edx # 4-byte Folded Reload
+ adcl %ebp, %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, %ecx
+ movl 36(%esp), %edx # 4-byte Reload
+ andl $1, %edx
+ movl 12(%esp), %ebp # 4-byte Reload
+ addl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%esp) # 4-byte Spill
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 16(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl %eax, 24(%esp) # 4-byte Folded Spill
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 128(%esp), %edx
+ movl 8(%edx), %edx
+ mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, (%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ mulxl 48(%esp), %ebx, %edi # 4-byte Folded Reload
+ mulxl 52(%esp), %eax, %esi # 4-byte Folded Reload
+ movl %eax, 4(%esp) # 4-byte Spill
+ addl %ebx, %esi
+ mulxl 56(%esp), %ecx, %ebp # 4-byte Folded Reload
+ adcl %edi, %ecx
+ mulxl 68(%esp), %edi, %eax # 4-byte Folded Reload
+ adcl %ebp, %edi
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ movl 4(%esp), %ebx # 4-byte Reload
+ addl 28(%esp), %ebx # 4-byte Folded Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 40(%esp) # 4-byte Spill
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ imull 60(%esp), %edx # 4-byte Folded Reload
+ mulxl 96(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 92(%esp), %ebp, %esi # 4-byte Folded Reload
+ movl %ebp, 36(%esp) # 4-byte Spill
+ addl %ecx, %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ mulxl 88(%esp), %esi, %ecx # 4-byte Folded Reload
+ adcl %eax, %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ mulxl 84(%esp), %esi, %eax # 4-byte Folded Reload
+ adcl %ecx, %esi
+ movl %esi, %ecx
+ mulxl 80(%esp), %esi, %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%esp) # 4-byte Spill
+ adcl %eax, %esi
+ mulxl 76(%esp), %ebp, %eax # 4-byte Folded Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %eax
+ movl 32(%esp), %edx # 4-byte Reload
+ andl $1, %edx
+ addl %ebx, 36(%esp) # 4-byte Folded Spill
+ movl 20(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 28(%esp) # 4-byte Folded Spill
+ movl 16(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 24(%esp) # 4-byte Folded Spill
+ adcl %edi, %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 8(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 128(%esp), %edx
+ movl 12(%edx), %edx
+ mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ mulxl 72(%esp), %esi, %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ mulxl 48(%esp), %eax, %ebx # 4-byte Folded Reload
+ mulxl 52(%esp), %ecx, %edi # 4-byte Folded Reload
+ addl %eax, %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ mulxl 56(%esp), %eax, %edi # 4-byte Folded Reload
+ adcl %ebx, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ mulxl 68(%esp), %ebx, %eax # 4-byte Folded Reload
+ adcl %edi, %ebx
+ adcl %esi, %eax
+ movl %eax, %esi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 16(%esp), %edi # 4-byte Reload
+ adcl %edi, 20(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ adcl %ebp, %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ imull 60(%esp), %edx # 4-byte Folded Reload
+ mulxl 96(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 92(%esp), %edi, %esi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ addl %ecx, %esi
+ movl %esi, %ebx
+ mulxl 88(%esp), %esi, %ecx # 4-byte Folded Reload
+ adcl %eax, %esi
+ movl %esi, %edi
+ mulxl 84(%esp), %esi, %eax # 4-byte Folded Reload
+ adcl %ecx, %esi
+ movl %esi, %ecx
+ mulxl 80(%esp), %esi, %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%esp) # 4-byte Spill
+ adcl %eax, %esi
+ movl %esi, %ebp
+ mulxl 76(%esp), %edx, %eax # 4-byte Folded Reload
+ adcl 12(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 8(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl 32(%esp), %edx # 4-byte Reload
+ andl $1, %edx
+ movl 16(%esp), %esi # 4-byte Reload
+ addl 4(%esp), %esi # 4-byte Folded Reload
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 8(%esp), %esi # 4-byte Reload
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 128(%esp), %edx
+ movl 16(%edx), %edx
+ mulxl 64(%esp), %eax, %ebp # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ mulxl 48(%esp), %ebx, %eax # 4-byte Folded Reload
+ mulxl 52(%esp), %ecx, %edi # 4-byte Folded Reload
+ addl %ebx, %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ mulxl 56(%esp), %edi, %ebx # 4-byte Folded Reload
+ movl %ebx, 4(%esp) # 4-byte Spill
+ adcl %eax, %edi
+ movl %edi, %ebx
+ mulxl 68(%esp), %edx, %eax # 4-byte Folded Reload
+ adcl 4(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %edi
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %ebp, %edx
+ adcl $0, %edx
+ addl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 20(%esp), %ebp # 4-byte Reload
+ adcl %ebp, 28(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl 4(%esp), %ebx # 4-byte Reload
+ adcl %esi, %ebx
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ imull 60(%esp), %edx # 4-byte Folded Reload
+ mulxl 96(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 92(%esp), %edi, %esi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ addl %ecx, %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ mulxl 88(%esp), %esi, %ecx # 4-byte Folded Reload
+ adcl %eax, %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ mulxl 84(%esp), %edi, %eax # 4-byte Folded Reload
+ adcl %ecx, %edi
+ mulxl 80(%esp), %esi, %ebp # 4-byte Folded Reload
+ adcl %eax, %esi
+ mulxl 76(%esp), %ecx, %eax # 4-byte Folded Reload
+ adcl %ebp, %ecx
+ adcl $0, %eax
+ movl %eax, %ebp
+ movl 40(%esp), %edx # 4-byte Reload
+ andl $1, %edx
+ movl 12(%esp), %eax # 4-byte Reload
+ addl 8(%esp), %eax # 4-byte Folded Reload
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl %ebx, %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 44(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 128(%esp), %edx
+ movl 20(%edx), %edx
+ mulxl 48(%esp), %eax, %esi # 4-byte Folded Reload
+ mulxl 52(%esp), %ecx, %ebp # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ addl %eax, %ebp
+ mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ mulxl 56(%esp), %ebx, %ecx # 4-byte Folded Reload
+ adcl %esi, %ebx
+ mulxl 72(%esp), %esi, %eax # 4-byte Folded Reload
+ movl %esi, 72(%esp) # 4-byte Spill
+ mulxl 68(%esp), %esi, %edx # 4-byte Folded Reload
+ adcl %ecx, %esi
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %ecx
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ movl 52(%esp), %edi # 4-byte Reload
+ addl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 68(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 48(%esp) # 4-byte Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ imull %edi, %edx
+ mulxl 92(%esp), %eax, %edi # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ mulxl 96(%esp), %ecx, %esi # 4-byte Folded Reload
+ addl %edi, %ecx
+ mulxl 88(%esp), %edi, %ebx # 4-byte Folded Reload
+ adcl %esi, %edi
+ movl %edx, %esi
+ mulxl 84(%esp), %ebp, %eax # 4-byte Folded Reload
+ adcl %ebx, %ebp
+ movl %esi, %edx
+ mulxl 80(%esp), %ebx, %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl %eax, %ebx
+ movl %esi, %edx
+ mulxl 76(%esp), %esi, %edx # 4-byte Folded Reload
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ adcl $0, %edx
+ andl $1, 72(%esp) # 4-byte Folded Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ addl 52(%esp), %eax # 4-byte Folded Reload
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 68(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 60(%esp) # 4-byte Spill
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %eax
+ subl 92(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl %edi, %ecx
+ sbbl 96(%esp), %ecx # 4-byte Folded Reload
+ movl %ebp, %edi
+ sbbl 88(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl %esi, %edi
+ sbbl 84(%esp), %ebx # 4-byte Folded Reload
+ sbbl 80(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl %edx, %esi
+ sbbl 76(%esp), %esi # 4-byte Folded Reload
+ sbbl $0, %eax
+ andl $1, %eax
+ jne .LBB83_2
+# BB#1:
+ movl %ecx, 68(%esp) # 4-byte Spill
+.LBB83_2:
+ testb %al, %al
+ movl 56(%esp), %ecx # 4-byte Reload
+ jne .LBB83_4
+# BB#3:
+ movl 72(%esp), %ecx # 4-byte Reload
+.LBB83_4:
+ movl 120(%esp), %eax
+ movl %ecx, (%eax)
+ movl 68(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%eax)
+ jne .LBB83_6
+# BB#5:
+ movl 92(%esp), %ebp # 4-byte Reload
+.LBB83_6:
+ movl %ebp, 8(%eax)
+ movl 60(%esp), %ecx # 4-byte Reload
+ jne .LBB83_8
+# BB#7:
+ movl %ebx, %ecx
+.LBB83_8:
+ movl %ecx, 12(%eax)
+ jne .LBB83_10
+# BB#9:
+ movl 96(%esp), %edi # 4-byte Reload
+.LBB83_10:
+ movl %edi, 16(%eax)
+ jne .LBB83_12
+# BB#11:
+ movl %esi, %edx
+.LBB83_12:
+ movl %edx, 20(%eax)
+ addl $100, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end83:
+ .size mcl_fp_mont6Lbmi2, .Lfunc_end83-mcl_fp_mont6Lbmi2
+
+ .globl mcl_fp_montNF6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF6Lbmi2,@function
+mcl_fp_montNF6Lbmi2: # @mcl_fp_montNF6Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $84, %esp
+ movl 108(%esp), %ebx
+ movl (%ebx), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 4(%ebx), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 112(%esp), %eax
+ movl (%eax), %eax
+ mulxl %eax, %ecx, %esi
+ movl %edi, %edx
+ mulxl %eax, %edx, %ebp
+ movl %edx, 76(%esp) # 4-byte Spill
+ addl %ecx, %ebp
+ movl 8(%ebx), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ mulxl %eax, %ecx, %edi
+ adcl %esi, %ecx
+ movl %ecx, %esi
+ movl 12(%ebx), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ mulxl %eax, %ecx, %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ adcl %edi, %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 16(%ebx), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ mulxl %eax, %ecx, %edi
+ adcl 80(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 20(%ebx), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ mulxl %eax, %ecx, %eax
+ adcl %edi, %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 116(%esp), %ebx
+ movl -4(%ebx), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ movl %edi, %edx
+ imull %eax, %edx
+ movl (%ebx), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ mulxl %eax, %ecx, %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ addl %edi, %ecx
+ movl 4(%ebx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ mulxl %eax, %edi, %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl %ebp, %edi
+ movl 8(%ebx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ mulxl %eax, %eax, %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl %esi, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 12(%ebx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ mulxl %eax, %esi, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ movl 16(%ebx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ mulxl %eax, %ecx, %eax
+ movl %eax, (%esp) # 4-byte Spill
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl 20(%ebx), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ mulxl %eax, %ebp, %eax
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 12(%esp), %edi # 4-byte Reload
+ adcl %edi, 24(%esp) # 4-byte Folded Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl (%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%esp) # 4-byte Spill
+ adcl %eax, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 112(%esp), %eax
+ movl 4(%eax), %edx
+ mulxl 48(%esp), %ecx, %esi # 4-byte Folded Reload
+ mulxl 52(%esp), %ebp, %eax # 4-byte Folded Reload
+ addl %ecx, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ mulxl 44(%esp), %ecx, %edi # 4-byte Folded Reload
+ adcl %esi, %ecx
+ movl %ecx, %esi
+ mulxl 40(%esp), %eax, %ebx # 4-byte Folded Reload
+ adcl %edi, %eax
+ mulxl 36(%esp), %ecx, %edi # 4-byte Folded Reload
+ movl %edi, (%esp) # 4-byte Spill
+ adcl %ebx, %ecx
+ movl %ecx, %edi
+ mulxl 32(%esp), %ebx, %ecx # 4-byte Folded Reload
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ adcl $0, %ecx
+ movl %ecx, %edx
+ addl 20(%esp), %ebp # 4-byte Folded Reload
+ movl 4(%esp), %ecx # 4-byte Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 4(%esp) # 4-byte Spill
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ adcl $0, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ movl %esi, %edx
+ imull 56(%esp), %edx # 4-byte Folded Reload
+ mulxl 80(%esp), %ebp, %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ addl %esi, %ebp
+ mulxl 76(%esp), %ebp, %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl %ecx, %ebp
+ movl %ebp, %esi
+ mulxl 72(%esp), %ebp, %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, %ecx
+ mulxl 68(%esp), %ebp, %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ adcl %eax, %ebp
+ movl %ebp, %eax
+ mulxl 64(%esp), %ebp, %edi # 4-byte Folded Reload
+ movl %edi, 4(%esp) # 4-byte Spill
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ mulxl 60(%esp), %edi, %edx # 4-byte Folded Reload
+ adcl %ebx, %edi
+ movl %edi, %ebx
+ movl 28(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ addl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%esp) # 4-byte Spill
+ adcl 4(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 4(%esp) # 4-byte Spill
+ adcl %edx, %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 112(%esp), %eax
+ movl 8(%eax), %edx
+ mulxl 48(%esp), %eax, %ecx # 4-byte Folded Reload
+ mulxl 52(%esp), %esi, %edi # 4-byte Folded Reload
+ movl %esi, (%esp) # 4-byte Spill
+ addl %eax, %edi
+ mulxl 44(%esp), %eax, %esi # 4-byte Folded Reload
+ adcl %ecx, %eax
+ movl %eax, %ecx
+ mulxl 40(%esp), %eax, %ebx # 4-byte Folded Reload
+ adcl %esi, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ mulxl 36(%esp), %eax, %ebp # 4-byte Folded Reload
+ adcl %ebx, %eax
+ movl %eax, %esi
+ mulxl 32(%esp), %ebx, %eax # 4-byte Folded Reload
+ adcl %ebp, %ebx
+ adcl $0, %eax
+ movl %eax, %edx
+ movl (%esp), %ebp # 4-byte Reload
+ addl 20(%esp), %ebp # 4-byte Folded Reload
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ movl %esi, (%esp) # 4-byte Spill
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ adcl $0, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %ebp, %edx
+ movl %ebp, %eax
+ imull 56(%esp), %edx # 4-byte Folded Reload
+ mulxl 80(%esp), %ebp, %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ addl %eax, %ebp
+ mulxl 76(%esp), %ebp, %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl %edi, %ebp
+ movl %ebp, %edi
+ mulxl 72(%esp), %ebp, %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, %ecx
+ mulxl 68(%esp), %ebp, %eax # 4-byte Folded Reload
+ movl %eax, 4(%esp) # 4-byte Spill
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, %eax
+ mulxl 64(%esp), %ebp, %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl (%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 20(%esp) # 4-byte Spill
+ mulxl 60(%esp), %ebp, %edx # 4-byte Folded Reload
+ adcl %ebx, %ebp
+ movl 28(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ addl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl %eax, 20(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 4(%esp) # 4-byte Spill
+ adcl %edx, %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 112(%esp), %eax
+ movl 12(%eax), %edx
+ mulxl 48(%esp), %eax, %ecx # 4-byte Folded Reload
+ mulxl 52(%esp), %ebp, %esi # 4-byte Folded Reload
+ addl %eax, %esi
+ mulxl 44(%esp), %eax, %edi # 4-byte Folded Reload
+ adcl %ecx, %eax
+ mulxl 40(%esp), %ecx, %ebx # 4-byte Folded Reload
+ adcl %edi, %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ mulxl 36(%esp), %ecx, %edi # 4-byte Folded Reload
+ movl %edi, (%esp) # 4-byte Spill
+ adcl %ebx, %ecx
+ movl %ecx, %edi
+ mulxl 32(%esp), %ebx, %ecx # 4-byte Folded Reload
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ adcl $0, %ecx
+ addl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ adcl $0, %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl %ebp, %ecx
+ movl %ecx, %edx
+ imull 56(%esp), %edx # 4-byte Folded Reload
+ mulxl 80(%esp), %ebp, %eax # 4-byte Folded Reload
+ movl %eax, 16(%esp) # 4-byte Spill
+ addl %ecx, %ebp
+ mulxl 76(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl %esi, %ecx
+ mulxl 72(%esp), %ebp, %eax # 4-byte Folded Reload
+ movl %eax, 8(%esp) # 4-byte Spill
+ adcl (%esp), %ebp # 4-byte Folded Reload
+ mulxl 68(%esp), %esi, %eax # 4-byte Folded Reload
+ movl %eax, 4(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, %eax
+ mulxl 64(%esp), %esi, %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ mulxl 60(%esp), %esi, %edx # 4-byte Folded Reload
+ adcl %ebx, %esi
+ movl 28(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ addl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl %eax, 20(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 4(%esp) # 4-byte Spill
+ adcl %edx, %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 112(%esp), %eax
+ movl 16(%eax), %edx
+ mulxl 48(%esp), %eax, %ecx # 4-byte Folded Reload
+ mulxl 52(%esp), %esi, %edi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ addl %eax, %edi
+ mulxl 44(%esp), %eax, %esi # 4-byte Folded Reload
+ adcl %ecx, %eax
+ mulxl 40(%esp), %ecx, %ebx # 4-byte Folded Reload
+ adcl %esi, %ecx
+ mulxl 36(%esp), %esi, %ebp # 4-byte Folded Reload
+ adcl %ebx, %esi
+ mulxl 32(%esp), %ebx, %edx # 4-byte Folded Reload
+ adcl %ebp, %ebx
+ adcl $0, %edx
+ movl 24(%esp), %ebp # 4-byte Reload
+ addl 16(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 8(%esp) # 4-byte Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ adcl $0, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %ebp, %edx
+ imull 56(%esp), %edx # 4-byte Folded Reload
+ mulxl 80(%esp), %ebp, %eax # 4-byte Folded Reload
+ movl %eax, 20(%esp) # 4-byte Spill
+ addl 24(%esp), %ebp # 4-byte Folded Reload
+ mulxl 76(%esp), %ebp, %eax # 4-byte Folded Reload
+ movl %eax, 16(%esp) # 4-byte Spill
+ adcl %edi, %ebp
+ mulxl 72(%esp), %edi, %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ mulxl 68(%esp), %eax, %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ adcl %ecx, %eax
+ mulxl 64(%esp), %ecx, %edi # 4-byte Folded Reload
+ movl %edi, 4(%esp) # 4-byte Spill
+ adcl %esi, %ecx
+ movl %ecx, %edi
+ mulxl 60(%esp), %ecx, %edx # 4-byte Folded Reload
+ adcl %ebx, %ecx
+ movl 28(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ addl 20(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 16(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 24(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%esp) # 4-byte Spill
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ adcl %edx, %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 112(%esp), %eax
+ movl 20(%eax), %edx
+ mulxl 48(%esp), %ebx, %eax # 4-byte Folded Reload
+ mulxl 52(%esp), %edi, %esi # 4-byte Folded Reload
+ movl %edi, 52(%esp) # 4-byte Spill
+ addl %ebx, %esi
+ mulxl 44(%esp), %ebx, %ebp # 4-byte Folded Reload
+ adcl %eax, %ebx
+ mulxl 40(%esp), %eax, %edi # 4-byte Folded Reload
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl %ebp, %eax
+ movl %eax, %ebp
+ mulxl 36(%esp), %eax, %edi # 4-byte Folded Reload
+ movl %edi, 44(%esp) # 4-byte Spill
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ mulxl 32(%esp), %edx, %eax # 4-byte Folded Reload
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %eax
+ movl 52(%esp), %edi # 4-byte Reload
+ addl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ adcl 16(%esp), %ebx # 4-byte Folded Reload
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 40(%esp) # 4-byte Spill
+ adcl %ecx, 48(%esp) # 4-byte Folded Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl 56(%esp), %edx # 4-byte Reload
+ movl 52(%esp), %ebp # 4-byte Reload
+ imull %ebp, %edx
+ mulxl 80(%esp), %ecx, %edi # 4-byte Folded Reload
+ movl %edi, 56(%esp) # 4-byte Spill
+ addl %ebp, %ecx
+ mulxl 76(%esp), %ebp, %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl %esi, %ebp
+ mulxl 72(%esp), %ecx, %esi # 4-byte Folded Reload
+ movl %esi, 36(%esp) # 4-byte Spill
+ adcl %ebx, %ecx
+ movl %edx, %ebx
+ mulxl 68(%esp), %esi, %edx # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %ebx, %edx
+ mulxl 64(%esp), %edi, %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, %edx
+ mulxl 60(%esp), %ebx, %edx # 4-byte Folded Reload
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ adcl $0, %eax
+ addl 56(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 56(%esp) # 4-byte Spill
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ adcl %edx, %eax
+ movl %ebp, %edx
+ subl 80(%esp), %edx # 4-byte Folded Reload
+ sbbl 76(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl %esi, %ebp
+ movl %ebx, %esi
+ sbbl 72(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 76(%esp) # 4-byte Spill
+ sbbl 68(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl %esi, %ebx
+ sbbl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %eax, %edi
+ sbbl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %ebp
+ sarl $31, %ebp
+ testl %ebp, %ebp
+ js .LBB84_2
+# BB#1:
+ movl %edx, 56(%esp) # 4-byte Spill
+.LBB84_2:
+ movl 104(%esp), %ebp
+ movl 56(%esp), %ecx # 4-byte Reload
+ movl %ecx, (%ebp)
+ movl 44(%esp), %ecx # 4-byte Reload
+ js .LBB84_4
+# BB#3:
+ movl 40(%esp), %ecx # 4-byte Reload
+.LBB84_4:
+ movl %ecx, 4(%ebp)
+ movl 52(%esp), %ecx # 4-byte Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ js .LBB84_6
+# BB#5:
+ movl 76(%esp), %edx # 4-byte Reload
+.LBB84_6:
+ movl %edx, 8(%ebp)
+ js .LBB84_8
+# BB#7:
+ movl 80(%esp), %ecx # 4-byte Reload
+.LBB84_8:
+ movl %ecx, 12(%ebp)
+ js .LBB84_10
+# BB#9:
+ movl %ebx, %esi
+.LBB84_10:
+ movl %esi, 16(%ebp)
+ js .LBB84_12
+# BB#11:
+ movl %edi, %eax
+.LBB84_12:
+ movl %eax, 20(%ebp)
+ addl $84, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end84:
+ .size mcl_fp_montNF6Lbmi2, .Lfunc_end84-mcl_fp_montNF6Lbmi2
+
+ .globl mcl_fp_montRed6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed6Lbmi2,@function
+mcl_fp_montRed6Lbmi2: # @mcl_fp_montRed6Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $88, %esp
+ movl 116(%esp), %edi
+ movl -4(%edi), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl (%edi), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx
+ movl (%ecx), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ imull %eax, %edx
+ movl 20(%edi), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ mulxl %eax, %ebx, %eax
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 16(%edi), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ mulxl %eax, %ecx, %eax
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 12(%edi), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ mulxl %eax, %ecx, %eax
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 4(%edi), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ mulxl %eax, %ebx, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ mulxl %esi, %ecx, %eax
+ movl %ecx, 48(%esp) # 4-byte Spill
+ addl %ebx, %eax
+ movl %eax, %ebp
+ movl 8(%edi), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ mulxl %esi, %eax, %edx
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl 32(%esp), %edi # 4-byte Reload
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 84(%esp), %ecx # 4-byte Folded Reload
+ movl 112(%esp), %ecx
+ adcl 4(%ecx), %ebp
+ adcl 8(%ecx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ adcl 12(%ecx), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ adcl 16(%ecx), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 20(%ecx), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ adcl 24(%ecx), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 44(%ecx), %edx
+ movl 40(%ecx), %esi
+ movl 36(%ecx), %edi
+ movl 32(%ecx), %ebx
+ movl 28(%ecx), %eax
+ adcl $0, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl $0, %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl %ebp, %ebx
+ movl %ebx, %edx
+ imull 60(%esp), %edx # 4-byte Folded Reload
+ mulxl 76(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 68(%esp), %esi, %ebp # 4-byte Folded Reload
+ movl %esi, 8(%esp) # 4-byte Spill
+ addl %ecx, %ebp
+ mulxl 64(%esp), %edi, %ecx # 4-byte Folded Reload
+ adcl %eax, %edi
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ mulxl 80(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %ecx, (%esp) # 4-byte Spill
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ecx
+ movl 52(%esp), %esi # 4-byte Reload
+ mulxl %esi, %edx, %eax
+ adcl (%esp), %edx # 4-byte Folded Reload
+ adcl $0, %eax
+ addl %ebx, 8(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 32(%esp), %edi # 4-byte Reload
+ adcl %edi, 24(%esp) # 4-byte Folded Spill
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ adcl $0, 48(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ebp, %edx
+ imull 60(%esp), %edx # 4-byte Folded Reload
+ mulxl %esi, %ecx, %eax
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ mulxl 64(%esp), %esi, %eax # 4-byte Folded Reload
+ movl %eax, 4(%esp) # 4-byte Spill
+ mulxl 72(%esp), %edi, %eax # 4-byte Folded Reload
+ mulxl 68(%esp), %ecx, %ebx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ addl %edi, %ebx
+ adcl %esi, %eax
+ movl %eax, %esi
+ mulxl 76(%esp), %ecx, %eax # 4-byte Folded Reload
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ mulxl 80(%esp), %edi, %edx # 4-byte Folded Reload
+ adcl %eax, %edi
+ movl %edi, %eax
+ adcl 12(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %edi
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl %ebp, 8(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ adcl $0, 48(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ebx, %edx
+ imull 60(%esp), %edx # 4-byte Folded Reload
+ mulxl 52(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ mulxl 64(%esp), %esi, %eax # 4-byte Folded Reload
+ movl %eax, 8(%esp) # 4-byte Spill
+ mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 68(%esp), %edi, %ebp # 4-byte Folded Reload
+ addl %ecx, %ebp
+ adcl %esi, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ mulxl 76(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %esi
+ mulxl 80(%esp), %ecx, %eax # 4-byte Folded Reload
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl %ebx, %edi
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl 20(%esp), %edi # 4-byte Reload
+ adcl %edi, 32(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl $0, 48(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ebp, %edx
+ imull 60(%esp), %edx # 4-byte Folded Reload
+ mulxl 52(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ mulxl 64(%esp), %edi, %esi # 4-byte Folded Reload
+ mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ mulxl 68(%esp), %ebx, %ecx # 4-byte Folded Reload
+ addl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl %edi, %eax
+ movl %eax, %edi
+ mulxl 76(%esp), %eax, %ecx # 4-byte Folded Reload
+ adcl %esi, %eax
+ movl %eax, %esi
+ mulxl 80(%esp), %edx, %eax # 4-byte Folded Reload
+ adcl %ecx, %edx
+ movl %edx, %ecx
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl %ebp, %ebx
+ movl 40(%esp), %ebx # 4-byte Reload
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 40(%esp) # 4-byte Spill
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ imull %ebx, %edx
+ mulxl 68(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ mulxl 72(%esp), %eax, %edi # 4-byte Folded Reload
+ addl %ecx, %eax
+ mulxl 64(%esp), %ebp, %ecx # 4-byte Folded Reload
+ adcl %edi, %ebp
+ movl %edx, %edi
+ mulxl 76(%esp), %esi, %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl %ecx, %esi
+ movl %edi, %edx
+ mulxl 80(%esp), %ebx, %ecx # 4-byte Folded Reload
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %edi, %edx
+ mulxl 52(%esp), %ecx, %edi # 4-byte Folded Reload
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ adcl $0, %edi
+ movl 60(%esp), %edx # 4-byte Reload
+ addl 40(%esp), %edx # 4-byte Folded Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 60(%esp) # 4-byte Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 40(%esp) # 4-byte Spill
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ subl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ sbbl 72(%esp), %eax # 4-byte Folded Reload
+ movl %esi, %ebp
+ sbbl 64(%esp), %ebp # 4-byte Folded Reload
+ sbbl 76(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 76(%esp) # 4-byte Spill
+ movl %ecx, %ebx
+ sbbl 80(%esp), %ebx # 4-byte Folded Reload
+ sbbl 52(%esp), %edi # 4-byte Folded Reload
+ sbbl $0, %edx
+ andl $1, %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ jne .LBB85_2
+# BB#1:
+ movl %eax, 60(%esp) # 4-byte Spill
+.LBB85_2:
+ movl 84(%esp), %eax # 4-byte Reload
+ testb %al, %al
+ movl 36(%esp), %ecx # 4-byte Reload
+ jne .LBB85_4
+# BB#3:
+ movl 68(%esp), %ecx # 4-byte Reload
+.LBB85_4:
+ movl 108(%esp), %eax
+ movl %ecx, (%eax)
+ movl 60(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ jne .LBB85_6
+# BB#5:
+ movl %ebp, %esi
+.LBB85_6:
+ movl %esi, 8(%eax)
+ jne .LBB85_8
+# BB#7:
+ movl 76(%esp), %ecx # 4-byte Reload
+.LBB85_8:
+ movl %ecx, 12(%eax)
+ movl 48(%esp), %ecx # 4-byte Reload
+ jne .LBB85_10
+# BB#9:
+ movl %ebx, %ecx
+.LBB85_10:
+ movl %ecx, 16(%eax)
+ movl 56(%esp), %ecx # 4-byte Reload
+ jne .LBB85_12
+# BB#11:
+ movl %edi, %ecx
+.LBB85_12:
+ movl %ecx, 20(%eax)
+ addl $88, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end85:
+ .size mcl_fp_montRed6Lbmi2, .Lfunc_end85-mcl_fp_montRed6Lbmi2
+
+ .globl mcl_fp_addPre6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre6Lbmi2,@function
+mcl_fp_addPre6Lbmi2: # @mcl_fp_addPre6Lbmi2
+# BB#0:
+ pushl %esi
+ movl 16(%esp), %eax
+ movl (%eax), %ecx
+ movl 12(%esp), %edx
+ addl (%edx), %ecx
+ movl 8(%esp), %esi
+ movl %ecx, (%esi)
+ movl 4(%eax), %ecx
+ adcl 4(%edx), %ecx
+ movl %ecx, 4(%esi)
+ movl 8(%eax), %ecx
+ adcl 8(%edx), %ecx
+ movl %ecx, 8(%esi)
+ movl 12(%edx), %ecx
+ adcl 12(%eax), %ecx
+ movl %ecx, 12(%esi)
+ movl 16(%edx), %ecx
+ adcl 16(%eax), %ecx
+ movl %ecx, 16(%esi)
+ movl 20(%eax), %eax
+ movl 20(%edx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 20(%esi)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ retl
+.Lfunc_end86:
+ .size mcl_fp_addPre6Lbmi2, .Lfunc_end86-mcl_fp_addPre6Lbmi2
+
+ .globl mcl_fp_subPre6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre6Lbmi2,@function
+mcl_fp_subPre6Lbmi2: # @mcl_fp_subPre6Lbmi2
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %ecx
+ movl (%ecx), %edx
+ xorl %eax, %eax
+ movl 20(%esp), %esi
+ subl (%esi), %edx
+ movl 12(%esp), %edi
+ movl %edx, (%edi)
+ movl 4(%ecx), %edx
+ sbbl 4(%esi), %edx
+ movl %edx, 4(%edi)
+ movl 8(%ecx), %edx
+ sbbl 8(%esi), %edx
+ movl %edx, 8(%edi)
+ movl 12(%ecx), %edx
+ sbbl 12(%esi), %edx
+ movl %edx, 12(%edi)
+ movl 16(%ecx), %edx
+ sbbl 16(%esi), %edx
+ movl %edx, 16(%edi)
+ movl 20(%esi), %edx
+ movl 20(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 20(%edi)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end87:
+ .size mcl_fp_subPre6Lbmi2, .Lfunc_end87-mcl_fp_subPre6Lbmi2
+
+ .globl mcl_fp_shr1_6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_6Lbmi2,@function
+mcl_fp_shr1_6Lbmi2: # @mcl_fp_shr1_6Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl 20(%eax), %ecx
+ movl 16(%eax), %edx
+ movl 12(%eax), %esi
+ movl 8(%eax), %edi
+ movl (%eax), %ebx
+ movl 4(%eax), %eax
+ shrdl $1, %eax, %ebx
+ movl 20(%esp), %ebp
+ movl %ebx, (%ebp)
+ shrdl $1, %edi, %eax
+ movl %eax, 4(%ebp)
+ shrdl $1, %esi, %edi
+ movl %edi, 8(%ebp)
+ shrdl $1, %edx, %esi
+ movl %esi, 12(%ebp)
+ shrdl $1, %ecx, %edx
+ movl %edx, 16(%ebp)
+ shrl %ecx
+ movl %ecx, 20(%ebp)
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end88:
+ .size mcl_fp_shr1_6Lbmi2, .Lfunc_end88-mcl_fp_shr1_6Lbmi2
+
+ .globl mcl_fp_add6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add6Lbmi2,@function
+mcl_fp_add6Lbmi2: # @mcl_fp_add6Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $12, %esp
+ movl 40(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %ebp
+ movl 36(%esp), %ebx
+ addl (%ebx), %edx
+ adcl 4(%ebx), %ebp
+ movl 8(%eax), %ecx
+ adcl 8(%ebx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ movl 12(%ebx), %ecx
+ movl 16(%ebx), %edi
+ adcl 12(%eax), %ecx
+ adcl 16(%eax), %edi
+ movl 20(%ebx), %ebx
+ adcl 20(%eax), %ebx
+ movl 32(%esp), %eax
+ movl %edx, (%eax)
+ movl %ebp, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %ecx, 12(%eax)
+ movl %edi, 16(%eax)
+ movl %ebx, 20(%eax)
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl 44(%esp), %esi
+ subl (%esi), %edx
+ movl %edx, (%esp) # 4-byte Spill
+ movl 8(%esp), %edx # 4-byte Reload
+ movl 44(%esp), %esi
+ sbbl 4(%esi), %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ sbbl 8(%esi), %edx
+ sbbl 12(%esi), %ebp
+ sbbl 16(%esi), %edi
+ sbbl 20(%esi), %ebx
+ sbbl $0, %eax
+ testb $1, %al
+ jne .LBB89_2
+# BB#1: # %nocarry
+ movl (%esp), %eax # 4-byte Reload
+ movl 32(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 4(%esp), %eax # 4-byte Reload
+ movl %eax, 4(%ecx)
+ movl %edx, 8(%ecx)
+ movl %ebp, 12(%ecx)
+ movl %edi, 16(%ecx)
+ movl %ebx, 20(%ecx)
+.LBB89_2: # %carry
+ addl $12, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end89:
+ .size mcl_fp_add6Lbmi2, .Lfunc_end89-mcl_fp_add6Lbmi2
+
+ .globl mcl_fp_addNF6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF6Lbmi2,@function
+mcl_fp_addNF6Lbmi2: # @mcl_fp_addNF6Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $40, %esp
+ movl 68(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %ecx
+ movl 64(%esp), %ebp
+ addl (%ebp), %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %edx, %ebx
+ adcl 4(%ebp), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 20(%eax), %edx
+ movl 16(%eax), %esi
+ movl 12(%eax), %edi
+ movl 8(%eax), %eax
+ adcl 8(%ebp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ adcl 12(%ebp), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 16(%ebp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 20(%ebp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %ebx, %ebp
+ movl 72(%esp), %ebx
+ subl (%ebx), %ebp
+ movl %ebp, (%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ movl 72(%esp), %ecx
+ sbbl 4(%ecx), %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ sbbl 8(%ecx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ sbbl 12(%ecx), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl %esi, %edi
+ sbbl 16(%ecx), %edi
+ movl %edx, %esi
+ sbbl 20(%ecx), %esi
+ movl %esi, %ebx
+ sarl $31, %ebx
+ testl %ebx, %ebx
+ js .LBB90_2
+# BB#1:
+ movl (%esp), %eax # 4-byte Reload
+.LBB90_2:
+ movl 60(%esp), %ebx
+ movl %eax, (%ebx)
+ movl 20(%esp), %ecx # 4-byte Reload
+ js .LBB90_4
+# BB#3:
+ movl 4(%esp), %ecx # 4-byte Reload
+.LBB90_4:
+ movl %ecx, 4(%ebx)
+ movl 36(%esp), %eax # 4-byte Reload
+ movl 28(%esp), %edx # 4-byte Reload
+ movl 24(%esp), %ecx # 4-byte Reload
+ js .LBB90_6
+# BB#5:
+ movl 8(%esp), %ecx # 4-byte Reload
+.LBB90_6:
+ movl %ecx, 8(%ebx)
+ movl 32(%esp), %ecx # 4-byte Reload
+ js .LBB90_8
+# BB#7:
+ movl 12(%esp), %edx # 4-byte Reload
+.LBB90_8:
+ movl %edx, 12(%ebx)
+ js .LBB90_10
+# BB#9:
+ movl %edi, %ecx
+.LBB90_10:
+ movl %ecx, 16(%ebx)
+ js .LBB90_12
+# BB#11:
+ movl %esi, %eax
+.LBB90_12:
+ movl %eax, 20(%ebx)
+ addl $40, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end90:
+ .size mcl_fp_addNF6Lbmi2, .Lfunc_end90-mcl_fp_addNF6Lbmi2
+
+ .globl mcl_fp_sub6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub6Lbmi2,@function
+mcl_fp_sub6Lbmi2: # @mcl_fp_sub6Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $16, %esp
+ movl 40(%esp), %ebx
+ movl (%ebx), %esi
+ movl 4(%ebx), %edi
+ movl 44(%esp), %ecx
+ subl (%ecx), %esi
+ sbbl 4(%ecx), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ movl 8(%ebx), %eax
+ sbbl 8(%ecx), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 12(%ebx), %eax
+ sbbl 12(%ecx), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 16(%ebx), %ebp
+ sbbl 16(%ecx), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 20(%ebx), %edx
+ sbbl 20(%ecx), %edx
+ movl $0, %ecx
+ sbbl $0, %ecx
+ testb $1, %cl
+ movl 36(%esp), %ebx
+ movl %esi, (%ebx)
+ movl %edi, 4(%ebx)
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl %eax, 12(%ebx)
+ movl %ebp, 16(%ebx)
+ movl %edx, 20(%ebx)
+ je .LBB91_2
+# BB#1: # %carry
+ movl 48(%esp), %ecx
+ addl (%ecx), %esi
+ movl %esi, (%ebx)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 4(%ecx), %eax
+ adcl 8(%ecx), %edi
+ movl %eax, 4(%ebx)
+ movl 12(%ecx), %eax
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %edi, 8(%ebx)
+ movl %eax, 12(%ebx)
+ movl 16(%ecx), %eax
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%ebx)
+ movl 20(%ecx), %eax
+ adcl %edx, %eax
+ movl %eax, 20(%ebx)
+.LBB91_2: # %nocarry
+ addl $16, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end91:
+ .size mcl_fp_sub6Lbmi2, .Lfunc_end91-mcl_fp_sub6Lbmi2
+
+ .globl mcl_fp_subNF6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF6Lbmi2,@function
+mcl_fp_subNF6Lbmi2: # @mcl_fp_subNF6Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $24, %esp
+ movl 48(%esp), %ebx
+ movl 20(%ebx), %esi
+ movl (%ebx), %ecx
+ movl 4(%ebx), %eax
+ movl 52(%esp), %ebp
+ subl (%ebp), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 4(%ebp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 16(%ebx), %eax
+ movl 12(%ebx), %ecx
+ movl 8(%ebx), %edx
+ sbbl 8(%ebp), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ sbbl 12(%ebp), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ sbbl 16(%ebp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %esi, %edx
+ sbbl 20(%ebp), %edx
+ movl %edx, (%esp) # 4-byte Spill
+ movl %edx, %ebp
+ sarl $31, %ebp
+ movl %ebp, %ecx
+ addl %ecx, %ecx
+ movl %ebp, %eax
+ adcl %eax, %eax
+ shrl $31, %edx
+ orl %ecx, %edx
+ movl 56(%esp), %ebx
+ andl 4(%ebx), %eax
+ andl (%ebx), %edx
+ movl 20(%ebx), %edi
+ andl %ebp, %edi
+ movl 16(%ebx), %esi
+ andl %ebp, %esi
+ movl 12(%ebx), %ecx
+ andl %ebp, %ecx
+ andl 8(%ebx), %ebp
+ addl 8(%esp), %edx # 4-byte Folded Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl 44(%esp), %ebx
+ movl %edx, (%ebx)
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 4(%ebx)
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ebp, 8(%ebx)
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %ecx, 12(%ebx)
+ movl %esi, 16(%ebx)
+ adcl (%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%ebx)
+ addl $24, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end92:
+ .size mcl_fp_subNF6Lbmi2, .Lfunc_end92-mcl_fp_subNF6Lbmi2
+
+ .globl mcl_fpDbl_add6Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add6Lbmi2,@function
+mcl_fpDbl_add6Lbmi2: # @mcl_fpDbl_add6Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $36, %esp
+ movl 64(%esp), %edx
+ movl 60(%esp), %ecx
+ movl 12(%ecx), %esi
+ movl 16(%ecx), %eax
+ movl 8(%edx), %edi
+ movl (%edx), %ebx
+ addl (%ecx), %ebx
+ movl 56(%esp), %ebp
+ movl %ebx, (%ebp)
+ movl 4(%edx), %ebx
+ adcl 4(%ecx), %ebx
+ adcl 8(%ecx), %edi
+ adcl 12(%edx), %esi
+ adcl 16(%edx), %eax
+ movl %ebx, 4(%ebp)
+ movl %edx, %ebx
+ movl 32(%ebx), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %edi, 8(%ebp)
+ movl 20(%ebx), %edi
+ movl %esi, 12(%ebp)
+ movl 20(%ecx), %esi
+ adcl %edi, %esi
+ movl 24(%ebx), %edi
+ movl %eax, 16(%ebp)
+ movl 24(%ecx), %edx
+ adcl %edi, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 28(%ebx), %edi
+ movl %esi, 20(%ebp)
+ movl 28(%ecx), %eax
+ adcl %edi, %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 32(%ecx), %ebp
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 36(%ebx), %esi
+ movl %ebx, %edi
+ movl 36(%ecx), %ebx
+ adcl %esi, %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 40(%edi), %esi
+ movl 40(%ecx), %edi
+ adcl %esi, %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 64(%esp), %esi
+ movl 44(%esi), %esi
+ movl 44(%ecx), %ecx
+ adcl %esi, %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl 68(%esp), %esi
+ subl (%esi), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl 68(%esp), %edx
+ sbbl 4(%edx), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ sbbl 8(%edx), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl %ebx, %ebp
+ sbbl 12(%edx), %ebp
+ movl %edi, %ebx
+ movl 12(%esp), %edi # 4-byte Reload
+ sbbl 16(%edx), %ebx
+ movl %edi, %eax
+ sbbl 20(%edx), %eax
+ sbbl $0, %ecx
+ andl $1, %ecx
+ jne .LBB93_2
+# BB#1:
+ movl %eax, %edi
+.LBB93_2:
+ testb %cl, %cl
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl 16(%esp), %edx # 4-byte Reload
+ jne .LBB93_4
+# BB#3:
+ movl (%esp), %edx # 4-byte Reload
+ movl 4(%esp), %ecx # 4-byte Reload
+.LBB93_4:
+ movl 56(%esp), %eax
+ movl %ecx, 24(%eax)
+ movl %edx, 28(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl 24(%esp), %edx # 4-byte Reload
+ jne .LBB93_6
+# BB#5:
+ movl 8(%esp), %edx # 4-byte Reload
+.LBB93_6:
+ movl %edx, 32(%eax)
+ movl 28(%esp), %edx # 4-byte Reload
+ jne .LBB93_8
+# BB#7:
+ movl %ebp, %edx
+.LBB93_8:
+ movl %edx, 36(%eax)
+ jne .LBB93_10
+# BB#9:
+ movl %ebx, %ecx
+.LBB93_10:
+ movl %ecx, 40(%eax)
+ movl %edi, 44(%eax)
+ addl $36, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end93:
+ .size mcl_fpDbl_add6Lbmi2, .Lfunc_end93-mcl_fpDbl_add6Lbmi2
+
+ .globl mcl_fpDbl_sub6Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub6Lbmi2,@function
+mcl_fpDbl_sub6Lbmi2: # @mcl_fpDbl_sub6Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $24, %esp
+ movl 48(%esp), %edx
+ movl (%edx), %eax
+ movl 4(%edx), %edi
+ movl 52(%esp), %esi
+ subl (%esi), %eax
+ sbbl 4(%esi), %edi
+ movl 8(%edx), %ebx
+ sbbl 8(%esi), %ebx
+ movl 44(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 12(%edx), %eax
+ sbbl 12(%esi), %eax
+ movl %edi, 4(%ecx)
+ movl 16(%edx), %edi
+ sbbl 16(%esi), %edi
+ movl %ebx, 8(%ecx)
+ movl 20(%esi), %ebx
+ movl %eax, 12(%ecx)
+ movl 20(%edx), %eax
+ sbbl %ebx, %eax
+ movl 24(%esi), %ebx
+ movl %edi, 16(%ecx)
+ movl 24(%edx), %edi
+ sbbl %ebx, %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 28(%esi), %edi
+ movl %eax, 20(%ecx)
+ movl 28(%edx), %eax
+ sbbl %edi, %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 32(%esi), %edi
+ movl 32(%edx), %eax
+ sbbl %edi, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 36(%esi), %edi
+ movl 36(%edx), %eax
+ sbbl %edi, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 40(%esi), %edi
+ movl 40(%edx), %eax
+ sbbl %edi, %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 44(%esi), %esi
+ movl 44(%edx), %eax
+ sbbl %esi, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl $0, %ebx
+ sbbl $0, %ebx
+ andl $1, %ebx
+ movl 56(%esp), %eax
+ jne .LBB94_1
+# BB#2:
+ xorl %edx, %edx
+ jmp .LBB94_3
+.LBB94_1:
+ movl 20(%eax), %edx
+.LBB94_3:
+ testb %bl, %bl
+ jne .LBB94_4
+# BB#5:
+ movl $0, %esi
+ movl $0, %edi
+ jmp .LBB94_6
+.LBB94_4:
+ movl (%eax), %edi
+ movl 4(%eax), %esi
+.LBB94_6:
+ jne .LBB94_7
+# BB#8:
+ movl $0, %ebx
+ jmp .LBB94_9
+.LBB94_7:
+ movl 16(%eax), %ebx
+.LBB94_9:
+ jne .LBB94_10
+# BB#11:
+ movl $0, %ebp
+ jmp .LBB94_12
+.LBB94_10:
+ movl 12(%eax), %ebp
+.LBB94_12:
+ jne .LBB94_13
+# BB#14:
+ xorl %eax, %eax
+ jmp .LBB94_15
+.LBB94_13:
+ movl 8(%eax), %eax
+.LBB94_15:
+ addl 8(%esp), %edi # 4-byte Folded Reload
+ adcl (%esp), %esi # 4-byte Folded Reload
+ movl %edi, 24(%ecx)
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %esi, 28(%ecx)
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 32(%ecx)
+ adcl 16(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, 36(%ecx)
+ movl %ebx, 40(%ecx)
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%ecx)
+ addl $24, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end94:
+ .size mcl_fpDbl_sub6Lbmi2, .Lfunc_end94-mcl_fpDbl_sub6Lbmi2
+
+ .globl mcl_fp_mulUnitPre7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre7Lbmi2,@function
+mcl_fp_mulUnitPre7Lbmi2: # @mcl_fp_mulUnitPre7Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $16, %esp
+ movl 44(%esp), %edx
+ movl 40(%esp), %edi
+ mulxl 4(%edi), %ecx, %esi
+ mulxl (%edi), %ebx, %eax
+ movl %ebx, 12(%esp) # 4-byte Spill
+ addl %ecx, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ mulxl 8(%edi), %ecx, %eax
+ adcl %esi, %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ mulxl 12(%edi), %ebx, %ecx
+ adcl %eax, %ebx
+ mulxl 16(%edi), %esi, %ebp
+ adcl %ecx, %esi
+ mulxl 20(%edi), %ecx, %eax
+ movl %eax, (%esp) # 4-byte Spill
+ adcl %ebp, %ecx
+ mulxl 24(%edi), %edx, %edi
+ movl 36(%esp), %eax
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl %ebp, (%eax)
+ movl 8(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%eax)
+ movl 4(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%eax)
+ movl %ebx, 12(%eax)
+ movl %esi, 16(%eax)
+ movl %ecx, 20(%eax)
+ adcl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%eax)
+ adcl $0, %edi
+ movl %edi, 28(%eax)
+ addl $16, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end95:
+ .size mcl_fp_mulUnitPre7Lbmi2, .Lfunc_end95-mcl_fp_mulUnitPre7Lbmi2
+
+ .globl mcl_fpDbl_mulPre7Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre7Lbmi2,@function
+mcl_fpDbl_mulPre7Lbmi2: # @mcl_fpDbl_mulPre7Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $72, %esp
+ movl 96(%esp), %eax
+ movl (%eax), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 4(%eax), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl 100(%esp), %eax
+ movl (%eax), %ebp
+ mulxl %ebp, %ecx, %eax
+ movl %esi, %edx
+ mulxl %ebp, %edx, %esi
+ movl %edx, 40(%esp) # 4-byte Spill
+ addl %ecx, %esi
+ movl 8(%edi), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %edi, %ebx
+ mulxl %ebp, %edi, %ecx
+ adcl %eax, %edi
+ movl 12(%ebx), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mulxl %ebp, %ebx, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl %ecx, %ebx
+ movl 16(%eax), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ mulxl %ebp, %ecx, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl 20(%eax), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ mulxl %ebp, %edx, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl 24(%eax), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ mulxl %ebp, %eax, %edx
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl %ebp, (%eax)
+ adcl $0, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl 4(%eax), %eax
+ movl 68(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ addl %esi, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ mulxl %eax, %esi, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl %edi, %esi
+ movl 60(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edi, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl %ebx, %edi
+ movl 56(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ebx, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl %ecx, %ebx
+ movl 52(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ecx, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ movl 48(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ecx, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ecx, %edx
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl 40(%esp), %esi # 4-byte Folded Reload
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ adcl 56(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl %ebp, 64(%esp) # 4-byte Folded Spill
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl %edx, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl 68(%esp), %edx # 4-byte Reload
+ movl %edx, 4(%eax)
+ movl 96(%esp), %ecx
+ movl (%ecx), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl 8(%eax), %eax
+ mulxl %eax, %edx, %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ addl %esi, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 4(%ecx), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ mulxl %eax, %edx, %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl %edi, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 8(%ecx), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ mulxl %eax, %esi, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl %ebx, %esi
+ movl %esi, %edi
+ movl 12(%ecx), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ mulxl %eax, %edx, %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ adcl 60(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 16(%ecx), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ mulxl %eax, %ebx, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl 20(%ecx), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ mulxl %eax, %ebp, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ movl 24(%ecx), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ mulxl %eax, %ecx, %edx
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ sbbl %esi, %esi
+ andl $1, %esi
+ movl 20(%esp), %eax # 4-byte Reload
+ addl %eax, 68(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 60(%esp) # 4-byte Folded Spill
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ adcl %edx, %esi
+ movl 92(%esp), %eax
+ movl 32(%esp), %edx # 4-byte Reload
+ movl %edx, 8(%eax)
+ movl 100(%esp), %eax
+ movl 12(%eax), %eax
+ movl 56(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ addl 68(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edi, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl %ebx, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ebx, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl %ebp, %ebx
+ movl 36(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ebp, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl %ecx, %ebp
+ movl 28(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ecx, %eax
+ adcl %esi, %ecx
+ movl %ecx, %edx
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl 32(%esp), %esi # 4-byte Reload
+ addl %esi, 64(%esp) # 4-byte Folded Spill
+ movl 24(%esp), %edi # 4-byte Reload
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl %esi, 68(%esp) # 4-byte Folded Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl %eax, %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl 56(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%eax)
+ movl 96(%esp), %ecx
+ movl (%ecx), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl 16(%eax), %esi
+ mulxl %esi, %eax, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ addl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 4(%ecx), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ mulxl %esi, %eax, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl %edi, %eax
+ movl %eax, %edi
+ movl 8(%ecx), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ mulxl %esi, %eax, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 12(%ecx), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ mulxl %esi, %eax, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl %ebx, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 16(%ecx), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ mulxl %esi, %ebx, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ adcl %ebp, %ebx
+ movl 20(%ecx), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ mulxl %esi, %edx, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %eax
+ movl 24(%ecx), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ mulxl %esi, %ebp, %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl %edi, %esi
+ addl 28(%esp), %esi # 4-byte Folded Reload
+ movl 12(%esp), %edi # 4-byte Reload
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl %edx, 68(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%esp) # 4-byte Spill
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl (%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl 36(%esp), %edx # 4-byte Reload
+ movl %edx, 16(%eax)
+ movl 100(%esp), %eax
+ movl 20(%eax), %eax
+ movl 60(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ecx, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ addl %esi, %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ mulxl %eax, %esi, %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl %edi, %esi
+ movl 56(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ecx, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ebx, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %edi
+ movl 44(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl %ebp, %edx
+ movl %edx, %ebp
+ movl 40(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %eax
+ sbbl %edx, %edx
+ andl $1, %edx
+ addl 36(%esp), %esi # 4-byte Folded Reload
+ movl 20(%esp), %ecx # 4-byte Reload
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 52(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 40(%esp) # 4-byte Spill
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl 60(%esp), %edx # 4-byte Reload
+ movl %edx, 20(%eax)
+ movl 100(%esp), %eax
+ movl 24(%eax), %edx
+ movl 96(%esp), %eax
+ mulxl (%eax), %ebp, %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ addl %esi, %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ mulxl 4(%eax), %esi, %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl %ecx, %esi
+ movl %esi, %ebp
+ mulxl 8(%eax), %ecx, %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ adcl %ebx, %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ mulxl 12(%eax), %ebx, %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ mulxl 16(%eax), %edi, %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ mulxl 20(%eax), %esi, %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ mulxl 24(%eax), %edx, %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ addl 60(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl %eax, 68(%esp) # 4-byte Folded Spill
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl 92(%esp), %eax
+ movl 64(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%eax)
+ movl 60(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ movl 68(%esp), %ebp # 4-byte Reload
+ movl %ebp, 32(%eax)
+ movl %ebx, 36(%eax)
+ movl %edi, 40(%eax)
+ movl %esi, 44(%eax)
+ movl %edx, 48(%eax)
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%eax)
+ addl $72, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end96:
+ .size mcl_fpDbl_mulPre7Lbmi2, .Lfunc_end96-mcl_fpDbl_mulPre7Lbmi2
+
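(Reading aid, not part of the vendored diff: mcl_fpDbl_mulPre7Lbmi2 above is machine-generated; the Lbmi2 suffix marks the BMI2 build, visible in the flag-free mulxl multiplies. As a hedged sketch of what a 7-limb double-width product computes with 32-bit limbs, the following C is illustrative only; the function and parameter names are not mcl's API.)

#include <stdint.h>

/* z[0..13] = x[0..6] * y[0..6], schoolbook order: one row per limb of y,
   carries propagated within the row, the row's top carry stored as its high limb. */
static void mulPre7_ref(uint32_t z[14], const uint32_t x[7], const uint32_t y[7])
{
    for (int i = 0; i < 14; i++) z[i] = 0;
    for (int i = 0; i < 7; i++) {
        uint32_t carry = 0;
        for (int j = 0; j < 7; j++) {
            uint64_t t = (uint64_t)x[j] * y[i] + z[i + j] + carry;
            z[i + j] = (uint32_t)t;
            carry = (uint32_t)(t >> 32);
        }
        z[i + 7] = carry;
    }
}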
+ .globl mcl_fpDbl_sqrPre7Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre7Lbmi2,@function
+mcl_fpDbl_sqrPre7Lbmi2: # @mcl_fpDbl_sqrPre7Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $80, %esp
+ movl 104(%esp), %ecx
+ movl (%ecx), %ebx
+ movl 4(%ecx), %eax
+ movl %eax, %edx
+ mulxl %ebx, %esi, %edi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl %ebx, %ebp, %edx
+ movl %ebp, 44(%esp) # 4-byte Spill
+ addl %esi, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 8(%ecx), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ mulxl %ebx, %edx, %esi
+ adcl %edi, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 12(%ecx), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ mulxl %ebx, %edi, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl %esi, %edi
+ movl 16(%ecx), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ mulxl %ebx, %esi, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ movl 20(%ecx), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ mulxl %ebx, %edx, %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 24(%ecx), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ mulxl %ebx, %ecx, %ebx
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx
+ movl 44(%esp), %edx # 4-byte Reload
+ movl %edx, (%ecx)
+ adcl $0, %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ addl %edx, 72(%esp) # 4-byte Folded Spill
+ movl %eax, %edx
+ mulxl %eax, %ebx, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl 68(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ebp, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl %edi, %ebp
+ movl 64(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edi, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl %esi, %edi
+ movl 60(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %esi
+ movl 52(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ecx, %eax
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %edx
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ addl 76(%esp), %ebx # 4-byte Folded Reload
+ adcl 56(%esp), %ebp # 4-byte Folded Reload
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl %esi, 68(%esp) # 4-byte Folded Spill
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl %eax, %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl 72(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%eax)
+ movl 104(%esp), %esi
+ movl (%esi), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 8(%esi), %ecx
+ mulxl %ecx, %edx, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ addl %ebx, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 4(%esi), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %esi, %ebx
+ mulxl %ecx, %eax, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl %ebp, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ mulxl %ecx, %eax, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl %edi, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 12(%ebx), %eax
+ movl %eax, %edx
+ mulxl %ecx, %edi, %edx
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl %edi, %edx
+ movl %edx, %esi
+ movl 16(%ebx), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ mulxl %ecx, %edx, %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %edi
+ movl 20(%ebx), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %ebx, %ebp
+ mulxl %ecx, %ebx, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl 24(%ebp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ mulxl %ecx, %ecx, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ sbbl %ebp, %ebp
+ andl $1, %ebp
+ movl 24(%esp), %edx # 4-byte Reload
+ addl %edx, 64(%esp) # 4-byte Folded Spill
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl %edx, 60(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ adcl 72(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 76(%esp), %ebx # 4-byte Folded Reload
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ movl 52(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ addl 64(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 60(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl %eax, %edx
+ mulxl %eax, %edx, %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ adcl %ebx, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl %ecx, %edx
+ movl %edx, %esi
+ movl 36(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edi, %eax
+ adcl %ebp, %edi
+ movl %edi, %edx
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl 52(%esp), %edi # 4-byte Reload
+ addl %edi, 68(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ movl 28(%esp), %ebx # 4-byte Reload
+ adcl 72(%esp), %ebx # 4-byte Folded Reload
+ movl 24(%esp), %ebp # 4-byte Reload
+ adcl 60(%esp), %ebp # 4-byte Folded Reload
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ adcl %eax, %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl 56(%esp), %ecx # 4-byte Reload
+ movl %ecx, 8(%eax)
+ movl 64(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%eax)
+ movl 104(%esp), %ecx
+ movl (%ecx), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 16(%ecx), %eax
+ mulxl %eax, %edx, %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ addl 68(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 4(%ecx), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ mulxl %eax, %edx, %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl %edi, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 8(%ecx), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ mulxl %eax, %edx, %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl %ebx, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 12(%ecx), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ mulxl %eax, %ecx, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ adcl %ebp, %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl %eax, %edx
+ mulxl %eax, %ecx, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 20(%esi), %ecx
+ movl %ecx, %edx
+ mulxl %eax, %edx, %ebp
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl %edx, %edi
+ movl 24(%esi), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ mulxl %eax, %esi, %eax
+ movl %eax, (%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 12(%esp), %eax # 4-byte Reload
+ addl 24(%esp), %eax # 4-byte Folded Reload
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl %edx, 72(%esp) # 4-byte Folded Spill
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl %edx, 64(%esp) # 4-byte Folded Spill
+ movl 8(%esp), %edx # 4-byte Reload
+ adcl %edx, 68(%esp) # 4-byte Folded Spill
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 76(%esp) # 4-byte Spill
+ adcl %ebp, %esi
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ movl 56(%esp), %edx # 4-byte Reload
+ mulxl %ecx, %edx, %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ addl %eax, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ mulxl %ecx, %ebp, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 72(%esp), %ebp # 4-byte Folded Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ mulxl %ecx, %eax, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ mulxl %ecx, %edi, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl %eax, 76(%esp) # 4-byte Folded Spill
+ movl %ecx, %edx
+ mulxl %ecx, %edx, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl %esi, %edx
+ movl %edx, %eax
+ movl 40(%esp), %edx # 4-byte Reload
+ mulxl %ecx, %ecx, %edx
+ adcl %ebx, %ecx
+ movl %ecx, %ebx
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ addl 28(%esp), %ebp # 4-byte Folded Reload
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl %esi, 72(%esp) # 4-byte Folded Spill
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 48(%esp) # 4-byte Spill
+ adcl %edx, %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl 60(%esp), %ecx # 4-byte Reload
+ movl %ecx, 16(%eax)
+ movl 56(%esp), %ecx # 4-byte Reload
+ movl %ecx, 20(%eax)
+ movl 104(%esp), %eax
+ movl 24(%eax), %edx
+ mulxl (%eax), %ecx, %ebx
+ movl %ebx, 64(%esp) # 4-byte Spill
+ addl %ebp, %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ mulxl 4(%eax), %ecx, %ebx
+ movl %ebx, 60(%esp) # 4-byte Spill
+ adcl 72(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ mulxl 8(%eax), %ecx, %ebx
+ movl %ebx, 72(%esp) # 4-byte Spill
+ adcl %edi, %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ mulxl 12(%eax), %ebx, %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl %esi, %ebx
+ mulxl 16(%eax), %edi, %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ mulxl 20(%eax), %esi, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ mulxl %edx, %edx, %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ addl 64(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl %eax, 76(%esp) # 4-byte Folded Spill
+ adcl 72(%esp), %ebx # 4-byte Folded Reload
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ movl 100(%esp), %eax
+ movl 68(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%eax)
+ movl 64(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ movl 76(%esp), %ebp # 4-byte Reload
+ movl %ebp, 32(%eax)
+ movl %ebx, 36(%eax)
+ movl %edi, 40(%eax)
+ movl %esi, 44(%eax)
+ movl %edx, 48(%eax)
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%eax)
+ addl $80, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end97:
+ .size mcl_fpDbl_sqrPre7Lbmi2, .Lfunc_end97-mcl_fpDbl_sqrPre7Lbmi2
+
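(Reading aid, not part of the vendored diff: mcl_fp_mont7Lbmi2 below interleaves the same limb products with word-by-word Montgomery reduction; the per-pass factor is derived with imull from the value loaded at -4(modulus pointer), i.e. rp = -p^-1 mod 2^32. The C below is a hedged sketch of that interleaved loop under those assumptions, with illustrative names; it is not mcl's API.)

#include <stdint.h>

/* One pass per limb of y: accumulate x*y[i] into w, add q*p with q chosen so the
   low limb of w becomes zero, then shift w down by one limb.  After 7 passes
   t[0..7] holds the Montgomery product, still possibly as large as just under 2*p;
   the generated code finishes with a conditional subtraction of p (sketched after
   mcl_fp_mont7Lbmi2 below). */
static void mont7_loop_ref(uint32_t t[8], const uint32_t x[7], const uint32_t y[7],
                           const uint32_t p[7], uint32_t rp /* -p^-1 mod 2^32 */)
{
    uint32_t w[9] = {0};
    for (int i = 0; i < 7; i++) {
        uint64_t c = 0;
        for (int j = 0; j < 7; j++) {            /* w += x * y[i] */
            uint64_t v = (uint64_t)x[j] * y[i] + w[j] + c;
            w[j] = (uint32_t)v;
            c = v >> 32;
        }
        uint64_t v = (uint64_t)w[7] + c;
        w[7] = (uint32_t)v;
        w[8] += (uint32_t)(v >> 32);
        uint32_t q = w[0] * rp;                  /* makes the low limb of w vanish */
        c = 0;
        for (int j = 0; j < 7; j++) {            /* w += q * p */
            uint64_t u = (uint64_t)q * p[j] + w[j] + c;
            w[j] = (uint32_t)u;
            c = u >> 32;
        }
        v = (uint64_t)w[7] + c;
        w[7] = (uint32_t)v;
        w[8] += (uint32_t)(v >> 32);
        for (int j = 0; j < 8; j++) w[j] = w[j + 1];   /* divide w by 2^32 */
        w[8] = 0;
    }
    for (int j = 0; j < 8; j++) t[j] = w[j];
}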
+ .globl mcl_fp_mont7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont7Lbmi2,@function
+mcl_fp_mont7Lbmi2: # @mcl_fp_mont7Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $116, %esp
+ movl 140(%esp), %eax
+ movl 24(%eax), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 144(%esp), %ecx
+ movl (%ecx), %ecx
+ mulxl %ecx, %edx, %esi
+ movl %edx, 112(%esp) # 4-byte Spill
+ movl 20(%eax), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ mulxl %ecx, %edi, %edx
+ movl %edi, 108(%esp) # 4-byte Spill
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 16(%eax), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ mulxl %ecx, %edx, %ebx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 8(%eax), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ mulxl %ecx, %edi, %edx
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl (%eax), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 4(%eax), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ mulxl %ecx, %edi, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %ebp, %edx
+ mulxl %ecx, %ebp, %edx
+ addl %edi, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ adcl %edx, 48(%esp) # 4-byte Folded Spill
+ movl 12(%eax), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ mulxl %ecx, %ecx, %eax
+ adcl 100(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%esp) # 4-byte Spill
+ adcl 108(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 148(%esp), %ebx
+ movl -4(%ebx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ebp, %edx
+ imull %eax, %edx
+ movl (%ebx), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ movl 4(%ebx), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ mulxl %ecx, %esi, %ecx
+ mulxl %edi, %edi, %eax
+ movl %edi, 8(%esp) # 4-byte Spill
+ addl %esi, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 8(%ebx), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ mulxl %esi, %eax, %esi
+ adcl %ecx, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 12(%ebx), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ mulxl %ecx, %eax, %ecx
+ adcl %esi, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 16(%ebx), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ mulxl %esi, %eax, %esi
+ adcl %ecx, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 20(%ebx), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ mulxl %eax, %eax, %edi
+ adcl %esi, %eax
+ movl %eax, %ecx
+ movl 24(%ebx), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ mulxl %eax, %edx, %eax
+ adcl %edi, %edx
+ adcl $0, %eax
+ addl %ebp, 8(%esp) # 4-byte Folded Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl %esi, 40(%esp) # 4-byte Folded Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl %esi, 32(%esp) # 4-byte Folded Spill
+ movl 12(%esp), %esi # 4-byte Reload
+ adcl %esi, 28(%esp) # 4-byte Folded Spill
+ movl 16(%esp), %esi # 4-byte Reload
+ adcl %esi, 24(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 8(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 144(%esp), %edx
+ movl 4(%edx), %edx
+ mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ mulxl 84(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ mulxl 56(%esp), %ebx, %esi # 4-byte Folded Reload
+ mulxl 60(%esp), %eax, %edi # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ addl %ebx, %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ mulxl 68(%esp), %edi, %eax # 4-byte Folded Reload
+ adcl %esi, %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ mulxl 64(%esp), %ebp, %ebx # 4-byte Folded Reload
+ adcl %eax, %ebp
+ mulxl 80(%esp), %esi, %eax # 4-byte Folded Reload
+ adcl %ebx, %esi
+ adcl %ecx, %eax
+ movl %eax, %ecx
+ movl 44(%esp), %ebx # 4-byte Reload
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ movl 36(%esp), %eax # 4-byte Reload
+ addl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 20(%esp) # 4-byte Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ adcl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, %ebx
+ sbbl %ecx, %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl %eax, %edx
+ imull 76(%esp), %edx # 4-byte Folded Reload
+ mulxl 112(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 108(%esp), %edi, %esi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ addl %ecx, %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ mulxl 104(%esp), %esi, %ecx # 4-byte Folded Reload
+ adcl %eax, %esi
+ movl %esi, %edi
+ mulxl 100(%esp), %esi, %eax # 4-byte Folded Reload
+ adcl %ecx, %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ mulxl 96(%esp), %ecx, %esi # 4-byte Folded Reload
+ adcl %eax, %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ mulxl 92(%esp), %ecx, %eax # 4-byte Folded Reload
+ adcl %esi, %ecx
+ movl %ecx, %esi
+ mulxl 88(%esp), %ebp, %ecx # 4-byte Folded Reload
+ adcl %eax, %ebp
+ adcl $0, %ecx
+ movl %ecx, %edx
+ movl 40(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ movl 8(%esp), %ecx # 4-byte Reload
+ addl 36(%esp), %ecx # 4-byte Folded Reload
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl %ecx, 32(%esp) # 4-byte Folded Spill
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 20(%esp), %ecx # 4-byte Reload
+ adcl %ecx, 28(%esp) # 4-byte Folded Spill
+ movl 12(%esp), %ecx # 4-byte Reload
+ adcl %ecx, 24(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ adcl %ebx, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 144(%esp), %eax
+ movl 8(%eax), %edx
+ mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl %eax, 52(%esp) # 4-byte Spill
+ mulxl 84(%esp), %esi, %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ mulxl 68(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ mulxl 56(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 60(%esp), %ebx, %edi # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ addl %ecx, %edi
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ mulxl 64(%esp), %ebx, %eax # 4-byte Folded Reload
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ mulxl 80(%esp), %edx, %ecx # 4-byte Folded Reload
+ adcl %eax, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl %esi, %ecx
+ movl %ecx, %esi
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl 28(%esp), %edi # 4-byte Reload
+ adcl %edi, 36(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 12(%esp), %ebx # 4-byte Reload
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ adcl %ebp, %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 4(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ imull 76(%esp), %edx # 4-byte Folded Reload
+ mulxl 112(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 108(%esp), %edi, %esi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ addl %ecx, %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ mulxl 104(%esp), %esi, %ecx # 4-byte Folded Reload
+ adcl %eax, %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ mulxl 100(%esp), %esi, %eax # 4-byte Folded Reload
+ adcl %ecx, %esi
+ movl %esi, %edi
+ mulxl 96(%esp), %ecx, %esi # 4-byte Folded Reload
+ adcl %eax, %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ mulxl 92(%esp), %ecx, %eax # 4-byte Folded Reload
+ adcl %esi, %ecx
+ movl %ecx, %esi
+ mulxl 88(%esp), %ebp, %ecx # 4-byte Folded Reload
+ adcl %eax, %ebp
+ adcl $0, %ecx
+ movl %ecx, %edx
+ movl 40(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ movl 8(%esp), %ecx # 4-byte Reload
+ addl 44(%esp), %ecx # 4-byte Folded Reload
+ movl 16(%esp), %ecx # 4-byte Reload
+ adcl %ecx, 32(%esp) # 4-byte Folded Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl %ecx, 28(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl %ebx, 20(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 8(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 144(%esp), %eax
+ movl 12(%eax), %edx
+ mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ mulxl 84(%esp), %esi, %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ mulxl 68(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl %eax, (%esp) # 4-byte Spill
+ mulxl 56(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 60(%esp), %ebx, %edi # 4-byte Folded Reload
+ movl %ebx, 36(%esp) # 4-byte Spill
+ addl %ecx, %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ mulxl 64(%esp), %ebx, %eax # 4-byte Folded Reload
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ mulxl 80(%esp), %edx, %ecx # 4-byte Folded Reload
+ adcl %eax, %edx
+ movl %edx, (%esp) # 4-byte Spill
+ adcl %esi, %ecx
+ movl %ecx, %esi
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 4(%esp), %edx # 4-byte Folded Reload
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 16(%esp), %edi # 4-byte Reload
+ adcl %edi, 24(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl (%esp), %ebx # 4-byte Reload
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ adcl %ebp, %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ imull 76(%esp), %edx # 4-byte Folded Reload
+ mulxl 112(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 108(%esp), %edi, %esi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ addl %ecx, %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ mulxl 104(%esp), %esi, %ecx # 4-byte Folded Reload
+ adcl %eax, %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ mulxl 100(%esp), %esi, %eax # 4-byte Folded Reload
+ adcl %ecx, %esi
+ movl %esi, %edi
+ mulxl 96(%esp), %ecx, %esi # 4-byte Folded Reload
+ adcl %eax, %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ mulxl 92(%esp), %ecx, %eax # 4-byte Folded Reload
+ adcl %esi, %ecx
+ movl %ecx, %esi
+ mulxl 88(%esp), %ebp, %ecx # 4-byte Folded Reload
+ adcl %eax, %ebp
+ adcl $0, %ecx
+ movl %ecx, %edx
+ movl 40(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ movl 8(%esp), %ecx # 4-byte Reload
+ addl 36(%esp), %ecx # 4-byte Folded Reload
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl %ecx, 32(%esp) # 4-byte Folded Spill
+ movl 24(%esp), %ecx # 4-byte Reload
+ adcl %ecx, 28(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl %ebx, 20(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 8(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 144(%esp), %eax
+ movl 16(%eax), %edx
+ mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ mulxl 84(%esp), %esi, %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ mulxl 68(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl %eax, (%esp) # 4-byte Spill
+ mulxl 56(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 60(%esp), %ebx, %edi # 4-byte Folded Reload
+ movl %ebx, 36(%esp) # 4-byte Spill
+ addl %ecx, %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ mulxl 64(%esp), %ebx, %eax # 4-byte Folded Reload
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ mulxl 80(%esp), %edx, %ecx # 4-byte Folded Reload
+ adcl %eax, %edx
+ movl %edx, (%esp) # 4-byte Spill
+ adcl %esi, %ecx
+ movl %ecx, %esi
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 4(%esp), %edx # 4-byte Folded Reload
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 16(%esp), %edi # 4-byte Reload
+ adcl %edi, 24(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl (%esp), %ebx # 4-byte Reload
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ adcl %ebp, %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ imull 76(%esp), %edx # 4-byte Folded Reload
+ mulxl 112(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 108(%esp), %edi, %esi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ addl %ecx, %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ mulxl 104(%esp), %esi, %ecx # 4-byte Folded Reload
+ adcl %eax, %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ mulxl 100(%esp), %esi, %eax # 4-byte Folded Reload
+ adcl %ecx, %esi
+ movl %esi, %edi
+ mulxl 96(%esp), %ecx, %esi # 4-byte Folded Reload
+ adcl %eax, %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ mulxl 92(%esp), %ecx, %eax # 4-byte Folded Reload
+ adcl %esi, %ecx
+ movl %ecx, %esi
+ mulxl 88(%esp), %edx, %ecx # 4-byte Folded Reload
+ adcl %eax, %edx
+ movl %edx, %ebp
+ adcl $0, %ecx
+ movl %ecx, %edx
+ movl 40(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ movl 8(%esp), %ecx # 4-byte Reload
+ addl 36(%esp), %ecx # 4-byte Folded Reload
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl %ecx, 32(%esp) # 4-byte Folded Spill
+ movl 24(%esp), %ecx # 4-byte Reload
+ adcl %ecx, 28(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl %ebx, 20(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%esp) # 4-byte Spill
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 144(%esp), %eax
+ movl 20(%eax), %edx
+ mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, (%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ mulxl 84(%esp), %esi, %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ mulxl 68(%esp), %eax, %ebp # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ mulxl 56(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 60(%esp), %ebx, %edi # 4-byte Folded Reload
+ movl %ebx, 36(%esp) # 4-byte Spill
+ addl %ecx, %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload
+ adcl %ebp, %ecx
+ movl %ecx, %ebp
+ mulxl 80(%esp), %ebx, %ecx # 4-byte Folded Reload
+ adcl %eax, %ebx
+ adcl %esi, %ecx
+ movl %ecx, %esi
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl (%esp), %edx # 4-byte Folded Reload
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 16(%esp), %edi # 4-byte Reload
+ adcl %edi, 24(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ebp
+ sbbl %eax, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ imull 76(%esp), %edx # 4-byte Folded Reload
+ mulxl 112(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 108(%esp), %edi, %esi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ addl %ecx, %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ mulxl 104(%esp), %esi, %ecx # 4-byte Folded Reload
+ adcl %eax, %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ mulxl 100(%esp), %esi, %eax # 4-byte Folded Reload
+ adcl %ecx, %esi
+ movl %esi, %edi
+ mulxl 96(%esp), %ecx, %esi # 4-byte Folded Reload
+ adcl %eax, %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ mulxl 92(%esp), %eax, %ebx # 4-byte Folded Reload
+ adcl %esi, %eax
+ movl %eax, %esi
+ mulxl 88(%esp), %ecx, %eax # 4-byte Folded Reload
+ adcl %ebx, %ecx
+ movl %ecx, %ebx
+ adcl $0, %eax
+ movl %eax, %ecx
+ movl 44(%esp), %edx # 4-byte Reload
+ andl $1, %edx
+ movl 8(%esp), %eax # 4-byte Reload
+ addl 36(%esp), %eax # 4-byte Folded Reload
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 36(%esp) # 4-byte Spill
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ adcl %ebp, %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 144(%esp), %edx
+ movl 24(%edx), %edx
+ mulxl 56(%esp), %ebx, %esi # 4-byte Folded Reload
+ mulxl 60(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 60(%esp) # 4-byte Spill
+ addl %ebx, %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ mulxl 68(%esp), %ebp, %edi # 4-byte Folded Reload
+ adcl %esi, %ebp
+ mulxl 64(%esp), %eax, %esi # 4-byte Folded Reload
+ adcl %edi, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ mulxl 84(%esp), %edi, %eax # 4-byte Folded Reload
+ movl %edi, 84(%esp) # 4-byte Spill
+ mulxl 80(%esp), %ebx, %edx # 4-byte Folded Reload
+ adcl %esi, %ebx
+ adcl 84(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %esi
+ adcl %ecx, %eax
+ movl %eax, %ecx
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ movl 60(%esp), %edi # 4-byte Reload
+ addl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ adcl 52(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl %eax, 68(%esp) # 4-byte Folded Spill
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 36(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ imull %edi, %edx
+ mulxl 108(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 76(%esp) # 4-byte Spill
+ mulxl 112(%esp), %ecx, %esi # 4-byte Folded Reload
+ addl %eax, %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ mulxl 104(%esp), %eax, %edi # 4-byte Folded Reload
+ adcl %esi, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ mulxl 100(%esp), %ecx, %eax # 4-byte Folded Reload
+ adcl %edi, %ecx
+ movl %edx, %edi
+ mulxl 96(%esp), %ebx, %ebp # 4-byte Folded Reload
+ adcl %eax, %ebx
+ mulxl 92(%esp), %esi, %eax # 4-byte Folded Reload
+ adcl %ebp, %esi
+ movl %edi, %edx
+ mulxl 88(%esp), %edi, %ebp # 4-byte Folded Reload
+ adcl %eax, %edi
+ adcl $0, %ebp
+ andl $1, 64(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ addl 60(%esp), %eax # 4-byte Folded Reload
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 68(%esp) # 4-byte Spill
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ adcl 72(%esp), %ebp # 4-byte Folded Reload
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ subl 108(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ sbbl 112(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ sbbl 104(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 72(%esp) # 4-byte Spill
+ sbbl 100(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 100(%esp) # 4-byte Spill
+ movl %esi, %ebx
+ sbbl 96(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 104(%esp) # 4-byte Spill
+ movl %edi, %ebx
+ sbbl 92(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 108(%esp) # 4-byte Spill
+ movl %ebp, %ebx
+ sbbl 88(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 112(%esp) # 4-byte Spill
+ sbbl $0, %eax
+ andl $1, %eax
+ movl %eax, %ecx
+ jne .LBB98_2
+# BB#1:
+ movl 60(%esp), %eax # 4-byte Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+.LBB98_2:
+ movl 136(%esp), %ebx
+ movl 80(%esp), %edx # 4-byte Reload
+ movl %edx, (%ebx)
+ movl %ebx, %edx
+ testb %cl, %cl
+ movl 84(%esp), %ebx # 4-byte Reload
+ jne .LBB98_4
+# BB#3:
+ movl 64(%esp), %ebx # 4-byte Reload
+.LBB98_4:
+ movl %ebx, 4(%edx)
+ movl 76(%esp), %ecx # 4-byte Reload
+ jne .LBB98_6
+# BB#5:
+ movl 72(%esp), %ecx # 4-byte Reload
+.LBB98_6:
+ movl %ecx, 8(%edx)
+ movl 68(%esp), %eax # 4-byte Reload
+ jne .LBB98_8
+# BB#7:
+ movl 100(%esp), %eax # 4-byte Reload
+.LBB98_8:
+ movl %eax, 12(%edx)
+ jne .LBB98_10
+# BB#9:
+ movl 104(%esp), %esi # 4-byte Reload
+.LBB98_10:
+ movl %esi, 16(%edx)
+ jne .LBB98_12
+# BB#11:
+ movl 108(%esp), %edi # 4-byte Reload
+.LBB98_12:
+ movl %edi, 20(%edx)
+ jne .LBB98_14
+# BB#13:
+ movl 112(%esp), %ebp # 4-byte Reload
+.LBB98_14:
+ movl %ebp, 24(%edx)
+ addl $116, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end98:
+ .size mcl_fp_mont7Lbmi2, .Lfunc_end98-mcl_fp_mont7Lbmi2
+
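(Reading aid, not part of the vendored diff: the tail of mcl_fp_mont7Lbmi2 above, the subl/sbbl run followed by the .LBB98_* branches, performs a trial subtraction of the modulus and then picks the reduced candidate. A hedged C equivalent of just that select step, with illustrative names and 32-bit limbs, is below; the generated code uses branches rather than this conditional expression.)

/* z = t - p if t >= p, else z = t; t has 8 limbs (value below 2*p), p has 7. */
static void mont7_final_sub_ref(uint32_t z[7], const uint32_t t[8], const uint32_t p[7])
{
    uint32_t s[7];
    uint64_t borrow = 0;
    for (int j = 0; j < 7; j++) {
        uint64_t d = (uint64_t)t[j] - p[j] - borrow;
        s[j] = (uint32_t)d;
        borrow = (d >> 32) & 1;
    }
    /* t >= p exactly when the extra top limb is set or the subtraction did not borrow */
    int keep_sub = (t[7] != 0) || (borrow == 0);
    for (int j = 0; j < 7; j++)
        z[j] = keep_sub ? s[j] : t[j];
}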
+ .globl mcl_fp_montNF7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF7Lbmi2,@function
+mcl_fp_montNF7Lbmi2: # @mcl_fp_montNF7Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $104, %esp
+ movl 128(%esp), %eax
+ movl (%eax), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 4(%eax), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx
+ movl (%ecx), %ebp
+ mulxl %ebp, %ecx, %esi
+ movl %edi, %edx
+ mulxl %ebp, %edi, %edx
+ movl %edi, 96(%esp) # 4-byte Spill
+ addl %ecx, %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 8(%eax), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ mulxl %ebp, %ecx, %edi
+ adcl %esi, %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 12(%eax), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ mulxl %ebp, %ecx, %ebx
+ adcl %edi, %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 16(%eax), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ mulxl %ebp, %edx, %ecx
+ adcl %ebx, %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 20(%eax), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ mulxl %ebp, %edx, %esi
+ adcl %ecx, %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 24(%eax), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ mulxl %ebp, %ebp, %eax
+ adcl %esi, %ebp
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 136(%esp), %edi
+ movl -4(%edi), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %esi # 4-byte Reload
+ movl %esi, %edx
+ imull %eax, %edx
+ movl (%edi), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ mulxl %ecx, %ecx, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ addl %esi, %ecx
+ movl 4(%edi), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ mulxl %ecx, %eax, %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 8(%edi), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ mulxl %ecx, %eax, %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 12(%edi), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ mulxl %ecx, %esi, %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 84(%esp), %esi # 4-byte Folded Reload
+ movl 16(%edi), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ mulxl %eax, %eax, %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ecx
+ movl 20(%edi), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ mulxl %eax, %eax, %ebx
+ movl %ebx, 8(%esp) # 4-byte Spill
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ebx
+ movl 24(%edi), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ mulxl %eax, %edx, %eax
+ adcl %ebp, %edx
+ movl %edx, %edi
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ movl 28(%esp), %ebp # 4-byte Reload
+ addl %ebp, 36(%esp) # 4-byte Folded Spill
+ movl 24(%esp), %ebp # 4-byte Reload
+ adcl %ebp, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ adcl %eax, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 132(%esp), %eax
+ movl 4(%eax), %edx
+ mulxl 64(%esp), %ecx, %esi # 4-byte Folded Reload
+ mulxl 68(%esp), %edi, %eax # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ addl %ecx, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ mulxl 60(%esp), %eax, %edi # 4-byte Folded Reload
+ adcl %esi, %eax
+ movl %eax, %ecx
+ mulxl 56(%esp), %esi, %ebx # 4-byte Folded Reload
+ adcl %edi, %esi
+ mulxl 52(%esp), %eax, %ebp # 4-byte Folded Reload
+ adcl %ebx, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ mulxl 48(%esp), %eax, %edi # 4-byte Folded Reload
+ adcl %ebp, %eax
+ movl %eax, %ebx
+ mulxl 44(%esp), %ebp, %eax # 4-byte Folded Reload
+ adcl %edi, %ebp
+ adcl $0, %eax
+ movl %eax, %edx
+ movl 8(%esp), %eax # 4-byte Reload
+ addl 36(%esp), %eax # 4-byte Folded Reload
+ movl 4(%esp), %edi # 4-byte Reload
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl 16(%esp), %ecx # 4-byte Reload
+ adcl %ecx, 28(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 4(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl %eax, %ebx
+ imull 72(%esp), %edx # 4-byte Folded Reload
+ mulxl 100(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %ecx, 36(%esp) # 4-byte Spill
+ addl %ebx, %eax
+ mulxl 96(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl %edi, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ mulxl 92(%esp), %edi, %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ mulxl 88(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl %esi, %eax
+ movl %eax, %esi
+ mulxl 84(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ecx
+ mulxl 80(%esp), %eax, %ebx # 4-byte Folded Reload
+ movl %ebx, 8(%esp) # 4-byte Spill
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%esp) # 4-byte Spill
+ mulxl 76(%esp), %eax, %edx # 4-byte Folded Reload
+ adcl %ebp, %eax
+ movl 40(%esp), %ebx # 4-byte Reload
+ adcl $0, %ebx
+ movl 36(%esp), %ebp # 4-byte Reload
+ addl %ebp, 32(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %ecx # 4-byte Reload
+ adcl %ecx, 28(%esp) # 4-byte Folded Spill
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl %edx, %ebx
+ movl %ebx, 40(%esp) # 4-byte Spill
+ movl 132(%esp), %eax
+ movl 8(%eax), %edx
+ mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 68(%esp), %esi, %edi # 4-byte Folded Reload
+ movl %esi, 36(%esp) # 4-byte Spill
+ addl %ecx, %edi
+ mulxl 60(%esp), %ecx, %ebx # 4-byte Folded Reload
+ adcl %eax, %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ mulxl 56(%esp), %esi, %eax # 4-byte Folded Reload
+ adcl %ebx, %esi
+ mulxl 52(%esp), %ebx, %ecx # 4-byte Folded Reload
+ adcl %eax, %ebx
+ mulxl 48(%esp), %eax, %ebp # 4-byte Folded Reload
+ movl %ebp, 4(%esp) # 4-byte Spill
+ adcl %ecx, %eax
+ movl %eax, %ebp
+ mulxl 44(%esp), %edx, %eax # 4-byte Folded Reload
+ adcl 4(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %eax
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl 8(%esp), %ecx # 4-byte Reload
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 24(%esp) # 4-byte Spill
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ebx # 4-byte Reload
+ movl %ebx, %edx
+ imull 72(%esp), %edx # 4-byte Folded Reload
+ mulxl 100(%esp), %eax, %ebp # 4-byte Folded Reload
+ movl %ebp, 28(%esp) # 4-byte Spill
+ addl %ebx, %eax
+ mulxl 96(%esp), %ebp, %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ adcl %edi, %ebp
+ mulxl 92(%esp), %ebx, %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ adcl %ecx, %ebx
+ mulxl 88(%esp), %edi, %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl %esi, %edi
+ mulxl 84(%esp), %esi, %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ mulxl 80(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %ecx, (%esp) # 4-byte Spill
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ecx
+ mulxl 76(%esp), %edx, %eax # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %eax
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 28(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 36(%esp) # 4-byte Spill
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 28(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %eax, 8(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 132(%esp), %eax
+ movl 12(%eax), %edx
+ mulxl 64(%esp), %esi, %eax # 4-byte Folded Reload
+ mulxl 68(%esp), %edi, %ecx # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ addl %esi, %ecx
+ mulxl 60(%esp), %esi, %edi # 4-byte Folded Reload
+ adcl %eax, %esi
+ mulxl 56(%esp), %eax, %ebx # 4-byte Folded Reload
+ adcl %edi, %eax
+ movl %eax, %edi
+ mulxl 52(%esp), %eax, %ebp # 4-byte Folded Reload
+ adcl %ebx, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ mulxl 48(%esp), %eax, %ebx # 4-byte Folded Reload
+ movl %ebx, 4(%esp) # 4-byte Spill
+ adcl %ebp, %eax
+ movl %eax, %ebx
+ mulxl 44(%esp), %ebp, %eax # 4-byte Folded Reload
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %eax
+ movl 32(%esp), %edx # 4-byte Reload
+ addl 36(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 12(%esp), %edx # 4-byte Reload
+ adcl %edx, 24(%esp) # 4-byte Folded Spill
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, %edx
+ imull 72(%esp), %edx # 4-byte Folded Reload
+ mulxl 100(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ addl %edi, %eax
+ mulxl 96(%esp), %edi, %ecx # 4-byte Folded Reload
+ movl %ecx, 32(%esp) # 4-byte Spill
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ mulxl 92(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl %esi, %eax
+ movl %eax, %esi
+ mulxl 88(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%esp) # 4-byte Spill
+ mulxl 84(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ecx
+ mulxl 80(%esp), %eax, %ebx # 4-byte Folded Reload
+ movl %ebx, (%esp) # 4-byte Spill
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ mulxl 76(%esp), %eax, %edx # 4-byte Folded Reload
+ adcl %ebp, %eax
+ movl 36(%esp), %ebx # 4-byte Reload
+ adcl $0, %ebx
+ addl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %esi # 4-byte Reload
+ adcl %esi, 28(%esp) # 4-byte Folded Spill
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 4(%esp), %ecx # 4-byte Reload
+ adcl %ecx, 24(%esp) # 4-byte Folded Spill
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %eax, 8(%esp) # 4-byte Spill
+ adcl %edx, %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 132(%esp), %eax
+ movl 16(%eax), %edx
+ mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 68(%esp), %esi, %edi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ addl %ecx, %edi
+ mulxl 60(%esp), %ecx, %ebx # 4-byte Folded Reload
+ adcl %eax, %ecx
+ mulxl 56(%esp), %eax, %esi # 4-byte Folded Reload
+ adcl %ebx, %eax
+ movl %eax, %ebx
+ mulxl 52(%esp), %eax, %ebp # 4-byte Folded Reload
+ adcl %esi, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ mulxl 48(%esp), %eax, %esi # 4-byte Folded Reload
+ movl %esi, 4(%esp) # 4-byte Spill
+ adcl %ebp, %eax
+ movl %eax, %esi
+ mulxl 44(%esp), %ebp, %eax # 4-byte Folded Reload
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %eax
+ movl %eax, %edx
+ movl 32(%esp), %eax # 4-byte Reload
+ addl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %ebx # 4-byte Reload
+ movl %ebx, %edx
+ imull 72(%esp), %edx # 4-byte Folded Reload
+ mulxl 100(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ addl %ebx, %eax
+ mulxl 96(%esp), %ebx, %ecx # 4-byte Folded Reload
+ movl %ecx, 32(%esp) # 4-byte Spill
+ adcl %edi, %ebx
+ mulxl 92(%esp), %edi, %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ mulxl 88(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ecx
+ mulxl 84(%esp), %eax, %esi # 4-byte Folded Reload
+ movl %esi, 4(%esp) # 4-byte Spill
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%esp) # 4-byte Spill
+ mulxl 80(%esp), %eax, %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ mulxl 76(%esp), %eax, %edx # 4-byte Folded Reload
+ adcl %ebp, %eax
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ addl 20(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 8(%esp), %ecx # 4-byte Reload
+ adcl %ecx, 28(%esp) # 4-byte Folded Spill
+ movl 4(%esp), %ecx # 4-byte Reload
+ adcl %ecx, 24(%esp) # 4-byte Folded Spill
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 8(%esp) # 4-byte Spill
+ adcl %edx, %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 132(%esp), %eax
+ movl 20(%eax), %edx
+ mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 68(%esp), %edi, %esi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ addl %ecx, %esi
+ mulxl 60(%esp), %ebp, %ecx # 4-byte Folded Reload
+ adcl %eax, %ebp
+ mulxl 56(%esp), %eax, %edi # 4-byte Folded Reload
+ adcl %ecx, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ mulxl 52(%esp), %ecx, %ebx # 4-byte Folded Reload
+ adcl %edi, %ecx
+ mulxl 48(%esp), %eax, %edi # 4-byte Folded Reload
+ movl %edi, 4(%esp) # 4-byte Spill
+ adcl %ebx, %eax
+ movl %eax, %edi
+ mulxl 44(%esp), %ebx, %eax # 4-byte Folded Reload
+ adcl 4(%esp), %ebx # 4-byte Folded Reload
+ adcl $0, %eax
+ movl %eax, %edx
+ movl 32(%esp), %eax # 4-byte Reload
+ addl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, (%esp) # 4-byte Spill
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ adcl $0, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, %edx
+ imull 72(%esp), %edx # 4-byte Folded Reload
+ mulxl 100(%esp), %eax, %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ addl %ecx, %eax
+ mulxl 96(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ mulxl 92(%esp), %esi, %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl %ebp, %esi
+ mulxl 88(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ecx
+ mulxl 84(%esp), %eax, %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%esp) # 4-byte Spill
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ mulxl 80(%esp), %ebp, %edi # 4-byte Folded Reload
+ movl %edi, 4(%esp) # 4-byte Spill
+ adcl (%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 40(%esp) # 4-byte Spill
+ mulxl 76(%esp), %edi, %edx # 4-byte Folded Reload
+ adcl %ebx, %edi
+ movl %edi, %ebx
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl 28(%esp), %ebp # 4-byte Reload
+ addl %ebp, 32(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ adcl 4(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ adcl %edx, %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 132(%esp), %eax
+ movl 24(%eax), %edx
+ mulxl 64(%esp), %edi, %ebx # 4-byte Folded Reload
+ mulxl 68(%esp), %eax, %ebp # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ addl %edi, %ebp
+ mulxl 60(%esp), %ecx, %eax # 4-byte Folded Reload
+ adcl %ebx, %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ mulxl 56(%esp), %ebx, %ecx # 4-byte Folded Reload
+ adcl %eax, %ebx
+ mulxl 52(%esp), %esi, %edi # 4-byte Folded Reload
+ adcl %ecx, %esi
+ mulxl 48(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl %edi, %eax
+ movl %eax, %ecx
+ mulxl 44(%esp), %edx, %eax # 4-byte Folded Reload
+ adcl 60(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %eax
+ movl 64(%esp), %edi # 4-byte Reload
+ addl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ movl 64(%esp), %eax # 4-byte Reload
+ imull %eax, %edx
+ mulxl 100(%esp), %esi, %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ addl %eax, %esi
+ mulxl 96(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl %ebp, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %edx, %ecx
+ mulxl 92(%esp), %eax, %edx # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl %edi, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ mulxl 88(%esp), %ebp, %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl %ebx, %ebp
+ movl %ecx, %edx
+ mulxl 84(%esp), %esi, %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %ecx, %edx
+ mulxl 80(%esp), %edi, %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %ecx, %edx
+ mulxl 76(%esp), %ebx, %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl 36(%esp), %edx # 4-byte Reload
+ addl 56(%esp), %edx # 4-byte Folded Reload
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 56(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl %edx, %ecx
+ subl 100(%esp), %ecx # 4-byte Folded Reload
+ sbbl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ sbbl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ sbbl 88(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 96(%esp) # 4-byte Spill
+ sbbl 84(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 100(%esp) # 4-byte Spill
+ sbbl 80(%esp), %ebx # 4-byte Folded Reload
+ movl 68(%esp), %edi # 4-byte Reload
+ sbbl 76(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %eax
+ sarl $31, %eax
+ testl %eax, %eax
+ js .LBB99_2
+# BB#1:
+ movl %ecx, %edx
+.LBB99_2:
+ movl 124(%esp), %esi
+ movl %edx, (%esi)
+ movl 72(%esp), %eax # 4-byte Reload
+ js .LBB99_4
+# BB#3:
+ movl 52(%esp), %eax # 4-byte Reload
+.LBB99_4:
+ movl %eax, 4(%esi)
+ movl 68(%esp), %eax # 4-byte Reload
+ movl 64(%esp), %ecx # 4-byte Reload
+ movl 60(%esp), %edx # 4-byte Reload
+ js .LBB99_6
+# BB#5:
+ movl 92(%esp), %ebp # 4-byte Reload
+.LBB99_6:
+ movl %ebp, 8(%esi)
+ movl %esi, %ebp
+ movl 56(%esp), %esi # 4-byte Reload
+ js .LBB99_8
+# BB#7:
+ movl 96(%esp), %esi # 4-byte Reload
+.LBB99_8:
+ movl %esi, 12(%ebp)
+ js .LBB99_10
+# BB#9:
+ movl 100(%esp), %edx # 4-byte Reload
+.LBB99_10:
+ movl %edx, 16(%ebp)
+ js .LBB99_12
+# BB#11:
+ movl %ebx, %ecx
+.LBB99_12:
+ movl %ecx, 20(%ebp)
+ js .LBB99_14
+# BB#13:
+ movl %edi, %eax
+.LBB99_14:
+ movl %eax, 24(%ebp)
+ addl $104, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end99:
+ .size mcl_fp_montNF7Lbmi2, .Lfunc_end99-mcl_fp_montNF7Lbmi2
+
+ .globl mcl_fp_montRed7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed7Lbmi2,@function
+mcl_fp_montRed7Lbmi2: # @mcl_fp_montRed7Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $108, %esp
+ movl 136(%esp), %edi
+ movl -4(%edi), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl (%edi), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 132(%esp), %eax
+ movl (%eax), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ imull %ecx, %edx
+ movl 24(%edi), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ mulxl %ecx, %ebx, %ecx
+ movl %ebx, 68(%esp) # 4-byte Spill
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 20(%edi), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ mulxl %ecx, %ebx, %ecx
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 16(%edi), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ mulxl %ecx, %ebx, %ecx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 4(%edi), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ mulxl %ecx, %ecx, %ebp
+ mulxl %esi, %ebx, %esi
+ movl %ebx, 64(%esp) # 4-byte Spill
+ addl %ecx, %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 8(%edi), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ mulxl %ecx, %esi, %ecx
+ adcl %ebp, %esi
+ movl %esi, %ebp
+ movl 12(%edi), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ mulxl %esi, %esi, %edx
+ adcl %ecx, %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %edi
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl 64(%esp), %ebx # 4-byte Reload
+ addl 72(%esp), %ebx # 4-byte Folded Reload
+ movl 28(%esp), %ebx # 4-byte Reload
+ adcl 4(%eax), %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ adcl 8(%eax), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 12(%eax), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl 16(%eax), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 20(%eax), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ adcl 24(%eax), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 28(%eax), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 52(%eax), %ecx
+ movl 48(%eax), %edx
+ movl 44(%eax), %esi
+ movl 40(%eax), %edi
+ movl 36(%eax), %ebp
+ movl 32(%eax), %eax
+ adcl $0, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ imull 76(%esp), %edx # 4-byte Folded Reload
+ mulxl 100(%esp), %eax, %ebx # 4-byte Folded Reload
+ movl %eax, 4(%esp) # 4-byte Spill
+ mulxl 88(%esp), %ebp, %eax # 4-byte Folded Reload
+ movl %eax, (%esp) # 4-byte Spill
+ mulxl 96(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 92(%esp), %edi, %esi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ addl %ecx, %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl %ebp, %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ mulxl 84(%esp), %esi, %ebp # 4-byte Folded Reload
+ adcl (%esp), %esi # 4-byte Folded Reload
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ mulxl 104(%esp), %ecx, %edi # 4-byte Folded Reload
+ adcl %ebx, %ecx
+ mulxl 80(%esp), %ebx, %edx # 4-byte Folded Reload
+ adcl %edi, %ebx
+ adcl $0, %edx
+ movl 8(%esp), %eax # 4-byte Reload
+ addl 28(%esp), %eax # 4-byte Folded Reload
+ movl 32(%esp), %edi # 4-byte Reload
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 36(%esp) # 4-byte Spill
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ adcl $0, 60(%esp) # 4-byte Folded Spill
+ adcl $0, 64(%esp) # 4-byte Folded Spill
+ adcl $0, 72(%esp) # 4-byte Folded Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl %edi, %edx
+ imull 76(%esp), %edx # 4-byte Folded Reload
+ mulxl 80(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl %eax, 52(%esp) # 4-byte Spill
+ mulxl 84(%esp), %ebx, %ebp # 4-byte Folded Reload
+ mulxl 96(%esp), %eax, %esi # 4-byte Folded Reload
+ mulxl 92(%esp), %ecx, %edi # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ addl %eax, %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ mulxl 88(%esp), %edi, %eax # 4-byte Folded Reload
+ adcl %esi, %edi
+ adcl %ebx, %eax
+ movl %eax, %ebx
+ mulxl 100(%esp), %esi, %eax # 4-byte Folded Reload
+ adcl %ebp, %esi
+ mulxl 104(%esp), %edx, %ecx # 4-byte Folded Reload
+ adcl %eax, %edx
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 12(%esp), %ebp # 4-byte Reload
+ addl 32(%esp), %ebp # 4-byte Folded Reload
+ movl 8(%esp), %ebp # 4-byte Reload
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 36(%esp) # 4-byte Spill
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ adcl $0, 60(%esp) # 4-byte Folded Spill
+ adcl $0, 64(%esp) # 4-byte Folded Spill
+ adcl $0, 72(%esp) # 4-byte Folded Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl %ebp, %edx
+ imull 76(%esp), %edx # 4-byte Folded Reload
+ mulxl 80(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ mulxl 84(%esp), %edi, %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ mulxl 96(%esp), %eax, %ebx # 4-byte Folded Reload
+ mulxl 92(%esp), %esi, %ecx # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ addl %eax, %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ mulxl 88(%esp), %esi, %eax # 4-byte Folded Reload
+ adcl %ebx, %esi
+ movl %esi, %ebx
+ adcl %edi, %eax
+ movl %eax, %edi
+ mulxl 100(%esp), %esi, %ecx # 4-byte Folded Reload
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ mulxl 104(%esp), %edx, %eax # 4-byte Folded Reload
+ adcl %ecx, %edx
+ movl %edx, %ecx
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %edx
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %ebp, 20(%esp) # 4-byte Folded Spill
+ movl 16(%esp), %ebp # 4-byte Reload
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 36(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 40(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, 60(%esp) # 4-byte Folded Spill
+ adcl $0, 64(%esp) # 4-byte Folded Spill
+ adcl $0, 72(%esp) # 4-byte Folded Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl %ebp, %edx
+ movl %ebp, %edi
+ imull 76(%esp), %edx # 4-byte Folded Reload
+ mulxl 80(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ mulxl 84(%esp), %eax, %ebx # 4-byte Folded Reload
+ movl %eax, 16(%esp) # 4-byte Spill
+ mulxl 96(%esp), %eax, %ecx # 4-byte Folded Reload
+ mulxl 92(%esp), %esi, %ebp # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ addl %eax, %ebp
+ mulxl 88(%esp), %esi, %eax # 4-byte Folded Reload
+ adcl %ecx, %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ecx
+ mulxl 100(%esp), %esi, %eax # 4-byte Folded Reload
+ adcl %ebx, %esi
+ mulxl 104(%esp), %ebx, %edx # 4-byte Folded Reload
+ adcl %eax, %ebx
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %edi, 20(%esp) # 4-byte Folded Spill
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl %edi, 44(%esp) # 4-byte Folded Spill
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 36(%esp) # 4-byte Spill
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 28(%esp) # 4-byte Spill
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 64(%esp) # 4-byte Folded Spill
+ adcl $0, 72(%esp) # 4-byte Folded Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl %ebp, %edx
+ imull 76(%esp), %edx # 4-byte Folded Reload
+ mulxl 80(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl %eax, 60(%esp) # 4-byte Spill
+ mulxl 84(%esp), %ebx, %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ mulxl 96(%esp), %ecx, %edi # 4-byte Folded Reload
+ mulxl 92(%esp), %esi, %eax # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ addl %ecx, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ mulxl 88(%esp), %ecx, %eax # 4-byte Folded Reload
+ adcl %edi, %ecx
+ movl %ecx, %edi
+ adcl %ebx, %eax
+ movl %eax, %ebx
+ mulxl 100(%esp), %esi, %ecx # 4-byte Folded Reload
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ mulxl 104(%esp), %edx, %eax # 4-byte Folded Reload
+ adcl %ecx, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %edx
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl %ebp, 20(%esp) # 4-byte Folded Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 40(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl %esi, 48(%esp) # 4-byte Folded Spill
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl $0, 72(%esp) # 4-byte Folded Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ imull %eax, %edx
+ mulxl 92(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ mulxl 96(%esp), %eax, %esi # 4-byte Folded Reload
+ addl %ecx, %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ mulxl 88(%esp), %eax, %edi # 4-byte Folded Reload
+ adcl %esi, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %edx, %esi
+ mulxl 84(%esp), %ebp, %eax # 4-byte Folded Reload
+ adcl %edi, %ebp
+ mulxl 100(%esp), %ecx, %edi # 4-byte Folded Reload
+ adcl %eax, %ecx
+ mulxl 104(%esp), %ebx, %eax # 4-byte Folded Reload
+ movl %eax, 28(%esp) # 4-byte Spill
+ adcl %edi, %ebx
+ mulxl 80(%esp), %edi, %eax # 4-byte Folded Reload
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ adcl $0, %eax
+ movl 64(%esp), %edx # 4-byte Reload
+ addl 52(%esp), %edx # 4-byte Folded Reload
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 76(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 64(%esp) # 4-byte Spill
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ movl %edx, %ebp
+ subl 92(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ sbbl 96(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ sbbl 88(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ sbbl 84(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl %ebx, %ecx
+ sbbl 100(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl %edi, %ecx
+ sbbl 104(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl %eax, %ecx
+ sbbl 80(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 100(%esp) # 4-byte Spill
+ sbbl $0, %esi
+ andl $1, %esi
+ jne .LBB100_2
+# BB#1:
+ movl 68(%esp), %ebp # 4-byte Reload
+.LBB100_2:
+ movl 128(%esp), %edx
+ movl %ebp, (%edx)
+ movl %esi, %eax
+ testb %al, %al
+ movl 76(%esp), %ebp # 4-byte Reload
+ jne .LBB100_4
+# BB#3:
+ movl 72(%esp), %ebp # 4-byte Reload
+.LBB100_4:
+ movl %ebp, 4(%edx)
+ movl %ecx, %eax
+ movl 64(%esp), %ecx # 4-byte Reload
+ jne .LBB100_6
+# BB#5:
+ movl 88(%esp), %ecx # 4-byte Reload
+.LBB100_6:
+ movl %ecx, 8(%edx)
+ movl 56(%esp), %ecx # 4-byte Reload
+ jne .LBB100_8
+# BB#7:
+ movl 92(%esp), %ecx # 4-byte Reload
+.LBB100_8:
+ movl %ecx, 12(%edx)
+ jne .LBB100_10
+# BB#9:
+ movl 96(%esp), %ebx # 4-byte Reload
+.LBB100_10:
+ movl %ebx, 16(%edx)
+ jne .LBB100_12
+# BB#11:
+ movl 104(%esp), %edi # 4-byte Reload
+.LBB100_12:
+ movl %edi, 20(%edx)
+ jne .LBB100_14
+# BB#13:
+ movl 100(%esp), %eax # 4-byte Reload
+.LBB100_14:
+ movl %eax, 24(%edx)
+ addl $108, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end100:
+ .size mcl_fp_montRed7Lbmi2, .Lfunc_end100-mcl_fp_montRed7Lbmi2
+
+ .globl mcl_fp_addPre7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre7Lbmi2,@function
+mcl_fp_addPre7Lbmi2: # @mcl_fp_addPre7Lbmi2
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ movl 20(%esp), %esi
+ addl (%esi), %ecx
+ adcl 4(%esi), %edx
+ movl 8(%eax), %edi
+ adcl 8(%esi), %edi
+ movl 16(%esp), %ebx
+ movl %ecx, (%ebx)
+ movl 12(%esi), %ecx
+ movl %edx, 4(%ebx)
+ movl 16(%esi), %edx
+ adcl 12(%eax), %ecx
+ adcl 16(%eax), %edx
+ movl %edi, 8(%ebx)
+ movl 20(%eax), %edi
+ movl %ecx, 12(%ebx)
+ movl 20(%esi), %ecx
+ adcl %edi, %ecx
+ movl %edx, 16(%ebx)
+ movl %ecx, 20(%ebx)
+ movl 24(%eax), %eax
+ movl 24(%esi), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 24(%ebx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end101:
+ .size mcl_fp_addPre7Lbmi2, .Lfunc_end101-mcl_fp_addPre7Lbmi2
+
+ .globl mcl_fp_subPre7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre7Lbmi2,@function
+mcl_fp_subPre7Lbmi2: # @mcl_fp_subPre7Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %edx
+ movl 4(%ecx), %esi
+ xorl %eax, %eax
+ movl 28(%esp), %edi
+ subl (%edi), %edx
+ sbbl 4(%edi), %esi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edi), %ebx
+ movl 20(%esp), %ebp
+ movl %edx, (%ebp)
+ movl 12(%ecx), %edx
+ sbbl 12(%edi), %edx
+ movl %esi, 4(%ebp)
+ movl 16(%ecx), %esi
+ sbbl 16(%edi), %esi
+ movl %ebx, 8(%ebp)
+ movl 20(%edi), %ebx
+ movl %edx, 12(%ebp)
+ movl 20(%ecx), %edx
+ sbbl %ebx, %edx
+ movl %esi, 16(%ebp)
+ movl %edx, 20(%ebp)
+ movl 24(%edi), %edx
+ movl 24(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 24(%ebp)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end102:
+ .size mcl_fp_subPre7Lbmi2, .Lfunc_end102-mcl_fp_subPre7Lbmi2
+
+ .globl mcl_fp_shr1_7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_7Lbmi2,@function
+mcl_fp_shr1_7Lbmi2: # @mcl_fp_shr1_7Lbmi2
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl 8(%esp), %esi
+ movl %ecx, (%esi)
+ movl 8(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 4(%esi)
+ movl 12(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 8(%esi)
+ movl 16(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 12(%esi)
+ movl 20(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 16(%esi)
+ movl 24(%eax), %eax
+ shrdl $1, %eax, %edx
+ movl %edx, 20(%esi)
+ shrl %eax
+ movl %eax, 24(%esi)
+ popl %esi
+ retl
+.Lfunc_end103:
+ .size mcl_fp_shr1_7Lbmi2, .Lfunc_end103-mcl_fp_shr1_7Lbmi2
+
+ .globl mcl_fp_add7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add7Lbmi2,@function
+mcl_fp_add7Lbmi2: # @mcl_fp_add7Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $20, %esp
+ movl 48(%esp), %ebp
+ movl (%ebp), %eax
+ movl 4(%ebp), %edi
+ movl 44(%esp), %ecx
+ addl (%ecx), %eax
+ adcl 4(%ecx), %edi
+ movl 8(%ebp), %esi
+ adcl 8(%ecx), %esi
+ movl 12(%ecx), %edx
+ movl 16(%ecx), %ebx
+ adcl 12(%ebp), %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 16(%ebp), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl %ebp, %ebx
+ movl 20(%ecx), %ebp
+ adcl 20(%ebx), %ebp
+ movl 24(%ecx), %edx
+ adcl 24(%ebx), %edx
+ movl 40(%esp), %ecx
+ movl %eax, (%ecx)
+ movl %edi, 4(%ecx)
+ movl %esi, 8(%ecx)
+ movl 16(%esp), %ebx # 4-byte Reload
+ movl %ebx, 12(%ecx)
+ movl 12(%esp), %ebx # 4-byte Reload
+ movl %ebx, 16(%ecx)
+ movl %ebp, 20(%ecx)
+ movl %edx, 24(%ecx)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 52(%esp), %ecx
+ subl (%ecx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl 52(%esp), %eax
+ sbbl 4(%eax), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl %eax, %edi
+ sbbl 8(%edi), %esi
+ movl %esi, (%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ sbbl 12(%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ sbbl 16(%edi), %ecx
+ movl %ecx, %esi
+ sbbl 20(%edi), %ebp
+ sbbl 24(%edi), %edx
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB104_2
+# BB#1: # %nocarry
+ movl 8(%esp), %ecx # 4-byte Reload
+ movl 40(%esp), %eax
+ movl %eax, %ebx
+ movl %ecx, (%ebx)
+ movl 4(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%ebx)
+ movl (%esp), %eax # 4-byte Reload
+ movl %eax, 8(%ebx)
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 12(%ebx)
+ movl %esi, 16(%ebx)
+ movl %ebp, 20(%ebx)
+ movl %edx, 24(%ebx)
+.LBB104_2: # %carry
+ addl $20, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end104:
+ .size mcl_fp_add7Lbmi2, .Lfunc_end104-mcl_fp_add7Lbmi2
+
+ .globl mcl_fp_addNF7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF7Lbmi2,@function
+mcl_fp_addNF7Lbmi2: # @mcl_fp_addNF7Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $52, %esp
+ movl 80(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ movl 76(%esp), %esi
+ addl (%esi), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 4(%esi), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 24(%eax), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 20(%eax), %ebx
+ movl 16(%eax), %edi
+ movl 12(%eax), %ebp
+ movl 8(%eax), %ecx
+ adcl 8(%esi), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 12(%esi), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ adcl 16(%esi), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 20(%esi), %ebx
+ movl %ebx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 24(%esi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl 44(%esp), %esi # 4-byte Reload
+ subl (%eax), %esi
+ movl %esi, (%esp) # 4-byte Spill
+ sbbl 4(%eax), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ sbbl 8(%eax), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 12(%eax), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ sbbl 16(%eax), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ sbbl 20(%eax), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ sbbl 24(%eax), %edi
+ movl %edi, %ecx
+ sarl $31, %ecx
+ testl %ecx, %ecx
+ js .LBB105_2
+# BB#1:
+ movl (%esp), %esi # 4-byte Reload
+.LBB105_2:
+ movl 72(%esp), %ecx
+ movl %esi, (%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ js .LBB105_4
+# BB#3:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB105_4:
+ movl %eax, 4(%ecx)
+ movl 48(%esp), %ebp # 4-byte Reload
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl 36(%esp), %edx # 4-byte Reload
+ movl 32(%esp), %esi # 4-byte Reload
+ movl 24(%esp), %ebx # 4-byte Reload
+ js .LBB105_6
+# BB#5:
+ movl 8(%esp), %ebx # 4-byte Reload
+.LBB105_6:
+ movl 72(%esp), %eax
+ movl %ebx, 8(%eax)
+ movl %eax, %ebx
+ js .LBB105_8
+# BB#7:
+ movl 16(%esp), %esi # 4-byte Reload
+.LBB105_8:
+ movl %esi, 12(%ebx)
+ js .LBB105_10
+# BB#9:
+ movl 20(%esp), %edx # 4-byte Reload
+.LBB105_10:
+ movl %edx, 16(%ebx)
+ js .LBB105_12
+# BB#11:
+ movl 12(%esp), %ecx # 4-byte Reload
+.LBB105_12:
+ movl %ecx, 20(%ebx)
+ js .LBB105_14
+# BB#13:
+ movl %edi, %ebp
+.LBB105_14:
+ movl %ebp, 24(%ebx)
+ addl $52, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end105:
+ .size mcl_fp_addNF7Lbmi2, .Lfunc_end105-mcl_fp_addNF7Lbmi2
+
+ .globl mcl_fp_sub7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub7Lbmi2,@function
+mcl_fp_sub7Lbmi2: # @mcl_fp_sub7Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $24, %esp
+ movl 48(%esp), %edi
+ movl (%edi), %eax
+ movl 4(%edi), %ecx
+ xorl %ebx, %ebx
+ movl 52(%esp), %esi
+ subl (%esi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ sbbl 4(%esi), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 8(%edi), %edx
+ sbbl 8(%esi), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl 12(%edi), %ecx
+ sbbl 12(%esi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 16(%edi), %eax
+ sbbl 16(%esi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 20(%edi), %ebp
+ sbbl 20(%esi), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 24(%edi), %edi
+ sbbl 24(%esi), %edi
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 44(%esp), %ebx
+ movl 16(%esp), %esi # 4-byte Reload
+ movl %esi, (%ebx)
+ movl 20(%esp), %esi # 4-byte Reload
+ movl %esi, 4(%ebx)
+ movl %edx, 8(%ebx)
+ movl %ecx, 12(%ebx)
+ movl %eax, 16(%ebx)
+ movl %ebp, 20(%ebx)
+ movl %edi, 24(%ebx)
+ je .LBB106_2
+# BB#1: # %carry
+ movl 56(%esp), %ebp
+ movl 16(%esp), %ecx # 4-byte Reload
+ addl (%ebp), %ecx
+ movl %ecx, (%ebx)
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl 4(%ebp), %edx
+ movl %edx, 4(%ebx)
+ movl 4(%esp), %ecx # 4-byte Reload
+ adcl 8(%ebp), %ecx
+ movl 12(%ebp), %eax
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 8(%ebx)
+ movl 16(%ebp), %ecx
+ adcl (%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl %ecx, 16(%ebx)
+ movl 20(%ebp), %eax
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl 24(%ebp), %eax
+ adcl %edi, %eax
+ movl %eax, 24(%ebx)
+.LBB106_2: # %nocarry
+ addl $24, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end106:
+ .size mcl_fp_sub7Lbmi2, .Lfunc_end106-mcl_fp_sub7Lbmi2
+
+ .globl mcl_fp_subNF7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF7Lbmi2,@function
+mcl_fp_subNF7Lbmi2: # @mcl_fp_subNF7Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $32, %esp
+ movl 56(%esp), %eax
+ movl (%eax), %esi
+ movl 4(%eax), %edx
+ movl 60(%esp), %ecx
+ subl (%ecx), %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ sbbl 4(%ecx), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 24(%eax), %edx
+ movl 20(%eax), %esi
+ movl 16(%eax), %edi
+ movl 12(%eax), %ebx
+ movl 8(%eax), %eax
+ sbbl 8(%ecx), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ sbbl 12(%ecx), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ sbbl 16(%ecx), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ sbbl 20(%ecx), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ sbbl 24(%ecx), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %edx, %ecx
+ sarl $31, %ecx
+ movl %ecx, %eax
+ shldl $1, %edx, %eax
+ movl 64(%esp), %edx
+ andl (%edx), %eax
+ movl 24(%edx), %esi
+ andl %ecx, %esi
+ movl %esi, (%esp) # 4-byte Spill
+ movl 20(%edx), %ebx
+ andl %ecx, %ebx
+ movl 16(%edx), %edi
+ andl %ecx, %edi
+ movl 12(%edx), %esi
+ andl %ecx, %esi
+ movl 64(%esp), %edx
+ movl 8(%edx), %edx
+ andl %ecx, %edx
+ movl 64(%esp), %ebp
+ andl 4(%ebp), %ecx
+ addl 20(%esp), %eax # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl 52(%esp), %ebp
+ movl %eax, (%ebp)
+ adcl 4(%esp), %edx # 4-byte Folded Reload
+ movl %ebp, %eax
+ movl %ecx, 4(%eax)
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 8(%eax)
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 12(%eax)
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %edi, 16(%eax)
+ movl %ebx, 20(%eax)
+ movl (%esp), %ecx # 4-byte Reload
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%eax)
+ addl $32, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end107:
+ .size mcl_fp_subNF7Lbmi2, .Lfunc_end107-mcl_fp_subNF7Lbmi2
+
+ .globl mcl_fpDbl_add7Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add7Lbmi2,@function
+mcl_fpDbl_add7Lbmi2: # @mcl_fpDbl_add7Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $44, %esp
+ movl 72(%esp), %esi
+ movl 68(%esp), %edx
+ movl 12(%edx), %edi
+ movl 16(%edx), %ecx
+ movl 8(%esi), %eax
+ movl (%esi), %ebx
+ addl (%edx), %ebx
+ movl 64(%esp), %ebp
+ movl %ebx, (%ebp)
+ movl 4(%esi), %ebx
+ adcl 4(%edx), %ebx
+ adcl 8(%edx), %eax
+ adcl 12(%esi), %edi
+ adcl 16(%esi), %ecx
+ movl %ebx, 4(%ebp)
+ movl %esi, %ebx
+ movl 36(%ebx), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl %eax, 8(%ebp)
+ movl 20(%ebx), %eax
+ movl %edi, 12(%ebp)
+ movl 20(%edx), %edi
+ adcl %eax, %edi
+ movl 24(%ebx), %eax
+ movl %ecx, 16(%ebp)
+ movl 24(%edx), %ecx
+ adcl %eax, %ecx
+ movl 28(%ebx), %eax
+ movl %edi, 20(%ebp)
+ movl 28(%edx), %edi
+ adcl %eax, %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 32(%ebx), %eax
+ movl %ecx, 24(%ebp)
+ movl 32(%edx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%edx), %esi
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl 40(%ebx), %ecx
+ movl 40(%edx), %eax
+ adcl %ecx, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%ebx), %ebp
+ movl 44(%edx), %ecx
+ adcl %ebp, %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 48(%ebx), %ebp
+ movl %ebx, %eax
+ movl 48(%edx), %ebx
+ adcl %ebp, %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 52(%eax), %eax
+ movl 52(%edx), %ebp
+ adcl %eax, %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ sbbl %edx, %edx
+ andl $1, %edx
+ movl 76(%esp), %eax
+ subl (%eax), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ sbbl 4(%eax), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ movl 76(%esp), %edi
+ sbbl 8(%edi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ sbbl 12(%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ sbbl 16(%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ sbbl 20(%edi), %ebx
+ sbbl 24(%edi), %ebp
+ sbbl $0, %edx
+ andl $1, %edx
+ jne .LBB108_2
+# BB#1:
+ movl %ebp, 32(%esp) # 4-byte Spill
+.LBB108_2:
+ testb %dl, %dl
+ movl 20(%esp), %ecx # 4-byte Reload
+ jne .LBB108_4
+# BB#3:
+ movl (%esp), %esi # 4-byte Reload
+ movl 4(%esp), %eax # 4-byte Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 8(%esp), %ecx # 4-byte Reload
+.LBB108_4:
+ movl 64(%esp), %eax
+ movl %ecx, 28(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl %esi, 36(%eax)
+ movl 24(%esp), %edx # 4-byte Reload
+ movl 36(%esp), %ecx # 4-byte Reload
+ jne .LBB108_6
+# BB#5:
+ movl 12(%esp), %ecx # 4-byte Reload
+.LBB108_6:
+ movl %ecx, 40(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ jne .LBB108_8
+# BB#7:
+ movl 16(%esp), %edx # 4-byte Reload
+.LBB108_8:
+ movl %edx, 44(%eax)
+ jne .LBB108_10
+# BB#9:
+ movl %ebx, %ecx
+.LBB108_10:
+ movl %ecx, 48(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%eax)
+ addl $44, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end108:
+ .size mcl_fpDbl_add7Lbmi2, .Lfunc_end108-mcl_fpDbl_add7Lbmi2
+
+ .globl mcl_fpDbl_sub7Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub7Lbmi2,@function
+mcl_fpDbl_sub7Lbmi2: # @mcl_fpDbl_sub7Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $32, %esp
+ movl 56(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %edx
+ movl 60(%esp), %edi
+ subl (%edi), %eax
+ sbbl 4(%edi), %edx
+ movl 8(%esi), %ebx
+ sbbl 8(%edi), %ebx
+ movl 52(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 12(%esi), %eax
+ sbbl 12(%edi), %eax
+ movl %edx, 4(%ecx)
+ movl 16(%esi), %edx
+ sbbl 16(%edi), %edx
+ movl %ebx, 8(%ecx)
+ movl 20(%edi), %ebx
+ movl %eax, 12(%ecx)
+ movl 20(%esi), %eax
+ sbbl %ebx, %eax
+ movl 24(%edi), %ebx
+ movl %edx, 16(%ecx)
+ movl 24(%esi), %edx
+ sbbl %ebx, %edx
+ movl 28(%edi), %ebx
+ movl %eax, 20(%ecx)
+ movl 28(%esi), %eax
+ sbbl %ebx, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 32(%edi), %eax
+ movl %edx, 24(%ecx)
+ movl 32(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl 36(%edi), %eax
+ movl 36(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 40(%edi), %eax
+ movl 40(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 44(%edi), %eax
+ movl 44(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 48(%edi), %eax
+ movl 48(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 52(%edi), %eax
+ movl 52(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 64(%esp), %esi
+ jne .LBB109_1
+# BB#2:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB109_3
+.LBB109_1:
+ movl 24(%esi), %edx
+ movl %edx, (%esp) # 4-byte Spill
+.LBB109_3:
+ testb %al, %al
+ jne .LBB109_4
+# BB#5:
+ movl $0, %edi
+ movl $0, %eax
+ jmp .LBB109_6
+.LBB109_4:
+ movl (%esi), %eax
+ movl 4(%esi), %edi
+.LBB109_6:
+ jne .LBB109_7
+# BB#8:
+ movl $0, %ebx
+ jmp .LBB109_9
+.LBB109_7:
+ movl 20(%esi), %ebx
+.LBB109_9:
+ jne .LBB109_10
+# BB#11:
+ movl $0, %ebp
+ jmp .LBB109_12
+.LBB109_10:
+ movl 16(%esi), %ebp
+.LBB109_12:
+ jne .LBB109_13
+# BB#14:
+ movl $0, %edx
+ jmp .LBB109_15
+.LBB109_13:
+ movl 12(%esi), %edx
+.LBB109_15:
+ jne .LBB109_16
+# BB#17:
+ xorl %esi, %esi
+ jmp .LBB109_18
+.LBB109_16:
+ movl 8(%esi), %esi
+.LBB109_18:
+ addl 12(%esp), %eax # 4-byte Folded Reload
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %eax, 28(%ecx)
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %edi, 32(%ecx)
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %esi, 36(%ecx)
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ movl %edx, 40(%ecx)
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, 44(%ecx)
+ movl %ebx, 48(%ecx)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%ecx)
+ addl $32, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end109:
+ .size mcl_fpDbl_sub7Lbmi2, .Lfunc_end109-mcl_fpDbl_sub7Lbmi2
+
+ .align 16, 0x90
+ .type .LmulPv256x32,@function
+.LmulPv256x32: # @mulPv256x32
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $20, %esp
+ movl %edx, %eax
+ movl 40(%esp), %edx
+ mulxl 4(%eax), %edi, %esi
+ mulxl (%eax), %ebp, %ebx
+ movl %ebp, 16(%esp) # 4-byte Spill
+ addl %edi, %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ mulxl 8(%eax), %edi, %ebx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ adcl %esi, %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ mulxl 12(%eax), %ebx, %esi
+ adcl 4(%esp), %ebx # 4-byte Folded Reload
+ mulxl 16(%eax), %edi, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl %esi, %edi
+ movl %ebp, %edx
+ mulxl 20(%eax), %esi, %edx
+ movl %edx, (%esp) # 4-byte Spill
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ movl %ebp, %edx
+ mulxl 24(%eax), %edx, %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ adcl (%esp), %edx # 4-byte Folded Reload
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, (%ecx)
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%ecx)
+ movl 8(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%ecx)
+ movl %ebx, 12(%ecx)
+ movl %edi, 16(%ecx)
+ movl %esi, 20(%ecx)
+ movl %edx, 24(%ecx)
+ movl 40(%esp), %edx
+ mulxl 28(%eax), %eax, %edx
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%ecx)
+ adcl $0, %edx
+ movl %edx, 32(%ecx)
+ movl %ecx, %eax
+ addl $20, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end110:
+ .size .LmulPv256x32, .Lfunc_end110-.LmulPv256x32
+
+ .globl mcl_fp_mulUnitPre8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre8Lbmi2,@function
+mcl_fp_mulUnitPre8Lbmi2: # @mcl_fp_mulUnitPre8Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $60, %esp
+ calll .L111$pb
+.L111$pb:
+ popl %ebx
+.Ltmp2:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp2-.L111$pb), %ebx
+ movl 88(%esp), %eax
+ movl %eax, (%esp)
+ leal 24(%esp), %ecx
+ movl 84(%esp), %edx
+ calll .LmulPv256x32
+ movl 56(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 52(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 48(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %esi
+ movl 40(%esp), %edi
+ movl 36(%esp), %ebx
+ movl 32(%esp), %ebp
+ movl 24(%esp), %edx
+ movl 28(%esp), %ecx
+ movl 80(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %ebp, 8(%eax)
+ movl %ebx, 12(%eax)
+ movl %edi, 16(%eax)
+ movl %esi, 20(%eax)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ addl $60, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end111:
+ .size mcl_fp_mulUnitPre8Lbmi2, .Lfunc_end111-mcl_fp_mulUnitPre8Lbmi2
+
+ .globl mcl_fpDbl_mulPre8Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre8Lbmi2,@function
+mcl_fpDbl_mulPre8Lbmi2: # @mcl_fpDbl_mulPre8Lbmi2
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $156, %esp
+ calll .L112$pb
+.L112$pb:
+ popl %ebx
+.Ltmp3:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp3-.L112$pb), %ebx
+ movl %ebx, -96(%ebp) # 4-byte Spill
+ movl 16(%ebp), %esi
+ movl %esi, 8(%esp)
+ movl 12(%ebp), %edi
+ movl %edi, 4(%esp)
+ movl 8(%ebp), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre4Lbmi2@PLT
+ leal 16(%esi), %eax
+ movl %eax, 8(%esp)
+ leal 16(%edi), %eax
+ movl %eax, 4(%esp)
+ movl 8(%ebp), %eax
+ leal 32(%eax), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre4Lbmi2@PLT
+ movl 24(%edi), %esi
+ movl (%edi), %ebx
+ movl 4(%edi), %eax
+ addl 16(%edi), %ebx
+ movl %ebx, -120(%ebp) # 4-byte Spill
+ adcl 20(%edi), %eax
+ movl %eax, -100(%ebp) # 4-byte Spill
+ adcl 8(%edi), %esi
+ movl %esi, -108(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -80(%ebp) # 4-byte Spill
+ movl 16(%ebp), %edi
+ movl (%edi), %eax
+ movl 4(%edi), %ecx
+ addl 16(%edi), %eax
+ adcl 20(%edi), %ecx
+ movl %ecx, -124(%ebp) # 4-byte Spill
+ movl 24(%edi), %edx
+ adcl 8(%edi), %edx
+ movl 28(%edi), %ecx
+ adcl 12(%edi), %ecx
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ movl %edi, -128(%ebp) # 4-byte Spill
+ jb .LBB112_2
+# BB#1:
+ xorl %esi, %esi
+ xorl %ebx, %ebx
+.LBB112_2:
+ movl %ebx, -112(%ebp) # 4-byte Spill
+ movl %esi, -104(%ebp) # 4-byte Spill
+ movl 12(%ebp), %esi
+ movl 28(%esi), %edi
+ movl -80(%ebp), %ebx # 4-byte Reload
+ pushl %eax
+ movl %ebx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ adcl 12(%esi), %edi
+ movl %edi, -116(%ebp) # 4-byte Spill
+ movl %ecx, -84(%ebp) # 4-byte Spill
+ movl %edx, %edi
+ movl -124(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -80(%ebp) # 4-byte Spill
+ movl %eax, -92(%ebp) # 4-byte Spill
+ jb .LBB112_4
+# BB#3:
+ movl $0, -84(%ebp) # 4-byte Folded Spill
+ movl $0, %edi
+ movl $0, -80(%ebp) # 4-byte Folded Spill
+ movl $0, -92(%ebp) # 4-byte Folded Spill
+.LBB112_4:
+ movl %edi, -88(%ebp) # 4-byte Spill
+ movl -120(%ebp), %esi # 4-byte Reload
+ movl %esi, -60(%ebp)
+ movl -100(%ebp), %edi # 4-byte Reload
+ movl %edi, -56(%ebp)
+ movl -108(%ebp), %esi # 4-byte Reload
+ movl %esi, -52(%ebp)
+ movl %eax, -76(%ebp)
+ movl %ebx, -72(%ebp)
+ movl %edx, -68(%ebp)
+ movl %ecx, -64(%ebp)
+ sbbl %edx, %edx
+ movl -116(%ebp), %esi # 4-byte Reload
+ movl %esi, -48(%ebp)
+ movl -128(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB112_6
+# BB#5:
+ movl $0, %esi
+ movl $0, %edi
+.LBB112_6:
+ sbbl %eax, %eax
+ leal -76(%ebp), %ecx
+ movl %ecx, 8(%esp)
+ leal -60(%ebp), %ecx
+ movl %ecx, 4(%esp)
+ leal -44(%ebp), %ecx
+ movl %ecx, (%esp)
+ andl %eax, %edx
+ movl %edi, %eax
+ movl -92(%ebp), %edi # 4-byte Reload
+ addl -112(%ebp), %edi # 4-byte Folded Reload
+ adcl %eax, -80(%ebp) # 4-byte Folded Spill
+ movl -104(%ebp), %eax # 4-byte Reload
+ adcl %eax, -88(%ebp) # 4-byte Folded Spill
+ adcl %esi, -84(%ebp) # 4-byte Folded Spill
+ sbbl %esi, %esi
+ andl $1, %esi
+ andl $1, %edx
+ movl %edx, -92(%ebp) # 4-byte Spill
+ movl -96(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre4Lbmi2@PLT
+ addl -28(%ebp), %edi
+ movl -80(%ebp), %eax # 4-byte Reload
+ adcl -24(%ebp), %eax
+ movl %eax, -80(%ebp) # 4-byte Spill
+ movl -88(%ebp), %eax # 4-byte Reload
+ adcl -20(%ebp), %eax
+ movl %eax, -88(%ebp) # 4-byte Spill
+ movl -84(%ebp), %eax # 4-byte Reload
+ adcl -16(%ebp), %eax
+ movl %eax, -84(%ebp) # 4-byte Spill
+ adcl %esi, -92(%ebp) # 4-byte Folded Spill
+ movl -44(%ebp), %eax
+ movl 8(%ebp), %esi
+ subl (%esi), %eax
+ movl %eax, -116(%ebp) # 4-byte Spill
+ movl -40(%ebp), %ebx
+ sbbl 4(%esi), %ebx
+ movl -36(%ebp), %eax
+ sbbl 8(%esi), %eax
+ movl %eax, -96(%ebp) # 4-byte Spill
+ movl -32(%ebp), %edx
+ sbbl 12(%esi), %edx
+ movl 16(%esi), %eax
+ movl %eax, -100(%ebp) # 4-byte Spill
+ sbbl %eax, %edi
+ movl 20(%esi), %eax
+ movl %eax, -112(%ebp) # 4-byte Spill
+ sbbl %eax, -80(%ebp) # 4-byte Folded Spill
+ movl 24(%esi), %eax
+ movl %eax, -104(%ebp) # 4-byte Spill
+ sbbl %eax, -88(%ebp) # 4-byte Folded Spill
+ movl 28(%esi), %eax
+ movl %eax, -108(%ebp) # 4-byte Spill
+ sbbl %eax, -84(%ebp) # 4-byte Folded Spill
+ sbbl $0, -92(%ebp) # 4-byte Folded Spill
+ movl 32(%esi), %ecx
+ movl %ecx, -132(%ebp) # 4-byte Spill
+ movl -116(%ebp), %eax # 4-byte Reload
+ subl %ecx, %eax
+ movl 36(%esi), %ecx
+ movl %ecx, -136(%ebp) # 4-byte Spill
+ sbbl %ecx, %ebx
+ movl 40(%esi), %ecx
+ movl %ecx, -128(%ebp) # 4-byte Spill
+ sbbl %ecx, -96(%ebp) # 4-byte Folded Spill
+ movl 44(%esi), %ecx
+ movl %ecx, -140(%ebp) # 4-byte Spill
+ sbbl %ecx, %edx
+ movl 48(%esi), %ecx
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ sbbl %ecx, %edi
+ movl 52(%esi), %ecx
+ movl %ecx, -116(%ebp) # 4-byte Spill
+ sbbl %ecx, -80(%ebp) # 4-byte Folded Spill
+ movl 56(%esi), %ecx
+ movl %ecx, -120(%ebp) # 4-byte Spill
+ sbbl %ecx, -88(%ebp) # 4-byte Folded Spill
+ movl 60(%esi), %ecx
+ movl %ecx, -124(%ebp) # 4-byte Spill
+ sbbl %ecx, -84(%ebp) # 4-byte Folded Spill
+ sbbl $0, -92(%ebp) # 4-byte Folded Spill
+ addl -100(%ebp), %eax # 4-byte Folded Reload
+ adcl -112(%ebp), %ebx # 4-byte Folded Reload
+ movl %eax, 16(%esi)
+ movl -96(%ebp), %eax # 4-byte Reload
+ adcl -104(%ebp), %eax # 4-byte Folded Reload
+ movl %ebx, 20(%esi)
+ adcl -108(%ebp), %edx # 4-byte Folded Reload
+ movl %eax, 24(%esi)
+ adcl -132(%ebp), %edi # 4-byte Folded Reload
+ movl %edx, 28(%esi)
+ movl -80(%ebp), %eax # 4-byte Reload
+ adcl -136(%ebp), %eax # 4-byte Folded Reload
+ movl %edi, 32(%esi)
+ movl -88(%ebp), %ecx # 4-byte Reload
+ adcl -128(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%esi)
+ movl -84(%ebp), %eax # 4-byte Reload
+ adcl -140(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 40(%esi)
+ movl -92(%ebp), %ecx # 4-byte Reload
+ adcl -144(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 44(%esi)
+ movl %ecx, 48(%esi)
+ movl -116(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 52(%esi)
+ movl -120(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 56(%esi)
+ movl -124(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 60(%esi)
+ addl $156, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end112:
+ .size mcl_fpDbl_mulPre8Lbmi2, .Lfunc_end112-mcl_fpDbl_mulPre8Lbmi2
+
+ .globl mcl_fpDbl_sqrPre8Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre8Lbmi2,@function
+mcl_fpDbl_sqrPre8Lbmi2: # @mcl_fpDbl_sqrPre8Lbmi2
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $156, %esp
+ calll .L113$pb
+.L113$pb:
+ popl %ebx
+.Ltmp4:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp4-.L113$pb), %ebx
+ movl %ebx, -96(%ebp) # 4-byte Spill
+ movl 12(%ebp), %edi
+ movl %edi, 8(%esp)
+ movl %edi, 4(%esp)
+ movl 8(%ebp), %esi
+ movl %esi, (%esp)
+ calll mcl_fpDbl_mulPre4Lbmi2@PLT
+ leal 16(%edi), %eax
+ movl %eax, 8(%esp)
+ movl %eax, 4(%esp)
+ leal 32(%esi), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre4Lbmi2@PLT
+ movl (%edi), %esi
+ movl 4(%edi), %ecx
+ addl 16(%edi), %esi
+ movl %esi, -108(%ebp) # 4-byte Spill
+ adcl 20(%edi), %ecx
+ seto %al
+ lahf
+ movl %eax, %edx
+ addl %esi, %esi
+ movl %esi, -84(%ebp) # 4-byte Spill
+ movl %ecx, %esi
+ adcl %esi, %esi
+ movl %esi, -80(%ebp) # 4-byte Spill
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %esi
+ popl %eax
+ movl %esi, -88(%ebp) # 4-byte Spill
+ movl 24(%edi), %esi
+ pushl %eax
+ movl %edx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ adcl 8(%edi), %esi
+ movl 28(%edi), %edx
+ adcl 12(%edi), %edx
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ movl %edi, -100(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -104(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %ebx
+ sbbl %edi, %edi
+ movl %edi, -92(%ebp) # 4-byte Spill
+ pushl %eax
+ movl %ebx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB113_2
+# BB#1:
+ movl $0, -80(%ebp) # 4-byte Folded Spill
+ movl $0, -84(%ebp) # 4-byte Folded Spill
+.LBB113_2:
+ movl %esi, %ebx
+ movl -88(%ebp), %edi # 4-byte Reload
+ movl %edi, %eax
+ addb $127, %al
+ sahf
+ adcl %ebx, %ebx
+ movl %edx, %edi
+ adcl %edi, %edi
+ movl -104(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB113_4
+# BB#3:
+ xorl %edi, %edi
+ xorl %ebx, %ebx
+.LBB113_4:
+ movl %ebx, -88(%ebp) # 4-byte Spill
+ movl -108(%ebp), %eax # 4-byte Reload
+ movl %eax, -60(%ebp)
+ movl %ecx, -56(%ebp)
+ movl %esi, -52(%ebp)
+ movl %edx, -48(%ebp)
+ movl %eax, -76(%ebp)
+ movl %ecx, -72(%ebp)
+ movl %esi, -68(%ebp)
+ movl %edx, -64(%ebp)
+ movl -100(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB113_5
+# BB#6:
+ movl $0, -100(%ebp) # 4-byte Folded Spill
+ jmp .LBB113_7
+.LBB113_5:
+ shrl $31, %edx
+ movl %edx, -100(%ebp) # 4-byte Spill
+.LBB113_7:
+ leal -76(%ebp), %eax
+ movl %eax, 8(%esp)
+ leal -60(%ebp), %eax
+ movl %eax, 4(%esp)
+ leal -44(%ebp), %eax
+ movl %eax, (%esp)
+ movl -92(%ebp), %esi # 4-byte Reload
+ andl $1, %esi
+ movl -96(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre4Lbmi2@PLT
+ movl -84(%ebp), %eax # 4-byte Reload
+ addl -28(%ebp), %eax
+ movl %eax, -84(%ebp) # 4-byte Spill
+ movl -80(%ebp), %eax # 4-byte Reload
+ adcl -24(%ebp), %eax
+ movl %eax, -80(%ebp) # 4-byte Spill
+ movl -88(%ebp), %eax # 4-byte Reload
+ adcl -20(%ebp), %eax
+ movl %eax, -88(%ebp) # 4-byte Spill
+ adcl -16(%ebp), %edi
+ movl %edi, -92(%ebp) # 4-byte Spill
+ adcl -100(%ebp), %esi # 4-byte Folded Reload
+ movl -44(%ebp), %eax
+ movl 8(%ebp), %edi
+ subl (%edi), %eax
+ movl %eax, -116(%ebp) # 4-byte Spill
+ movl -40(%ebp), %ebx
+ sbbl 4(%edi), %ebx
+ movl -36(%ebp), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, -96(%ebp) # 4-byte Spill
+ movl -32(%ebp), %edx
+ sbbl 12(%edi), %edx
+ movl 16(%edi), %eax
+ movl %eax, -100(%ebp) # 4-byte Spill
+ sbbl %eax, -84(%ebp) # 4-byte Folded Spill
+ movl 20(%edi), %eax
+ movl %eax, -112(%ebp) # 4-byte Spill
+ sbbl %eax, -80(%ebp) # 4-byte Folded Spill
+ movl 24(%edi), %eax
+ movl %eax, -104(%ebp) # 4-byte Spill
+ sbbl %eax, -88(%ebp) # 4-byte Folded Spill
+ movl 28(%edi), %eax
+ movl %eax, -108(%ebp) # 4-byte Spill
+ sbbl %eax, -92(%ebp) # 4-byte Folded Spill
+ sbbl $0, %esi
+ movl 32(%edi), %ecx
+ movl %ecx, -132(%ebp) # 4-byte Spill
+ movl -116(%ebp), %eax # 4-byte Reload
+ subl %ecx, %eax
+ movl 36(%edi), %ecx
+ movl %ecx, -136(%ebp) # 4-byte Spill
+ sbbl %ecx, %ebx
+ movl 40(%edi), %ecx
+ movl %ecx, -128(%ebp) # 4-byte Spill
+ sbbl %ecx, -96(%ebp) # 4-byte Folded Spill
+ movl 44(%edi), %ecx
+ movl %ecx, -140(%ebp) # 4-byte Spill
+ sbbl %ecx, %edx
+ movl 48(%edi), %ecx
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ sbbl %ecx, -84(%ebp) # 4-byte Folded Spill
+ movl 52(%edi), %ecx
+ movl %ecx, -116(%ebp) # 4-byte Spill
+ sbbl %ecx, -80(%ebp) # 4-byte Folded Spill
+ movl 56(%edi), %ecx
+ movl %ecx, -120(%ebp) # 4-byte Spill
+ sbbl %ecx, -88(%ebp) # 4-byte Folded Spill
+ movl 60(%edi), %ecx
+ movl %ecx, -124(%ebp) # 4-byte Spill
+ sbbl %ecx, -92(%ebp) # 4-byte Folded Spill
+ sbbl $0, %esi
+ addl -100(%ebp), %eax # 4-byte Folded Reload
+ adcl -112(%ebp), %ebx # 4-byte Folded Reload
+ movl %eax, 16(%edi)
+ movl -96(%ebp), %eax # 4-byte Reload
+ adcl -104(%ebp), %eax # 4-byte Folded Reload
+ movl %ebx, 20(%edi)
+ adcl -108(%ebp), %edx # 4-byte Folded Reload
+ movl %eax, 24(%edi)
+ movl -84(%ebp), %eax # 4-byte Reload
+ adcl -132(%ebp), %eax # 4-byte Folded Reload
+ movl %edx, 28(%edi)
+ movl -80(%ebp), %ecx # 4-byte Reload
+ adcl -136(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 32(%edi)
+ movl -88(%ebp), %eax # 4-byte Reload
+ adcl -128(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 36(%edi)
+ movl -92(%ebp), %ecx # 4-byte Reload
+ adcl -140(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 40(%edi)
+ adcl -144(%ebp), %esi # 4-byte Folded Reload
+ movl %ecx, 44(%edi)
+ movl %esi, 48(%edi)
+ movl -116(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 52(%edi)
+ movl -120(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 56(%edi)
+ movl -124(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 60(%edi)
+ addl $156, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end113:
+ .size mcl_fpDbl_sqrPre8Lbmi2, .Lfunc_end113-mcl_fpDbl_sqrPre8Lbmi2
+
+ .globl mcl_fp_mont8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont8Lbmi2,@function
+mcl_fp_mont8Lbmi2: # @mcl_fp_mont8Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $700, %esp # imm = 0x2BC
+ calll .L114$pb
+.L114$pb:
+ popl %ebx
+.Ltmp5:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp5-.L114$pb), %ebx
+ movl 732(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 664(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 664(%esp), %ebp
+ movl 668(%esp), %edi
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 696(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 692(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 688(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 684(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 680(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 676(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 672(%esp), %esi
+ movl %eax, (%esp)
+ leal 624(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ addl 624(%esp), %ebp
+ adcl 628(%esp), %edi
+ adcl 632(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 656(%esp), %ebp
+ sbbl %eax, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 584(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 60(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 584(%esp), %edi
+ adcl 588(%esp), %esi
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 592(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 596(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 600(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 604(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 608(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 612(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ adcl 616(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %edi, %eax
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 544(%esp), %ecx
+ movl 732(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv256x32
+ andl $1, %ebp
+ addl 544(%esp), %edi
+ adcl 548(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 568(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl 728(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ addl 504(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 524(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 536(%esp), %ebp
+ sbbl %edi, %edi
+ movl %esi, %eax
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 464(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ movl %edi, %eax
+ andl $1, %eax
+ addl 464(%esp), %esi
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 468(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 472(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 476(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 480(%esp), %edi
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 484(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 488(%esp), %esi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 492(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 496(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 724(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv256x32
+ movl 40(%esp), %ecx # 4-byte Reload
+ addl 424(%esp), %ecx
+ movl 28(%esp), %ebp # 4-byte Reload
+ adcl 428(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 436(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 444(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 448(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 384(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ movl 40(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 384(%esp), %esi
+ adcl 388(%esp), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 392(%esp), %esi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 396(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 400(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 404(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 408(%esp), %edi
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 412(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 416(%esp), %ebp
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 344(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 28(%esp), %ecx # 4-byte Reload
+ addl 344(%esp), %ecx
+ adcl 348(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 352(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 364(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 372(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ movl %ebp, %eax
+ andl $1, %eax
+ addl 304(%esp), %edi
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 308(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 312(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 316(%esp), %ebp
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 320(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 324(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 328(%esp), %esi
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 332(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 336(%esp), %edi
+ adcl $0, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 264(%esp), %ecx
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 272(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 276(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 284(%esp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 292(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ movl %edi, %eax
+ andl $1, %eax
+ addl 224(%esp), %esi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 228(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 232(%esp), %esi
+ adcl 236(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 240(%esp), %edi
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 244(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 248(%esp), %ebp
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 252(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 256(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 184(%esp), %ecx
+ adcl 188(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 196(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 204(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %ebp # 4-byte Reload
+ adcl 212(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 144(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ movl %edi, %ecx
+ andl $1, %ecx
+ addl 144(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 152(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 32(%esp), %edi # 4-byte Reload
+ adcl 160(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 172(%esp), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %ebp # 4-byte Reload
+ adcl 176(%esp), %ebp
+ adcl $0, %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 104(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 52(%esp), %ecx # 4-byte Reload
+ addl 104(%esp), %ecx
+ adcl 108(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 116(%esp), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 120(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 124(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %edi # 4-byte Reload
+ adcl 128(%esp), %edi
+ adcl 132(%esp), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 136(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl 24(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %ebp
+ movl %eax, (%esp)
+ leal 64(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ andl $1, %esi
+ addl 64(%esp), %ebp
+ movl 32(%esp), %ebx # 4-byte Reload
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 72(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 76(%esp), %ebx
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 80(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 84(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 88(%esp), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 92(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %eax, %edx
+ movl 732(%esp), %ebp
+ subl (%ebp), %edx
+ movl %ecx, %eax
+ sbbl 4(%ebp), %eax
+ movl %ebx, %ecx
+ sbbl 8(%ebp), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ sbbl 12(%ebp), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ sbbl 16(%ebp), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ sbbl 20(%ebp), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ sbbl 24(%ebp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ sbbl 28(%ebp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ sbbl $0, %esi
+ andl $1, %esi
+ movl %esi, %ecx
+ jne .LBB114_2
+# BB#1:
+ movl %edx, %ebp
+.LBB114_2:
+ movl 720(%esp), %edx
+ movl %ebp, (%edx)
+ testb %cl, %cl
+ movl 60(%esp), %ebp # 4-byte Reload
+ jne .LBB114_4
+# BB#3:
+ movl %eax, %ebp
+.LBB114_4:
+ movl %ebp, 4(%edx)
+ jne .LBB114_6
+# BB#5:
+ movl 12(%esp), %ebx # 4-byte Reload
+.LBB114_6:
+ movl %ebx, 8(%edx)
+ movl 28(%esp), %eax # 4-byte Reload
+ jne .LBB114_8
+# BB#7:
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%esp) # 4-byte Spill
+.LBB114_8:
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%edx)
+ movl 40(%esp), %edi # 4-byte Reload
+ jne .LBB114_10
+# BB#9:
+ movl 20(%esp), %edi # 4-byte Reload
+.LBB114_10:
+ movl %edi, 16(%edx)
+ jne .LBB114_12
+# BB#11:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB114_12:
+ movl %eax, 20(%edx)
+ movl 36(%esp), %eax # 4-byte Reload
+ jne .LBB114_14
+# BB#13:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB114_14:
+ movl %eax, 24(%edx)
+ movl 48(%esp), %eax # 4-byte Reload
+ jne .LBB114_16
+# BB#15:
+ movl 52(%esp), %eax # 4-byte Reload
+.LBB114_16:
+ movl %eax, 28(%edx)
+ addl $700, %esp # imm = 0x2BC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end114:
+ .size mcl_fp_mont8Lbmi2, .Lfunc_end114-mcl_fp_mont8Lbmi2
+
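+# mcl_fp_montNF8Lbmi2 (below): judging by the symbol name, likely an 8-limb
+# (8 x 32-bit) Montgomery multiplication on the BMI2 code path; the NF variant
+# selects the final result by the sign of the trial subtraction of the modulus
+# (the js-guarded selects at the end) rather than by a saved carry word.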
+ .globl mcl_fp_montNF8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF8Lbmi2,@function
+mcl_fp_montNF8Lbmi2: # @mcl_fp_montNF8Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $700, %esp # imm = 0x2BC
+ calll .L115$pb
+.L115$pb:
+ popl %ebx
+.Ltmp6:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp6-.L115$pb), %ebx
+ movl 732(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 664(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 664(%esp), %ebp
+ movl 668(%esp), %edi
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 696(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 692(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 688(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 684(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 680(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 676(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 672(%esp), %esi
+ movl %eax, (%esp)
+ leal 624(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ addl 624(%esp), %ebp
+ adcl 628(%esp), %edi
+ adcl 632(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %ebp # 4-byte Reload
+ adcl 640(%esp), %ebp
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 584(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 616(%esp), %ecx
+ addl 584(%esp), %edi
+ adcl 588(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 596(%esp), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 604(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 544(%esp), %ecx
+ movl 732(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv256x32
+ addl 544(%esp), %edi
+ adcl 548(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 564(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 572(%esp), %edi
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 576(%esp), %ebp
+ movl 728(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 536(%esp), %ecx
+ addl 504(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 528(%esp), %edi
+ adcl 532(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl %esi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 464(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ addl 464(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %ebp # 4-byte Reload
+ adcl 472(%esp), %ebp
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 484(%esp), %esi
+ adcl 488(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %edi # 4-byte Reload
+ adcl 496(%esp), %edi
+ movl 728(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 456(%esp), %eax
+ movl 44(%esp), %edx # 4-byte Reload
+ addl 424(%esp), %edx
+ adcl 428(%esp), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 432(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 436(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 440(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 444(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 448(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 452(%esp), %edi
+ movl %edi, %ebp
+ movl %eax, %edi
+ adcl $0, %edi
+ movl %edx, %eax
+ movl %edx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 384(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ addl 384(%esp), %esi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 396(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 412(%esp), %ebp
+ adcl 416(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 344(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 376(%esp), %edx
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 344(%esp), %ecx
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 352(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 360(%esp), %esi
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 364(%esp), %edi
+ adcl 368(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ addl 304(%esp), %ebp
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 308(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 320(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ adcl 324(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 328(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 336(%esp), %edi
+ movl 728(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 724(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv256x32
+ movl 296(%esp), %edx
+ movl %ebp, %ecx
+ addl 264(%esp), %ecx
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 280(%esp), %ebp
+ adcl 284(%esp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 292(%esp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl %edx, %edi
+ adcl $0, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ addl 224(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 236(%esp), %esi
+ adcl 240(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 256(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 216(%esp), %ebp
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 184(%esp), %ecx
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 192(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 196(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 144(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ addl 144(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 152(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 156(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 160(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 176(%esp), %ebp
+ movl 728(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 104(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 136(%esp), %edi
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 104(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 116(%esp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 120(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 124(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 128(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 132(%esp), %ebp
+ adcl $0, %edi
+ movl 28(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 64(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ addl 64(%esp), %esi
+ movl 32(%esp), %esi # 4-byte Reload
+ movl 56(%esp), %eax # 4-byte Reload
+ movl 44(%esp), %ebx # 4-byte Reload
+ adcl 68(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 72(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 76(%esp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 80(%esp), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 84(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 92(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ adcl 96(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 732(%esp), %eax
+ subl (%eax), %edx
+ sbbl 4(%eax), %ecx
+ sbbl 8(%eax), %esi
+ sbbl 12(%eax), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 36(%esp), %ebx # 4-byte Reload
+ sbbl 16(%eax), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 40(%esp), %ebx # 4-byte Reload
+ sbbl 20(%eax), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ sbbl 24(%eax), %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ sbbl 28(%eax), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ testl %edi, %edi
+ js .LBB115_2
+# BB#1:
+ movl %edx, 56(%esp) # 4-byte Spill
+.LBB115_2:
+ movl 720(%esp), %edx
+ movl 56(%esp), %eax # 4-byte Reload
+ movl %eax, (%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB115_4
+# BB#3:
+ movl %ecx, %eax
+.LBB115_4:
+ movl %eax, 4(%edx)
+ js .LBB115_6
+# BB#5:
+ movl %esi, 32(%esp) # 4-byte Spill
+.LBB115_6:
+ movl 32(%esp), %eax # 4-byte Reload
+ movl %eax, 8(%edx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl 60(%esp), %eax # 4-byte Reload
+ movl 48(%esp), %ecx # 4-byte Reload
+ js .LBB115_8
+# BB#7:
+ movl 12(%esp), %esi # 4-byte Reload
+ movl %esi, 44(%esp) # 4-byte Spill
+.LBB115_8:
+ movl 44(%esp), %esi # 4-byte Reload
+ movl %esi, 12(%edx)
+ js .LBB115_10
+# BB#9:
+ movl 16(%esp), %edi # 4-byte Reload
+.LBB115_10:
+ movl %edi, 16(%edx)
+ js .LBB115_12
+# BB#11:
+ movl 20(%esp), %ebp # 4-byte Reload
+.LBB115_12:
+ movl %ebp, 20(%edx)
+ js .LBB115_14
+# BB#13:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB115_14:
+ movl %eax, 24(%edx)
+ js .LBB115_16
+# BB#15:
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB115_16:
+ movl %ecx, 28(%edx)
+ addl $700, %esp # imm = 0x2BC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end115:
+ .size mcl_fp_montNF8Lbmi2, .Lfunc_end115-mcl_fp_montNF8Lbmi2
+
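+# mcl_fp_montRed8Lbmi2 (below): judging by the symbol name, likely a Montgomery
+# reduction of a 16-limb (double-width) input down to 8 limbs, folding in the
+# -4(modulus) constant each round and ending with a conditional subtraction of
+# the modulus.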
+ .globl mcl_fp_montRed8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed8Lbmi2,@function
+mcl_fp_montRed8Lbmi2: # @mcl_fp_montRed8Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $428, %esp # imm = 0x1AC
+ calll .L116$pb
+.L116$pb:
+ popl %ebx
+.Ltmp7:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp7-.L116$pb), %ebx
+ movl 456(%esp), %edx
+ movl -4(%edx), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 452(%esp), %eax
+ movl (%eax), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 4(%eax), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl %esi, %ecx
+ imull %edi, %ecx
+ movl 60(%eax), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 56(%eax), %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 52(%eax), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 48(%eax), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 44(%eax), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 40(%eax), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 36(%eax), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ movl 32(%eax), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 28(%eax), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 24(%eax), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 20(%eax), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 16(%eax), %ebp
+ movl 12(%eax), %edi
+ movl 8(%eax), %esi
+ movl (%edx), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%edx), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 24(%edx), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 20(%edx), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 16(%edx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 12(%edx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 8(%edx), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 4(%edx), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ecx, (%esp)
+ leal 392(%esp), %ecx
+ calll .LmulPv256x32
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 392(%esp), %eax
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 396(%esp), %ecx
+ adcl 400(%esp), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 404(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl 408(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ sbbl %eax, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 352(%esp), %ecx
+ movl 456(%esp), %edx
+ calll .LmulPv256x32
+ movl 64(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 352(%esp), %edi
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 356(%esp), %edx
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 360(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 364(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 368(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 372(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 376(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 380(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 384(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 456(%esp), %edx
+ calll .LmulPv256x32
+ addl 312(%esp), %edi
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 316(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, 64(%esp) # 4-byte Folded Spill
+ movl %edi, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 456(%esp), %edx
+ calll .LmulPv256x32
+ addl 272(%esp), %edi
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 276(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 64(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 232(%esp), %ecx
+ movl 456(%esp), %edx
+ calll .LmulPv256x32
+ addl 232(%esp), %ebp
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 236(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %ebp # 4-byte Reload
+ adcl 252(%esp), %ebp
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 192(%esp), %ecx
+ movl 456(%esp), %edx
+ calll .LmulPv256x32
+ addl 192(%esp), %edi
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 196(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 204(%esp), %edi
+ adcl 208(%esp), %ebp
+ movl %ebp, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %ebp # 4-byte Reload
+ adcl 212(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 152(%esp), %ecx
+ movl 456(%esp), %edx
+ calll .LmulPv256x32
+ addl 152(%esp), %esi
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 156(%esp), %ecx
+ adcl 160(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 168(%esp), %ebp
+ movl %ebp, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 172(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 180(%esp), %ebp
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, 64(%esp) # 4-byte Folded Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 112(%esp), %ecx
+ movl 456(%esp), %edx
+ calll .LmulPv256x32
+ addl 112(%esp), %esi
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 116(%esp), %ecx
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 120(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 124(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 128(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl %edi, %ebx
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 132(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl %eax, %esi
+ adcl 136(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 140(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 144(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl %ecx, %edx
+ subl 24(%esp), %edx # 4-byte Folded Reload
+ movl 108(%esp), %eax # 4-byte Reload
+ sbbl 20(%esp), %eax # 4-byte Folded Reload
+ movl 104(%esp), %ebp # 4-byte Reload
+ sbbl 28(%esp), %ebp # 4-byte Folded Reload
+ sbbl 32(%esp), %ebx # 4-byte Folded Reload
+ sbbl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ sbbl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ sbbl 44(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ sbbl 48(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 96(%esp) # 4-byte Spill
+ sbbl $0, %edi
+ andl $1, %edi
+ jne .LBB116_2
+# BB#1:
+ movl %edx, %ecx
+.LBB116_2:
+ movl 448(%esp), %edx
+ movl %ecx, (%edx)
+ movl %edi, %ecx
+ testb %cl, %cl
+ jne .LBB116_4
+# BB#3:
+ movl %eax, 108(%esp) # 4-byte Spill
+.LBB116_4:
+ movl 108(%esp), %eax # 4-byte Reload
+ movl %eax, 4(%edx)
+ movl 104(%esp), %eax # 4-byte Reload
+ jne .LBB116_6
+# BB#5:
+ movl %ebp, %eax
+.LBB116_6:
+ movl %eax, 8(%edx)
+ movl 84(%esp), %eax # 4-byte Reload
+ movl 76(%esp), %ebp # 4-byte Reload
+ jne .LBB116_8
+# BB#7:
+ movl %ebx, %ebp
+.LBB116_8:
+ movl %ebp, 12(%edx)
+ movl 100(%esp), %ebx # 4-byte Reload
+ jne .LBB116_10
+# BB#9:
+ movl 68(%esp), %ebx # 4-byte Reload
+.LBB116_10:
+ movl %ebx, 16(%edx)
+ movl 80(%esp), %edi # 4-byte Reload
+ jne .LBB116_12
+# BB#11:
+ movl 72(%esp), %edi # 4-byte Reload
+.LBB116_12:
+ movl %edi, 20(%edx)
+ movl 88(%esp), %esi # 4-byte Reload
+ jne .LBB116_14
+# BB#13:
+ movl 92(%esp), %esi # 4-byte Reload
+.LBB116_14:
+ movl %esi, 24(%edx)
+ jne .LBB116_16
+# BB#15:
+ movl 96(%esp), %eax # 4-byte Reload
+.LBB116_16:
+ movl %eax, 28(%edx)
+ addl $428, %esp # imm = 0x1AC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end116:
+ .size mcl_fp_montRed8Lbmi2, .Lfunc_end116-mcl_fp_montRed8Lbmi2
+
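+# mcl_fp_addPre8Lbmi2 (below): plain 8-limb addition without modular reduction;
+# the carry out is returned in %eax.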
+ .globl mcl_fp_addPre8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre8Lbmi2,@function
+mcl_fp_addPre8Lbmi2: # @mcl_fp_addPre8Lbmi2
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ movl 20(%esp), %esi
+ addl (%esi), %ecx
+ adcl 4(%esi), %edx
+ movl 8(%eax), %edi
+ adcl 8(%esi), %edi
+ movl 16(%esp), %ebx
+ movl %ecx, (%ebx)
+ movl 12(%esi), %ecx
+ movl %edx, 4(%ebx)
+ movl 16(%esi), %edx
+ adcl 12(%eax), %ecx
+ adcl 16(%eax), %edx
+ movl %edi, 8(%ebx)
+ movl 20(%eax), %edi
+ movl %ecx, 12(%ebx)
+ movl 20(%esi), %ecx
+ adcl %edi, %ecx
+ movl 24(%eax), %edi
+ movl %edx, 16(%ebx)
+ movl 24(%esi), %edx
+ adcl %edi, %edx
+ movl %ecx, 20(%ebx)
+ movl %edx, 24(%ebx)
+ movl 28(%eax), %eax
+ movl 28(%esi), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 28(%ebx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end117:
+ .size mcl_fp_addPre8Lbmi2, .Lfunc_end117-mcl_fp_addPre8Lbmi2
+
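+# mcl_fp_subPre8Lbmi2 (below): plain 8-limb subtraction without modular
+# reduction; the borrow out is returned in %eax.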
+ .globl mcl_fp_subPre8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre8Lbmi2,@function
+mcl_fp_subPre8Lbmi2: # @mcl_fp_subPre8Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %edx
+ movl 4(%ecx), %esi
+ xorl %eax, %eax
+ movl 28(%esp), %edi
+ subl (%edi), %edx
+ sbbl 4(%edi), %esi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edi), %ebx
+ movl 20(%esp), %ebp
+ movl %edx, (%ebp)
+ movl 12(%ecx), %edx
+ sbbl 12(%edi), %edx
+ movl %esi, 4(%ebp)
+ movl 16(%ecx), %esi
+ sbbl 16(%edi), %esi
+ movl %ebx, 8(%ebp)
+ movl 20(%edi), %ebx
+ movl %edx, 12(%ebp)
+ movl 20(%ecx), %edx
+ sbbl %ebx, %edx
+ movl 24(%edi), %ebx
+ movl %esi, 16(%ebp)
+ movl 24(%ecx), %esi
+ sbbl %ebx, %esi
+ movl %edx, 20(%ebp)
+ movl %esi, 24(%ebp)
+ movl 28(%edi), %edx
+ movl 28(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 28(%ebp)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end118:
+ .size mcl_fp_subPre8Lbmi2, .Lfunc_end118-mcl_fp_subPre8Lbmi2
+
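+# mcl_fp_shr1_8Lbmi2 (below): shifts an 8-limb value right by one bit,
+# propagating bits between adjacent limbs with shrdl.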
+ .globl mcl_fp_shr1_8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_8Lbmi2,@function
+mcl_fp_shr1_8Lbmi2: # @mcl_fp_shr1_8Lbmi2
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl 8(%esp), %esi
+ movl %ecx, (%esi)
+ movl 8(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 4(%esi)
+ movl 12(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 8(%esi)
+ movl 16(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 12(%esi)
+ movl 20(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 16(%esi)
+ movl 24(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 20(%esi)
+ movl 28(%eax), %eax
+ shrdl $1, %eax, %ecx
+ movl %ecx, 24(%esi)
+ shrl %eax
+ movl %eax, 28(%esi)
+ popl %esi
+ retl
+.Lfunc_end119:
+ .size mcl_fp_shr1_8Lbmi2, .Lfunc_end119-mcl_fp_shr1_8Lbmi2
+
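+# mcl_fp_add8Lbmi2 (below): appears to be a modular 8-limb addition; the raw
+# sum is stored first, the modulus is then subtracted, and the reduced value
+# overwrites the output on the no-carry path.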
+ .globl mcl_fp_add8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add8Lbmi2,@function
+mcl_fp_add8Lbmi2: # @mcl_fp_add8Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $20, %esp
+ movl 48(%esp), %edi
+ movl (%edi), %ecx
+ movl 4(%edi), %eax
+ movl 44(%esp), %edx
+ addl (%edx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ adcl 4(%edx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 8(%edi), %eax
+ adcl 8(%edx), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 12(%edx), %esi
+ movl 16(%edx), %eax
+ adcl 12(%edi), %esi
+ adcl 16(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ adcl 20(%edi), %ecx
+ movl 24(%edx), %ebx
+ adcl 24(%edi), %ebx
+ movl 28(%edx), %edi
+ movl 48(%esp), %edx
+ adcl 28(%edx), %edi
+ movl 40(%esp), %edx
+ movl %ebp, (%edx)
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%edx)
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%edx)
+ movl %esi, 12(%edx)
+ movl %eax, 16(%edx)
+ movl %ecx, 20(%edx)
+ movl %ebx, 24(%edx)
+ movl %edi, 28(%edx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl 52(%esp), %edx
+ movl 8(%esp), %ebp # 4-byte Reload
+ subl (%edx), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl 52(%esp), %edx
+ sbbl 4(%edx), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl 52(%esp), %edx
+ sbbl 8(%edx), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp
+ sbbl 12(%ebp), %esi
+ movl %esi, (%esp) # 4-byte Spill
+ movl 4(%esp), %edx # 4-byte Reload
+ sbbl 16(%ebp), %edx
+ movl %edx, %esi
+ sbbl 20(%ebp), %ecx
+ sbbl 24(%ebp), %ebx
+ sbbl 28(%ebp), %edi
+ sbbl $0, %eax
+ testb $1, %al
+ jne .LBB120_2
+# BB#1: # %nocarry
+ movl 8(%esp), %edx # 4-byte Reload
+ movl 40(%esp), %ebp
+ movl %edx, (%ebp)
+ movl 16(%esp), %edx # 4-byte Reload
+ movl %edx, 4(%ebp)
+ movl 12(%esp), %edx # 4-byte Reload
+ movl %edx, 8(%ebp)
+ movl (%esp), %eax # 4-byte Reload
+ movl %eax, 12(%ebp)
+ movl %esi, 16(%ebp)
+ movl %ecx, 20(%ebp)
+ movl %ebx, 24(%ebp)
+ movl %edi, 28(%ebp)
+.LBB120_2: # %carry
+ addl $20, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end120:
+ .size mcl_fp_add8Lbmi2, .Lfunc_end120-mcl_fp_add8Lbmi2
+
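+# mcl_fp_addNF8Lbmi2 (below): appears to be the NF variant of the modular
+# 8-limb addition; each output word is chosen between the raw sum and the sum
+# minus the modulus based on the sign of the trial subtraction.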
+ .globl mcl_fp_addNF8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF8Lbmi2,@function
+mcl_fp_addNF8Lbmi2: # @mcl_fp_addNF8Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $56, %esp
+ movl 84(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edi
+ movl 80(%esp), %ebx
+ addl (%ebx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 4(%ebx), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 28(%eax), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 24(%eax), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 20(%eax), %ebp
+ movl 16(%eax), %esi
+ movl 12(%eax), %edx
+ movl 8(%eax), %ecx
+ adcl 8(%ebx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl 12(%ebx), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl 16(%ebx), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl 20(%ebx), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 24(%ebx), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 28(%ebx), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 88(%esp), %ebx
+ movl 24(%esp), %ebp # 4-byte Reload
+ movl %ebp, %eax
+ subl (%ebx), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ sbbl 4(%ebx), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ sbbl 8(%ebx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 12(%ebx), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ sbbl 16(%ebx), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ sbbl 20(%ebx), %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ sbbl 24(%ebx), %ebp
+ movl 48(%esp), %esi # 4-byte Reload
+ sbbl 28(%ebx), %esi
+ testl %esi, %esi
+ js .LBB121_2
+# BB#1:
+ movl (%esp), %eax # 4-byte Reload
+.LBB121_2:
+ movl 76(%esp), %ebx
+ movl %eax, (%ebx)
+ movl 32(%esp), %eax # 4-byte Reload
+ js .LBB121_4
+# BB#3:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB121_4:
+ movl %eax, 4(%ebx)
+ movl 40(%esp), %edx # 4-byte Reload
+ movl 28(%esp), %edi # 4-byte Reload
+ js .LBB121_6
+# BB#5:
+ movl 8(%esp), %edi # 4-byte Reload
+.LBB121_6:
+ movl %edi, 8(%ebx)
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl 36(%esp), %eax # 4-byte Reload
+ js .LBB121_8
+# BB#7:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB121_8:
+ movl %eax, 12(%ebx)
+ movl 48(%esp), %edi # 4-byte Reload
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB121_10
+# BB#9:
+ movl 16(%esp), %edx # 4-byte Reload
+.LBB121_10:
+ movl %edx, 16(%ebx)
+ js .LBB121_12
+# BB#11:
+ movl 20(%esp), %ecx # 4-byte Reload
+.LBB121_12:
+ movl %ecx, 20(%ebx)
+ js .LBB121_14
+# BB#13:
+ movl %ebp, %eax
+.LBB121_14:
+ movl %eax, 24(%ebx)
+ js .LBB121_16
+# BB#15:
+ movl %esi, %edi
+.LBB121_16:
+ movl %edi, 28(%ebx)
+ addl $56, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end121:
+ .size mcl_fp_addNF8Lbmi2, .Lfunc_end121-mcl_fp_addNF8Lbmi2
+
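+# mcl_fp_sub8Lbmi2 (below): appears to be a modular 8-limb subtraction; if the
+# subtraction borrows, the modulus is added back on the carry path.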
+ .globl mcl_fp_sub8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub8Lbmi2,@function
+mcl_fp_sub8Lbmi2: # @mcl_fp_sub8Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $28, %esp
+ movl 52(%esp), %esi
+ movl (%esi), %ecx
+ movl 4(%esi), %eax
+ xorl %ebx, %ebx
+ movl 56(%esp), %ebp
+ subl (%ebp), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ sbbl 4(%ebp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 8(%esi), %edx
+ sbbl 8(%ebp), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ sbbl 12(%ebp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 16(%esi), %ecx
+ sbbl 16(%ebp), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ sbbl 20(%ebp), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 24(%esi), %edi
+ sbbl 24(%ebp), %edi
+ movl 28(%esi), %esi
+ sbbl 28(%ebp), %esi
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 48(%esp), %ebx
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, (%ebx)
+ movl 20(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%ebx)
+ movl %edx, 8(%ebx)
+ movl 24(%esp), %edx # 4-byte Reload
+ movl %edx, 12(%ebx)
+ movl %ecx, 16(%ebx)
+ movl %eax, 20(%ebx)
+ movl %edi, 24(%ebx)
+ movl %esi, 28(%ebx)
+ je .LBB122_2
+# BB#1: # %carry
+ movl %esi, (%esp) # 4-byte Spill
+ movl 60(%esp), %esi
+ movl 16(%esp), %ecx # 4-byte Reload
+ addl (%esi), %ecx
+ movl %ecx, (%ebx)
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 8(%esp), %ebp # 4-byte Reload
+ adcl 8(%esi), %ebp
+ movl 12(%esi), %eax
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %ebp, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl %eax, 20(%ebx)
+ movl 24(%esi), %eax
+ adcl %edi, %eax
+ movl %eax, 24(%ebx)
+ movl 28(%esi), %eax
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+.LBB122_2: # %nocarry
+ addl $28, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end122:
+ .size mcl_fp_sub8Lbmi2, .Lfunc_end122-mcl_fp_sub8Lbmi2
+
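+# mcl_fp_subNF8Lbmi2 (below): appears to be the NF variant of the modular
+# 8-limb subtraction; a sign mask built with sarl $31 decides, branch-free,
+# whether the modulus is added back.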
+ .globl mcl_fp_subNF8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF8Lbmi2,@function
+mcl_fp_subNF8Lbmi2: # @mcl_fp_subNF8Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $40, %esp
+ movl 64(%esp), %eax
+ movl (%eax), %esi
+ movl 4(%eax), %edx
+ movl 68(%esp), %ecx
+ subl (%ecx), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ sbbl 4(%ecx), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 28(%eax), %edx
+ movl 24(%eax), %esi
+ movl 20(%eax), %edi
+ movl 16(%eax), %ebx
+ movl 12(%eax), %ebp
+ movl 8(%eax), %eax
+ sbbl 8(%ecx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ sbbl 12(%ecx), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ sbbl 16(%ecx), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ sbbl 20(%ecx), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ sbbl 24(%ecx), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl %edx, %edi
+ sbbl 28(%ecx), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ sarl $31, %edi
+ movl 72(%esp), %ebp
+ movl 28(%ebp), %eax
+ andl %edi, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 24(%ebp), %eax
+ andl %edi, %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 20(%ebp), %ebx
+ andl %edi, %ebx
+ movl 16(%ebp), %esi
+ andl %edi, %esi
+ movl 12(%ebp), %edx
+ andl %edi, %edx
+ movl 8(%ebp), %ecx
+ andl %edi, %ecx
+ movl 4(%ebp), %eax
+ andl %edi, %eax
+ andl (%ebp), %edi
+ addl 24(%esp), %edi # 4-byte Folded Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl 60(%esp), %ebp
+ movl %edi, (%ebp)
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 4(%ebp)
+ adcl 12(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 8(%ebp)
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 12(%ebp)
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, 16(%ebp)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %ebx, 20(%ebp)
+ movl %eax, 24(%ebp)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%ebp)
+ addl $40, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end123:
+ .size mcl_fp_subNF8Lbmi2, .Lfunc_end123-mcl_fp_subNF8Lbmi2
+
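+# mcl_fpDbl_add8Lbmi2 (below): appears to add two 16-limb (double-width)
+# values; the low half is written as-is and the high half is conditionally
+# reduced by the modulus.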
+ .globl mcl_fpDbl_add8Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add8Lbmi2,@function
+mcl_fpDbl_add8Lbmi2: # @mcl_fpDbl_add8Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $56, %esp
+ movl 84(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edx
+ movl 80(%esp), %ebp
+ addl (%ebp), %esi
+ adcl 4(%ebp), %edx
+ movl 8(%ecx), %edi
+ adcl 8(%ebp), %edi
+ movl 12(%ebp), %ebx
+ movl 76(%esp), %eax
+ movl %esi, (%eax)
+ movl 16(%ebp), %esi
+ adcl 12(%ecx), %ebx
+ adcl 16(%ecx), %esi
+ movl %edx, 4(%eax)
+ movl 40(%ecx), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %edi, 8(%eax)
+ movl 20(%ecx), %edx
+ movl %ebx, 12(%eax)
+ movl 20(%ebp), %edi
+ adcl %edx, %edi
+ movl 24(%ecx), %edx
+ movl %esi, 16(%eax)
+ movl 24(%ebp), %esi
+ adcl %edx, %esi
+ movl 28(%ecx), %edx
+ movl %edi, 20(%eax)
+ movl 28(%ebp), %ebx
+ adcl %edx, %ebx
+ movl 32(%ecx), %edx
+ movl %esi, 24(%eax)
+ movl 32(%ebp), %esi
+ adcl %edx, %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 36(%ecx), %edx
+ movl %ebx, 28(%eax)
+ movl 36(%ebp), %ebx
+ adcl %edx, %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 40(%ebp), %eax
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%ecx), %edx
+ movl 44(%ebp), %edi
+ adcl %edx, %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 48(%ecx), %edx
+ movl 48(%ebp), %eax
+ adcl %edx, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 52(%ecx), %edx
+ movl 52(%ebp), %esi
+ adcl %edx, %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 56(%ecx), %edx
+ movl 56(%ebp), %eax
+ adcl %edx, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%ecx), %ecx
+ movl 60(%ebp), %ebp
+ adcl %ecx, %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl 44(%esp), %eax # 4-byte Reload
+ movl 88(%esp), %edx
+ subl (%edx), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 88(%esp), %eax
+ sbbl 4(%eax), %ebx
+ movl %eax, %edx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ movl %edx, %ebx
+ sbbl 8(%ebx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %edi, %eax
+ movl 24(%esp), %edi # 4-byte Reload
+ sbbl 12(%ebx), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %edi, %eax
+ sbbl 16(%ebx), %eax
+ sbbl 20(%ebx), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ sbbl 24(%ebx), %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ sbbl 28(%ebx), %ebp
+ sbbl $0, %ecx
+ andl $1, %ecx
+ jne .LBB124_2
+# BB#1:
+ movl %eax, %edi
+.LBB124_2:
+ testb %cl, %cl
+ movl 44(%esp), %ecx # 4-byte Reload
+ jne .LBB124_4
+# BB#3:
+ movl (%esp), %ecx # 4-byte Reload
+.LBB124_4:
+ movl 76(%esp), %eax
+ movl %ecx, 32(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl 32(%esp), %edx # 4-byte Reload
+ movl 48(%esp), %esi # 4-byte Reload
+ movl 28(%esp), %ebx # 4-byte Reload
+ jne .LBB124_6
+# BB#5:
+ movl 4(%esp), %ebx # 4-byte Reload
+.LBB124_6:
+ movl %ebx, 36(%eax)
+ jne .LBB124_8
+# BB#7:
+ movl 8(%esp), %esi # 4-byte Reload
+.LBB124_8:
+ movl %esi, 40(%eax)
+ movl 36(%esp), %esi # 4-byte Reload
+ jne .LBB124_10
+# BB#9:
+ movl 12(%esp), %edx # 4-byte Reload
+.LBB124_10:
+ movl %edx, 44(%eax)
+ movl %edi, 48(%eax)
+ movl 52(%esp), %edx # 4-byte Reload
+ jne .LBB124_12
+# BB#11:
+ movl 16(%esp), %esi # 4-byte Reload
+.LBB124_12:
+ movl %esi, 52(%eax)
+ jne .LBB124_14
+# BB#13:
+ movl 20(%esp), %edx # 4-byte Reload
+.LBB124_14:
+ movl %edx, 56(%eax)
+ jne .LBB124_16
+# BB#15:
+ movl %ebp, %ecx
+.LBB124_16:
+ movl %ecx, 60(%eax)
+ addl $56, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end124:
+ .size mcl_fpDbl_add8Lbmi2, .Lfunc_end124-mcl_fpDbl_add8Lbmi2
+
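+# mcl_fpDbl_sub8Lbmi2 (below): appears to subtract two 16-limb (double-width)
+# values; the modulus is conditionally added back into the high half when the
+# subtraction borrows.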
+ .globl mcl_fpDbl_sub8Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub8Lbmi2,@function
+mcl_fpDbl_sub8Lbmi2: # @mcl_fpDbl_sub8Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $40, %esp
+ movl 64(%esp), %edi
+ movl (%edi), %eax
+ movl 4(%edi), %edx
+ movl 68(%esp), %ebx
+ subl (%ebx), %eax
+ sbbl 4(%ebx), %edx
+ movl 8(%edi), %esi
+ sbbl 8(%ebx), %esi
+ movl 60(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 12(%edi), %eax
+ sbbl 12(%ebx), %eax
+ movl %edx, 4(%ecx)
+ movl 16(%edi), %edx
+ sbbl 16(%ebx), %edx
+ movl %esi, 8(%ecx)
+ movl 20(%ebx), %esi
+ movl %eax, 12(%ecx)
+ movl 20(%edi), %eax
+ sbbl %esi, %eax
+ movl 24(%ebx), %esi
+ movl %edx, 16(%ecx)
+ movl 24(%edi), %edx
+ sbbl %esi, %edx
+ movl 28(%ebx), %esi
+ movl %eax, 20(%ecx)
+ movl 28(%edi), %eax
+ sbbl %esi, %eax
+ movl 32(%ebx), %esi
+ movl %edx, 24(%ecx)
+ movl 32(%edi), %edx
+ sbbl %esi, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 36(%ebx), %edx
+ movl %eax, 28(%ecx)
+ movl 36(%edi), %eax
+ sbbl %edx, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 40(%ebx), %eax
+ movl 40(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 44(%ebx), %eax
+ movl 44(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 48(%ebx), %eax
+ movl 48(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 52(%ebx), %eax
+ movl 52(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 56(%ebx), %eax
+ movl 56(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 60(%ebx), %eax
+ movl 60(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 72(%esp), %ebx
+ jne .LBB125_1
+# BB#2:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB125_3
+.LBB125_1:
+ movl 28(%ebx), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+.LBB125_3:
+ testb %al, %al
+ jne .LBB125_4
+# BB#5:
+ movl $0, %ebp
+ movl $0, %eax
+ jmp .LBB125_6
+.LBB125_4:
+ movl (%ebx), %eax
+ movl 4(%ebx), %ebp
+.LBB125_6:
+ jne .LBB125_7
+# BB#8:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB125_9
+.LBB125_7:
+ movl 24(%ebx), %edx
+ movl %edx, (%esp) # 4-byte Spill
+.LBB125_9:
+ jne .LBB125_10
+# BB#11:
+ movl $0, %edx
+ jmp .LBB125_12
+.LBB125_10:
+ movl 20(%ebx), %edx
+.LBB125_12:
+ jne .LBB125_13
+# BB#14:
+ movl $0, %esi
+ jmp .LBB125_15
+.LBB125_13:
+ movl 16(%ebx), %esi
+.LBB125_15:
+ jne .LBB125_16
+# BB#17:
+ movl $0, %edi
+ jmp .LBB125_18
+.LBB125_16:
+ movl 12(%ebx), %edi
+.LBB125_18:
+ jne .LBB125_19
+# BB#20:
+ xorl %ebx, %ebx
+ jmp .LBB125_21
+.LBB125_19:
+ movl 8(%ebx), %ebx
+.LBB125_21:
+ addl 16(%esp), %eax # 4-byte Folded Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 32(%ecx)
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, 36(%ecx)
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 40(%ecx)
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %edi, 44(%ecx)
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %esi, 48(%ecx)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 52(%ecx)
+ movl %eax, 56(%ecx)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%ecx)
+ addl $40, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end125:
+ .size mcl_fpDbl_sub8Lbmi2, .Lfunc_end125-mcl_fpDbl_sub8Lbmi2
+
+ .align 16, 0x90
+ .type .LmulPv288x32,@function
+.LmulPv288x32: # @mulPv288x32
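+# Editorial note (not generated code): local helper that appears to multiply the
+# 9-limb (288-bit) operand pointed to by %edx by the 32-bit word at 44(%esp) using
+# BMI2 mulx, writing the 10-word product to the buffer in %ecx and returning that
+# pointer in %eax.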
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $24, %esp
+ movl %edx, %eax
+ movl 44(%esp), %edx
+ mulxl 4(%eax), %edi, %esi
+ mulxl (%eax), %ebp, %ebx
+ movl %ebp, 20(%esp) # 4-byte Spill
+ addl %edi, %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ mulxl 8(%eax), %edi, %ebx
+ adcl %esi, %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ mulxl 12(%eax), %esi, %edi
+ adcl %ebx, %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ mulxl 16(%eax), %ebx, %esi
+ adcl %edi, %ebx
+ mulxl 20(%eax), %edi, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl %esi, %edi
+ movl %ebp, %edx
+ mulxl 24(%eax), %esi, %edx
+ movl %edx, (%esp) # 4-byte Spill
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ movl %ebp, %edx
+ mulxl 28(%eax), %edx, %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ adcl (%esp), %edx # 4-byte Folded Reload
+ movl 20(%esp), %ebp # 4-byte Reload
+ movl %ebp, (%ecx)
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%ecx)
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%ecx)
+ movl 8(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%ecx)
+ movl %ebx, 16(%ecx)
+ movl %edi, 20(%ecx)
+ movl %esi, 24(%ecx)
+ movl %edx, 28(%ecx)
+ movl 44(%esp), %edx
+ mulxl 32(%eax), %eax, %edx
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%ecx)
+ adcl $0, %edx
+ movl %edx, 36(%ecx)
+ movl %ecx, %eax
+ addl $24, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end126:
+ .size .LmulPv288x32, .Lfunc_end126-.LmulPv288x32
+
+ .globl mcl_fp_mulUnitPre9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre9Lbmi2,@function
+mcl_fp_mulUnitPre9Lbmi2: # @mcl_fp_mulUnitPre9Lbmi2
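+# Editorial note (not generated code): thin wrapper around .LmulPv288x32 that
+# multiplies a 9-limb operand by a single 32-bit word and copies the 10-word
+# product into the output buffer.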
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $76, %esp
+ calll .L127$pb
+.L127$pb:
+ popl %ebx
+.Ltmp8:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp8-.L127$pb), %ebx
+ movl 104(%esp), %eax
+ movl %eax, (%esp)
+ leal 32(%esp), %ecx
+ movl 100(%esp), %edx
+ calll .LmulPv288x32
+ movl 68(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 64(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 52(%esp), %edi
+ movl 48(%esp), %ebx
+ movl 44(%esp), %ebp
+ movl 40(%esp), %esi
+ movl 32(%esp), %edx
+ movl 36(%esp), %ecx
+ movl 96(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %ebp, 12(%eax)
+ movl %ebx, 16(%eax)
+ movl %edi, 20(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ addl $76, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end127:
+ .size mcl_fp_mulUnitPre9Lbmi2, .Lfunc_end127-mcl_fp_mulUnitPre9Lbmi2
+
+ .globl mcl_fpDbl_mulPre9Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre9Lbmi2,@function
+mcl_fpDbl_mulPre9Lbmi2: # @mcl_fpDbl_mulPre9Lbmi2
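+# Editorial note (not generated code): schoolbook 9x9-limb multiplication; each of
+# the nine 32-bit words of the second operand is multiplied into the first operand
+# via .LmulPv288x32 and accumulated into the 18-limb double-width result.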
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $428, %esp # imm = 0x1AC
+ calll .L128$pb
+.L128$pb:
+ popl %esi
+.Ltmp9:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp9-.L128$pb), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 456(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 384(%esp), %ecx
+ movl 452(%esp), %edx
+ movl %edx, %ebp
+ movl %esi, %ebx
+ calll .LmulPv288x32
+ movl 420(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 416(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 412(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 408(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 404(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 400(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 396(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 392(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 384(%esp), %eax
+ movl 388(%esp), %edi
+ movl 448(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 456(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 344(%esp), %ecx
+ movl %ebp, %edx
+ movl %esi, %ebx
+ calll .LmulPv288x32
+ addl 344(%esp), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 380(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 376(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 364(%esp), %ebx
+ movl 360(%esp), %edi
+ movl 356(%esp), %esi
+ movl 348(%esp), %ecx
+ movl 352(%esp), %edx
+ movl 448(%esp), %eax
+ movl 8(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%eax)
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 8(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 456(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 304(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 340(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 336(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 332(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 328(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 324(%esp), %edi
+ movl 320(%esp), %ebp
+ movl 316(%esp), %esi
+ movl 308(%esp), %ecx
+ movl 312(%esp), %edx
+ movl 448(%esp), %eax
+ movl 40(%esp), %ebx # 4-byte Reload
+ movl %ebx, 8(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ movl 456(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 264(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 300(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 296(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 292(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 288(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 284(%esp), %ebx
+ movl 280(%esp), %edi
+ movl 276(%esp), %esi
+ movl 268(%esp), %ecx
+ movl 272(%esp), %edx
+ movl 448(%esp), %eax
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 456(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ addl 224(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 260(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 256(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 252(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 248(%esp), %ebx
+ movl 244(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 240(%esp), %edi
+ movl 236(%esp), %ebp
+ movl 228(%esp), %ecx
+ movl 232(%esp), %edx
+ movl 448(%esp), %eax
+ movl 44(%esp), %esi # 4-byte Reload
+ movl %esi, 16(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 16(%esp) # 4-byte Spill
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ movl 456(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 44(%esp), %eax # 4-byte Reload
+ addl 184(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 220(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 212(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 208(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 204(%esp), %edi
+ movl 200(%esp), %ebx
+ movl 196(%esp), %esi
+ movl 188(%esp), %ecx
+ movl 192(%esp), %edx
+ movl 448(%esp), %eax
+ movl 44(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 8(%esp) # 4-byte Spill
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl %eax, 20(%esp) # 4-byte Folded Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 456(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 144(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ addl 144(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 176(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 168(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 164(%esp), %ebx
+ movl 160(%esp), %edi
+ movl 156(%esp), %esi
+ movl 148(%esp), %ecx
+ movl 152(%esp), %edx
+ movl 448(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%eax)
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 456(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 104(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 12(%esp), %esi # 4-byte Reload
+ addl 104(%esp), %esi
+ movl 140(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 136(%esp), %ebp
+ movl 132(%esp), %edi
+ movl 128(%esp), %ebx
+ movl 124(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 120(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 116(%esp), %edx
+ movl 108(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx
+ movl 448(%esp), %eax
+ movl %esi, 28(%eax)
+ movl 12(%esp), %esi # 4-byte Reload
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 40(%esp) # 4-byte Spill
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 44(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 56(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 456(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 64(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl %esi, %ebp
+ addl 64(%esp), %ebp
+ movl 24(%esp), %edx # 4-byte Reload
+ adcl 68(%esp), %edx
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 88(%esp), %edi
+ movl 84(%esp), %ebx
+ movl 80(%esp), %esi
+ movl 76(%esp), %eax
+ movl 448(%esp), %ecx
+ movl %ebp, 32(%ecx)
+ movl %edx, 36(%ecx)
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl 52(%esp), %edx # 4-byte Reload
+ movl %edx, 40(%ecx)
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %eax, 44(%ecx)
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, 48(%ecx)
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 52(%ecx)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %edi, 56(%ecx)
+ movl %eax, 60(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%ecx)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 68(%ecx)
+ addl $428, %esp # imm = 0x1AC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end128:
+ .size mcl_fpDbl_mulPre9Lbmi2, .Lfunc_end128-mcl_fpDbl_mulPre9Lbmi2
+
+ .globl mcl_fpDbl_sqrPre9Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre9Lbmi2,@function
+mcl_fpDbl_sqrPre9Lbmi2: # @mcl_fpDbl_sqrPre9Lbmi2
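+# Editorial note (not generated code): double-width squaring of a 9-limb operand,
+# implemented as the same row-by-row .LmulPv288x32 accumulation as mulPre9, with
+# both multiplier words and multiplicand taken from the same input.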
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $428, %esp # imm = 0x1AC
+ calll .L129$pb
+.L129$pb:
+ popl %ebx
+.Ltmp10:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp10-.L129$pb), %ebx
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl 452(%esp), %edx
+ movl (%edx), %eax
+ movl %eax, (%esp)
+ leal 384(%esp), %ecx
+ movl %edx, %esi
+ movl %ebx, %edi
+ calll .LmulPv288x32
+ movl 420(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 416(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 412(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 408(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 404(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 400(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 396(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 392(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 384(%esp), %eax
+ movl 388(%esp), %ebp
+ movl 448(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 4(%esi), %eax
+ movl %eax, (%esp)
+ leal 344(%esp), %ecx
+ movl %esi, %edx
+ movl %edi, %ebx
+ calll .LmulPv288x32
+ addl 344(%esp), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 380(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 376(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 364(%esp), %ebx
+ movl 360(%esp), %edi
+ movl 356(%esp), %esi
+ movl 348(%esp), %ecx
+ movl 352(%esp), %edx
+ movl 448(%esp), %eax
+ movl 8(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%eax)
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 8(%edx), %eax
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 304(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 340(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 336(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 332(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 328(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 324(%esp), %edi
+ movl 320(%esp), %ebp
+ movl 316(%esp), %esi
+ movl 308(%esp), %ecx
+ movl 312(%esp), %edx
+ movl 448(%esp), %eax
+ movl 40(%esp), %ebx # 4-byte Reload
+ movl %ebx, 8(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 12(%edx), %eax
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 264(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 300(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 296(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 292(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 288(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 284(%esp), %ebx
+ movl 280(%esp), %edi
+ movl 276(%esp), %esi
+ movl 268(%esp), %ecx
+ movl 272(%esp), %edx
+ movl 448(%esp), %eax
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 20(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 16(%edx), %eax
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ addl 224(%esp), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 260(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 256(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 252(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 248(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 244(%esp), %edi
+ movl 240(%esp), %ebp
+ movl 236(%esp), %esi
+ movl 228(%esp), %ecx
+ movl 232(%esp), %edx
+ movl 448(%esp), %eax
+ movl 12(%esp), %ebx # 4-byte Reload
+ movl %ebx, 16(%eax)
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 20(%edx), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 184(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 220(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 212(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 208(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 204(%esp), %ebx
+ movl 200(%esp), %edi
+ movl 196(%esp), %esi
+ movl 188(%esp), %ecx
+ movl 192(%esp), %edx
+ movl 448(%esp), %eax
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 8(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 24(%edx), %eax
+ movl %eax, (%esp)
+ leal 144(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ addl 144(%esp), %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 176(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 168(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 164(%esp), %edi
+ movl 160(%esp), %ebp
+ movl 156(%esp), %esi
+ movl 148(%esp), %ecx
+ movl 152(%esp), %edx
+ movl 448(%esp), %eax
+ movl 24(%esp), %ebx # 4-byte Reload
+ movl %ebx, 24(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 16(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 28(%edx), %eax
+ movl %eax, (%esp)
+ leal 104(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 4(%esp), %esi # 4-byte Reload
+ addl 104(%esp), %esi
+ movl 140(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 136(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 132(%esp), %ebp
+ movl 128(%esp), %ebx
+ movl 124(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 120(%esp), %edi
+ movl 116(%esp), %edx
+ movl 108(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx
+ movl 448(%esp), %eax
+ movl %esi, 28(%eax)
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ adcl 52(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 32(%edx), %eax
+ movl %eax, (%esp)
+ leal 64(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl %esi, %ebp
+ addl 64(%esp), %ebp
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl 68(%esp), %edx
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 88(%esp), %edi
+ movl 84(%esp), %ebx
+ movl 80(%esp), %esi
+ movl 76(%esp), %eax
+ movl 448(%esp), %ecx
+ movl %ebp, 32(%ecx)
+ movl %edx, 36(%ecx)
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ movl %edx, 40(%ecx)
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %eax, 44(%ecx)
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, 48(%ecx)
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 52(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %edi, 56(%ecx)
+ movl %eax, 60(%ecx)
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%ecx)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 68(%ecx)
+ addl $428, %esp # imm = 0x1AC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end129:
+ .size mcl_fpDbl_sqrPre9Lbmi2, .Lfunc_end129-mcl_fpDbl_sqrPre9Lbmi2
+
+ .globl mcl_fp_mont9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont9Lbmi2,@function
+mcl_fp_mont9Lbmi2: # @mcl_fp_mont9Lbmi2
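+# Editorial note (not generated code): 9-limb Montgomery multiplication. Each word
+# of the second operand is multiplied in and interleaved with a reduction step that
+# scales by the precomputed word at -4(p) (apparently -p^-1 mod 2^32) and adds a
+# multiple of the modulus; a final conditional subtraction keeps the result below p.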
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $796, %esp # imm = 0x31C
+ calll .L130$pb
+.L130$pb:
+ popl %ebx
+.Ltmp11:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp11-.L130$pb), %ebx
+ movl 828(%esp), %eax
+ movl -4(%eax), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 752(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 752(%esp), %ebp
+ movl 756(%esp), %esi
+ movl %ebp, %eax
+ imull %edi, %eax
+ movl 788(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 784(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 780(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 776(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 772(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 768(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 764(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 760(%esp), %edi
+ movl %eax, (%esp)
+ leal 712(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ addl 712(%esp), %ebp
+ adcl 716(%esp), %esi
+ adcl 720(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 748(%esp), %ebp
+ sbbl %eax, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 672(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 64(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 672(%esp), %esi
+ adcl 676(%esp), %edi
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 680(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 684(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 688(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 692(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 696(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 700(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 704(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ adcl 708(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %esi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 828(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv288x32
+ andl $1, %ebp
+ addl 632(%esp), %esi
+ adcl 636(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 660(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl 824(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 592(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ addl 592(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 616(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 624(%esp), %esi
+ adcl 628(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %edi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 552(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ andl $1, %ebp
+ addl 552(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 580(%esp), %edi
+ adcl 584(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl 824(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 512(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 512(%esp), %ecx
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 524(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 536(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 544(%esp), %edi
+ adcl 548(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 472(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ movl 40(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 472(%esp), %ebp
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 476(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 480(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 484(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 488(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 492(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 496(%esp), %ebp
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 500(%esp), %esi
+ adcl 504(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 508(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 432(%esp), %ecx
+ movl 820(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv288x32
+ movl 32(%esp), %ecx # 4-byte Reload
+ addl 432(%esp), %ecx
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 444(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 452(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ adcl 456(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 392(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ movl %esi, %eax
+ andl $1, %eax
+ addl 392(%esp), %ebp
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 396(%esp), %esi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 400(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 404(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 408(%esp), %ebp
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 412(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 416(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 420(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 424(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 428(%esp), %edi
+ adcl $0, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 352(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ addl 352(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 364(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 372(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 384(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %esi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ movl %edi, %eax
+ andl $1, %eax
+ addl 312(%esp), %esi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 316(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 320(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 324(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 328(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 332(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 336(%esp), %esi
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 340(%esp), %edi
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 344(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 348(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %ebp
+ movl 824(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 272(%esp), %ecx
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 292(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 296(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 308(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 232(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ movl %edi, %ecx
+ andl $1, %ecx
+ addl 232(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 240(%esp), %esi
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 244(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 260(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 192(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 192(%esp), %ecx
+ adcl 196(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ adcl 200(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 212(%esp), %esi
+ adcl 216(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 152(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ andl $1, %ebp
+ addl 152(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 164(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 172(%esp), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 180(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 112(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 112(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 120(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 124(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 128(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 132(%esp), %ebp
+ adcl 136(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 140(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 144(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl 28(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 72(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ andl $1, %edi
+ addl 72(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 80(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 84(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %edx, %esi
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 92(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 100(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 108(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl 828(%esp), %ebx
+ subl (%ebx), %eax
+ movl %ecx, %edx
+ sbbl 4(%ebx), %edx
+ movl %esi, %ecx
+ sbbl 8(%ebx), %ecx
+ movl 44(%esp), %esi # 4-byte Reload
+ sbbl 12(%ebx), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ sbbl 16(%ebx), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ sbbl 20(%ebx), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ sbbl 24(%ebx), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ sbbl 28(%ebx), %esi
+ movl 60(%esp), %ebp # 4-byte Reload
+ sbbl 32(%ebx), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ sbbl $0, %edi
+ andl $1, %edi
+ movl %edi, %ebx
+ jne .LBB130_2
+# BB#1:
+ movl %esi, 32(%esp) # 4-byte Spill
+.LBB130_2:
+ testb %bl, %bl
+ movl 68(%esp), %esi # 4-byte Reload
+ jne .LBB130_4
+# BB#3:
+ movl %eax, %esi
+.LBB130_4:
+ movl 816(%esp), %ebp
+ movl %esi, (%ebp)
+ movl 64(%esp), %eax # 4-byte Reload
+ jne .LBB130_6
+# BB#5:
+ movl %edx, %eax
+.LBB130_6:
+ movl %eax, 4(%ebp)
+ movl 52(%esp), %eax # 4-byte Reload
+ jne .LBB130_8
+# BB#7:
+ movl %ecx, %eax
+.LBB130_8:
+ movl %eax, 8(%ebp)
+ movl 44(%esp), %eax # 4-byte Reload
+ jne .LBB130_10
+# BB#9:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB130_10:
+ movl %eax, 12(%ebp)
+ jne .LBB130_12
+# BB#11:
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+.LBB130_12:
+ movl 40(%esp), %eax # 4-byte Reload
+ movl %eax, 16(%ebp)
+ movl 36(%esp), %eax # 4-byte Reload
+ jne .LBB130_14
+# BB#13:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB130_14:
+ movl %eax, 20(%ebp)
+ movl 48(%esp), %eax # 4-byte Reload
+ jne .LBB130_16
+# BB#15:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB130_16:
+ movl %eax, 24(%ebp)
+ movl 32(%esp), %eax # 4-byte Reload
+ movl %eax, 28(%ebp)
+ movl 60(%esp), %eax # 4-byte Reload
+ jne .LBB130_18
+# BB#17:
+ movl 56(%esp), %eax # 4-byte Reload
+.LBB130_18:
+ movl %eax, 32(%ebp)
+ addl $796, %esp # imm = 0x31C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end130:
+ .size mcl_fp_mont9Lbmi2, .Lfunc_end130-mcl_fp_mont9Lbmi2
+
+ .globl mcl_fp_montNF9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF9Lbmi2,@function
+mcl_fp_montNF9Lbmi2: # @mcl_fp_montNF9Lbmi2
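+# Editorial note (not generated code): the mcl "NF" variant of the Montgomery
+# multiplication above; it follows the same interleaved multiply/reduce structure
+# but propagates the top word differently, presumably trading the carry bookkeeping
+# of mcl_fp_mont9Lbmi2 for a cheaper final correction.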
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $796, %esp # imm = 0x31C
+ calll .L131$pb
+.L131$pb:
+ popl %ebx
+.Ltmp12:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp12-.L131$pb), %ebx
+ movl 828(%esp), %eax
+ movl -4(%eax), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 752(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 752(%esp), %esi
+ movl 756(%esp), %ebp
+ movl %esi, %eax
+ imull %edi, %eax
+ movl 788(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 784(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 780(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 776(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 772(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 768(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 764(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 760(%esp), %edi
+ movl %eax, (%esp)
+ leal 712(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ addl 712(%esp), %esi
+ adcl 716(%esp), %ebp
+ adcl 720(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 740(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 672(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 708(%esp), %eax
+ addl 672(%esp), %ebp
+ adcl 676(%esp), %edi
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 680(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 684(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 688(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 692(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 696(%esp), %esi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 700(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 704(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ addl 632(%esp), %ebp
+ adcl 636(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 656(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 664(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 592(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 628(%esp), %eax
+ addl 592(%esp), %edi
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 596(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 600(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 604(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 608(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 612(%esp), %esi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 616(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 620(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 624(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ adcl $0, %ebp
+ movl %edi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 552(%esp), %ecx
+ movl 828(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv288x32
+ addl 552(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 572(%esp), %esi
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 576(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 588(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 512(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 548(%esp), %eax
+ movl 32(%esp), %edx # 4-byte Reload
+ addl 512(%esp), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 516(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 520(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 524(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 528(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ adcl 532(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 536(%esp), %ebp
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 540(%esp), %edi
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 544(%esp), %esi
+ adcl $0, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %edx, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 472(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ movl 32(%esp), %eax # 4-byte Reload
+ addl 472(%esp), %eax
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 496(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ adcl 500(%esp), %edi
+ movl %edi, %ebp
+ adcl 504(%esp), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 432(%esp), %ecx
+ movl 820(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv288x32
+ movl 468(%esp), %eax
+ movl 40(%esp), %ecx # 4-byte Reload
+ addl 432(%esp), %ecx
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 436(%esp), %esi
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 440(%esp), %edi
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 444(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 448(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 452(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 456(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 460(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 464(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 392(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ addl 392(%esp), %ebp
+ adcl 396(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 400(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 412(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 424(%esp), %edi
+ movl 40(%esp), %esi # 4-byte Reload
+ adcl 428(%esp), %esi
+ movl 824(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 352(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 388(%esp), %eax
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 352(%esp), %ecx
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 356(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 360(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 364(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 368(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 372(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 376(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 380(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl 384(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ adcl $0, %ebp
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ addl 312(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 324(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 340(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 348(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 308(%esp), %edx
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 272(%esp), %ecx
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 280(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 292(%esp), %ebp
+ adcl 296(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 232(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ addl 232(%esp), %edi
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 236(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ebp, %edi
+ adcl 252(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 192(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 228(%esp), %ebp
+ movl %esi, %ecx
+ addl 192(%esp), %ecx
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 196(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 208(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 152(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ addl 152(%esp), %edi
+ adcl 156(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 160(%esp), %edi
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 164(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 188(%esp), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 112(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 148(%esp), %ebp
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 112(%esp), %ecx
+ adcl 116(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ adcl 120(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 124(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 128(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %esi # 4-byte Reload
+ adcl 132(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 136(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 140(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 144(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 72(%esp), %ecx
+ movl 828(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv288x32
+ addl 72(%esp), %edi
+ movl 44(%esp), %edi # 4-byte Reload
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ebx # 4-byte Reload
+ adcl 80(%esp), %ebx
+ movl %ebx, 64(%esp) # 4-byte Spill
+ adcl 84(%esp), %edi
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl 92(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 100(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl 108(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 828(%esp), %eax
+ subl (%eax), %edx
+ sbbl 4(%eax), %ebx
+ movl %edi, %ecx
+ sbbl 8(%eax), %ecx
+ movl 52(%esp), %esi # 4-byte Reload
+ sbbl 12(%eax), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 40(%esp), %esi # 4-byte Reload
+ sbbl 16(%eax), %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ sbbl 20(%eax), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ sbbl 24(%eax), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ sbbl 28(%eax), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ sbbl 32(%eax), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ sarl $31, %ebp
+ testl %ebp, %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ js .LBB131_2
+# BB#1:
+ movl %edx, %eax
+.LBB131_2:
+ movl 816(%esp), %edx
+ movl %eax, (%edx)
+ movl 64(%esp), %esi # 4-byte Reload
+ js .LBB131_4
+# BB#3:
+ movl %ebx, %esi
+.LBB131_4:
+ movl %esi, 4(%edx)
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl 40(%esp), %eax # 4-byte Reload
+ js .LBB131_6
+# BB#5:
+ movl %ecx, %edi
+.LBB131_6:
+ movl %edi, 8(%edx)
+ js .LBB131_8
+# BB#7:
+ movl 16(%esp), %ebp # 4-byte Reload
+.LBB131_8:
+ movl %ebp, 12(%edx)
+ js .LBB131_10
+# BB#9:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB131_10:
+ movl %eax, 16(%edx)
+ movl 48(%esp), %eax # 4-byte Reload
+ js .LBB131_12
+# BB#11:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB131_12:
+ movl %eax, 20(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB131_14
+# BB#13:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB131_14:
+ movl %eax, 24(%edx)
+ movl 36(%esp), %eax # 4-byte Reload
+ js .LBB131_16
+# BB#15:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB131_16:
+ movl %eax, 28(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB131_18
+# BB#17:
+ movl 44(%esp), %eax # 4-byte Reload
+.LBB131_18:
+ movl %eax, 32(%edx)
+ addl $796, %esp # imm = 0x31C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end131:
+ .size mcl_fp_montNF9Lbmi2, .Lfunc_end131-mcl_fp_montNF9Lbmi2
+
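+# Annotation (descriptive, not from the generator): mcl_fp_montRed9Lbmi2
+# reduces an 18-limb (576-bit) value at 516(%esp) modulo the 9-limb prime
+# at 520(%esp), writing 9 limbs to 512(%esp). Each of the nine rounds
+# multiplies the current low limb by the factor stored at -4 from the
+# modulus pointer (presumably -p^{-1} mod 2^32, i.e. Montgomery reduction),
+# adds p times that factor via .LmulPv288x32 to zero the low limb, and a
+# final conditional subtraction keeps the result below p.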
+ .globl mcl_fp_montRed9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed9Lbmi2,@function
+mcl_fp_montRed9Lbmi2: # @mcl_fp_montRed9Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $492, %esp # imm = 0x1EC
+ calll .L132$pb
+.L132$pb:
+ popl %ebx
+.Ltmp13:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp13-.L132$pb), %ebx
+ movl 520(%esp), %edx
+ movl -4(%edx), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 516(%esp), %eax
+ movl (%eax), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 4(%eax), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl %esi, %ecx
+ imull %edi, %ecx
+ movl 68(%eax), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 64(%eax), %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 60(%eax), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 56(%eax), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 52(%eax), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 48(%eax), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 44(%eax), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 40(%eax), %edi
+ movl %edi, 124(%esp) # 4-byte Spill
+ movl 36(%eax), %edi
+ movl %edi, 120(%esp) # 4-byte Spill
+ movl 32(%eax), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 28(%eax), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 24(%eax), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 20(%eax), %ebp
+ movl 16(%eax), %edi
+ movl 12(%eax), %esi
+ movl 8(%eax), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl (%edx), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 32(%edx), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 28(%edx), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%edx), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 20(%edx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 16(%edx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 12(%edx), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 8(%edx), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 4(%edx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, (%esp)
+ leal 448(%esp), %ecx
+ calll .LmulPv288x32
+ movl 76(%esp), %eax # 4-byte Reload
+ addl 448(%esp), %eax
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 452(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 460(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ adcl 464(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 468(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ movl 96(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ movl 108(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ sbbl %eax, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 408(%esp), %ecx
+ movl 520(%esp), %edx
+ calll .LmulPv288x32
+ movl 76(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ movl 52(%esp), %ecx # 4-byte Reload
+ addl 408(%esp), %ecx
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 412(%esp), %edx
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 416(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 420(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 424(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 428(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 432(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 436(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 440(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 444(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ adcl $0, %esi
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ adcl $0, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 368(%esp), %ecx
+ movl 520(%esp), %edx
+ calll .LmulPv288x32
+ addl 368(%esp), %edi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 372(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl 404(%esp), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 328(%esp), %ecx
+ movl 520(%esp), %edx
+ calll .LmulPv288x32
+ addl 328(%esp), %ebp
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 332(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 364(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 288(%esp), %ecx
+ movl 520(%esp), %edx
+ calll .LmulPv288x32
+ movl 64(%esp), %eax # 4-byte Reload
+ addl 288(%esp), %eax
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 292(%esp), %ecx
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 248(%esp), %ecx
+ movl 520(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv288x32
+ addl 248(%esp), %esi
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 252(%esp), %ecx
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl 264(%esp), %ebp
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %edi, %esi
+ adcl $0, %esi
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 208(%esp), %ecx
+ movl 520(%esp), %edx
+ calll .LmulPv288x32
+ addl 208(%esp), %edi
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 212(%esp), %ecx
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 220(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 168(%esp), %ecx
+ movl 520(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv288x32
+ addl 168(%esp), %ebp
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 172(%esp), %ecx
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %ebp # 4-byte Reload
+ adcl 180(%esp), %ebp
+ movl 96(%esp), %esi # 4-byte Reload
+ adcl 184(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 128(%esp), %ecx
+ movl 520(%esp), %edx
+ calll .LmulPv288x32
+ addl 128(%esp), %edi
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 132(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl %eax, %edi
+ adcl 136(%esp), %ebp
+ movl %ebp, 124(%esp) # 4-byte Spill
+ adcl 140(%esp), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 144(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 152(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 164(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ subl 20(%esp), %edi # 4-byte Folded Reload
+ movl 124(%esp), %eax # 4-byte Reload
+ sbbl 16(%esp), %eax # 4-byte Folded Reload
+ sbbl 24(%esp), %esi # 4-byte Folded Reload
+ sbbl 28(%esp), %ecx # 4-byte Folded Reload
+ sbbl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 72(%esp) # 4-byte Spill
+ movl 112(%esp), %ebx # 4-byte Reload
+ sbbl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %ebx # 4-byte Reload
+ sbbl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %ebx # 4-byte Reload
+ sbbl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 92(%esp) # 4-byte Spill
+ movl %edx, %ebx
+ movl %ebp, %edx
+ sbbl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 104(%esp) # 4-byte Spill
+ sbbl $0, %edx
+ andl $1, %edx
+ jne .LBB132_2
+# BB#1:
+ movl %ecx, 116(%esp) # 4-byte Spill
+.LBB132_2:
+ testb %dl, %dl
+ movl 120(%esp), %ecx # 4-byte Reload
+ jne .LBB132_4
+# BB#3:
+ movl %edi, %ecx
+.LBB132_4:
+ movl 512(%esp), %edi
+ movl %ecx, (%edi)
+ movl 88(%esp), %ecx # 4-byte Reload
+ jne .LBB132_6
+# BB#5:
+ movl %eax, 124(%esp) # 4-byte Spill
+.LBB132_6:
+ movl 124(%esp), %eax # 4-byte Reload
+ movl %eax, 4(%edi)
+ movl 96(%esp), %eax # 4-byte Reload
+ jne .LBB132_8
+# BB#7:
+ movl %esi, %eax
+.LBB132_8:
+ movl %eax, 8(%edi)
+ movl 116(%esp), %eax # 4-byte Reload
+ movl %eax, 12(%edi)
+ movl 80(%esp), %eax # 4-byte Reload
+ movl 108(%esp), %ebp # 4-byte Reload
+ jne .LBB132_10
+# BB#9:
+ movl 72(%esp), %ebp # 4-byte Reload
+.LBB132_10:
+ movl %ebp, 16(%edi)
+ movl 112(%esp), %ebx # 4-byte Reload
+ jne .LBB132_12
+# BB#11:
+ movl 76(%esp), %ebx # 4-byte Reload
+.LBB132_12:
+ movl %ebx, 20(%edi)
+ movl 100(%esp), %esi # 4-byte Reload
+ jne .LBB132_14
+# BB#13:
+ movl 84(%esp), %esi # 4-byte Reload
+.LBB132_14:
+ movl %esi, 24(%edi)
+ jne .LBB132_16
+# BB#15:
+ movl 92(%esp), %ecx # 4-byte Reload
+.LBB132_16:
+ movl %ecx, 28(%edi)
+ jne .LBB132_18
+# BB#17:
+ movl 104(%esp), %eax # 4-byte Reload
+.LBB132_18:
+ movl %eax, 32(%edi)
+ addl $492, %esp # imm = 0x1EC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end132:
+ .size mcl_fp_montRed9Lbmi2, .Lfunc_end132-mcl_fp_montRed9Lbmi2
+
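+# Annotation: mcl_fp_addPre9Lbmi2 is a plain 9-limb (288-bit) addition with
+# no modular reduction; the sum is stored to the destination and the final
+# carry is returned in %eax.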
+ .globl mcl_fp_addPre9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre9Lbmi2,@function
+mcl_fp_addPre9Lbmi2: # @mcl_fp_addPre9Lbmi2
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 20(%esp), %ecx
+ addl (%ecx), %edx
+ adcl 4(%ecx), %esi
+ movl 8(%eax), %edi
+ adcl 8(%ecx), %edi
+ movl 16(%esp), %ebx
+ movl %edx, (%ebx)
+ movl 12(%ecx), %edx
+ movl %esi, 4(%ebx)
+ movl 16(%ecx), %esi
+ adcl 12(%eax), %edx
+ adcl 16(%eax), %esi
+ movl %edi, 8(%ebx)
+ movl 20(%eax), %edi
+ movl %edx, 12(%ebx)
+ movl 20(%ecx), %edx
+ adcl %edi, %edx
+ movl 24(%eax), %edi
+ movl %esi, 16(%ebx)
+ movl 24(%ecx), %esi
+ adcl %edi, %esi
+ movl 28(%eax), %edi
+ movl %edx, 20(%ebx)
+ movl 28(%ecx), %edx
+ adcl %edi, %edx
+ movl %esi, 24(%ebx)
+ movl %edx, 28(%ebx)
+ movl 32(%eax), %eax
+ movl 32(%ecx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 32(%ebx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end133:
+ .size mcl_fp_addPre9Lbmi2, .Lfunc_end133-mcl_fp_addPre9Lbmi2
+
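+# Annotation: mcl_fp_subPre9Lbmi2 is a plain 9-limb subtraction without
+# reduction; the difference is stored and the final borrow is returned
+# in %eax.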
+ .globl mcl_fp_subPre9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre9Lbmi2,@function
+mcl_fp_subPre9Lbmi2: # @mcl_fp_subPre9Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %eax, %eax
+ movl 28(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edx), %ebx
+ movl 20(%esp), %ebp
+ movl %esi, (%ebp)
+ movl 12(%ecx), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ebp)
+ movl 16(%ecx), %edi
+ sbbl 16(%edx), %edi
+ movl %ebx, 8(%ebp)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%ebp)
+ movl 20(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %edi, 16(%ebp)
+ movl 24(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 28(%edx), %ebx
+ movl %esi, 20(%ebp)
+ movl 28(%ecx), %esi
+ sbbl %ebx, %esi
+ movl %edi, 24(%ebp)
+ movl %esi, 28(%ebp)
+ movl 32(%edx), %edx
+ movl 32(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 32(%ebp)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end134:
+ .size mcl_fp_subPre9Lbmi2, .Lfunc_end134-mcl_fp_subPre9Lbmi2
+
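+# Annotation: mcl_fp_shr1_9Lbmi2 shifts a 9-limb value right by one bit,
+# funneling each bit down through shrdl and finishing the top limb with a
+# plain shrl.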
+ .globl mcl_fp_shr1_9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_9Lbmi2,@function
+mcl_fp_shr1_9Lbmi2: # @mcl_fp_shr1_9Lbmi2
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl 8(%esp), %esi
+ movl %ecx, (%esi)
+ movl 8(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 4(%esi)
+ movl 12(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 8(%esi)
+ movl 16(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 12(%esi)
+ movl 20(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 16(%esi)
+ movl 24(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 20(%esi)
+ movl 28(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 24(%esi)
+ movl 32(%eax), %eax
+ shrdl $1, %eax, %edx
+ movl %edx, 28(%esi)
+ shrl %eax
+ movl %eax, 32(%esi)
+ popl %esi
+ retl
+.Lfunc_end135:
+ .size mcl_fp_shr1_9Lbmi2, .Lfunc_end135-mcl_fp_shr1_9Lbmi2
+
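+# Annotation: mcl_fp_add9Lbmi2 performs modular addition. The raw sum a+b
+# is stored first, then p is tentatively subtracted; if that subtraction
+# does not borrow (the %nocarry block), the reduced value a+b-p overwrites
+# the stored result.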
+ .globl mcl_fp_add9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add9Lbmi2,@function
+mcl_fp_add9Lbmi2: # @mcl_fp_add9Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $20, %esp
+ movl 48(%esp), %edi
+ movl (%edi), %ecx
+ movl 4(%edi), %eax
+ movl 44(%esp), %ebx
+ addl (%ebx), %ecx
+ movl %ecx, %ebp
+ adcl 4(%ebx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 8(%edi), %eax
+ adcl 8(%ebx), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 12(%ebx), %ecx
+ movl 16(%ebx), %eax
+ adcl 12(%edi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 16(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 20(%ebx), %esi
+ adcl 20(%edi), %esi
+ movl 24(%ebx), %edx
+ adcl 24(%edi), %edx
+ movl 28(%ebx), %ecx
+ adcl 28(%edi), %ecx
+ movl 32(%ebx), %eax
+ adcl 32(%edi), %eax
+ movl 40(%esp), %edi
+ movl %ebp, (%edi)
+ movl 16(%esp), %ebx # 4-byte Reload
+ movl %ebx, 4(%edi)
+ movl 12(%esp), %ebx # 4-byte Reload
+ movl %ebx, 8(%edi)
+ movl 8(%esp), %ebx # 4-byte Reload
+ movl %ebx, 12(%edi)
+ movl 4(%esp), %ebx # 4-byte Reload
+ movl %ebx, 16(%edi)
+ movl %esi, 20(%edi)
+ movl %edx, 24(%edi)
+ movl %ecx, 28(%edi)
+ movl %eax, 32(%edi)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 52(%esp), %edi
+ subl (%edi), %ebp
+ movl %ebp, (%esp) # 4-byte Spill
+ movl 16(%esp), %ebp # 4-byte Reload
+ sbbl 4(%edi), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %ebp # 4-byte Reload
+ sbbl 8(%edi), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 8(%esp), %ebp # 4-byte Reload
+ sbbl 12(%edi), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 4(%esp), %ebp # 4-byte Reload
+ sbbl 16(%edi), %ebp
+ sbbl 20(%edi), %esi
+ sbbl 24(%edi), %edx
+ sbbl 28(%edi), %ecx
+ sbbl 32(%edi), %eax
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB136_2
+# BB#1: # %nocarry
+ movl (%esp), %edi # 4-byte Reload
+ movl 40(%esp), %ebx
+ movl %edi, (%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 8(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl %ebp, 16(%ebx)
+ movl %esi, 20(%ebx)
+ movl %edx, 24(%ebx)
+ movl %ecx, 28(%ebx)
+ movl %eax, 32(%ebx)
+.LBB136_2: # %carry
+ addl $20, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end136:
+ .size mcl_fp_add9Lbmi2, .Lfunc_end136-mcl_fp_add9Lbmi2
+
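+# Annotation: mcl_fp_addNF9Lbmi2 is an addition variant that computes a+b
+# and a+b-p and selects per limb on the sign of the top subtraction word
+# (sarl $31), instead of the explicit carry-out test used in
+# mcl_fp_add9Lbmi2.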
+ .globl mcl_fp_addNF9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF9Lbmi2,@function
+mcl_fp_addNF9Lbmi2: # @mcl_fp_addNF9Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $72, %esp
+ movl 100(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edi
+ movl 96(%esp), %esi
+ addl (%esi), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 4(%esi), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 32(%eax), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 28(%eax), %ebp
+ movl 24(%eax), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 20(%eax), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 16(%eax), %ebx
+ movl 12(%eax), %edx
+ movl 8(%eax), %ecx
+ adcl 8(%esi), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 12(%esi), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 16(%esi), %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 20(%esi), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 24(%esi), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 28(%esi), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 32(%esi), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 104(%esp), %esi
+ movl 36(%esp), %eax # 4-byte Reload
+ movl %eax, %ebp
+ subl (%esi), %ebp
+ movl %ebp, (%esp) # 4-byte Spill
+ sbbl 4(%esi), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ sbbl 8(%esi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 12(%esi), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ sbbl 16(%esi), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 64(%esp), %ebx # 4-byte Reload
+ sbbl 20(%esi), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ sbbl 24(%esi), %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ sbbl 28(%esi), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ movl %ecx, %edx
+ movl %ecx, %ebp
+ sbbl 32(%esi), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %edx, %esi
+ sarl $31, %esi
+ testl %esi, %esi
+ js .LBB137_2
+# BB#1:
+ movl (%esp), %eax # 4-byte Reload
+.LBB137_2:
+ movl 92(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 44(%esp), %eax # 4-byte Reload
+ js .LBB137_4
+# BB#3:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB137_4:
+ movl %eax, 4(%ecx)
+ movl 68(%esp), %esi # 4-byte Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ movl 40(%esp), %eax # 4-byte Reload
+ js .LBB137_6
+# BB#5:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB137_6:
+ movl %eax, 8(%ecx)
+ movl %ebp, %eax
+ js .LBB137_8
+# BB#7:
+ movl 12(%esp), %edx # 4-byte Reload
+.LBB137_8:
+ movl %edx, 12(%ecx)
+ movl 56(%esp), %edx # 4-byte Reload
+ js .LBB137_10
+# BB#9:
+ movl 16(%esp), %ebx # 4-byte Reload
+.LBB137_10:
+ movl %ebx, 16(%ecx)
+ js .LBB137_12
+# BB#11:
+ movl 20(%esp), %edi # 4-byte Reload
+.LBB137_12:
+ movl %edi, 20(%ecx)
+ js .LBB137_14
+# BB#13:
+ movl 24(%esp), %esi # 4-byte Reload
+.LBB137_14:
+ movl %esi, 24(%ecx)
+ js .LBB137_16
+# BB#15:
+ movl 28(%esp), %edx # 4-byte Reload
+.LBB137_16:
+ movl %edx, 28(%ecx)
+ js .LBB137_18
+# BB#17:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB137_18:
+ movl %eax, 32(%ecx)
+ addl $72, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end137:
+ .size mcl_fp_addNF9Lbmi2, .Lfunc_end137-mcl_fp_addNF9Lbmi2
+
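+# Annotation: mcl_fp_sub9Lbmi2 performs modular subtraction. The raw
+# difference a-b is stored; if the subtraction borrowed (the %carry block),
+# the modulus p is added back so the result stays in [0, p).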
+ .globl mcl_fp_sub9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub9Lbmi2,@function
+mcl_fp_sub9Lbmi2: # @mcl_fp_sub9Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $28, %esp
+ movl 52(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+ xorl %ebx, %ebx
+ movl 56(%esp), %edi
+ subl (%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ sbbl 4(%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ sbbl 12(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 16(%esi), %edx
+ sbbl 16(%edi), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 20(%esi), %ecx
+ sbbl 20(%edi), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ sbbl 24(%edi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 28(%esi), %ebp
+ sbbl 28(%edi), %ebp
+ movl 32(%esi), %esi
+ sbbl 32(%edi), %esi
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 48(%esp), %ebx
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, (%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl %edx, 16(%ebx)
+ movl %ecx, 20(%ebx)
+ movl %eax, 24(%ebx)
+ movl %ebp, 28(%ebx)
+ movl %esi, 32(%ebx)
+ je .LBB138_2
+# BB#1: # %carry
+ movl %esi, %edi
+ movl 60(%esp), %esi
+ movl 12(%esp), %ecx # 4-byte Reload
+ addl (%esi), %ecx
+ movl %ecx, (%ebx)
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 20(%esp), %ecx # 4-byte Reload
+ adcl 8(%esi), %ecx
+ movl 12(%esi), %eax
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl 24(%esi), %ecx
+ adcl (%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl %ecx, 24(%ebx)
+ movl 28(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 28(%ebx)
+ movl 32(%esi), %eax
+ adcl %edi, %eax
+ movl %eax, 32(%ebx)
+.LBB138_2: # %nocarry
+ addl $28, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end138:
+ .size mcl_fp_sub9Lbmi2, .Lfunc_end138-mcl_fp_sub9Lbmi2
+
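+# Annotation: mcl_fp_subNF9Lbmi2 is a branch-free modular subtraction.
+# After a-b, the sign of the top limb (sarl $31) is spread into masks that
+# select either 0 or the limbs of p, which are then added to fold a
+# negative result back into range.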
+ .globl mcl_fp_subNF9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF9Lbmi2,@function
+mcl_fp_subNF9Lbmi2: # @mcl_fp_subNF9Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $48, %esp
+ movl 72(%esp), %edx
+ movl (%edx), %ecx
+ movl 4(%edx), %eax
+ movl 76(%esp), %esi
+ subl (%esi), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ sbbl 4(%esi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 32(%edx), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 28(%edx), %ebp
+ movl 24(%edx), %edi
+ movl 20(%edx), %ebx
+ movl 16(%edx), %ecx
+ movl 12(%edx), %eax
+ movl 8(%edx), %edx
+ sbbl 8(%esi), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ sbbl 12(%esi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ sbbl 16(%esi), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ sbbl 20(%esi), %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ sbbl 24(%esi), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ sbbl 28(%esi), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ sbbl 32(%esi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ sarl $31, %ecx
+ movl %ecx, %edi
+ shldl $1, %eax, %edi
+ movl 80(%esp), %ebp
+ movl 12(%ebp), %eax
+ andl %edi, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 4(%ebp), %ebx
+ andl %edi, %ebx
+ andl (%ebp), %edi
+ movl 32(%ebp), %eax
+ andl %ecx, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ rorxl $31, %ecx, %eax
+ andl 28(%ebp), %ecx
+ movl 24(%ebp), %edx
+ andl %eax, %edx
+ movl %edx, (%esp) # 4-byte Spill
+ movl 20(%ebp), %esi
+ andl %eax, %esi
+ movl 16(%ebp), %edx
+ andl %eax, %edx
+ andl 8(%ebp), %eax
+ addl 36(%esp), %edi # 4-byte Folded Reload
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl 68(%esp), %ebp
+ movl %edi, (%ebp)
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ movl %ebx, 4(%ebp)
+ movl 4(%esp), %edi # 4-byte Reload
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %eax, 8(%ebp)
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edi, 12(%ebp)
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 16(%ebp)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %esi, 20(%ebp)
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 24(%ebp)
+ movl %ecx, 28(%ebp)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%ebp)
+ addl $48, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end139:
+ .size mcl_fp_subNF9Lbmi2, .Lfunc_end139-mcl_fp_subNF9Lbmi2
+
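+# Annotation: mcl_fpDbl_add9Lbmi2 adds two 18-limb double-width values;
+# the low nine limbs are written out directly, while the high nine limbs
+# get a conditional subtraction of p selected by the final borrow flag.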
+ .globl mcl_fpDbl_add9Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add9Lbmi2,@function
+mcl_fpDbl_add9Lbmi2: # @mcl_fpDbl_add9Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $68, %esp
+ movl 96(%esp), %edx
+ movl 92(%esp), %edi
+ movl 12(%edi), %esi
+ movl 16(%edi), %ecx
+ movl 8(%edx), %ebx
+ movl (%edx), %ebp
+ addl (%edi), %ebp
+ movl 88(%esp), %eax
+ movl %ebp, (%eax)
+ movl 4(%edx), %ebp
+ adcl 4(%edi), %ebp
+ adcl 8(%edi), %ebx
+ adcl 12(%edx), %esi
+ adcl 16(%edx), %ecx
+ movl %ebp, 4(%eax)
+ movl 44(%edx), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl %ebx, 8(%eax)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%eax)
+ movl 20(%edi), %esi
+ adcl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %ecx, 16(%eax)
+ movl 24(%edi), %ecx
+ adcl %ebx, %ecx
+ movl 28(%edx), %ebx
+ movl %esi, 20(%eax)
+ movl 28(%edi), %esi
+ adcl %ebx, %esi
+ movl 32(%edx), %ebx
+ movl %ecx, 24(%eax)
+ movl 32(%edi), %ecx
+ adcl %ebx, %ecx
+ movl 36(%edx), %ebp
+ movl %esi, 28(%eax)
+ movl 36(%edi), %esi
+ adcl %ebp, %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 40(%edx), %esi
+ movl %ecx, 32(%eax)
+ movl 40(%edi), %eax
+ adcl %esi, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%edi), %eax
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%edx), %ecx
+ movl 48(%edi), %ebx
+ adcl %ecx, %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 52(%edx), %eax
+ movl 52(%edi), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 56(%edx), %esi
+ movl 56(%edi), %eax
+ adcl %esi, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%edx), %ebp
+ movl 60(%edi), %esi
+ adcl %ebp, %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 64(%edx), %eax
+ movl 64(%edi), %ebp
+ adcl %eax, %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 68(%edx), %edx
+ movl 68(%edi), %eax
+ adcl %edx, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ sbbl %edx, %edx
+ andl $1, %edx
+ movl 100(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ subl (%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ sbbl 4(%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ sbbl 8(%edi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ sbbl 12(%edi), %ebx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ sbbl 16(%edi), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ sbbl 20(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ sbbl 24(%edi), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ movl 32(%esp), %ebp # 4-byte Reload
+ sbbl 28(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebp, %ebx
+ sbbl 32(%edi), %ebx
+ sbbl $0, %edx
+ andl $1, %edx
+ jne .LBB140_2
+# BB#1:
+ movl %ebx, %ebp
+.LBB140_2:
+ testb %dl, %dl
+ movl 60(%esp), %edx # 4-byte Reload
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl 36(%esp), %esi # 4-byte Reload
+ movl 56(%esp), %edi # 4-byte Reload
+ movl 52(%esp), %ebx # 4-byte Reload
+ jne .LBB140_4
+# BB#3:
+ movl (%esp), %ecx # 4-byte Reload
+ movl 4(%esp), %esi # 4-byte Reload
+ movl 8(%esp), %edi # 4-byte Reload
+ movl 12(%esp), %ebx # 4-byte Reload
+ movl 16(%esp), %edx # 4-byte Reload
+.LBB140_4:
+ movl 88(%esp), %eax
+ movl %edx, 36(%eax)
+ movl %ebx, 40(%eax)
+ movl %edi, 44(%eax)
+ movl %esi, 48(%eax)
+ movl %ecx, 52(%eax)
+ movl 44(%esp), %edx # 4-byte Reload
+ movl 64(%esp), %ecx # 4-byte Reload
+ jne .LBB140_6
+# BB#5:
+ movl 20(%esp), %ecx # 4-byte Reload
+.LBB140_6:
+ movl %ecx, 56(%eax)
+ movl 48(%esp), %ecx # 4-byte Reload
+ jne .LBB140_8
+# BB#7:
+ movl 24(%esp), %edx # 4-byte Reload
+.LBB140_8:
+ movl %edx, 60(%eax)
+ jne .LBB140_10
+# BB#9:
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB140_10:
+ movl %ecx, 64(%eax)
+ movl %ebp, 68(%eax)
+ addl $68, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end140:
+ .size mcl_fpDbl_add9Lbmi2, .Lfunc_end140-mcl_fpDbl_add9Lbmi2
+
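+# Annotation: mcl_fpDbl_sub9Lbmi2 subtracts two 18-limb double-width
+# values; the low nine limbs are written directly, and if the overall
+# subtraction borrowed, p (otherwise 0) is added back into the high nine
+# limbs.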
+ .globl mcl_fpDbl_sub9Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub9Lbmi2,@function
+mcl_fpDbl_sub9Lbmi2: # @mcl_fpDbl_sub9Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $52, %esp
+ movl 76(%esp), %ebx
+ movl (%ebx), %eax
+ movl 4(%ebx), %edx
+ movl 80(%esp), %ebp
+ subl (%ebp), %eax
+ sbbl 4(%ebp), %edx
+ movl 8(%ebx), %esi
+ sbbl 8(%ebp), %esi
+ movl 72(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 12(%ebx), %eax
+ sbbl 12(%ebp), %eax
+ movl %edx, 4(%ecx)
+ movl 16(%ebx), %edx
+ sbbl 16(%ebp), %edx
+ movl %esi, 8(%ecx)
+ movl 20(%ebp), %esi
+ movl %eax, 12(%ecx)
+ movl 20(%ebx), %eax
+ sbbl %esi, %eax
+ movl 24(%ebp), %esi
+ movl %edx, 16(%ecx)
+ movl 24(%ebx), %edx
+ sbbl %esi, %edx
+ movl 28(%ebp), %esi
+ movl %eax, 20(%ecx)
+ movl 28(%ebx), %eax
+ sbbl %esi, %eax
+ movl 32(%ebp), %esi
+ movl %edx, 24(%ecx)
+ movl 32(%ebx), %edx
+ sbbl %esi, %edx
+ movl 36(%ebp), %esi
+ movl %eax, 28(%ecx)
+ movl 36(%ebx), %eax
+ sbbl %esi, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%ebp), %eax
+ movl %edx, 32(%ecx)
+ movl 40(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 44(%ebp), %eax
+ movl 44(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 48(%ebp), %eax
+ movl 48(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 52(%ebp), %eax
+ movl 52(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 56(%ebp), %eax
+ movl 56(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 60(%ebp), %eax
+ movl 60(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 64(%ebp), %eax
+ movl 64(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 68(%ebp), %eax
+ movl 68(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 84(%esp), %ebp
+ jne .LBB141_1
+# BB#2:
+ movl $0, 12(%esp) # 4-byte Folded Spill
+ jmp .LBB141_3
+.LBB141_1:
+ movl 32(%ebp), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+.LBB141_3:
+ testb %al, %al
+ jne .LBB141_4
+# BB#5:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ movl $0, %esi
+ jmp .LBB141_6
+.LBB141_4:
+ movl (%ebp), %esi
+ movl 4(%ebp), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+.LBB141_6:
+ jne .LBB141_7
+# BB#8:
+ movl $0, 8(%esp) # 4-byte Folded Spill
+ jmp .LBB141_9
+.LBB141_7:
+ movl 28(%ebp), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+.LBB141_9:
+ jne .LBB141_10
+# BB#11:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB141_12
+.LBB141_10:
+ movl 24(%ebp), %eax
+ movl %eax, (%esp) # 4-byte Spill
+.LBB141_12:
+ jne .LBB141_13
+# BB#14:
+ movl $0, %edi
+ jmp .LBB141_15
+.LBB141_13:
+ movl 20(%ebp), %edi
+.LBB141_15:
+ jne .LBB141_16
+# BB#17:
+ movl $0, %ebx
+ jmp .LBB141_18
+.LBB141_16:
+ movl 16(%ebp), %ebx
+.LBB141_18:
+ jne .LBB141_19
+# BB#20:
+ movl %ebp, %eax
+ movl $0, %ebp
+ jmp .LBB141_21
+.LBB141_19:
+ movl %ebp, %eax
+ movl 12(%eax), %ebp
+.LBB141_21:
+ jne .LBB141_22
+# BB#23:
+ xorl %eax, %eax
+ jmp .LBB141_24
+.LBB141_22:
+ movl 8(%eax), %eax
+.LBB141_24:
+ addl 24(%esp), %esi # 4-byte Folded Reload
+ movl 4(%esp), %edx # 4-byte Reload
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %esi, 36(%ecx)
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 40(%ecx)
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 44(%ecx)
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, 48(%ecx)
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 52(%ecx)
+ movl (%esp), %edx # 4-byte Reload
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edi, 56(%ecx)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 60(%ecx)
+ movl %eax, 64(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%ecx)
+ addl $52, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end141:
+ .size mcl_fpDbl_sub9Lbmi2, .Lfunc_end141-mcl_fpDbl_sub9Lbmi2
+
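+# Annotation: .LmulPv320x32 is a local helper that multiplies the 10-limb
+# (320-bit) operand pointed to by %edx by the 32-bit word passed on the
+# stack, using BMI2 mulx for the partial products, and stores the 11-limb
+# result at the buffer in %ecx (also returned in %eax).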
+ .align 16, 0x90
+ .type .LmulPv320x32,@function
+.LmulPv320x32: # @mulPv320x32
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $28, %esp
+ movl %edx, %eax
+ movl 48(%esp), %edx
+ mulxl 4(%eax), %edi, %esi
+ mulxl (%eax), %ebp, %ebx
+ movl %ebp, 24(%esp) # 4-byte Spill
+ addl %edi, %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ mulxl 8(%eax), %edi, %ebx
+ adcl %esi, %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ mulxl 12(%eax), %esi, %edi
+ adcl %ebx, %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ mulxl 16(%eax), %esi, %ebx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ adcl %edi, %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ mulxl 20(%eax), %ebx, %esi
+ adcl 4(%esp), %ebx # 4-byte Folded Reload
+ mulxl 24(%eax), %edi, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl %esi, %edi
+ movl %ebp, %edx
+ mulxl 28(%eax), %esi, %edx
+ movl %edx, (%esp) # 4-byte Spill
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ movl %ebp, %edx
+ mulxl 32(%eax), %edx, %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ adcl (%esp), %edx # 4-byte Folded Reload
+ movl 24(%esp), %ebp # 4-byte Reload
+ movl %ebp, (%ecx)
+ movl 20(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%ecx)
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%ecx)
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%ecx)
+ movl 8(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%ecx)
+ movl %ebx, 20(%ecx)
+ movl %edi, 24(%ecx)
+ movl %esi, 28(%ecx)
+ movl %edx, 32(%ecx)
+ movl 48(%esp), %edx
+ mulxl 36(%eax), %eax, %edx
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%ecx)
+ adcl $0, %edx
+ movl %edx, 40(%ecx)
+ movl %ecx, %eax
+ addl $28, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end142:
+ .size .LmulPv320x32, .Lfunc_end142-.LmulPv320x32
+
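+# Annotation: mcl_fp_mulUnitPre10Lbmi2 multiplies a 10-limb operand by a
+# single 32-bit word via .LmulPv320x32 and copies the 11-limb product to
+# the destination.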
+ .globl mcl_fp_mulUnitPre10Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre10Lbmi2,@function
+mcl_fp_mulUnitPre10Lbmi2: # @mcl_fp_mulUnitPre10Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $76, %esp
+ calll .L143$pb
+.L143$pb:
+ popl %ebx
+.Ltmp14:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp14-.L143$pb), %ebx
+ movl 104(%esp), %eax
+ movl %eax, (%esp)
+ leal 32(%esp), %ecx
+ movl 100(%esp), %edx
+ calll .LmulPv320x32
+ movl 72(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 68(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 64(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 60(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 56(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 52(%esp), %ebx
+ movl 48(%esp), %ebp
+ movl 44(%esp), %edi
+ movl 40(%esp), %esi
+ movl 32(%esp), %edx
+ movl 36(%esp), %ecx
+ movl 96(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %edi, 12(%eax)
+ movl %ebp, 16(%eax)
+ movl %ebx, 20(%eax)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%eax)
+ addl $76, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end143:
+ .size mcl_fp_mulUnitPre10Lbmi2, .Lfunc_end143-mcl_fp_mulUnitPre10Lbmi2
+
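+# Annotation: mcl_fpDbl_mulPre10Lbmi2 builds the full 10x10-limb product
+# from three 5-limb half multiplications (low*low, high*high, and the
+# cross term of the half sums) computed with mcl_fpDbl_mulPre5Lbmi2 and
+# then combined Karatsuba-style; the saved flags (seto/lahf/sahf) track
+# the carries of the half sums.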
+ .globl mcl_fpDbl_mulPre10Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre10Lbmi2,@function
+mcl_fpDbl_mulPre10Lbmi2: # @mcl_fpDbl_mulPre10Lbmi2
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $188, %esp
+ calll .L144$pb
+.L144$pb:
+ popl %ebx
+.Ltmp15:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp15-.L144$pb), %ebx
+ movl %ebx, -128(%ebp) # 4-byte Spill
+ movl 16(%ebp), %edi
+ movl %edi, 8(%esp)
+ movl 12(%ebp), %esi
+ movl %esi, 4(%esp)
+ movl 8(%ebp), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre5Lbmi2@PLT
+ leal 20(%edi), %eax
+ movl %eax, 8(%esp)
+ leal 20(%esi), %eax
+ movl %eax, 4(%esp)
+ movl 8(%ebp), %eax
+ leal 40(%eax), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre5Lbmi2@PLT
+ movl 28(%esi), %edi
+ movl (%esi), %ebx
+ movl 4(%esi), %eax
+ addl 20(%esi), %ebx
+ movl %ebx, -148(%ebp) # 4-byte Spill
+ adcl 24(%esi), %eax
+ movl %eax, -132(%ebp) # 4-byte Spill
+ adcl 8(%esi), %edi
+ movl %edi, -140(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -96(%ebp) # 4-byte Spill
+ movl 16(%ebp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+ addl 20(%esi), %eax
+ movl %eax, -152(%ebp) # 4-byte Spill
+ adcl 24(%esi), %ecx
+ movl %ecx, -120(%ebp) # 4-byte Spill
+ movl 28(%esi), %eax
+ adcl 8(%esi), %eax
+ movl %eax, -160(%ebp) # 4-byte Spill
+ movl 32(%esi), %eax
+ adcl 12(%esi), %eax
+ movl 36(%esi), %ecx
+ adcl 16(%esi), %ecx
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %esi
+ popl %eax
+ movl %esi, -156(%ebp) # 4-byte Spill
+ movl %ebx, -124(%ebp) # 4-byte Spill
+ jb .LBB144_2
+# BB#1:
+ xorl %edi, %edi
+ movl $0, -124(%ebp) # 4-byte Folded Spill
+.LBB144_2:
+ movl %edi, -136(%ebp) # 4-byte Spill
+ movl 12(%ebp), %esi
+ movl %esi, %ebx
+ movl 36(%ebx), %esi
+ movl 32(%ebx), %edi
+ movl -96(%ebp), %edx # 4-byte Reload
+ pushl %eax
+ movl %edx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ adcl 12(%ebx), %edi
+ movl %edi, -116(%ebp) # 4-byte Spill
+ adcl 16(%ebx), %esi
+ movl %esi, -144(%ebp) # 4-byte Spill
+ movl %ecx, -112(%ebp) # 4-byte Spill
+ movl %eax, -104(%ebp) # 4-byte Spill
+ movl -160(%ebp), %edx # 4-byte Reload
+ movl %edx, -108(%ebp) # 4-byte Spill
+ movl -120(%ebp), %esi # 4-byte Reload
+ movl %esi, -96(%ebp) # 4-byte Spill
+ movl -152(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -100(%ebp) # 4-byte Spill
+ jb .LBB144_4
+# BB#3:
+ movl $0, -112(%ebp) # 4-byte Folded Spill
+ movl $0, -104(%ebp) # 4-byte Folded Spill
+ movl $0, -108(%ebp) # 4-byte Folded Spill
+ movl $0, -96(%ebp) # 4-byte Folded Spill
+ movl $0, -100(%ebp) # 4-byte Folded Spill
+.LBB144_4:
+ movl -148(%ebp), %esi # 4-byte Reload
+ movl %esi, -72(%ebp)
+ movl -132(%ebp), %edi # 4-byte Reload
+ movl %edi, -68(%ebp)
+ movl -140(%ebp), %esi # 4-byte Reload
+ movl %esi, -64(%ebp)
+ movl %ebx, -92(%ebp)
+ movl -120(%ebp), %esi # 4-byte Reload
+ movl %esi, -88(%ebp)
+ movl %edx, -84(%ebp)
+ movl %eax, -80(%ebp)
+ movl %ecx, -76(%ebp)
+ sbbl %edx, %edx
+ movl -116(%ebp), %eax # 4-byte Reload
+ movl %eax, -60(%ebp)
+ movl -144(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -56(%ebp)
+ movl -156(%ebp), %ecx # 4-byte Reload
+ pushl %eax
+ movl %ecx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB144_6
+# BB#5:
+ movl $0, %ebx
+ movl $0, %eax
+ movl $0, %edi
+.LBB144_6:
+ movl %eax, -116(%ebp) # 4-byte Spill
+ sbbl %eax, %eax
+ leal -92(%ebp), %ecx
+ movl %ecx, 8(%esp)
+ leal -72(%ebp), %ecx
+ movl %ecx, 4(%esp)
+ leal -52(%ebp), %ecx
+ movl %ecx, (%esp)
+ andl %eax, %edx
+ movl -124(%ebp), %eax # 4-byte Reload
+ addl %eax, -100(%ebp) # 4-byte Folded Spill
+ adcl %edi, -96(%ebp) # 4-byte Folded Spill
+ movl -108(%ebp), %esi # 4-byte Reload
+ adcl -136(%ebp), %esi # 4-byte Folded Reload
+ movl -116(%ebp), %eax # 4-byte Reload
+ adcl %eax, -104(%ebp) # 4-byte Folded Spill
+ movl -112(%ebp), %edi # 4-byte Reload
+ adcl %ebx, %edi
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, -120(%ebp) # 4-byte Spill
+ andl $1, %edx
+ movl %edx, -116(%ebp) # 4-byte Spill
+ movl -128(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre5Lbmi2@PLT
+ movl -100(%ebp), %eax # 4-byte Reload
+ addl -32(%ebp), %eax
+ movl %eax, -100(%ebp) # 4-byte Spill
+ movl -96(%ebp), %eax # 4-byte Reload
+ adcl -28(%ebp), %eax
+ movl %eax, -96(%ebp) # 4-byte Spill
+ adcl -24(%ebp), %esi
+ movl %esi, -108(%ebp) # 4-byte Spill
+ movl -104(%ebp), %eax # 4-byte Reload
+ adcl -20(%ebp), %eax
+ movl %eax, -104(%ebp) # 4-byte Spill
+ adcl -16(%ebp), %edi
+ movl %edi, -112(%ebp) # 4-byte Spill
+ movl -120(%ebp), %eax # 4-byte Reload
+ adcl %eax, -116(%ebp) # 4-byte Folded Spill
+ movl -52(%ebp), %ecx
+ movl 8(%ebp), %esi
+ subl (%esi), %ecx
+ movl -48(%ebp), %ebx
+ sbbl 4(%esi), %ebx
+ movl -44(%ebp), %eax
+ sbbl 8(%esi), %eax
+ movl %eax, -120(%ebp) # 4-byte Spill
+ movl -40(%ebp), %edx
+ sbbl 12(%esi), %edx
+ movl -36(%ebp), %edi
+ sbbl 16(%esi), %edi
+ movl 20(%esi), %eax
+ movl %eax, -124(%ebp) # 4-byte Spill
+ sbbl %eax, -100(%ebp) # 4-byte Folded Spill
+ movl 24(%esi), %eax
+ movl %eax, -128(%ebp) # 4-byte Spill
+ sbbl %eax, -96(%ebp) # 4-byte Folded Spill
+ movl 28(%esi), %eax
+ movl %eax, -132(%ebp) # 4-byte Spill
+ sbbl %eax, -108(%ebp) # 4-byte Folded Spill
+ movl 32(%esi), %eax
+ movl %eax, -136(%ebp) # 4-byte Spill
+ sbbl %eax, -104(%ebp) # 4-byte Folded Spill
+ movl 36(%esi), %eax
+ movl %eax, -140(%ebp) # 4-byte Spill
+ sbbl %eax, -112(%ebp) # 4-byte Folded Spill
+ sbbl $0, -116(%ebp) # 4-byte Folded Spill
+ movl 40(%esi), %eax
+ movl %eax, -160(%ebp) # 4-byte Spill
+ subl %eax, %ecx
+ movl 44(%esi), %eax
+ movl %eax, -164(%ebp) # 4-byte Spill
+ sbbl %eax, %ebx
+ movl 48(%esi), %eax
+ movl %eax, -168(%ebp) # 4-byte Spill
+ sbbl %eax, -120(%ebp) # 4-byte Folded Spill
+ movl 52(%esi), %eax
+ movl %eax, -172(%ebp) # 4-byte Spill
+ sbbl %eax, %edx
+ movl 56(%esi), %eax
+ movl %eax, -176(%ebp) # 4-byte Spill
+ sbbl %eax, %edi
+ movl 60(%esi), %eax
+ movl %eax, -180(%ebp) # 4-byte Spill
+ sbbl %eax, -100(%ebp) # 4-byte Folded Spill
+ movl 64(%esi), %eax
+ movl %eax, -144(%ebp) # 4-byte Spill
+ sbbl %eax, -96(%ebp) # 4-byte Folded Spill
+ movl 68(%esi), %eax
+ movl %eax, -148(%ebp) # 4-byte Spill
+ sbbl %eax, -108(%ebp) # 4-byte Folded Spill
+ movl 72(%esi), %eax
+ movl %eax, -152(%ebp) # 4-byte Spill
+ sbbl %eax, -104(%ebp) # 4-byte Folded Spill
+ movl 76(%esi), %eax
+ movl %eax, -156(%ebp) # 4-byte Spill
+ sbbl %eax, -112(%ebp) # 4-byte Folded Spill
+ sbbl $0, -116(%ebp) # 4-byte Folded Spill
+ addl -124(%ebp), %ecx # 4-byte Folded Reload
+ adcl -128(%ebp), %ebx # 4-byte Folded Reload
+ movl %ecx, 20(%esi)
+ movl -120(%ebp), %eax # 4-byte Reload
+ adcl -132(%ebp), %eax # 4-byte Folded Reload
+ movl %ebx, 24(%esi)
+ adcl -136(%ebp), %edx # 4-byte Folded Reload
+ movl %eax, 28(%esi)
+ adcl -140(%ebp), %edi # 4-byte Folded Reload
+ movl %edx, 32(%esi)
+ movl -100(%ebp), %eax # 4-byte Reload
+ adcl -160(%ebp), %eax # 4-byte Folded Reload
+ movl %edi, 36(%esi)
+ movl -96(%ebp), %ecx # 4-byte Reload
+ adcl -164(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 40(%esi)
+ movl -108(%ebp), %eax # 4-byte Reload
+ adcl -168(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 44(%esi)
+ movl -104(%ebp), %ecx # 4-byte Reload
+ adcl -172(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 48(%esi)
+ movl -112(%ebp), %edx # 4-byte Reload
+ adcl -176(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 52(%esi)
+ movl -116(%ebp), %eax # 4-byte Reload
+ adcl -180(%ebp), %eax # 4-byte Folded Reload
+ movl %edx, 56(%esi)
+ movl %eax, 60(%esi)
+ movl -144(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 64(%esi)
+ movl -148(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 68(%esi)
+ movl -152(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 72(%esi)
+ movl -156(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 76(%esi)
+ addl $188, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end144:
+ .size mcl_fpDbl_mulPre10Lbmi2, .Lfunc_end144-mcl_fpDbl_mulPre10Lbmi2
+
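+# mcl_fpDbl_sqrPre10Lbmi2(z, x): full 640-bit (20-limb) square of a 10-limb
+# (320-bit) operand. As the code below shows, it splits x into two 5-limb
+# halves and reuses mcl_fpDbl_mulPre5Lbmi2 three times (lo*lo, hi*hi, and the
+# half-sums), then merges the partial products with carry fix-ups.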
+ .globl mcl_fpDbl_sqrPre10Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre10Lbmi2,@function
+mcl_fpDbl_sqrPre10Lbmi2: # @mcl_fpDbl_sqrPre10Lbmi2
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $188, %esp
+ calll .L145$pb
+.L145$pb:
+ popl %ebx
+.Ltmp16:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp16-.L145$pb), %ebx
+ movl %ebx, -120(%ebp) # 4-byte Spill
+ movl 12(%ebp), %edi
+ movl %edi, 8(%esp)
+ movl %edi, 4(%esp)
+ movl 8(%ebp), %esi
+ movl %esi, (%esp)
+ calll mcl_fpDbl_mulPre5Lbmi2@PLT
+ leal 20(%edi), %eax
+ movl %eax, 8(%esp)
+ movl %eax, 4(%esp)
+ leal 40(%esi), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre5Lbmi2@PLT
+ movl 36(%edi), %eax
+ movl 32(%edi), %ebx
+ movl 28(%edi), %esi
+ movl (%edi), %ecx
+ movl 4(%edi), %edx
+ addl 20(%edi), %ecx
+ adcl 24(%edi), %edx
+ adcl 8(%edi), %esi
+ adcl 12(%edi), %ebx
+ movl %ebx, -124(%ebp) # 4-byte Spill
+ adcl 16(%edi), %eax
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ movl %edi, -128(%ebp) # 4-byte Spill
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ movl %edi, -108(%ebp) # 4-byte Spill
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ movl %edi, -104(%ebp) # 4-byte Spill
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ movl %edi, -100(%ebp) # 4-byte Spill
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ movl %edi, -96(%ebp) # 4-byte Spill
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ sbbl %ebx, %ebx
+ movl %ebx, -116(%ebp) # 4-byte Spill
+ pushl %eax
+ movl %edi, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB145_1
+# BB#2:
+ movl $0, -112(%ebp) # 4-byte Folded Spill
+ jmp .LBB145_3
+.LBB145_1:
+ leal (%ecx,%ecx), %edi
+ movl %edi, -112(%ebp) # 4-byte Spill
+.LBB145_3:
+ movl -96(%ebp), %edi # 4-byte Reload
+ pushl %eax
+ movl %edi, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ movl -124(%ebp), %edi # 4-byte Reload
+ jb .LBB145_4
+# BB#5:
+ movl $0, -96(%ebp) # 4-byte Folded Spill
+ jmp .LBB145_6
+.LBB145_4:
+ movl %edx, %ebx
+ shldl $1, %ecx, %ebx
+ movl %ebx, -96(%ebp) # 4-byte Spill
+.LBB145_6:
+ movl -100(%ebp), %ebx # 4-byte Reload
+ pushl %eax
+ movl %ebx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB145_7
+# BB#8:
+ movl $0, -100(%ebp) # 4-byte Folded Spill
+ jmp .LBB145_9
+.LBB145_7:
+ movl %esi, %ebx
+ shldl $1, %edx, %ebx
+ movl %ebx, -100(%ebp) # 4-byte Spill
+.LBB145_9:
+ movl -104(%ebp), %ebx # 4-byte Reload
+ pushl %eax
+ movl %ebx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB145_10
+# BB#11:
+ movl $0, -104(%ebp) # 4-byte Folded Spill
+ jmp .LBB145_12
+.LBB145_10:
+ movl %edi, %ebx
+ shldl $1, %esi, %ebx
+ movl %ebx, -104(%ebp) # 4-byte Spill
+.LBB145_12:
+ movl -108(%ebp), %ebx # 4-byte Reload
+ pushl %eax
+ movl %ebx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB145_13
+# BB#14:
+ movl $0, -108(%ebp) # 4-byte Folded Spill
+ jmp .LBB145_15
+.LBB145_13:
+ movl %eax, %ebx
+ shldl $1, %edi, %ebx
+ movl %ebx, -108(%ebp) # 4-byte Spill
+.LBB145_15:
+ movl %ecx, -72(%ebp)
+ movl %edx, -68(%ebp)
+ movl %esi, -64(%ebp)
+ movl %edi, -60(%ebp)
+ movl %eax, -56(%ebp)
+ movl %ecx, -92(%ebp)
+ movl %edx, -88(%ebp)
+ movl %esi, -84(%ebp)
+ movl %edi, -80(%ebp)
+ movl %eax, -76(%ebp)
+ movl -128(%ebp), %ecx # 4-byte Reload
+ pushl %eax
+ movl %ecx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB145_16
+# BB#17:
+ movl $0, -124(%ebp) # 4-byte Folded Spill
+ jmp .LBB145_18
+.LBB145_16:
+ shrl $31, %eax
+ movl %eax, -124(%ebp) # 4-byte Spill
+.LBB145_18:
+ leal -52(%ebp), %eax
+ movl %eax, (%esp)
+ leal -72(%ebp), %eax
+ movl %eax, 4(%esp)
+ leal -92(%ebp), %eax
+ movl %eax, 8(%esp)
+ movl -116(%ebp), %esi # 4-byte Reload
+ andl $1, %esi
+ movl -120(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre5Lbmi2@PLT
+ movl -112(%ebp), %edi # 4-byte Reload
+ addl -32(%ebp), %edi
+ movl -96(%ebp), %eax # 4-byte Reload
+ adcl -28(%ebp), %eax
+ movl %eax, -96(%ebp) # 4-byte Spill
+ movl -100(%ebp), %eax # 4-byte Reload
+ adcl -24(%ebp), %eax
+ movl %eax, -100(%ebp) # 4-byte Spill
+ movl -104(%ebp), %eax # 4-byte Reload
+ adcl -20(%ebp), %eax
+ movl %eax, -104(%ebp) # 4-byte Spill
+ movl -108(%ebp), %eax # 4-byte Reload
+ adcl -16(%ebp), %eax
+ movl %eax, -108(%ebp) # 4-byte Spill
+ adcl -124(%ebp), %esi # 4-byte Folded Reload
+ movl -52(%ebp), %edx
+ movl 8(%ebp), %eax
+ subl (%eax), %edx
+ movl -48(%ebp), %ebx
+ sbbl 4(%eax), %ebx
+ movl -44(%ebp), %ecx
+ sbbl 8(%eax), %ecx
+ movl %ecx, -116(%ebp) # 4-byte Spill
+ movl -40(%ebp), %ecx
+ sbbl 12(%eax), %ecx
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ movl -36(%ebp), %ecx
+ sbbl 16(%eax), %ecx
+ movl %ecx, -120(%ebp) # 4-byte Spill
+ movl 20(%eax), %ecx
+ movl %ecx, -124(%ebp) # 4-byte Spill
+ sbbl %ecx, %edi
+ movl %edi, -112(%ebp) # 4-byte Spill
+ movl 24(%eax), %ecx
+ movl %ecx, -128(%ebp) # 4-byte Spill
+ sbbl %ecx, -96(%ebp) # 4-byte Folded Spill
+ movl 28(%eax), %ecx
+ movl %ecx, -132(%ebp) # 4-byte Spill
+ sbbl %ecx, -100(%ebp) # 4-byte Folded Spill
+ movl 32(%eax), %ecx
+ movl %ecx, -136(%ebp) # 4-byte Spill
+ sbbl %ecx, -104(%ebp) # 4-byte Folded Spill
+ movl 36(%eax), %ecx
+ movl %ecx, -140(%ebp) # 4-byte Spill
+ sbbl %ecx, -108(%ebp) # 4-byte Folded Spill
+ sbbl $0, %esi
+ movl 40(%eax), %ecx
+ movl %ecx, -160(%ebp) # 4-byte Spill
+ subl %ecx, %edx
+ movl 44(%eax), %ecx
+ movl %ecx, -164(%ebp) # 4-byte Spill
+ sbbl %ecx, %ebx
+ movl 48(%eax), %ecx
+ movl %ecx, -168(%ebp) # 4-byte Spill
+ sbbl %ecx, -116(%ebp) # 4-byte Folded Spill
+ movl 52(%eax), %ecx
+ movl %ecx, -172(%ebp) # 4-byte Spill
+ movl -144(%ebp), %edi # 4-byte Reload
+ sbbl %ecx, %edi
+ movl 56(%eax), %ecx
+ movl %ecx, -176(%ebp) # 4-byte Spill
+ sbbl %ecx, -120(%ebp) # 4-byte Folded Spill
+ movl 60(%eax), %ecx
+ movl %ecx, -180(%ebp) # 4-byte Spill
+ sbbl %ecx, -112(%ebp) # 4-byte Folded Spill
+ movl 64(%eax), %ecx
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ sbbl %ecx, -96(%ebp) # 4-byte Folded Spill
+ movl 68(%eax), %ecx
+ movl %ecx, -148(%ebp) # 4-byte Spill
+ sbbl %ecx, -100(%ebp) # 4-byte Folded Spill
+ movl 72(%eax), %ecx
+ movl %ecx, -152(%ebp) # 4-byte Spill
+ sbbl %ecx, -104(%ebp) # 4-byte Folded Spill
+ movl 76(%eax), %ecx
+ movl %ecx, -156(%ebp) # 4-byte Spill
+ sbbl %ecx, -108(%ebp) # 4-byte Folded Spill
+ sbbl $0, %esi
+ addl -124(%ebp), %edx # 4-byte Folded Reload
+ adcl -128(%ebp), %ebx # 4-byte Folded Reload
+ movl %edx, 20(%eax)
+ movl -116(%ebp), %ecx # 4-byte Reload
+ adcl -132(%ebp), %ecx # 4-byte Folded Reload
+ movl %ebx, 24(%eax)
+ adcl -136(%ebp), %edi # 4-byte Folded Reload
+ movl %ecx, 28(%eax)
+ movl -120(%ebp), %edx # 4-byte Reload
+ adcl -140(%ebp), %edx # 4-byte Folded Reload
+ movl %edi, 32(%eax)
+ movl -112(%ebp), %ecx # 4-byte Reload
+ adcl -160(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 36(%eax)
+ movl -96(%ebp), %edx # 4-byte Reload
+ adcl -164(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 40(%eax)
+ movl -100(%ebp), %ecx # 4-byte Reload
+ adcl -168(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 44(%eax)
+ movl -104(%ebp), %edx # 4-byte Reload
+ adcl -172(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 48(%eax)
+ movl -108(%ebp), %ecx # 4-byte Reload
+ adcl -176(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 52(%eax)
+ adcl -180(%ebp), %esi # 4-byte Folded Reload
+ movl %ecx, 56(%eax)
+ movl %esi, 60(%eax)
+ movl -144(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 64(%eax)
+ movl -148(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 68(%eax)
+ movl -152(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 72(%eax)
+ movl -156(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 76(%eax)
+ addl $188, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end145:
+ .size mcl_fpDbl_sqrPre10Lbmi2, .Lfunc_end145-mcl_fpDbl_sqrPre10Lbmi2
+
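+# mcl_fp_mont10Lbmi2(z, x, y, p): 10-limb (320-bit) Montgomery multiplication.
+# For each 32-bit word of y it calls .LmulPv320x32 once for x*y[i] and once for
+# the reduction term q*p, where q is the running low word times the precomputed
+# constant stored just before the modulus (-4(p)); a final conditional
+# subtraction of p produces the reduced result.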
+ .globl mcl_fp_mont10Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont10Lbmi2,@function
+mcl_fp_mont10Lbmi2: # @mcl_fp_mont10Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1036, %esp # imm = 0x40C
+ calll .L146$pb
+.L146$pb:
+ popl %ebx
+.Ltmp17:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp17-.L146$pb), %ebx
+ movl 1068(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 992(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ movl 992(%esp), %edi
+ movl 996(%esp), %ebp
+ movl %edi, %eax
+ imull %esi, %eax
+ movl 1032(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 1028(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 1024(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 1020(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1016(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1012(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 1008(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 1004(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1000(%esp), %esi
+ movl %eax, (%esp)
+ leal 944(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ addl 944(%esp), %edi
+ adcl 948(%esp), %ebp
+ adcl 952(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 980(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 984(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl 1064(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 896(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ andl $1, %edi
+ addl 896(%esp), %ebp
+ adcl 900(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 928(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 932(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 936(%esp), %edi
+ sbbl %eax, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 848(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ movl 64(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 848(%esp), %ebp
+ adcl 852(%esp), %esi
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 856(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 860(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 864(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 868(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 872(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 876(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 880(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 884(%esp), %ebp
+ adcl 888(%esp), %edi
+ adcl $0, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 800(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ addl 800(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 832(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ adcl 836(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %esi, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 752(%esp), %ecx
+ movl 1068(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv320x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 752(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 768(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 780(%esp), %esi
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 784(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 704(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 704(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 716(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 728(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ adcl 732(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 736(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 744(%esp), %edi
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 656(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ andl $1, %esi
+ movl %esi, %ecx
+ movl 44(%esp), %eax # 4-byte Reload
+ addl 656(%esp), %eax
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 676(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 688(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 696(%esp), %edi
+ adcl $0, %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 608(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ movl 40(%esp), %ecx # 4-byte Reload
+ addl 608(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 624(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 636(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 640(%esp), %esi
+ adcl 644(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 648(%esp), %edi
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 560(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 560(%esp), %eax
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 572(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 592(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 600(%esp), %edi
+ adcl $0, %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 512(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 512(%esp), %ecx
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ adcl 520(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 548(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 464(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ andl $1, %ebp
+ movl %ebp, %eax
+ addl 464(%esp), %edi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 468(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 472(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 476(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 480(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 484(%esp), %esi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 488(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 492(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 496(%esp), %ebp
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 500(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 504(%esp), %edi
+ adcl $0, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 416(%esp), %ecx
+ movl 1060(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv320x32
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 416(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 432(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 444(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 452(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 368(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 368(%esp), %esi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 380(%esp), %esi
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 384(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 400(%esp), %edi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 320(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 320(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 328(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ adcl 332(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 348(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 360(%esp), %ebp
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 272(%esp), %esi
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 276(%esp), %edi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 288(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 312(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ adcl $0, %ebp
+ movl 1064(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ movl %edi, %ecx
+ addl 224(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 236(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 240(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 264(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 176(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ andl $1, %ebp
+ addl 176(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %edi, %esi
+ adcl 192(%esp), %esi
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 196(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl 1064(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 128(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 128(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 132(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 136(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 140(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ adcl 144(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 152(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 168(%esp), %ebp
+ sbbl %esi, %esi
+ movl 32(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 80(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ andl $1, %esi
+ addl 80(%esp), %edi
+ movl 76(%esp), %eax # 4-byte Reload
+ movl 64(%esp), %ebx # 4-byte Reload
+ adcl 84(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 88(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 92(%esp), %ebx
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 100(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 108(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 112(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 116(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 120(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl 1068(%esp), %edx
+ subl (%edx), %eax
+ sbbl 4(%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl %ebx, %ecx
+ sbbl 8(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ sbbl 12(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ sbbl 16(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ sbbl 20(%edx), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ sbbl 24(%edx), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ sbbl 28(%edx), %ecx
+ movl 68(%esp), %edi # 4-byte Reload
+ sbbl 32(%edx), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ sbbl 36(%edx), %ebp
+ movl %ebp, %edx
+ sbbl $0, %esi
+ andl $1, %esi
+ jne .LBB146_2
+# BB#1:
+ movl %ecx, 48(%esp) # 4-byte Spill
+.LBB146_2:
+ movl %esi, %ecx
+ testb %cl, %cl
+ movl 76(%esp), %esi # 4-byte Reload
+ jne .LBB146_4
+# BB#3:
+ movl %eax, %esi
+.LBB146_4:
+ movl 1056(%esp), %eax
+ movl %esi, (%eax)
+ movl 60(%esp), %edi # 4-byte Reload
+ jne .LBB146_6
+# BB#5:
+ movl 16(%esp), %edi # 4-byte Reload
+.LBB146_6:
+ movl %edi, 4(%eax)
+ jne .LBB146_8
+# BB#7:
+ movl 20(%esp), %ebx # 4-byte Reload
+.LBB146_8:
+ movl %ebx, 8(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl 44(%esp), %ecx # 4-byte Reload
+ jne .LBB146_10
+# BB#9:
+ movl 24(%esp), %ebp # 4-byte Reload
+.LBB146_10:
+ movl %ebp, 12(%eax)
+ jne .LBB146_12
+# BB#11:
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB146_12:
+ movl %ecx, 16(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ jne .LBB146_14
+# BB#13:
+ movl 32(%esp), %ecx # 4-byte Reload
+.LBB146_14:
+ movl %ecx, 20(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ jne .LBB146_16
+# BB#15:
+ movl 56(%esp), %ecx # 4-byte Reload
+.LBB146_16:
+ movl %ecx, 24(%eax)
+ movl 48(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 68(%esp), %ecx # 4-byte Reload
+ jne .LBB146_18
+# BB#17:
+ movl 64(%esp), %ecx # 4-byte Reload
+.LBB146_18:
+ movl %ecx, 32(%eax)
+ movl 72(%esp), %ecx # 4-byte Reload
+ jne .LBB146_20
+# BB#19:
+ movl %edx, %ecx
+.LBB146_20:
+ movl %ecx, 36(%eax)
+ addl $1036, %esp # imm = 0x40C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end146:
+ .size mcl_fp_mont10Lbmi2, .Lfunc_end146-mcl_fp_mont10Lbmi2
+
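+# mcl_fp_montNF10Lbmi2: Montgomery multiplication variant ("NF") for 10-limb
+# operands. Same word-by-word structure as mcl_fp_mont10Lbmi2 above, but it
+# does not track a separate carry word per round; the final correction picks
+# between t and t-p by the sign of the last borrow (sarl/js) instead.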
+ .globl mcl_fp_montNF10Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF10Lbmi2,@function
+mcl_fp_montNF10Lbmi2: # @mcl_fp_montNF10Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1020, %esp # imm = 0x3FC
+ calll .L147$pb
+.L147$pb:
+ popl %ebx
+.Ltmp18:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp18-.L147$pb), %ebx
+ movl 1052(%esp), %eax
+ movl -4(%eax), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 976(%esp), %ecx
+ movl 1044(%esp), %edx
+ calll .LmulPv320x32
+ movl 976(%esp), %edi
+ movl 980(%esp), %esi
+ movl %edi, %eax
+ imull %ebp, %eax
+ movl 1016(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1012(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1008(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1004(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 1000(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 996(%esp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 992(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 988(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 984(%esp), %ebp
+ movl %eax, (%esp)
+ leal 928(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 928(%esp), %edi
+ adcl 932(%esp), %esi
+ adcl 936(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 940(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 944(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 948(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 952(%esp), %edi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 880(%esp), %ecx
+ movl 1044(%esp), %edx
+ calll .LmulPv320x32
+ movl 920(%esp), %ecx
+ addl 880(%esp), %esi
+ adcl 884(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ adcl 900(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl %esi, %eax
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 832(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 832(%esp), %esi
+ adcl 836(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %edi # 4-byte Reload
+ adcl 848(%esp), %edi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 856(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 872(%esp), %esi
+ movl 1048(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 784(%esp), %ecx
+ movl 1044(%esp), %edx
+ calll .LmulPv320x32
+ movl 824(%esp), %ecx
+ addl 784(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 796(%esp), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 808(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 820(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 736(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 736(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 760(%esp), %edi
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 764(%esp), %ebp
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 768(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 688(%esp), %ecx
+ movl 1044(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv320x32
+ movl 728(%esp), %eax
+ movl 28(%esp), %edx # 4-byte Reload
+ addl 688(%esp), %edx
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 692(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %ecx # 4-byte Reload
+ adcl 696(%esp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 700(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 704(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 708(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 712(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ adcl 716(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 720(%esp), %ebp
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 724(%esp), %esi
+ adcl $0, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %edx, %eax
+ movl %edx, %edi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 640(%esp), %ecx
+ movl 1052(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv320x32
+ addl 640(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 656(%esp), %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 672(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl 676(%esp), %esi
+ movl %esi, %ebp
+ movl 28(%esp), %esi # 4-byte Reload
+ adcl 680(%esp), %esi
+ movl 1048(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 592(%esp), %ecx
+ movl 1044(%esp), %edx
+ calll .LmulPv320x32
+ movl 632(%esp), %edx
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 592(%esp), %ecx
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 604(%esp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 624(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ adcl 628(%esp), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 544(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 544(%esp), %esi
+ movl 24(%esp), %edi # 4-byte Reload
+ adcl 548(%esp), %edi
+ movl 40(%esp), %esi # 4-byte Reload
+ adcl 552(%esp), %esi
+ movl 36(%esp), %ebp # 4-byte Reload
+ adcl 556(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 496(%esp), %ecx
+ movl 1044(%esp), %edx
+ calll .LmulPv320x32
+ movl 536(%esp), %edx
+ addl 496(%esp), %edi
+ adcl 500(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl 504(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %esi # 4-byte Reload
+ adcl 528(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 448(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 448(%esp), %edi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 456(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 464(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 480(%esp), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %esi # 4-byte Reload
+ adcl 488(%esp), %esi
+ movl 1048(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 400(%esp), %ecx
+ movl 1044(%esp), %edx
+ calll .LmulPv320x32
+ movl 440(%esp), %eax
+ movl 40(%esp), %ecx # 4-byte Reload
+ addl 400(%esp), %ecx
+ adcl 404(%esp), %ebp
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 408(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 412(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 416(%esp), %edi
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 420(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %edx # 4-byte Reload
+ adcl 424(%esp), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl 428(%esp), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 432(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 436(%esp), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 352(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 352(%esp), %esi
+ adcl 356(%esp), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 360(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %edi, %esi
+ adcl 368(%esp), %esi
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 372(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 1044(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv320x32
+ movl 344(%esp), %edx
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 304(%esp), %ecx
+ adcl 308(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 316(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ adcl 320(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %ebp # 4-byte Reload
+ adcl 324(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 332(%esp), %esi
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 256(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 256(%esp), %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 272(%esp), %edi
+ adcl 276(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ adcl 284(%esp), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %esi # 4-byte Reload
+ adcl 288(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 208(%esp), %ecx
+ movl 1044(%esp), %edx
+ calll .LmulPv320x32
+ movl 248(%esp), %edx
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 208(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 220(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl 224(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 236(%esp), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 160(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 160(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ebp, %edi
+ adcl 176(%esp), %edi
+ movl 28(%esp), %esi # 4-byte Reload
+ adcl 180(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 192(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 112(%esp), %ecx
+ movl 1044(%esp), %edx
+ calll .LmulPv320x32
+ movl 152(%esp), %edx
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 112(%esp), %ecx
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 120(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 124(%esp), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 128(%esp), %esi
+ movl %esi, %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 132(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 136(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ adcl 140(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 144(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %ebp
+ movl %eax, (%esp)
+ leal 64(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 64(%esp), %ebp
+ movl %edi, %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ movl 32(%esp), %ebx # 4-byte Reload
+ adcl 68(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 72(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 76(%esp), %ebx
+ adcl 80(%esp), %ebp
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 84(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 92(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 96(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 100(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 1052(%esp), %edi
+ subl (%edi), %edx
+ sbbl 4(%edi), %ecx
+ movl %ebx, %eax
+ sbbl 8(%edi), %eax
+ movl %ebp, %esi
+ sbbl 12(%edi), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ sbbl 16(%edi), %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ movl 24(%esp), %esi # 4-byte Reload
+ sbbl 20(%edi), %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ movl 40(%esp), %esi # 4-byte Reload
+ sbbl 24(%edi), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ sbbl 28(%edi), %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ sbbl 32(%edi), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ sbbl 36(%edi), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl %esi, %edi
+ sarl $31, %edi
+ testl %edi, %edi
+ movl 60(%esp), %edi # 4-byte Reload
+ js .LBB147_2
+# BB#1:
+ movl %edx, %edi
+.LBB147_2:
+ movl 1040(%esp), %edx
+ movl %edi, (%edx)
+ movl 52(%esp), %edi # 4-byte Reload
+ js .LBB147_4
+# BB#3:
+ movl %ecx, %edi
+.LBB147_4:
+ movl %edi, 4(%edx)
+ js .LBB147_6
+# BB#5:
+ movl %eax, %ebx
+.LBB147_6:
+ movl %ebx, 8(%edx)
+ js .LBB147_8
+# BB#7:
+ movl 4(%esp), %ebp # 4-byte Reload
+.LBB147_8:
+ movl %ebp, 12(%edx)
+ movl 44(%esp), %esi # 4-byte Reload
+ movl 24(%esp), %eax # 4-byte Reload
+ js .LBB147_10
+# BB#9:
+ movl 8(%esp), %esi # 4-byte Reload
+.LBB147_10:
+ movl %esi, 16(%edx)
+ js .LBB147_12
+# BB#11:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB147_12:
+ movl %eax, 20(%edx)
+ movl 40(%esp), %eax # 4-byte Reload
+ js .LBB147_14
+# BB#13:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB147_14:
+ movl %eax, 24(%edx)
+ movl 36(%esp), %eax # 4-byte Reload
+ js .LBB147_16
+# BB#15:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB147_16:
+ movl %eax, 28(%edx)
+ movl 48(%esp), %eax # 4-byte Reload
+ js .LBB147_18
+# BB#17:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB147_18:
+ movl %eax, 32(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB147_20
+# BB#19:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB147_20:
+ movl %eax, 36(%edx)
+ addl $1020, %esp # imm = 0x3FC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end147:
+ .size mcl_fp_montNF10Lbmi2, .Lfunc_end147-mcl_fp_montNF10Lbmi2
+
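+# mcl_fp_montRed10Lbmi2(z, xy, p): Montgomery reduction of a 20-limb (640-bit)
+# input modulo the 10-limb modulus p. Each round multiplies the running low
+# word by the precomputed constant at -4(p), adds q*p via .LmulPv320x32, and
+# shifts down one word; a final conditional subtraction of p completes z.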
+ .globl mcl_fp_montRed10Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed10Lbmi2,@function
+mcl_fp_montRed10Lbmi2: # @mcl_fp_montRed10Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $604, %esp # imm = 0x25C
+ calll .L148$pb
+.L148$pb:
+ popl %eax
+.Ltmp19:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp19-.L148$pb), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 632(%esp), %edx
+ movl -4(%edx), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 628(%esp), %ecx
+ movl (%ecx), %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 4(%ecx), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ imull %esi, %ebx
+ movl 76(%ecx), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 72(%ecx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%ecx), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 64(%ecx), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 60(%ecx), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 56(%ecx), %esi
+ movl %esi, 120(%esp) # 4-byte Spill
+ movl 52(%ecx), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 48(%ecx), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 44(%ecx), %esi
+ movl %esi, 124(%esp) # 4-byte Spill
+ movl 40(%ecx), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 36(%ecx), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 32(%ecx), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 28(%ecx), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 24(%ecx), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 20(%ecx), %ebp
+ movl 16(%ecx), %edi
+ movl 12(%ecx), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 8(%ecx), %esi
+ movl (%edx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 36(%edx), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 32(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 28(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 24(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 16(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 12(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 8(%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 4(%edx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl %ebx, (%esp)
+ leal 560(%esp), %ecx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 560(%esp), %eax
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 564(%esp), %ecx
+ adcl 568(%esp), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 576(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 580(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ sbbl %eax, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 512(%esp), %ecx
+ movl 632(%esp), %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ movl 68(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 512(%esp), %esi
+ movl 4(%esp), %edx # 4-byte Reload
+ adcl 516(%esp), %edx
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 520(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 524(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 528(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 532(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 536(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 540(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 544(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 548(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 552(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 464(%esp), %ecx
+ movl 632(%esp), %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ addl 464(%esp), %esi
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 468(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 492(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 416(%esp), %ecx
+ movl 632(%esp), %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ movl 52(%esp), %eax # 4-byte Reload
+ addl 416(%esp), %eax
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 420(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 440(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ movl 60(%esp), %edi # 4-byte Reload
+ imull %edi, %eax
+ movl %eax, (%esp)
+ leal 368(%esp), %ecx
+ movl 632(%esp), %eax
+ movl %eax, %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ addl 368(%esp), %ebp
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 372(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 404(%esp), %ebp
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull %edi, %eax
+ movl %eax, (%esp)
+ leal 320(%esp), %ecx
+ movl 632(%esp), %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ addl 320(%esp), %esi
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 324(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %edi # 4-byte Reload
+ adcl 344(%esp), %edi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 352(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 360(%esp), %esi
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 632(%esp), %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ addl 272(%esp), %ebp
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 276(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl 288(%esp), %ebp
+ adcl 292(%esp), %edi
+ movl %edi, 124(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl 296(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 308(%esp), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 312(%esp), %esi
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 632(%esp), %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ movl 96(%esp), %eax # 4-byte Reload
+ addl 224(%esp), %eax
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 232(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 236(%esp), %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 240(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ adcl 244(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 248(%esp), %ebp
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 252(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 256(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ adcl 260(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 264(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ movl %eax, %edi
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 176(%esp), %ecx
+ movl 632(%esp), %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ addl 176(%esp), %edi
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 180(%esp), %ecx
+ movl 112(%esp), %edi # 4-byte Reload
+ adcl 184(%esp), %edi
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 196(%esp), %ebp
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 128(%esp), %ecx
+ movl 632(%esp), %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ addl 128(%esp), %esi
+ movl %edi, %eax
+ adcl 132(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 136(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 116(%esp), %esi # 4-byte Reload
+ adcl 140(%esp), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ adcl 144(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl %ebp, %edx
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 152(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 76(%esp), %ebx # 4-byte Reload
+ adcl 164(%esp), %ebx
+ movl %ebx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ subl 12(%esp), %edi # 4-byte Folded Reload
+ sbbl 8(%esp), %ecx # 4-byte Folded Reload
+ sbbl 16(%esp), %esi # 4-byte Folded Reload
+ sbbl 20(%esp), %edx # 4-byte Folded Reload
+ movl 120(%esp), %eax # 4-byte Reload
+ sbbl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ sbbl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ sbbl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ sbbl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ sbbl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %ebx # 4-byte Reload
+ sbbl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 96(%esp) # 4-byte Spill
+ sbbl $0, %eax
+ andl $1, %eax
+ jne .LBB148_2
+# BB#1:
+ movl %edx, 80(%esp) # 4-byte Spill
+.LBB148_2:
+ testb %al, %al
+ movl 112(%esp), %edx # 4-byte Reload
+ jne .LBB148_4
+# BB#3:
+ movl %edi, %edx
+.LBB148_4:
+ movl 624(%esp), %edi
+ movl %edx, (%edi)
+ movl 108(%esp), %edx # 4-byte Reload
+ jne .LBB148_6
+# BB#5:
+ movl %ecx, 124(%esp) # 4-byte Spill
+.LBB148_6:
+ movl 124(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%edi)
+ movl 116(%esp), %ecx # 4-byte Reload
+ jne .LBB148_8
+# BB#7:
+ movl %esi, %ecx
+.LBB148_8:
+ movl %ecx, 8(%edi)
+ movl 80(%esp), %eax # 4-byte Reload
+ movl %eax, 12(%edi)
+ movl 76(%esp), %ecx # 4-byte Reload
+ movl 120(%esp), %eax # 4-byte Reload
+ jne .LBB148_10
+# BB#9:
+ movl 64(%esp), %eax # 4-byte Reload
+.LBB148_10:
+ movl %eax, 16(%edi)
+ movl 84(%esp), %eax # 4-byte Reload
+ movl 104(%esp), %ebp # 4-byte Reload
+ jne .LBB148_12
+# BB#11:
+ movl 68(%esp), %ebp # 4-byte Reload
+.LBB148_12:
+ movl %ebp, 20(%edi)
+ movl 88(%esp), %ebx # 4-byte Reload
+ jne .LBB148_14
+# BB#13:
+ movl 72(%esp), %ebx # 4-byte Reload
+.LBB148_14:
+ movl %ebx, 24(%edi)
+ jne .LBB148_16
+# BB#15:
+ movl 92(%esp), %edx # 4-byte Reload
+.LBB148_16:
+ movl %edx, 28(%edi)
+ jne .LBB148_18
+# BB#17:
+ movl 100(%esp), %ecx # 4-byte Reload
+.LBB148_18:
+ movl %ecx, 32(%edi)
+ jne .LBB148_20
+# BB#19:
+ movl 96(%esp), %eax # 4-byte Reload
+.LBB148_20:
+ movl %eax, 36(%edi)
+ addl $604, %esp # imm = 0x25C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end148:
+ .size mcl_fp_montRed10Lbmi2, .Lfunc_end148-mcl_fp_montRed10Lbmi2
+
+ .globl mcl_fp_addPre10Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre10Lbmi2,@function
+mcl_fp_addPre10Lbmi2: # @mcl_fp_addPre10Lbmi2
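+# 10-limb (320-bit) addition without reduction: z = x + y; the carry-out is returned in %eax.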
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 20(%esp), %ecx
+ addl (%ecx), %edx
+ adcl 4(%ecx), %esi
+ movl 8(%eax), %edi
+ adcl 8(%ecx), %edi
+ movl 16(%esp), %ebx
+ movl %edx, (%ebx)
+ movl 12(%ecx), %edx
+ movl %esi, 4(%ebx)
+ movl 16(%ecx), %esi
+ adcl 12(%eax), %edx
+ adcl 16(%eax), %esi
+ movl %edi, 8(%ebx)
+ movl 20(%eax), %edi
+ movl %edx, 12(%ebx)
+ movl 20(%ecx), %edx
+ adcl %edi, %edx
+ movl 24(%eax), %edi
+ movl %esi, 16(%ebx)
+ movl 24(%ecx), %esi
+ adcl %edi, %esi
+ movl 28(%eax), %edi
+ movl %edx, 20(%ebx)
+ movl 28(%ecx), %edx
+ adcl %edi, %edx
+ movl 32(%eax), %edi
+ movl %esi, 24(%ebx)
+ movl 32(%ecx), %esi
+ adcl %edi, %esi
+ movl %edx, 28(%ebx)
+ movl %esi, 32(%ebx)
+ movl 36(%eax), %eax
+ movl 36(%ecx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 36(%ebx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end149:
+ .size mcl_fp_addPre10Lbmi2, .Lfunc_end149-mcl_fp_addPre10Lbmi2
+
+ .globl mcl_fp_subPre10Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre10Lbmi2,@function
+mcl_fp_subPre10Lbmi2: # @mcl_fp_subPre10Lbmi2
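+# 10-limb subtraction without reduction: z = x - y; the borrow-out is returned in %eax.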
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %eax, %eax
+ movl 28(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edx), %ebx
+ movl 20(%esp), %ebp
+ movl %esi, (%ebp)
+ movl 12(%ecx), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ebp)
+ movl 16(%ecx), %edi
+ sbbl 16(%edx), %edi
+ movl %ebx, 8(%ebp)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%ebp)
+ movl 20(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %edi, 16(%ebp)
+ movl 24(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 28(%edx), %ebx
+ movl %esi, 20(%ebp)
+ movl 28(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 32(%edx), %ebx
+ movl %edi, 24(%ebp)
+ movl 32(%ecx), %edi
+ sbbl %ebx, %edi
+ movl %esi, 28(%ebp)
+ movl %edi, 32(%ebp)
+ movl 36(%edx), %edx
+ movl 36(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 36(%ebp)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end150:
+ .size mcl_fp_subPre10Lbmi2, .Lfunc_end150-mcl_fp_subPre10Lbmi2
+
+ .globl mcl_fp_shr1_10Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_10Lbmi2,@function
+mcl_fp_shr1_10Lbmi2: # @mcl_fp_shr1_10Lbmi2
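+# Logical right shift of a 10-limb value by one bit, propagating bits across limbs with shrd.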
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl 8(%esp), %esi
+ movl %ecx, (%esi)
+ movl 8(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 4(%esi)
+ movl 12(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 8(%esi)
+ movl 16(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 12(%esi)
+ movl 20(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 16(%esi)
+ movl 24(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 20(%esi)
+ movl 28(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 24(%esi)
+ movl 32(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 28(%esi)
+ movl 36(%eax), %eax
+ shrdl $1, %eax, %ecx
+ movl %ecx, 32(%esi)
+ shrl %eax
+ movl %eax, 36(%esi)
+ popl %esi
+ retl
+.Lfunc_end151:
+ .size mcl_fp_shr1_10Lbmi2, .Lfunc_end151-mcl_fp_shr1_10Lbmi2
+
+ .globl mcl_fp_add10Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add10Lbmi2,@function
+mcl_fp_add10Lbmi2: # @mcl_fp_add10Lbmi2
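+# Modular addition: z = x + y mod p. The raw sum is stored first; if subtracting p
+# produces no borrow, the reduced value overwrites it (the %nocarry block).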
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $24, %esp
+ movl 52(%esp), %edi
+ movl (%edi), %ecx
+ movl 4(%edi), %eax
+ movl 48(%esp), %ebx
+ addl (%ebx), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ adcl 4(%ebx), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 8(%edi), %eax
+ adcl 8(%ebx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 12(%ebx), %ecx
+ movl 16(%ebx), %eax
+ adcl 12(%edi), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl 16(%edi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 20(%ebx), %eax
+ adcl 20(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 24(%ebx), %esi
+ adcl 24(%edi), %esi
+ movl 28(%ebx), %ebp
+ adcl 28(%edi), %ebp
+ movl 32(%ebx), %edx
+ adcl 32(%edi), %edx
+ movl 36(%ebx), %ecx
+ adcl 36(%edi), %ecx
+ movl 44(%esp), %edi
+ movl (%esp), %ebx # 4-byte Reload
+ movl %ebx, (%edi)
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 4(%edi)
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 8(%edi)
+ movl 12(%esp), %eax # 4-byte Reload
+ movl %eax, 12(%edi)
+ movl 8(%esp), %eax # 4-byte Reload
+ movl %eax, 16(%edi)
+ movl 4(%esp), %eax # 4-byte Reload
+ movl %eax, 20(%edi)
+ movl %esi, 24(%edi)
+ movl %ebp, 28(%edi)
+ movl %edx, 32(%edi)
+ movl %ecx, 36(%edi)
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl 56(%esp), %edi
+ subl (%edi), %ebx
+ movl %ebx, (%esp) # 4-byte Spill
+ movl 20(%esp), %ebx # 4-byte Reload
+ sbbl 4(%edi), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 16(%esp), %ebx # 4-byte Reload
+ sbbl 8(%edi), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %ebx # 4-byte Reload
+ sbbl 12(%edi), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 8(%esp), %ebx # 4-byte Reload
+ sbbl 16(%edi), %ebx
+ movl %ebx, 8(%esp) # 4-byte Spill
+ movl 4(%esp), %ebx # 4-byte Reload
+ sbbl 20(%edi), %ebx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ sbbl 24(%edi), %esi
+ sbbl 28(%edi), %ebp
+ sbbl 32(%edi), %edx
+ sbbl 36(%edi), %ecx
+ sbbl $0, %eax
+ testb $1, %al
+ jne .LBB152_2
+# BB#1: # %nocarry
+ movl (%esp), %edi # 4-byte Reload
+ movl 44(%esp), %ebx
+ movl %edi, (%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl 8(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%ebx)
+ movl 4(%esp), %eax # 4-byte Reload
+ movl %eax, 20(%ebx)
+ movl %esi, 24(%ebx)
+ movl %ebp, 28(%ebx)
+ movl %edx, 32(%ebx)
+ movl %ecx, 36(%ebx)
+.LBB152_2: # %carry
+ addl $24, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end152:
+ .size mcl_fp_add10Lbmi2, .Lfunc_end152-mcl_fp_add10Lbmi2
+
+ .globl mcl_fp_addNF10Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF10Lbmi2,@function
+mcl_fp_addNF10Lbmi2: # @mcl_fp_addNF10Lbmi2
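+# Modular addition, "NF" variant: computes x + y and a trial subtraction of p, then
+# selects between the two by the sign of the top word instead of the carry flag.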
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $72, %esp
+ movl 100(%esp), %ecx
+ movl (%ecx), %eax
+ movl 4(%ecx), %esi
+ movl 96(%esp), %edx
+ addl (%edx), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 4(%edx), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 36(%ecx), %edi
+ movl 32(%ecx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 28(%ecx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 24(%ecx), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 20(%ecx), %ebp
+ movl 16(%ecx), %ebx
+ movl 12(%ecx), %eax
+ movl 8(%ecx), %esi
+ adcl 8(%edx), %esi
+ adcl 12(%edx), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 16(%edx), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ adcl 20(%edx), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 24(%edx), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 28(%edx), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 32(%edx), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl %esi, %ecx
+ adcl 36(%edx), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 104(%esp), %edi
+ movl 52(%esp), %edx # 4-byte Reload
+ subl (%edi), %edx
+ movl 56(%esp), %esi # 4-byte Reload
+ sbbl 4(%edi), %esi
+ movl %esi, (%esp) # 4-byte Spill
+ movl %ecx, %esi
+ sbbl 8(%edi), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ sbbl 12(%edi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ sbbl 16(%edi), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ sbbl 20(%edi), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ movl %esi, %eax
+ movl %esi, %ebp
+ sbbl 24(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ sbbl 28(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ movl %esi, %eax
+ movl %esi, %ebx
+ sbbl 32(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ movl %eax, %esi
+ sbbl 36(%edi), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl %esi, %edi
+ movl 52(%esp), %esi # 4-byte Reload
+ sarl $31, %edi
+ testl %edi, %edi
+ js .LBB153_2
+# BB#1:
+ movl %edx, %esi
+.LBB153_2:
+ movl 92(%esp), %edx
+ movl %esi, (%edx)
+ movl 56(%esp), %esi # 4-byte Reload
+ js .LBB153_4
+# BB#3:
+ movl (%esp), %esi # 4-byte Reload
+.LBB153_4:
+ movl %esi, 4(%edx)
+ movl %ebp, %edi
+ movl 40(%esp), %esi # 4-byte Reload
+ js .LBB153_6
+# BB#5:
+ movl 4(%esp), %ecx # 4-byte Reload
+.LBB153_6:
+ movl %ecx, 8(%edx)
+ movl %ebx, %ecx
+ movl 44(%esp), %ebp # 4-byte Reload
+ js .LBB153_8
+# BB#7:
+ movl 8(%esp), %esi # 4-byte Reload
+.LBB153_8:
+ movl %esi, 12(%edx)
+ movl 68(%esp), %esi # 4-byte Reload
+ movl 48(%esp), %ebx # 4-byte Reload
+ js .LBB153_10
+# BB#9:
+ movl 12(%esp), %ebp # 4-byte Reload
+.LBB153_10:
+ movl %ebp, 16(%edx)
+ js .LBB153_12
+# BB#11:
+ movl 16(%esp), %ebx # 4-byte Reload
+.LBB153_12:
+ movl %ebx, 20(%edx)
+ js .LBB153_14
+# BB#13:
+ movl 20(%esp), %edi # 4-byte Reload
+.LBB153_14:
+ movl %edi, 24(%edx)
+ js .LBB153_16
+# BB#15:
+ movl 24(%esp), %esi # 4-byte Reload
+.LBB153_16:
+ movl %esi, 28(%edx)
+ js .LBB153_18
+# BB#17:
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB153_18:
+ movl %ecx, 32(%edx)
+ js .LBB153_20
+# BB#19:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB153_20:
+ movl %eax, 36(%edx)
+ addl $72, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end153:
+ .size mcl_fp_addNF10Lbmi2, .Lfunc_end153-mcl_fp_addNF10Lbmi2
+
+ .globl mcl_fp_sub10Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub10Lbmi2,@function
+mcl_fp_sub10Lbmi2: # @mcl_fp_sub10Lbmi2
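+# Modular subtraction: z = x - y; if the subtraction borrows, p is added back (the %carry block).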
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $32, %esp
+ movl 56(%esp), %esi
+ movl (%esi), %ecx
+ movl 4(%esi), %eax
+ xorl %ebx, %ebx
+ movl 60(%esp), %edi
+ subl (%edi), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ sbbl 4(%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ sbbl 12(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ sbbl 16(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 20(%esi), %edx
+ sbbl 20(%edi), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 24(%esi), %ecx
+ sbbl 24(%edi), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 28(%esi), %eax
+ sbbl 28(%edi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 32(%esi), %ebp
+ sbbl 32(%edi), %ebp
+ movl 36(%esi), %esi
+ sbbl 36(%edi), %esi
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 52(%esp), %ebx
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, (%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%ebx)
+ movl %edx, 20(%ebx)
+ movl %ecx, 24(%ebx)
+ movl %eax, 28(%ebx)
+ movl %ebp, 32(%ebx)
+ movl %esi, 36(%ebx)
+ je .LBB154_2
+# BB#1: # %carry
+ movl %esi, %edi
+ movl 64(%esp), %esi
+ movl 12(%esp), %ecx # 4-byte Reload
+ addl (%esi), %ecx
+ movl %ecx, (%ebx)
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 24(%esp), %ecx # 4-byte Reload
+ adcl 8(%esi), %ecx
+ movl 12(%esi), %eax
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl 24(%esi), %ecx
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl 28(%esi), %eax
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%ebx)
+ movl %eax, 28(%ebx)
+ movl 32(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 32(%ebx)
+ movl 36(%esi), %eax
+ adcl %edi, %eax
+ movl %eax, 36(%ebx)
+.LBB154_2: # %nocarry
+ addl $32, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end154:
+ .size mcl_fp_sub10Lbmi2, .Lfunc_end154-mcl_fp_sub10Lbmi2
+
+ .globl mcl_fp_subNF10Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF10Lbmi2,@function
+mcl_fp_subNF10Lbmi2: # @mcl_fp_subNF10Lbmi2
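+# Modular subtraction, "NF" variant: computes x - y, then adds back p masked by the
+# sign of the difference, so the result lands in [0, p) without a branch.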
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $56, %esp
+ movl 80(%esp), %eax
+ movl 36(%eax), %esi
+ movl (%eax), %edi
+ movl 4(%eax), %edx
+ movl 84(%esp), %ecx
+ subl (%ecx), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ sbbl 4(%ecx), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 32(%eax), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 28(%eax), %edi
+ movl 24(%eax), %ebx
+ movl 20(%eax), %ebp
+ movl 16(%eax), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 12(%eax), %edx
+ movl 8(%eax), %eax
+ sbbl 8(%ecx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ sbbl 12(%ecx), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ sbbl 16(%ecx), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ sbbl 20(%ecx), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ sbbl 24(%ecx), %ebx
+ movl %ebx, 32(%esp) # 4-byte Spill
+ sbbl 28(%ecx), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ sbbl 32(%ecx), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ sbbl 36(%ecx), %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl %esi, %eax
+ sarl $31, %eax
+ movl %eax, %edx
+ addl %edx, %edx
+ movl %eax, %ecx
+ adcl %ecx, %ecx
+ movl %esi, %ebx
+ shrl $31, %ebx
+ orl %edx, %ebx
+ movl 88(%esp), %edi
+ movl 20(%edi), %edx
+ andl %ecx, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 12(%edi), %edx
+ andl %ecx, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ andl 4(%edi), %ecx
+ movl 16(%edi), %edx
+ andl %ebx, %edx
+ movl %edx, (%esp) # 4-byte Spill
+ movl 8(%edi), %edx
+ andl %ebx, %edx
+ andl (%edi), %ebx
+ movl 36(%edi), %esi
+ andl %eax, %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ movl 32(%edi), %ebp
+ andl %eax, %ebp
+ movl 28(%edi), %esi
+ andl %eax, %esi
+ andl 24(%edi), %eax
+ addl 36(%esp), %ebx # 4-byte Folded Reload
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl 76(%esp), %edi
+ movl %ebx, (%edi)
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 4(%edi)
+ movl 4(%esp), %ecx # 4-byte Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 8(%edi)
+ movl (%esp), %edx # 4-byte Reload
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 12(%edi)
+ movl 12(%esp), %ecx # 4-byte Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 16(%edi)
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 20(%edi)
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ movl %eax, 24(%edi)
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ movl %esi, 28(%edi)
+ movl %ebp, 32(%edi)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%edi)
+ addl $56, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end155:
+ .size mcl_fp_subNF10Lbmi2, .Lfunc_end155-mcl_fp_subNF10Lbmi2
+
+ .globl mcl_fpDbl_add10Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add10Lbmi2,@function
+mcl_fpDbl_add10Lbmi2: # @mcl_fpDbl_add10Lbmi2
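+# Double-width addition: adds two 20-limb values; the low 10 limbs are stored as-is and
+# the high 10 limbs are reduced by a conditional subtraction of p.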
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $72, %esp
+ movl 100(%esp), %edx
+ movl 96(%esp), %edi
+ movl 12(%edi), %esi
+ movl 16(%edi), %ecx
+ movl 8(%edx), %ebx
+ movl (%edx), %ebp
+ addl (%edi), %ebp
+ movl 92(%esp), %eax
+ movl %ebp, (%eax)
+ movl 4(%edx), %ebp
+ adcl 4(%edi), %ebp
+ adcl 8(%edi), %ebx
+ adcl 12(%edx), %esi
+ adcl 16(%edx), %ecx
+ movl %ebp, 4(%eax)
+ movl 48(%edx), %ebp
+ movl %ebx, 8(%eax)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%eax)
+ movl 20(%edi), %esi
+ adcl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %ecx, 16(%eax)
+ movl 24(%edi), %ecx
+ adcl %ebx, %ecx
+ movl 28(%edx), %ebx
+ movl %esi, 20(%eax)
+ movl 28(%edi), %esi
+ adcl %ebx, %esi
+ movl 32(%edx), %ebx
+ movl %ecx, 24(%eax)
+ movl 32(%edi), %ecx
+ adcl %ebx, %ecx
+ movl 36(%edx), %ebx
+ movl %esi, 28(%eax)
+ movl 36(%edi), %esi
+ adcl %ebx, %esi
+ movl 40(%edx), %ebx
+ movl %ecx, 32(%eax)
+ movl 40(%edi), %ecx
+ adcl %ebx, %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 44(%edx), %ebx
+ movl %esi, 36(%eax)
+ movl 44(%edi), %eax
+ adcl %ebx, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 48(%edi), %eax
+ adcl %ebp, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 52(%edx), %eax
+ movl 52(%edi), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 56(%edx), %eax
+ movl 56(%edi), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 60(%edx), %eax
+ movl 60(%edi), %ecx
+ adcl %eax, %ecx
+ movl 64(%edx), %esi
+ movl 64(%edi), %eax
+ adcl %esi, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 68(%edx), %ebx
+ movl 68(%edi), %esi
+ adcl %ebx, %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 72(%edx), %ebx
+ movl 72(%edi), %ebp
+ adcl %ebx, %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 76(%edx), %edx
+ movl 76(%edi), %edi
+ adcl %edx, %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ sbbl %edx, %edx
+ andl $1, %edx
+ movl 104(%esp), %ebx
+ movl 64(%esp), %edi # 4-byte Reload
+ subl (%ebx), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ sbbl 4(%ebx), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ sbbl 8(%ebx), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ sbbl 12(%ebx), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ sbbl 16(%ebx), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl %ecx, %edi
+ sbbl 20(%ebx), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ sbbl 24(%ebx), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ sbbl 28(%ebx), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ movl 36(%esp), %ebp # 4-byte Reload
+ sbbl 32(%ebx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebp, %edi
+ sbbl 36(%ebx), %edi
+ sbbl $0, %edx
+ andl $1, %edx
+ jne .LBB156_2
+# BB#1:
+ movl %edi, %ebp
+.LBB156_2:
+ testb %dl, %dl
+ movl 64(%esp), %edx # 4-byte Reload
+ movl 60(%esp), %esi # 4-byte Reload
+ movl 56(%esp), %edi # 4-byte Reload
+ movl 52(%esp), %ebx # 4-byte Reload
+ jne .LBB156_4
+# BB#3:
+ movl (%esp), %ecx # 4-byte Reload
+ movl 4(%esp), %esi # 4-byte Reload
+ movl 8(%esp), %edi # 4-byte Reload
+ movl 12(%esp), %ebx # 4-byte Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 20(%esp), %edx # 4-byte Reload
+.LBB156_4:
+ movl 92(%esp), %eax
+ movl %edx, 40(%eax)
+ movl 68(%esp), %edx # 4-byte Reload
+ movl %edx, 44(%eax)
+ movl %ebx, 48(%eax)
+ movl %edi, 52(%eax)
+ movl %esi, 56(%eax)
+ movl %ecx, 60(%eax)
+ movl 44(%esp), %edx # 4-byte Reload
+ movl 40(%esp), %ecx # 4-byte Reload
+ jne .LBB156_6
+# BB#5:
+ movl 24(%esp), %ecx # 4-byte Reload
+.LBB156_6:
+ movl %ecx, 64(%eax)
+ movl 48(%esp), %ecx # 4-byte Reload
+ jne .LBB156_8
+# BB#7:
+ movl 28(%esp), %edx # 4-byte Reload
+.LBB156_8:
+ movl %edx, 68(%eax)
+ jne .LBB156_10
+# BB#9:
+ movl 32(%esp), %ecx # 4-byte Reload
+.LBB156_10:
+ movl %ecx, 72(%eax)
+ movl %ebp, 76(%eax)
+ addl $72, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end156:
+ .size mcl_fpDbl_add10Lbmi2, .Lfunc_end156-mcl_fpDbl_add10Lbmi2
+
+ .globl mcl_fpDbl_sub10Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub10Lbmi2,@function
+mcl_fpDbl_sub10Lbmi2: # @mcl_fpDbl_sub10Lbmi2
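+# Double-width subtraction: subtracts two 20-limb values; the low 10 limbs are stored as-is
+# and p is conditionally added back into the high 10 limbs when the subtraction borrows.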
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $60, %esp
+ movl 84(%esp), %ebp
+ movl (%ebp), %edx
+ movl 4(%ebp), %esi
+ movl 88(%esp), %eax
+ subl (%eax), %edx
+ sbbl 4(%eax), %esi
+ movl 8(%ebp), %edi
+ sbbl 8(%eax), %edi
+ movl 80(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 12(%ebp), %edx
+ sbbl 12(%eax), %edx
+ movl %esi, 4(%ecx)
+ movl 16(%ebp), %esi
+ sbbl 16(%eax), %esi
+ movl %edi, 8(%ecx)
+ movl 20(%eax), %edi
+ movl %edx, 12(%ecx)
+ movl 20(%ebp), %edx
+ sbbl %edi, %edx
+ movl 24(%eax), %edi
+ movl %esi, 16(%ecx)
+ movl 24(%ebp), %esi
+ sbbl %edi, %esi
+ movl 28(%eax), %edi
+ movl %edx, 20(%ecx)
+ movl 28(%ebp), %edx
+ sbbl %edi, %edx
+ movl 32(%eax), %edi
+ movl %esi, 24(%ecx)
+ movl 32(%ebp), %esi
+ sbbl %edi, %esi
+ movl 36(%eax), %edi
+ movl %edx, 28(%ecx)
+ movl 36(%ebp), %edx
+ sbbl %edi, %edx
+ movl 40(%eax), %edi
+ movl %esi, 32(%ecx)
+ movl 40(%ebp), %esi
+ sbbl %edi, %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 44(%eax), %esi
+ movl %edx, 36(%ecx)
+ movl 44(%ebp), %edx
+ sbbl %esi, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 48(%eax), %edx
+ movl 48(%ebp), %esi
+ sbbl %edx, %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 52(%eax), %edx
+ movl 52(%ebp), %esi
+ sbbl %edx, %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 56(%eax), %edx
+ movl 56(%ebp), %esi
+ sbbl %edx, %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 60(%eax), %edx
+ movl 60(%ebp), %esi
+ sbbl %edx, %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 64(%eax), %edx
+ movl 64(%ebp), %esi
+ sbbl %edx, %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 68(%eax), %edx
+ movl 68(%ebp), %esi
+ sbbl %edx, %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 72(%eax), %edx
+ movl 72(%ebp), %esi
+ sbbl %edx, %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 76(%eax), %eax
+ movl 76(%ebp), %edx
+ sbbl %eax, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 92(%esp), %esi
+ jne .LBB157_1
+# BB#2:
+ movl $0, 16(%esp) # 4-byte Folded Spill
+ jmp .LBB157_3
+.LBB157_1:
+ movl 36(%esi), %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+.LBB157_3:
+ testb %al, %al
+ jne .LBB157_4
+# BB#5:
+ movl $0, 8(%esp) # 4-byte Folded Spill
+ movl $0, %ebx
+ jmp .LBB157_6
+.LBB157_4:
+ movl (%esi), %ebx
+ movl 4(%esi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+.LBB157_6:
+ jne .LBB157_7
+# BB#8:
+ movl $0, 12(%esp) # 4-byte Folded Spill
+ jmp .LBB157_9
+.LBB157_7:
+ movl 32(%esi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+.LBB157_9:
+ jne .LBB157_10
+# BB#11:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB157_12
+.LBB157_10:
+ movl 28(%esi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+.LBB157_12:
+ jne .LBB157_13
+# BB#14:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB157_15
+.LBB157_13:
+ movl 24(%esi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+.LBB157_15:
+ jne .LBB157_16
+# BB#17:
+ movl $0, %ebp
+ jmp .LBB157_18
+.LBB157_16:
+ movl 20(%esi), %ebp
+.LBB157_18:
+ jne .LBB157_19
+# BB#20:
+ movl $0, %eax
+ jmp .LBB157_21
+.LBB157_19:
+ movl 16(%esi), %eax
+.LBB157_21:
+ jne .LBB157_22
+# BB#23:
+ movl $0, %edx
+ jmp .LBB157_24
+.LBB157_22:
+ movl 12(%esi), %edx
+.LBB157_24:
+ jne .LBB157_25
+# BB#26:
+ xorl %esi, %esi
+ jmp .LBB157_27
+.LBB157_25:
+ movl 8(%esi), %esi
+.LBB157_27:
+ addl 28(%esp), %ebx # 4-byte Folded Reload
+ movl 8(%esp), %edi # 4-byte Reload
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 40(%ecx)
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %edi, 44(%ecx)
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %esi, 48(%ecx)
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 52(%ecx)
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 56(%ecx)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %ebp, 60(%ecx)
+ movl 4(%esp), %edx # 4-byte Reload
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 64(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 68(%ecx)
+ movl %eax, 72(%ecx)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%ecx)
+ addl $60, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end157:
+ .size mcl_fpDbl_sub10Lbmi2, .Lfunc_end157-mcl_fpDbl_sub10Lbmi2
+
+ .align 16, 0x90
+ .type .LmulPv352x32,@function
+.LmulPv352x32: # @mulPv352x32
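+# Internal helper: multiplies the 11-limb (352-bit) value pointed to by %edx by the
+# 32-bit word passed on the stack, using BMI2 mulx; writes the 12-limb product to (%ecx)
+# and returns the result pointer in %eax.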
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $32, %esp
+ movl %edx, %eax
+ movl 52(%esp), %edx
+ mulxl 4(%eax), %ebx, %esi
+ mulxl (%eax), %edi, %ebp
+ movl %edi, 28(%esp) # 4-byte Spill
+ addl %ebx, %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ mulxl 8(%eax), %edi, %ebx
+ adcl %esi, %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ mulxl 12(%eax), %esi, %edi
+ adcl %ebx, %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ mulxl 16(%eax), %esi, %ebx
+ adcl %edi, %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ mulxl 20(%eax), %esi, %edi
+ adcl %ebx, %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ mulxl 24(%eax), %ebx, %esi
+ adcl %edi, %ebx
+ mulxl 28(%eax), %edi, %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ adcl %esi, %edi
+ mulxl 32(%eax), %esi, %ebp
+ movl %ebp, (%esp) # 4-byte Spill
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ mulxl 36(%eax), %edx, %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ adcl (%esp), %edx # 4-byte Folded Reload
+ movl 28(%esp), %ebp # 4-byte Reload
+ movl %ebp, (%ecx)
+ movl 24(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%ecx)
+ movl 20(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%ecx)
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%ecx)
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%ecx)
+ movl 8(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%ecx)
+ movl %ebx, 24(%ecx)
+ movl %edi, 28(%ecx)
+ movl %esi, 32(%ecx)
+ movl %edx, 36(%ecx)
+ movl 52(%esp), %edx
+ mulxl 40(%eax), %eax, %edx
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%ecx)
+ adcl $0, %edx
+ movl %edx, 44(%ecx)
+ movl %ecx, %eax
+ addl $32, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end158:
+ .size .LmulPv352x32, .Lfunc_end158-.LmulPv352x32
+
+ .globl mcl_fp_mulUnitPre11Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre11Lbmi2,@function
+mcl_fp_mulUnitPre11Lbmi2: # @mcl_fp_mulUnitPre11Lbmi2
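+# z = x * y, where x has 11 limbs and y is a single 32-bit word; the 12-limb product
+# from .LmulPv352x32 is copied out to z.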
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $92, %esp
+ calll .L159$pb
+.L159$pb:
+ popl %ebx
+.Ltmp20:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp20-.L159$pb), %ebx
+ movl 120(%esp), %eax
+ movl %eax, (%esp)
+ leal 40(%esp), %ecx
+ movl 116(%esp), %edx
+ calll .LmulPv352x32
+ movl 84(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 80(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 76(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 72(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 68(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 64(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp
+ movl 56(%esp), %ebx
+ movl 52(%esp), %edi
+ movl 48(%esp), %esi
+ movl 40(%esp), %edx
+ movl 44(%esp), %ecx
+ movl 112(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %edi, 12(%eax)
+ movl %ebx, 16(%eax)
+ movl %ebp, 20(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%eax)
+ addl $92, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end159:
+ .size mcl_fp_mulUnitPre11Lbmi2, .Lfunc_end159-mcl_fp_mulUnitPre11Lbmi2
+
+ .globl mcl_fpDbl_mulPre11Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre11Lbmi2,@function
+mcl_fpDbl_mulPre11Lbmi2: # @mcl_fpDbl_mulPre11Lbmi2
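+# Schoolbook 11x11-limb multiplication: z = x * y. One .LmulPv352x32 call per limb of y,
+# with each partial product accumulated and one limb of the 22-limb result stored per round.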
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $620, %esp # imm = 0x26C
+ calll .L160$pb
+.L160$pb:
+ popl %eax
+.Ltmp21:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp21-.L160$pb), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl 648(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 644(%esp), %edx
+ movl %edx, %ebp
+ movl %ebx, %edi
+ calll .LmulPv352x32
+ movl 612(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 608(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 604(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 600(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 596(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 592(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 588(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 584(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 580(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 576(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 568(%esp), %eax
+ movl 572(%esp), %esi
+ movl 640(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 648(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 520(%esp), %ecx
+ movl %ebp, %edx
+ movl %edi, %ebx
+ calll .LmulPv352x32
+ addl 520(%esp), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 564(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 560(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 556(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 552(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 548(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 544(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 540(%esp), %ebx
+ movl 536(%esp), %edi
+ movl 532(%esp), %esi
+ movl 524(%esp), %ecx
+ movl 528(%esp), %edx
+ movl 640(%esp), %eax
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%eax)
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 648(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 472(%esp), %ecx
+ movl 644(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 472(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 516(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 512(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 508(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 504(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 500(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 496(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 492(%esp), %ebp
+ movl 488(%esp), %edi
+ movl 484(%esp), %esi
+ movl 476(%esp), %ecx
+ movl 480(%esp), %edx
+ movl 640(%esp), %eax
+ movl 56(%esp), %ebx # 4-byte Reload
+ movl %ebx, 8(%eax)
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 648(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 644(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 424(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 468(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 464(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 460(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 456(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 452(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 448(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 444(%esp), %ebx
+ movl 440(%esp), %edi
+ movl 436(%esp), %esi
+ movl 428(%esp), %ecx
+ movl 432(%esp), %edx
+ movl 640(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 648(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 376(%esp), %ecx
+ movl 644(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 376(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 420(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 416(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 412(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 408(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 404(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 400(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 396(%esp), %ebp
+ movl 392(%esp), %edi
+ movl 388(%esp), %esi
+ movl 380(%esp), %ecx
+ movl 384(%esp), %edx
+ movl 640(%esp), %eax
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl %ebx, 16(%eax)
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 648(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 328(%esp), %ecx
+ movl 644(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 328(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 364(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 360(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 356(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 352(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 348(%esp), %ebx
+ movl 344(%esp), %edi
+ movl 340(%esp), %esi
+ movl 332(%esp), %ecx
+ movl 336(%esp), %edx
+ movl 640(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 648(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 280(%esp), %ecx
+ movl 644(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 280(%esp), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 324(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 320(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 316(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 312(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 308(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 304(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 300(%esp), %ebp
+ movl 296(%esp), %edi
+ movl 292(%esp), %esi
+ movl 284(%esp), %ecx
+ movl 288(%esp), %edx
+ movl 640(%esp), %eax
+ movl 16(%esp), %ebx # 4-byte Reload
+ movl %ebx, 24(%eax)
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 648(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 232(%esp), %ecx
+ movl 644(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 232(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 276(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 272(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 268(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 264(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 260(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 256(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 252(%esp), %ebx
+ movl 248(%esp), %edi
+ movl 244(%esp), %esi
+ movl 236(%esp), %ecx
+ movl 240(%esp), %edx
+ movl 640(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 648(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 644(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 184(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl 228(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 224(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 220(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 212(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 208(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 204(%esp), %ebp
+ movl 200(%esp), %edi
+ movl 196(%esp), %esi
+ movl 188(%esp), %ecx
+ movl 192(%esp), %edx
+ movl 640(%esp), %eax
+ movl 32(%esp), %ebx # 4-byte Reload
+ movl %ebx, 32(%eax)
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ movl 648(%esp), %edi
+ movl 36(%edi), %eax
+ movl %eax, (%esp)
+ leal 136(%esp), %ecx
+ movl 644(%esp), %eax
+ movl %eax, %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 136(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 176(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 168(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 164(%esp), %ebp
+ movl 160(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 156(%esp), %edi
+ movl 152(%esp), %esi
+ movl 148(%esp), %edx
+ movl 140(%esp), %ecx
+ movl 144(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 640(%esp), %eax
+ movl 56(%esp), %ebx # 4-byte Reload
+ movl %ebx, 36(%eax)
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ adcl 60(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 648(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 88(%esp), %ecx
+ movl 644(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 52(%esp), %eax # 4-byte Reload
+ addl 88(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 92(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 132(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 128(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 124(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 120(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 116(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 112(%esp), %edi
+ movl 108(%esp), %esi
+ movl 104(%esp), %edx
+ movl 100(%esp), %ecx
+ movl 640(%esp), %eax
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl %ebx, 40(%eax)
+ movl %ebp, 44(%eax)
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 48(%eax)
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 52(%eax)
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 56(%eax)
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 60(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 72(%esp), %ecx # 4-byte Folded Reload
+ movl %edi, 64(%eax)
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 68(%eax)
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 80(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 72(%eax)
+ movl %ecx, 76(%eax)
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 80(%eax)
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 84(%eax)
+ addl $620, %esp # imm = 0x26C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end160:
+ .size mcl_fpDbl_mulPre11Lbmi2, .Lfunc_end160-mcl_fpDbl_mulPre11Lbmi2
+
+ .globl mcl_fpDbl_sqrPre11Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre11Lbmi2,@function
+mcl_fpDbl_sqrPre11Lbmi2: # @mcl_fpDbl_sqrPre11Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $620, %esp # imm = 0x26C
+ calll .L161$pb
+.L161$pb:
+ popl %ebx
+.Ltmp22:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp22-.L161$pb), %ebx
+ movl %ebx, 84(%esp) # 4-byte Spill
+ movl 644(%esp), %edx
+ movl (%edx), %eax
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl %edx, %esi
+ movl %ebx, %edi
+ calll .LmulPv352x32
+ movl 612(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 608(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 604(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 600(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 596(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 592(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 588(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 584(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 580(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 576(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 568(%esp), %eax
+ movl 572(%esp), %ebp
+ movl 640(%esp), %ecx
+ movl %eax, (%ecx)
+ movl %esi, %edx
+ movl 4(%edx), %eax
+ movl %eax, (%esp)
+ leal 520(%esp), %ecx
+ movl %edi, %ebx
+ calll .LmulPv352x32
+ addl 520(%esp), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 564(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 560(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 556(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 552(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 548(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 544(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 540(%esp), %ebx
+ movl 536(%esp), %edi
+ movl 532(%esp), %esi
+ movl 524(%esp), %ecx
+ movl 528(%esp), %edx
+ movl 640(%esp), %eax
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%eax)
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 8(%edx), %eax
+ movl %eax, (%esp)
+ leal 472(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 60(%esp), %eax # 4-byte Reload
+ addl 472(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 516(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 512(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 508(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 504(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 500(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 496(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 492(%esp), %ebp
+ movl 488(%esp), %edi
+ movl 484(%esp), %esi
+ movl 476(%esp), %ecx
+ movl 480(%esp), %edx
+ movl 640(%esp), %eax
+ movl 60(%esp), %ebx # 4-byte Reload
+ movl %ebx, 8(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 12(%edx), %eax
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 48(%esp), %eax # 4-byte Reload
+ addl 424(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 468(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 464(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 460(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 456(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 452(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 448(%esp), %ebx
+ movl 444(%esp), %edi
+ movl 440(%esp), %esi
+ movl 436(%esp), %edx
+ movl 428(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 432(%esp), %ecx
+ movl 640(%esp), %eax
+ movl 48(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 80(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 16(%edx), %eax
+ movl %eax, (%esp)
+ leal 376(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 80(%esp), %eax # 4-byte Reload
+ addl 376(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 420(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 416(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 412(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 408(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 404(%esp), %ebx
+ movl 400(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 396(%esp), %edi
+ movl 392(%esp), %esi
+ movl 388(%esp), %edx
+ movl 380(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 384(%esp), %ecx
+ movl 640(%esp), %eax
+ movl 80(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%eax)
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 20(%edx), %eax
+ movl %eax, (%esp)
+ leal 328(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 328(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 364(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 360(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 356(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 352(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 348(%esp), %ebp
+ movl 344(%esp), %edi
+ movl 340(%esp), %esi
+ movl 332(%esp), %ecx
+ movl 336(%esp), %edx
+ movl 640(%esp), %eax
+ movl 48(%esp), %ebx # 4-byte Reload
+ movl %ebx, 20(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 80(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 24(%edx), %eax
+ movl %eax, (%esp)
+ leal 280(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 48(%esp), %eax # 4-byte Reload
+ addl 280(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 324(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 320(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 316(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 312(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 308(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 304(%esp), %ebx
+ movl 300(%esp), %edi
+ movl 296(%esp), %esi
+ movl 292(%esp), %edx
+ movl 284(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 288(%esp), %ecx
+ movl 640(%esp), %eax
+ movl 48(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%eax)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 80(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 8(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 28(%edx), %eax
+ movl %eax, (%esp)
+ leal 232(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 80(%esp), %eax # 4-byte Reload
+ addl 232(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 276(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 272(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 268(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 264(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 260(%esp), %ebx
+ movl 256(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 252(%esp), %edi
+ movl 248(%esp), %esi
+ movl 244(%esp), %edx
+ movl 236(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 240(%esp), %ecx
+ movl 640(%esp), %eax
+ movl 80(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 32(%edx), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 184(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 228(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 224(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 220(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 212(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 208(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 204(%esp), %ebp
+ movl 200(%esp), %edi
+ movl 196(%esp), %esi
+ movl 188(%esp), %ecx
+ movl 192(%esp), %edx
+ movl 640(%esp), %eax
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl %ebx, 32(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 36(%edx), %eax
+ movl %eax, (%esp)
+ leal 136(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 52(%esp), %eax # 4-byte Reload
+ addl 136(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 176(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 168(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 164(%esp), %ebp
+ movl 160(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 156(%esp), %edi
+ movl 152(%esp), %esi
+ movl 148(%esp), %edx
+ movl 140(%esp), %ecx
+ movl 144(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 640(%esp), %eax
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl %ebx, 36(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ adcl 64(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 40(%edx), %eax
+ movl %eax, (%esp)
+ leal 88(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 52(%esp), %eax # 4-byte Reload
+ addl 88(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 92(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 132(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 128(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 124(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 120(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 116(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 112(%esp), %edi
+ movl 108(%esp), %esi
+ movl 104(%esp), %edx
+ movl 100(%esp), %ecx
+ movl 640(%esp), %eax
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl %ebx, 40(%eax)
+ movl %ebp, 44(%eax)
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 48(%eax)
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 52(%eax)
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 56(%eax)
+ adcl 64(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 60(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ movl %edi, 64(%eax)
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 60(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 68(%eax)
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 72(%eax)
+ movl %ecx, 76(%eax)
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 80(%eax)
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 84(%eax)
+ addl $620, %esp # imm = 0x26C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end161:
+ .size mcl_fpDbl_sqrPre11Lbmi2, .Lfunc_end161-mcl_fpDbl_sqrPre11Lbmi2
+
+ .globl mcl_fp_mont11Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont11Lbmi2,@function
+mcl_fp_mont11Lbmi2: # @mcl_fp_mont11Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1132, %esp # imm = 0x46C
+ calll .L162$pb
+.L162$pb:
+ popl %ebx
+.Ltmp23:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp23-.L162$pb), %ebx
+ movl 1164(%esp), %eax
+ movl -4(%eax), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1080(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 1080(%esp), %edi
+ movl 1084(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull %ebp, %eax
+ movl 1124(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 1120(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 1116(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1112(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1108(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 1100(%esp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 1096(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 1092(%esp), %esi
+ movl 1088(%esp), %ebp
+ movl %eax, (%esp)
+ leal 1032(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 1032(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1040(%esp), %ebp
+ adcl 1044(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1068(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1072(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 1076(%esp), %esi
+ sbbl %edi, %edi
+ movl 1160(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 984(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ andl $1, %edi
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 984(%esp), %ecx
+ adcl 988(%esp), %ebp
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 1000(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 1004(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1024(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ adcl 1028(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 936(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 936(%esp), %esi
+ adcl 940(%esp), %ebp
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 944(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 948(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 952(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 964(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 980(%esp), %esi
+ adcl $0, %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 888(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ addl 888(%esp), %ebp
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 912(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 928(%esp), %esi
+ movl %esi, %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 932(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ebp, %eax
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 840(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ movl %esi, %eax
+ andl $1, %eax
+ addl 840(%esp), %ebp
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 844(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 848(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %ecx # 4-byte Reload
+ adcl 852(%esp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 856(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 860(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 864(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 868(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 872(%esp), %ebp
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 876(%esp), %esi
+ adcl 880(%esp), %edi
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 884(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 792(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 40(%esp), %ecx # 4-byte Reload
+ addl 792(%esp), %ecx
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 820(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ adcl 824(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ adcl 828(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 836(%esp), %esi
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 744(%esp), %ecx
+ movl 1164(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv352x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 744(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 768(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 776(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 780(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 788(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 696(%esp), %ecx
+ movl 1156(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv352x32
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 696(%esp), %ecx
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 716(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 724(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 728(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 648(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 648(%esp), %ebp
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %ebp # 4-byte Reload
+ adcl 656(%esp), %ebp
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 676(%esp), %edi
+ adcl 680(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %esi # 4-byte Reload
+ adcl 692(%esp), %esi
+ adcl $0, %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 600(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 24(%esp), %ecx # 4-byte Reload
+ addl 600(%esp), %ecx
+ adcl 604(%esp), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %ebp # 4-byte Reload
+ adcl 608(%esp), %ebp
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 624(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 640(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 552(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ andl $1, %esi
+ movl %esi, %eax
+ addl 552(%esp), %edi
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 556(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl %ebp, %edi
+ adcl 560(%esp), %edi
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 564(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 568(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 572(%esp), %esi
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 576(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 580(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 584(%esp), %ebp
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 588(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 592(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 596(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 28(%esp), %ecx # 4-byte Reload
+ addl 504(%esp), %ecx
+ adcl 508(%esp), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 520(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 532(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 536(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 456(%esp), %ecx
+ movl 1164(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv352x32
+ andl $1, %ebp
+ movl %ebp, %eax
+ addl 456(%esp), %edi
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 460(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 464(%esp), %ebp
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 468(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 472(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 476(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 480(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 484(%esp), %edi
+ adcl 488(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 492(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 496(%esp), %esi
+ movl 24(%esp), %ecx # 4-byte Reload
+ adcl 500(%esp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 408(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 32(%esp), %ecx # 4-byte Reload
+ addl 408(%esp), %ecx
+ adcl 412(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 428(%esp), %ebp
+ adcl 432(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 444(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 360(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 360(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 368(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 380(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 384(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 392(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 312(%esp), %ecx
+ adcl 316(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 332(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 340(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %edi # 4-byte Reload
+ adcl 348(%esp), %edi
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 264(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 276(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 284(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 300(%esp), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %edi # 4-byte Reload
+ adcl 304(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 216(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 216(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 224(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 232(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ adcl 252(%esp), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 168(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ movl %esi, %ecx
+ andl $1, %ecx
+ addl 168(%esp), %ebp
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 172(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 184(%esp), %ebp
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 188(%esp), %edi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 120(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl %esi, %ecx
+ addl 120(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 124(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 128(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 132(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl 136(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 140(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 144(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 152(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %ebp # 4-byte Reload
+ adcl 156(%esp), %ebp
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl 20(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 72(%esp), %ecx
+ movl 1164(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv352x32
+ andl $1, %esi
+ addl 72(%esp), %edi
+ movl 48(%esp), %edi # 4-byte Reload
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 80(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 84(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl 88(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 92(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %edx # 4-byte Reload
+ adcl 100(%esp), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 108(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 112(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 116(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl 1164(%esp), %ebp
+ subl (%ebp), %eax
+ movl %ecx, %edx
+ sbbl 4(%ebp), %edx
+ movl 52(%esp), %ecx # 4-byte Reload
+ sbbl 8(%ebp), %ecx
+ sbbl 12(%ebp), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ sbbl 16(%ebp), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ sbbl 20(%ebp), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 24(%esp), %edi # 4-byte Reload
+ sbbl 24(%ebp), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl 28(%esp), %ebx # 4-byte Reload
+ sbbl 28(%ebp), %ebx
+ movl 32(%esp), %edi # 4-byte Reload
+ sbbl 32(%ebp), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ sbbl 36(%ebp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ sbbl 40(%ebp), %edi
+ movl %edi, %ebp
+ sbbl $0, %esi
+ andl $1, %esi
+ jne .LBB162_2
+# BB#1:
+ movl %ebx, 28(%esp) # 4-byte Spill
+.LBB162_2:
+ movl %esi, %ebx
+ testb %bl, %bl
+ movl 68(%esp), %ebx # 4-byte Reload
+ jne .LBB162_4
+# BB#3:
+ movl %eax, %ebx
+.LBB162_4:
+ movl 1152(%esp), %eax
+ movl %ebx, (%eax)
+ movl 56(%esp), %edi # 4-byte Reload
+ jne .LBB162_6
+# BB#5:
+ movl %edx, %edi
+.LBB162_6:
+ movl %edi, 4(%eax)
+ movl 52(%esp), %edx # 4-byte Reload
+ jne .LBB162_8
+# BB#7:
+ movl %ecx, %edx
+.LBB162_8:
+ movl %edx, 8(%eax)
+ jne .LBB162_10
+# BB#9:
+ movl 4(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+.LBB162_10:
+ movl 48(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ jne .LBB162_12
+# BB#11:
+ movl 8(%esp), %ecx # 4-byte Reload
+.LBB162_12:
+ movl %ecx, 16(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ jne .LBB162_14
+# BB#13:
+ movl 12(%esp), %ecx # 4-byte Reload
+.LBB162_14:
+ movl %ecx, 20(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ jne .LBB162_16
+# BB#15:
+ movl 16(%esp), %ecx # 4-byte Reload
+.LBB162_16:
+ movl %ecx, 24(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ jne .LBB162_18
+# BB#17:
+ movl 20(%esp), %ecx # 4-byte Reload
+.LBB162_18:
+ movl %ecx, 32(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ jne .LBB162_20
+# BB#19:
+ movl 60(%esp), %ecx # 4-byte Reload
+.LBB162_20:
+ movl %ecx, 36(%eax)
+ movl 64(%esp), %ecx # 4-byte Reload
+ jne .LBB162_22
+# BB#21:
+ movl %ebp, %ecx
+.LBB162_22:
+ movl %ecx, 40(%eax)
+ addl $1132, %esp # imm = 0x46C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end162:
+ .size mcl_fp_mont11Lbmi2, .Lfunc_end162-mcl_fp_mont11Lbmi2
+
+ .globl mcl_fp_montNF11Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF11Lbmi2,@function
+mcl_fp_montNF11Lbmi2: # @mcl_fp_montNF11Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1132, %esp # imm = 0x46C
+ calll .L163$pb
+.L163$pb:
+ popl %ebx
+.Ltmp24:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp24-.L163$pb), %ebx
+ movl 1164(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1080(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 1080(%esp), %ebp
+ movl 1084(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 1124(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 1120(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 1116(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1112(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1108(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 1100(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 1096(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 1092(%esp), %esi
+ movl 1088(%esp), %edi
+ movl %eax, (%esp)
+ leal 1032(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 1032(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1040(%esp), %edi
+ adcl 1044(%esp), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 1048(%esp), %ebp
+ movl 28(%esp), %esi # 4-byte Reload
+ adcl 1052(%esp), %esi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1068(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1072(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1076(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 984(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 1028(%esp), %edx
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 984(%esp), %ecx
+ adcl 988(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 996(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ adcl 1000(%esp), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 1004(%esp), %esi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 936(%esp), %ecx
+ movl 1164(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv352x32
+ addl 936(%esp), %ebp
+ adcl 940(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 944(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 948(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 952(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ adcl 956(%esp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 960(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 980(%esp), %ebp
+ movl 1160(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 888(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 932(%esp), %eax
+ addl 888(%esp), %edi
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 892(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 896(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 900(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 904(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ adcl 908(%esp), %esi
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 912(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 916(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 920(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 924(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 928(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ adcl $0, %ebp
+ movl %edi, %eax
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 840(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 840(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 856(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 860(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 872(%esp), %edi
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 876(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 884(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 792(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 836(%esp), %eax
+ movl 44(%esp), %edx # 4-byte Reload
+ addl 792(%esp), %edx
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 796(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 800(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 804(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 808(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 812(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 816(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 820(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 824(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 828(%esp), %ebp
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 832(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 744(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 744(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 764(%esp), %esi
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 768(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 780(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 784(%esp), %ebp
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 696(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 740(%esp), %edx
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 696(%esp), %eax
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 700(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 704(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 708(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 712(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 716(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 720(%esp), %edi
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 724(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 728(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 732(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 736(%esp), %esi
+ adcl $0, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 648(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 648(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %ebp # 4-byte Reload
+ adcl 656(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 672(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 688(%esp), %esi
+ movl %esi, %edi
+ movl 40(%esp), %esi # 4-byte Reload
+ adcl 692(%esp), %esi
+ movl 1160(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 600(%esp), %ecx
+ movl 1156(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv352x32
+ movl 644(%esp), %eax
+ movl 28(%esp), %ecx # 4-byte Reload
+ addl 600(%esp), %ecx
+ adcl 604(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 608(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 612(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 616(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 620(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 624(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 628(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 632(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 636(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ adcl 640(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ adcl $0, %ebp
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 552(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 552(%esp), %esi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 560(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 576(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 596(%esp), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl 1160(%esp), %ecx
+ movl %ecx, %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 548(%esp), %edx
+ movl 32(%esp), %eax # 4-byte Reload
+ addl 504(%esp), %eax
+ adcl 508(%esp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 512(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 516(%esp), %ebp
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 520(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 524(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 528(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 532(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 536(%esp), %edi
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 540(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 544(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, %esi
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 456(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 456(%esp), %esi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 468(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 480(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 488(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %edi # 4-byte Reload
+ adcl 496(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 408(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 452(%esp), %edx
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 408(%esp), %ecx
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 412(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 428(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 444(%esp), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %edi # 4-byte Reload
+ adcl 448(%esp), %edi
+ adcl $0, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 360(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 360(%esp), %esi
+ adcl 364(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 372(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 392(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ adcl 400(%esp), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 356(%esp), %edx
+ movl 52(%esp), %ecx # 4-byte Reload
+ addl 312(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 320(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 332(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 340(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 264(%esp), %esi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 276(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %edi, %esi
+ adcl 284(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 292(%esp), %edi
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 216(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 260(%esp), %edx
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 216(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 224(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 232(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 240(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %ebp # 4-byte Reload
+ adcl 244(%esp), %ebp
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 168(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 168(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 176(%esp), %esi
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 180(%esp), %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 196(%esp), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ebp # 4-byte Reload
+ adcl 204(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 120(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 164(%esp), %edx
+ movl 64(%esp), %ecx # 4-byte Reload
+ addl 120(%esp), %ecx
+ adcl 124(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ adcl 128(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 132(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 136(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 140(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 144(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 152(%esp), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 156(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 72(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 72(%esp), %edi
+ movl 48(%esp), %edi # 4-byte Reload
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 80(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 84(%esp), %edi
+ adcl 88(%esp), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 92(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %edx # 4-byte Reload
+ adcl 100(%esp), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl 108(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 112(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 116(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 1164(%esp), %ebx
+ subl (%ebx), %edx
+ movl %ecx, %esi
+ sbbl 4(%ebx), %esi
+ movl %edi, %ecx
+ sbbl 8(%ebx), %ecx
+ movl 44(%esp), %eax # 4-byte Reload
+ sbbl 12(%ebx), %eax
+ movl 40(%esp), %ebp # 4-byte Reload
+ sbbl 16(%ebx), %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ movl 28(%esp), %ebp # 4-byte Reload
+ sbbl 20(%ebx), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 32(%esp), %ebp # 4-byte Reload
+ sbbl 24(%ebx), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 36(%esp), %ebp # 4-byte Reload
+ sbbl 28(%ebx), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ sbbl 32(%ebx), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ sbbl 36(%ebx), %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ sbbl 40(%ebx), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl %ebp, %ebx
+ sarl $31, %ebx
+ testl %ebx, %ebx
+ movl 68(%esp), %ebx # 4-byte Reload
+ js .LBB163_2
+# BB#1:
+ movl %edx, %ebx
+.LBB163_2:
+ movl 1152(%esp), %edx
+ movl %ebx, (%edx)
+ movl 60(%esp), %ebp # 4-byte Reload
+ js .LBB163_4
+# BB#3:
+ movl %esi, %ebp
+.LBB163_4:
+ movl %ebp, 4(%edx)
+ js .LBB163_6
+# BB#5:
+ movl %ecx, %edi
+.LBB163_6:
+ movl %edi, 8(%edx)
+ movl 44(%esp), %ecx # 4-byte Reload
+ js .LBB163_8
+# BB#7:
+ movl %eax, %ecx
+.LBB163_8:
+ movl %ecx, 12(%edx)
+ movl 40(%esp), %eax # 4-byte Reload
+ js .LBB163_10
+# BB#9:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB163_10:
+ movl %eax, 16(%edx)
+ movl 28(%esp), %eax # 4-byte Reload
+ js .LBB163_12
+# BB#11:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB163_12:
+ movl %eax, 20(%edx)
+ movl 32(%esp), %eax # 4-byte Reload
+ js .LBB163_14
+# BB#13:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB163_14:
+ movl %eax, 24(%edx)
+ movl 36(%esp), %eax # 4-byte Reload
+ js .LBB163_16
+# BB#15:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB163_16:
+ movl %eax, 28(%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB163_18
+# BB#17:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB163_18:
+ movl %eax, 32(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB163_20
+# BB#19:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB163_20:
+ movl %eax, 36(%edx)
+ movl 64(%esp), %eax # 4-byte Reload
+ js .LBB163_22
+# BB#21:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB163_22:
+ movl %eax, 40(%edx)
+ addl $1132, %esp # imm = 0x46C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end163:
+ .size mcl_fp_montNF11Lbmi2, .Lfunc_end163-mcl_fp_montNF11Lbmi2
+
+ .globl mcl_fp_montRed11Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed11Lbmi2,@function
+mcl_fp_montRed11Lbmi2: # @mcl_fp_montRed11Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $668, %esp # imm = 0x29C
+ calll .L164$pb
+.L164$pb:
+ popl %eax
+.Ltmp25:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp25-.L164$pb), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 696(%esp), %edx
+ movl -4(%edx), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 692(%esp), %ecx
+ movl (%ecx), %ebx
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl 4(%ecx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ imull %esi, %ebx
+ movl 84(%ecx), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 80(%ecx), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 76(%ecx), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 72(%ecx), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 68(%ecx), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 64(%ecx), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 60(%ecx), %esi
+ movl %esi, 124(%esp) # 4-byte Spill
+ movl 56(%ecx), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 52(%ecx), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 48(%ecx), %esi
+ movl %esi, 128(%esp) # 4-byte Spill
+ movl 44(%ecx), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 40(%ecx), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 32(%ecx), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 28(%ecx), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 24(%ecx), %ebp
+ movl 20(%ecx), %edi
+ movl 16(%ecx), %esi
+ movl 12(%ecx), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 8(%ecx), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl (%edx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 40(%edx), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 36(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 32(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 28(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 24(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 16(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 12(%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 8(%edx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 4(%edx), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl %ebx, (%esp)
+ leal 616(%esp), %ecx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 60(%esp), %eax # 4-byte Reload
+ addl 616(%esp), %eax
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 620(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 632(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 636(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 640(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 568(%esp), %esi
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 572(%esp), %edx
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 600(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ movl %ebp, %eax
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 520(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 520(%esp), %ebp
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 524(%esp), %ecx
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 548(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl 124(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl 120(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 472(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 472(%esp), %esi
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 476(%esp), %ecx
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 124(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 120(%esp) # 4-byte Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 424(%esp), %ebp
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 428(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl 464(%esp), %ebp
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 376(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 376(%esp), %esi
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 380(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %esi # 4-byte Reload
+ adcl 404(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 412(%esp), %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 328(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 328(%esp), %edi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 332(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 344(%esp), %edi
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl 352(%esp), %esi
+ movl %esi, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %esi # 4-byte Reload
+ adcl 356(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 280(%esp), %ecx
+ movl 696(%esp), %eax
+ movl %eax, %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 280(%esp), %ebp
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 284(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 292(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 132(%esp), %edi # 4-byte Reload
+ adcl 296(%esp), %edi
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ adcl 304(%esp), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl %ebp, %eax
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 232(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 232(%esp), %ebp
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 236(%esp), %ebp
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 244(%esp), %edi
+ movl %edi, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 276(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl %ebp, %eax
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 184(%esp), %ebp
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 188(%esp), %ecx
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 136(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 136(%esp), %esi
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 140(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl 128(%esp), %edx # 4-byte Reload
+ adcl 144(%esp), %edx
+ movl %edx, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 148(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 152(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 80(%esp), %ebx # 4-byte Reload
+ adcl 180(%esp), %ebx
+ movl %ebx, 80(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ subl 12(%esp), %edi # 4-byte Folded Reload
+ sbbl 4(%esp), %edx # 4-byte Folded Reload
+ sbbl 8(%esp), %ecx # 4-byte Folded Reload
+ sbbl 16(%esp), %esi # 4-byte Folded Reload
+ movl 124(%esp), %eax # 4-byte Reload
+ sbbl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ sbbl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ sbbl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ sbbl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ sbbl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ sbbl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ movl %ebp, %ebx
+ sbbl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB164_2
+# BB#1:
+ movl %esi, 112(%esp) # 4-byte Spill
+.LBB164_2:
+ testb %bl, %bl
+ movl 132(%esp), %esi # 4-byte Reload
+ jne .LBB164_4
+# BB#3:
+ movl %edi, %esi
+.LBB164_4:
+ movl 688(%esp), %edi
+ movl %esi, (%edi)
+ movl 104(%esp), %esi # 4-byte Reload
+ jne .LBB164_6
+# BB#5:
+ movl %edx, 128(%esp) # 4-byte Spill
+.LBB164_6:
+ movl 128(%esp), %edx # 4-byte Reload
+ movl %edx, 4(%edi)
+ movl 116(%esp), %edx # 4-byte Reload
+ jne .LBB164_8
+# BB#7:
+ movl %ecx, %edx
+.LBB164_8:
+ movl %edx, 8(%edi)
+ movl 112(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%edi)
+ movl 92(%esp), %edx # 4-byte Reload
+ movl 124(%esp), %ecx # 4-byte Reload
+ jne .LBB164_10
+# BB#9:
+ movl 64(%esp), %ecx # 4-byte Reload
+.LBB164_10:
+ movl %ecx, 16(%edi)
+ movl 96(%esp), %ecx # 4-byte Reload
+ movl 120(%esp), %eax # 4-byte Reload
+ jne .LBB164_12
+# BB#11:
+ movl 68(%esp), %eax # 4-byte Reload
+.LBB164_12:
+ movl %eax, 20(%edi)
+ movl 80(%esp), %eax # 4-byte Reload
+ movl 108(%esp), %ebp # 4-byte Reload
+ jne .LBB164_14
+# BB#13:
+ movl 72(%esp), %ebp # 4-byte Reload
+.LBB164_14:
+ movl %ebp, 24(%edi)
+ jne .LBB164_16
+# BB#15:
+ movl 76(%esp), %esi # 4-byte Reload
+.LBB164_16:
+ movl %esi, 28(%edi)
+ jne .LBB164_18
+# BB#17:
+ movl 84(%esp), %edx # 4-byte Reload
+.LBB164_18:
+ movl %edx, 32(%edi)
+ jne .LBB164_20
+# BB#19:
+ movl 88(%esp), %ecx # 4-byte Reload
+.LBB164_20:
+ movl %ecx, 36(%edi)
+ jne .LBB164_22
+# BB#21:
+ movl 100(%esp), %eax # 4-byte Reload
+.LBB164_22:
+ movl %eax, 40(%edi)
+ addl $668, %esp # imm = 0x29C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end164:
+ .size mcl_fp_montRed11Lbmi2, .Lfunc_end164-mcl_fp_montRed11Lbmi2
+
+ .globl mcl_fp_addPre11Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre11Lbmi2,@function
+mcl_fp_addPre11Lbmi2: # @mcl_fp_addPre11Lbmi2
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 20(%esp), %ecx
+ addl (%ecx), %edx
+ adcl 4(%ecx), %esi
+ movl 8(%eax), %edi
+ adcl 8(%ecx), %edi
+ movl 16(%esp), %ebx
+ movl %edx, (%ebx)
+ movl 12(%ecx), %edx
+ movl %esi, 4(%ebx)
+ movl 16(%ecx), %esi
+ adcl 12(%eax), %edx
+ adcl 16(%eax), %esi
+ movl %edi, 8(%ebx)
+ movl 20(%eax), %edi
+ movl %edx, 12(%ebx)
+ movl 20(%ecx), %edx
+ adcl %edi, %edx
+ movl 24(%eax), %edi
+ movl %esi, 16(%ebx)
+ movl 24(%ecx), %esi
+ adcl %edi, %esi
+ movl 28(%eax), %edi
+ movl %edx, 20(%ebx)
+ movl 28(%ecx), %edx
+ adcl %edi, %edx
+ movl 32(%eax), %edi
+ movl %esi, 24(%ebx)
+ movl 32(%ecx), %esi
+ adcl %edi, %esi
+ movl 36(%eax), %edi
+ movl %edx, 28(%ebx)
+ movl 36(%ecx), %edx
+ adcl %edi, %edx
+ movl %esi, 32(%ebx)
+ movl %edx, 36(%ebx)
+ movl 40(%eax), %eax
+ movl 40(%ecx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 40(%ebx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end165:
+ .size mcl_fp_addPre11Lbmi2, .Lfunc_end165-mcl_fp_addPre11Lbmi2
+
+ .globl mcl_fp_subPre11Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre11Lbmi2,@function
+mcl_fp_subPre11Lbmi2: # @mcl_fp_subPre11Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %eax, %eax
+ movl 28(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edx), %ebx
+ movl 20(%esp), %ebp
+ movl %esi, (%ebp)
+ movl 12(%ecx), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ebp)
+ movl 16(%ecx), %edi
+ sbbl 16(%edx), %edi
+ movl %ebx, 8(%ebp)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%ebp)
+ movl 20(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %edi, 16(%ebp)
+ movl 24(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 28(%edx), %ebx
+ movl %esi, 20(%ebp)
+ movl 28(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 32(%edx), %ebx
+ movl %edi, 24(%ebp)
+ movl 32(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 36(%edx), %ebx
+ movl %esi, 28(%ebp)
+ movl 36(%ecx), %esi
+ sbbl %ebx, %esi
+ movl %edi, 32(%ebp)
+ movl %esi, 36(%ebp)
+ movl 40(%edx), %edx
+ movl 40(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 40(%ebp)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end166:
+ .size mcl_fp_subPre11Lbmi2, .Lfunc_end166-mcl_fp_subPre11Lbmi2
+
+ .globl mcl_fp_shr1_11Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_11Lbmi2,@function
+mcl_fp_shr1_11Lbmi2: # @mcl_fp_shr1_11Lbmi2
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl 8(%esp), %esi
+ movl %ecx, (%esi)
+ movl 8(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 4(%esi)
+ movl 12(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 8(%esi)
+ movl 16(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 12(%esi)
+ movl 20(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 16(%esi)
+ movl 24(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 20(%esi)
+ movl 28(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 24(%esi)
+ movl 32(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 28(%esi)
+ movl 36(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 32(%esi)
+ movl 40(%eax), %eax
+ shrdl $1, %eax, %edx
+ movl %edx, 36(%esi)
+ shrl %eax
+ movl %eax, 40(%esi)
+ popl %esi
+ retl
+.Lfunc_end167:
+ .size mcl_fp_shr1_11Lbmi2, .Lfunc_end167-mcl_fp_shr1_11Lbmi2
+
+ .globl mcl_fp_add11Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add11Lbmi2,@function
+mcl_fp_add11Lbmi2: # @mcl_fp_add11Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $32, %esp
+ movl 60(%esp), %edi
+ movl (%edi), %ecx
+ movl 4(%edi), %eax
+ movl 56(%esp), %esi
+ addl (%esi), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ adcl 4(%esi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 8(%edi), %eax
+ adcl 8(%esi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ movl 16(%esi), %ecx
+ adcl 12(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ adcl 16(%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ adcl 20(%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ adcl 24(%edi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 28(%esi), %ebx
+ adcl 28(%edi), %ebx
+ movl %ebx, (%esp) # 4-byte Spill
+ movl 32(%esi), %ecx
+ adcl 32(%edi), %ecx
+ movl 36(%esi), %eax
+ adcl 36(%edi), %eax
+ movl 40(%esi), %edx
+ adcl 40(%edi), %edx
+ movl 52(%esp), %esi
+ movl %ebp, (%esi)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%esi)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%esi)
+ movl 20(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%esi)
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%esi)
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%esi)
+ movl 8(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%esi)
+ movl %ebx, 28(%esi)
+ movl %ecx, 32(%esi)
+ movl %eax, 36(%esi)
+ movl %edx, 40(%esi)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 64(%esp), %ebp
+ movl 4(%esp), %edi # 4-byte Reload
+ subl (%ebp), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl 28(%esp), %edi # 4-byte Reload
+ sbbl 4(%ebp), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 24(%esp), %edi # 4-byte Reload
+ sbbl 8(%ebp), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 20(%esp), %edi # 4-byte Reload
+ sbbl 12(%ebp), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 16(%esp), %edi # 4-byte Reload
+ sbbl 16(%ebp), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %edi # 4-byte Reload
+ sbbl 20(%ebp), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 8(%esp), %edi # 4-byte Reload
+ sbbl 24(%ebp), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl (%esp), %edi # 4-byte Reload
+ sbbl 28(%ebp), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ sbbl 32(%ebp), %ecx
+ sbbl 36(%ebp), %eax
+ sbbl 40(%ebp), %edx
+ movl %edx, %edi
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB168_2
+# BB#1: # %nocarry
+ movl 4(%esp), %ebx # 4-byte Reload
+ movl %ebx, (%esi)
+ movl 28(%esp), %ebx # 4-byte Reload
+ movl %ebx, 4(%esi)
+ movl 24(%esp), %ebx # 4-byte Reload
+ movl %ebx, 8(%esi)
+ movl 20(%esp), %ebx # 4-byte Reload
+ movl %ebx, 12(%esi)
+ movl 16(%esp), %ebx # 4-byte Reload
+ movl %ebx, 16(%esi)
+ movl 12(%esp), %ebx # 4-byte Reload
+ movl %ebx, 20(%esi)
+ movl 8(%esp), %ebx # 4-byte Reload
+ movl %ebx, 24(%esi)
+ movl (%esp), %edx # 4-byte Reload
+ movl %edx, 28(%esi)
+ movl %ecx, 32(%esi)
+ movl %eax, 36(%esi)
+ movl %edi, 40(%esi)
+.LBB168_2: # %carry
+ addl $32, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end168:
+ .size mcl_fp_add11Lbmi2, .Lfunc_end168-mcl_fp_add11Lbmi2
+
+ .globl mcl_fp_addNF11Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF11Lbmi2,@function
+mcl_fp_addNF11Lbmi2: # @mcl_fp_addNF11Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $80, %esp
+ movl 108(%esp), %edx
+ movl (%edx), %eax
+ movl 4(%edx), %ecx
+ movl 104(%esp), %esi
+ addl (%esi), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 4(%esi), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 40(%edx), %ebx
+ movl 36(%edx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 32(%edx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 28(%edx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 24(%edx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 20(%edx), %ebp
+ movl 16(%edx), %edi
+ movl 12(%edx), %eax
+ movl 8(%edx), %ecx
+ adcl 8(%esi), %ecx
+ adcl 12(%esi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 16(%esi), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 20(%esi), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 24(%esi), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 28(%esi), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 32(%esi), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 36(%esi), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ adcl 40(%esi), %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 112(%esp), %ebx
+ movl 52(%esp), %esi # 4-byte Reload
+ subl (%ebx), %esi
+ movl 60(%esp), %ecx # 4-byte Reload
+ sbbl 4(%ebx), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl %edx, %ecx
+ sbbl 8(%ebx), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ sbbl 12(%ebx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ sbbl 16(%ebx), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ sbbl 20(%ebx), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ movl %eax, %ecx
+ sbbl 24(%ebx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ sbbl 28(%ebx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ sbbl 32(%ebx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ movl %edi, %ecx
+ movl %edi, %ebp
+ sbbl 36(%ebx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ movl %ecx, %edi
+ sbbl 40(%ebx), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl %edi, %ebx
+ movl 52(%esp), %edi # 4-byte Reload
+ sarl $31, %ebx
+ testl %ebx, %ebx
+ js .LBB169_2
+# BB#1:
+ movl %esi, %edi
+.LBB169_2:
+ movl 100(%esp), %esi
+ movl %edi, (%esi)
+ movl 60(%esp), %edi # 4-byte Reload
+ js .LBB169_4
+# BB#3:
+ movl (%esp), %edi # 4-byte Reload
+.LBB169_4:
+ movl %edi, 4(%esi)
+ movl %eax, %edi
+ js .LBB169_6
+# BB#5:
+ movl 4(%esp), %edx # 4-byte Reload
+.LBB169_6:
+ movl %edx, 8(%esi)
+ movl %ebp, %ecx
+ movl 72(%esp), %edx # 4-byte Reload
+ movl 40(%esp), %eax # 4-byte Reload
+ js .LBB169_8
+# BB#7:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB169_8:
+ movl %eax, 12(%esi)
+ movl 76(%esp), %eax # 4-byte Reload
+ movl 44(%esp), %ebp # 4-byte Reload
+ js .LBB169_10
+# BB#9:
+ movl 12(%esp), %ebx # 4-byte Reload
+ movl %ebx, 48(%esp) # 4-byte Spill
+.LBB169_10:
+ movl 48(%esp), %ebx # 4-byte Reload
+ movl %ebx, 16(%esi)
+ js .LBB169_12
+# BB#11:
+ movl 16(%esp), %ebp # 4-byte Reload
+.LBB169_12:
+ movl %ebp, 20(%esi)
+ js .LBB169_14
+# BB#13:
+ movl 20(%esp), %edi # 4-byte Reload
+.LBB169_14:
+ movl %edi, 24(%esi)
+ js .LBB169_16
+# BB#15:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB169_16:
+ movl %eax, 28(%esi)
+ js .LBB169_18
+# BB#17:
+ movl 28(%esp), %edx # 4-byte Reload
+.LBB169_18:
+ movl %edx, 32(%esi)
+ js .LBB169_20
+# BB#19:
+ movl 32(%esp), %ecx # 4-byte Reload
+.LBB169_20:
+ movl %ecx, 36(%esi)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB169_22
+# BB#21:
+ movl 36(%esp), %eax # 4-byte Reload
+.LBB169_22:
+ movl %eax, 40(%esi)
+ addl $80, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end169:
+ .size mcl_fp_addNF11Lbmi2, .Lfunc_end169-mcl_fp_addNF11Lbmi2
+
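+# mcl_fp_sub11Lbmi2: 11-limb modular subtraction; computes x - y and, on
+# borrow, adds the modulus back in the %carry branch.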
+ .globl mcl_fp_sub11Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub11Lbmi2,@function
+mcl_fp_sub11Lbmi2: # @mcl_fp_sub11Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $40, %esp
+ movl 64(%esp), %ebp
+ movl (%ebp), %ecx
+ movl 4(%ebp), %eax
+ movl 68(%esp), %edi
+ subl (%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ sbbl 4(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 8(%ebp), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 12(%ebp), %ebx
+ sbbl 12(%edi), %ebx
+ movl 16(%ebp), %eax
+ sbbl 16(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 20(%ebp), %eax
+ sbbl 20(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 24(%ebp), %eax
+ sbbl 24(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 28(%ebp), %edx
+ sbbl 28(%edi), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl 32(%ebp), %ecx
+ sbbl 32(%edi), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 36(%ebp), %eax
+ sbbl 36(%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 40(%ebp), %eax
+ sbbl 40(%edi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebx, %ebp
+ movl 16(%esp), %esi # 4-byte Reload
+ movl $0, %ebx
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 60(%esp), %ebx
+ movl %esi, (%ebx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl %ebp, 12(%ebx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%ebx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%ebx)
+ movl %edx, 28(%ebx)
+ movl %ecx, 32(%ebx)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%ebx)
+ movl %ecx, %edi
+ movl %eax, 40(%ebx)
+ je .LBB170_2
+# BB#1: # %carry
+ movl 72(%esp), %eax
+ addl (%eax), %esi
+ movl %esi, (%ebx)
+ movl 28(%esp), %edx # 4-byte Reload
+ movl %eax, %esi
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 8(%esi), %ecx
+ movl 12(%esi), %eax
+ adcl %ebp, %eax
+ movl %ecx, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl 24(%esi), %ecx
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl 28(%esi), %eax
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%ebx)
+ movl 32(%esi), %ecx
+ adcl (%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+ movl %ecx, 32(%ebx)
+ movl 36(%esi), %eax
+ adcl %edi, %eax
+ movl %eax, 36(%ebx)
+ movl 40(%esi), %eax
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%ebx)
+.LBB170_2: # %nocarry
+ addl $40, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end170:
+ .size mcl_fp_sub11Lbmi2, .Lfunc_end170-mcl_fp_sub11Lbmi2
+
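+# mcl_fp_subNF11Lbmi2: 11-limb branchless subtraction; builds a sign mask from
+# the final borrow and adds (modulus AND mask) to the raw difference.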
+ .globl mcl_fp_subNF11Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF11Lbmi2,@function
+mcl_fp_subNF11Lbmi2: # @mcl_fp_subNF11Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $64, %esp
+ movl 88(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %ecx
+ movl 92(%esp), %edi
+ subl (%edi), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ sbbl 4(%edi), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 40(%eax), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 36(%eax), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 32(%eax), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 28(%eax), %ebx
+ movl 24(%eax), %ebp
+ movl 20(%eax), %esi
+ movl 16(%eax), %edx
+ movl 12(%eax), %ecx
+ movl 8(%eax), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ sbbl 12(%edi), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ sbbl 16(%edi), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ sbbl 20(%edi), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ sbbl 24(%edi), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ sbbl 28(%edi), %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ sbbl 32(%edi), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ sbbl 36(%edi), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ sarl $31, %ecx
+ movl %ecx, %edx
+ shldl $1, %eax, %edx
+ movl 96(%esp), %ebx
+ movl 4(%ebx), %eax
+ andl %edx, %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ andl (%ebx), %edx
+ movl 40(%ebx), %eax
+ andl %ecx, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 36(%ebx), %eax
+ andl %ecx, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 32(%ebx), %eax
+ andl %ecx, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 28(%ebx), %eax
+ andl %ecx, %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 24(%ebx), %ebp
+ andl %ecx, %ebp
+ rorxl $31, %ecx, %eax
+ andl 20(%ebx), %ecx
+ movl 16(%ebx), %edi
+ andl %eax, %edi
+ movl 12(%ebx), %esi
+ andl %eax, %esi
+ andl 8(%ebx), %eax
+ addl 40(%esp), %edx # 4-byte Folded Reload
+ movl 48(%esp), %ebx # 4-byte Reload
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 48(%esp) # 4-byte Spill
+ movl 84(%esp), %ebx
+ movl %edx, (%ebx)
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ movl %edx, 4(%ebx)
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %eax, 8(%ebx)
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 12(%ebx)
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %edi, 16(%ebx)
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ movl %ecx, 20(%ebx)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %ebp, 24(%ebx)
+ movl 4(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%ebx)
+ movl %eax, 36(%ebx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%ebx)
+ addl $64, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end171:
+ .size mcl_fp_subNF11Lbmi2, .Lfunc_end171-mcl_fp_subNF11Lbmi2
+
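+# mcl_fpDbl_add11Lbmi2: adds two 22-limb double-width values; the low half is
+# stored directly and the high half is conditionally reduced by the modulus.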
+ .globl mcl_fpDbl_add11Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add11Lbmi2,@function
+mcl_fpDbl_add11Lbmi2: # @mcl_fpDbl_add11Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $80, %esp
+ movl 108(%esp), %ecx
+ movl 104(%esp), %edi
+ movl 12(%edi), %esi
+ movl 16(%edi), %edx
+ movl 8(%ecx), %ebx
+ movl (%ecx), %ebp
+ addl (%edi), %ebp
+ movl 100(%esp), %eax
+ movl %ebp, (%eax)
+ movl 4(%ecx), %ebp
+ adcl 4(%edi), %ebp
+ adcl 8(%edi), %ebx
+ adcl 12(%ecx), %esi
+ adcl 16(%ecx), %edx
+ movl %ebp, 4(%eax)
+ movl 52(%ecx), %ebp
+ movl %ebx, 8(%eax)
+ movl 20(%ecx), %ebx
+ movl %esi, 12(%eax)
+ movl 20(%edi), %esi
+ adcl %ebx, %esi
+ movl 24(%ecx), %ebx
+ movl %edx, 16(%eax)
+ movl 24(%edi), %edx
+ adcl %ebx, %edx
+ movl 28(%ecx), %ebx
+ movl %esi, 20(%eax)
+ movl 28(%edi), %esi
+ adcl %ebx, %esi
+ movl 32(%ecx), %ebx
+ movl %edx, 24(%eax)
+ movl 32(%edi), %edx
+ adcl %ebx, %edx
+ movl 36(%ecx), %ebx
+ movl %esi, 28(%eax)
+ movl 36(%edi), %esi
+ adcl %ebx, %esi
+ movl 40(%ecx), %ebx
+ movl %edx, 32(%eax)
+ movl 40(%edi), %edx
+ adcl %ebx, %edx
+ movl 44(%ecx), %ebx
+ movl %esi, 36(%eax)
+ movl 44(%edi), %esi
+ adcl %ebx, %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 48(%ecx), %esi
+ movl %edx, 40(%eax)
+ movl 48(%edi), %eax
+ adcl %esi, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 52(%edi), %eax
+ adcl %ebp, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 56(%ecx), %edx
+ movl 56(%edi), %eax
+ adcl %edx, %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%ecx), %edx
+ movl 60(%edi), %eax
+ adcl %edx, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%ecx), %edx
+ movl 64(%edi), %eax
+ adcl %edx, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%ecx), %eax
+ movl 68(%edi), %edx
+ adcl %eax, %edx
+ movl 72(%ecx), %esi
+ movl 72(%edi), %eax
+ adcl %esi, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 76(%ecx), %ebx
+ movl 76(%edi), %esi
+ adcl %ebx, %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 80(%ecx), %ebp
+ movl 80(%edi), %ebx
+ adcl %ebp, %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 84(%ecx), %ecx
+ movl 84(%edi), %edi
+ adcl %ecx, %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl 112(%esp), %ebp
+ movl 68(%esp), %edi # 4-byte Reload
+ subl (%ebp), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ sbbl 4(%ebp), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ sbbl 8(%ebp), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ sbbl 12(%ebp), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ sbbl 16(%ebp), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ sbbl 20(%ebp), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl %edx, %edi
+ sbbl 24(%ebp), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ sbbl 28(%ebp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ sbbl 32(%ebp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ movl 40(%esp), %ebx # 4-byte Reload
+ sbbl 36(%ebp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ebx, %edi
+ sbbl 40(%ebp), %edi
+ sbbl $0, %ecx
+ andl $1, %ecx
+ jne .LBB172_2
+# BB#1:
+ movl %edi, %ebx
+.LBB172_2:
+ testb %cl, %cl
+ movl 68(%esp), %ecx # 4-byte Reload
+ movl 64(%esp), %esi # 4-byte Reload
+ movl 60(%esp), %edi # 4-byte Reload
+ movl 56(%esp), %ebp # 4-byte Reload
+ jne .LBB172_4
+# BB#3:
+ movl (%esp), %edx # 4-byte Reload
+ movl 4(%esp), %esi # 4-byte Reload
+ movl 8(%esp), %edi # 4-byte Reload
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 24(%esp), %ecx # 4-byte Reload
+.LBB172_4:
+ movl 100(%esp), %eax
+ movl %ecx, 44(%eax)
+ movl 72(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%eax)
+ movl 76(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%eax)
+ movl %ebp, 56(%eax)
+ movl %edi, 60(%eax)
+ movl %esi, 64(%eax)
+ movl %edx, 68(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ movl 44(%esp), %edx # 4-byte Reload
+ jne .LBB172_6
+# BB#5:
+ movl 28(%esp), %edx # 4-byte Reload
+.LBB172_6:
+ movl %edx, 72(%eax)
+ movl 48(%esp), %edx # 4-byte Reload
+ jne .LBB172_8
+# BB#7:
+ movl 32(%esp), %edx # 4-byte Reload
+.LBB172_8:
+ movl %edx, 76(%eax)
+ jne .LBB172_10
+# BB#9:
+ movl 36(%esp), %ecx # 4-byte Reload
+.LBB172_10:
+ movl %ecx, 80(%eax)
+ movl %ebx, 84(%eax)
+ addl $80, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end172:
+ .size mcl_fpDbl_add11Lbmi2, .Lfunc_end172-mcl_fpDbl_add11Lbmi2
+
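+# mcl_fpDbl_sub11Lbmi2: subtracts two 22-limb double-width values; on borrow
+# the modulus is added to the high half of the result.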
+ .globl mcl_fpDbl_sub11Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub11Lbmi2,@function
+mcl_fpDbl_sub11Lbmi2: # @mcl_fpDbl_sub11Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $72, %esp
+ movl 96(%esp), %edx
+ movl (%edx), %eax
+ movl 4(%edx), %esi
+ movl 100(%esp), %ebp
+ subl (%ebp), %eax
+ sbbl 4(%ebp), %esi
+ movl 8(%edx), %edi
+ sbbl 8(%ebp), %edi
+ movl 92(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 12(%edx), %eax
+ sbbl 12(%ebp), %eax
+ movl %esi, 4(%ecx)
+ movl 16(%edx), %esi
+ sbbl 16(%ebp), %esi
+ movl %edi, 8(%ecx)
+ movl 20(%ebp), %edi
+ movl %eax, 12(%ecx)
+ movl 20(%edx), %eax
+ sbbl %edi, %eax
+ movl 24(%ebp), %edi
+ movl %esi, 16(%ecx)
+ movl 24(%edx), %esi
+ sbbl %edi, %esi
+ movl 28(%ebp), %edi
+ movl %eax, 20(%ecx)
+ movl 28(%edx), %eax
+ sbbl %edi, %eax
+ movl 32(%ebp), %edi
+ movl %esi, 24(%ecx)
+ movl 32(%edx), %esi
+ sbbl %edi, %esi
+ movl 36(%ebp), %edi
+ movl %eax, 28(%ecx)
+ movl 36(%edx), %eax
+ sbbl %edi, %eax
+ movl 40(%ebp), %edi
+ movl %esi, 32(%ecx)
+ movl 40(%edx), %esi
+ sbbl %edi, %esi
+ movl 44(%ebp), %edi
+ movl %eax, 36(%ecx)
+ movl 44(%edx), %eax
+ sbbl %edi, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%ebp), %eax
+ movl %esi, 40(%ecx)
+ movl 48(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 52(%ebp), %eax
+ movl 52(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 56(%ebp), %eax
+ movl 56(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 60(%ebp), %eax
+ movl 60(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 64(%ebp), %eax
+ movl 64(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 68(%ebp), %eax
+ movl 68(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 72(%ebp), %eax
+ movl 72(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 76(%ebp), %eax
+ movl 76(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 80(%ebp), %eax
+ movl 80(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 84(%ebp), %eax
+ movl 84(%edx), %edx
+ sbbl %eax, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 104(%esp), %ebp
+ jne .LBB173_1
+# BB#2:
+ movl $0, 28(%esp) # 4-byte Folded Spill
+ jmp .LBB173_3
+.LBB173_1:
+ movl 40(%ebp), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+.LBB173_3:
+ testb %al, %al
+ jne .LBB173_4
+# BB#5:
+ movl $0, 16(%esp) # 4-byte Folded Spill
+ movl $0, 8(%esp) # 4-byte Folded Spill
+ jmp .LBB173_6
+.LBB173_4:
+ movl (%ebp), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 4(%ebp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+.LBB173_6:
+ jne .LBB173_7
+# BB#8:
+ movl $0, 20(%esp) # 4-byte Folded Spill
+ jmp .LBB173_9
+.LBB173_7:
+ movl 36(%ebp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+.LBB173_9:
+ jne .LBB173_10
+# BB#11:
+ movl $0, 12(%esp) # 4-byte Folded Spill
+ jmp .LBB173_12
+.LBB173_10:
+ movl 32(%ebp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+.LBB173_12:
+ jne .LBB173_13
+# BB#14:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB173_15
+.LBB173_13:
+ movl 28(%ebp), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+.LBB173_15:
+ jne .LBB173_16
+# BB#17:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB173_18
+.LBB173_16:
+ movl 24(%ebp), %eax
+ movl %eax, (%esp) # 4-byte Spill
+.LBB173_18:
+ jne .LBB173_19
+# BB#20:
+ movl $0, %edx
+ jmp .LBB173_21
+.LBB173_19:
+ movl 20(%ebp), %edx
+.LBB173_21:
+ jne .LBB173_22
+# BB#23:
+ movl $0, %edi
+ jmp .LBB173_24
+.LBB173_22:
+ movl 16(%ebp), %edi
+.LBB173_24:
+ jne .LBB173_25
+# BB#26:
+ movl $0, %ebx
+ jmp .LBB173_27
+.LBB173_25:
+ movl 12(%ebp), %ebx
+.LBB173_27:
+ jne .LBB173_28
+# BB#29:
+ xorl %ebp, %ebp
+ jmp .LBB173_30
+.LBB173_28:
+ movl 8(%ebp), %ebp
+.LBB173_30:
+ movl 8(%esp), %esi # 4-byte Reload
+ addl 36(%esp), %esi # 4-byte Folded Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %esi, 44(%ecx)
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 48(%ecx)
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, 52(%ecx)
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 56(%ecx)
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edi, 60(%ecx)
+ movl (%esp), %esi # 4-byte Reload
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 64(%ecx)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %esi, 68(%ecx)
+ movl 12(%esp), %edx # 4-byte Reload
+ adcl 60(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 72(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 76(%ecx)
+ movl %eax, 80(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%ecx)
+ addl $72, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end173:
+ .size mcl_fpDbl_sub11Lbmi2, .Lfunc_end173-mcl_fpDbl_sub11Lbmi2
+
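+# .LmulPv384x32: local helper; multiplies a 12-limb (384-bit) operand by a
+# 32-bit word using mulx, writing a 13-limb result to (%ecx).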
+ .align 16, 0x90
+ .type .LmulPv384x32,@function
+.LmulPv384x32: # @mulPv384x32
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $36, %esp
+ movl %edx, %eax
+ movl 56(%esp), %edx
+ mulxl 4(%eax), %ebx, %edi
+ mulxl (%eax), %esi, %ebp
+ movl %esi, 32(%esp) # 4-byte Spill
+ addl %ebx, %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ mulxl 8(%eax), %ebx, %esi
+ adcl %edi, %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ mulxl 12(%eax), %edi, %ebx
+ adcl %esi, %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ mulxl 16(%eax), %esi, %edi
+ adcl %ebx, %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ mulxl 20(%eax), %esi, %ebx
+ adcl %edi, %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ mulxl 24(%eax), %esi, %edi
+ adcl %ebx, %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ mulxl 28(%eax), %ebx, %esi
+ adcl %edi, %ebx
+ mulxl 32(%eax), %edi, %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ adcl %esi, %edi
+ mulxl 36(%eax), %esi, %ebp
+ movl %ebp, (%esp) # 4-byte Spill
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ mulxl 40(%eax), %edx, %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ adcl (%esp), %edx # 4-byte Folded Reload
+ movl 32(%esp), %ebp # 4-byte Reload
+ movl %ebp, (%ecx)
+ movl 28(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%ecx)
+ movl 24(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%ecx)
+ movl 20(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%ecx)
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%ecx)
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%ecx)
+ movl 8(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%ecx)
+ movl %ebx, 28(%ecx)
+ movl %edi, 32(%ecx)
+ movl %esi, 36(%ecx)
+ movl %edx, 40(%ecx)
+ movl 56(%esp), %edx
+ mulxl 44(%eax), %eax, %edx
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%ecx)
+ adcl $0, %edx
+ movl %edx, 48(%ecx)
+ movl %ecx, %eax
+ addl $36, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end174:
+ .size .LmulPv384x32, .Lfunc_end174-.LmulPv384x32
+
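+# mcl_fp_mulUnitPre12Lbmi2: multiplies a 12-limb operand by a single 32-bit
+# unit via .LmulPv384x32 and copies the 13-limb result to the output.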
+ .globl mcl_fp_mulUnitPre12Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre12Lbmi2,@function
+mcl_fp_mulUnitPre12Lbmi2: # @mcl_fp_mulUnitPre12Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $92, %esp
+ calll .L175$pb
+.L175$pb:
+ popl %ebx
+.Ltmp26:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp26-.L175$pb), %ebx
+ movl 120(%esp), %eax
+ movl %eax, (%esp)
+ leal 40(%esp), %ecx
+ movl 116(%esp), %edx
+ calll .LmulPv384x32
+ movl 88(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 80(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 76(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 72(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 68(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 64(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp
+ movl 56(%esp), %ebx
+ movl 52(%esp), %edi
+ movl 48(%esp), %esi
+ movl 40(%esp), %edx
+ movl 44(%esp), %ecx
+ movl 112(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %edi, 12(%eax)
+ movl %ebx, 16(%eax)
+ movl %ebp, 20(%eax)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%eax)
+ addl $92, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end175:
+ .size mcl_fp_mulUnitPre12Lbmi2, .Lfunc_end175-mcl_fp_mulUnitPre12Lbmi2
+
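+# mcl_fpDbl_mulPre12Lbmi2: 12-limb full multiplication (no reduction); splits
+# both operands into 6-limb halves and combines three mcl_fpDbl_mulPre6Lbmi2 calls.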
+ .globl mcl_fpDbl_mulPre12Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre12Lbmi2,@function
+mcl_fpDbl_mulPre12Lbmi2: # @mcl_fpDbl_mulPre12Lbmi2
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $220, %esp
+ calll .L176$pb
+.L176$pb:
+ popl %ebx
+.Ltmp27:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp27-.L176$pb), %ebx
+ movl %ebx, -164(%ebp) # 4-byte Spill
+ movl 16(%ebp), %esi
+ movl %esi, 8(%esp)
+ movl 12(%ebp), %edi
+ movl %edi, 4(%esp)
+ movl 8(%ebp), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre6Lbmi2@PLT
+ leal 24(%esi), %eax
+ movl %eax, 8(%esp)
+ leal 24(%edi), %eax
+ movl %eax, 4(%esp)
+ movl 8(%ebp), %eax
+ leal 48(%eax), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre6Lbmi2@PLT
+ movl 40(%edi), %ebx
+ movl 36(%edi), %eax
+ movl 32(%edi), %edx
+ movl (%edi), %esi
+ movl 4(%edi), %ecx
+ addl 24(%edi), %esi
+ adcl 28(%edi), %ecx
+ movl %ecx, -172(%ebp) # 4-byte Spill
+ adcl 8(%edi), %edx
+ movl %edx, -188(%ebp) # 4-byte Spill
+ adcl 12(%edi), %eax
+ movl %eax, -168(%ebp) # 4-byte Spill
+ adcl 16(%edi), %ebx
+ movl %ebx, -180(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -112(%ebp) # 4-byte Spill
+ movl 16(%ebp), %edi
+ movl (%edi), %eax
+ addl 24(%edi), %eax
+ movl %eax, -136(%ebp) # 4-byte Spill
+ movl 4(%edi), %eax
+ adcl 28(%edi), %eax
+ movl %eax, -140(%ebp) # 4-byte Spill
+ movl 32(%edi), %eax
+ adcl 8(%edi), %eax
+ movl %eax, -144(%ebp) # 4-byte Spill
+ movl 36(%edi), %eax
+ adcl 12(%edi), %eax
+ movl %eax, -148(%ebp) # 4-byte Spill
+ movl 40(%edi), %ecx
+ adcl 16(%edi), %ecx
+ movl 44(%edi), %eax
+ adcl 20(%edi), %eax
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ movl %edi, -184(%ebp) # 4-byte Spill
+ movl %ebx, %edi
+ movl %edx, -156(%ebp) # 4-byte Spill
+ movl %esi, -160(%ebp) # 4-byte Spill
+ movl %esi, %edx
+ jb .LBB176_2
+# BB#1:
+ xorl %edi, %edi
+ movl $0, -156(%ebp) # 4-byte Folded Spill
+ movl $0, -160(%ebp) # 4-byte Folded Spill
+.LBB176_2:
+ movl %edi, -176(%ebp) # 4-byte Spill
+ movl 12(%ebp), %esi
+ movl 44(%esi), %edi
+ movl -112(%ebp), %ebx # 4-byte Reload
+ pushl %eax
+ movl %ebx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ adcl 20(%esi), %edi
+ movl %edi, -132(%ebp) # 4-byte Spill
+ movl %eax, -124(%ebp) # 4-byte Spill
+ movl %ecx, -112(%ebp) # 4-byte Spill
+ movl -148(%ebp), %esi # 4-byte Reload
+ movl %esi, -116(%ebp) # 4-byte Spill
+ movl -144(%ebp), %esi # 4-byte Reload
+ movl %esi, -120(%ebp) # 4-byte Spill
+ movl -140(%ebp), %esi # 4-byte Reload
+ movl %esi, -128(%ebp) # 4-byte Spill
+ movl -136(%ebp), %esi # 4-byte Reload
+ movl %esi, -152(%ebp) # 4-byte Spill
+ jb .LBB176_4
+# BB#3:
+ movl $0, -124(%ebp) # 4-byte Folded Spill
+ movl $0, -112(%ebp) # 4-byte Folded Spill
+ movl $0, -116(%ebp) # 4-byte Folded Spill
+ movl $0, -120(%ebp) # 4-byte Folded Spill
+ movl $0, -128(%ebp) # 4-byte Folded Spill
+ movl $0, -152(%ebp) # 4-byte Folded Spill
+.LBB176_4:
+ movl %edx, -84(%ebp)
+ movl -172(%ebp), %esi # 4-byte Reload
+ movl %esi, -80(%ebp)
+ movl -188(%ebp), %edx # 4-byte Reload
+ movl %edx, -76(%ebp)
+ movl -168(%ebp), %edi # 4-byte Reload
+ movl %edi, -72(%ebp)
+ movl -180(%ebp), %edx # 4-byte Reload
+ movl %edx, -68(%ebp)
+ movl -136(%ebp), %edx # 4-byte Reload
+ movl %edx, -108(%ebp)
+ movl -140(%ebp), %edx # 4-byte Reload
+ movl %edx, -104(%ebp)
+ movl -144(%ebp), %edx # 4-byte Reload
+ movl %edx, -100(%ebp)
+ movl -148(%ebp), %edx # 4-byte Reload
+ movl %edx, -96(%ebp)
+ movl %ecx, -92(%ebp)
+ movl %eax, -88(%ebp)
+ movl %edi, %ebx
+ sbbl %edx, %edx
+ movl -132(%ebp), %eax # 4-byte Reload
+ movl %eax, -64(%ebp)
+ movl -184(%ebp), %ecx # 4-byte Reload
+ pushl %eax
+ movl %ecx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB176_6
+# BB#5:
+ movl $0, %eax
+ movl $0, %ebx
+ movl $0, %esi
+.LBB176_6:
+ movl %eax, -132(%ebp) # 4-byte Spill
+ sbbl %eax, %eax
+ leal -108(%ebp), %ecx
+ movl %ecx, 8(%esp)
+ leal -84(%ebp), %ecx
+ movl %ecx, 4(%esp)
+ leal -60(%ebp), %ecx
+ movl %ecx, (%esp)
+ andl %eax, %edx
+ movl -152(%ebp), %edi # 4-byte Reload
+ addl -160(%ebp), %edi # 4-byte Folded Reload
+ adcl %esi, -128(%ebp) # 4-byte Folded Spill
+ movl -156(%ebp), %eax # 4-byte Reload
+ adcl %eax, -120(%ebp) # 4-byte Folded Spill
+ adcl %ebx, -116(%ebp) # 4-byte Folded Spill
+ movl -176(%ebp), %eax # 4-byte Reload
+ adcl %eax, -112(%ebp) # 4-byte Folded Spill
+ movl -132(%ebp), %eax # 4-byte Reload
+ adcl %eax, -124(%ebp) # 4-byte Folded Spill
+ sbbl %esi, %esi
+ andl $1, %esi
+ andl $1, %edx
+ movl %edx, -132(%ebp) # 4-byte Spill
+ movl -164(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre6Lbmi2@PLT
+ addl -36(%ebp), %edi
+ movl -128(%ebp), %eax # 4-byte Reload
+ adcl -32(%ebp), %eax
+ movl %eax, -128(%ebp) # 4-byte Spill
+ movl -120(%ebp), %eax # 4-byte Reload
+ adcl -28(%ebp), %eax
+ movl %eax, -120(%ebp) # 4-byte Spill
+ movl -116(%ebp), %eax # 4-byte Reload
+ adcl -24(%ebp), %eax
+ movl %eax, -116(%ebp) # 4-byte Spill
+ movl -112(%ebp), %eax # 4-byte Reload
+ adcl -20(%ebp), %eax
+ movl %eax, -112(%ebp) # 4-byte Spill
+ movl -124(%ebp), %eax # 4-byte Reload
+ adcl -16(%ebp), %eax
+ movl %eax, -124(%ebp) # 4-byte Spill
+ adcl %esi, -132(%ebp) # 4-byte Folded Spill
+ movl -60(%ebp), %ecx
+ movl 8(%ebp), %eax
+ subl (%eax), %ecx
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ movl -56(%ebp), %esi
+ sbbl 4(%eax), %esi
+ movl -52(%ebp), %ecx
+ sbbl 8(%eax), %ecx
+ movl %ecx, -136(%ebp) # 4-byte Spill
+ movl -48(%ebp), %edx
+ sbbl 12(%eax), %edx
+ movl -44(%ebp), %ebx
+ sbbl 16(%eax), %ebx
+ movl -40(%ebp), %ecx
+ sbbl 20(%eax), %ecx
+ movl %ecx, -140(%ebp) # 4-byte Spill
+ movl 24(%eax), %ecx
+ movl %ecx, -148(%ebp) # 4-byte Spill
+ sbbl %ecx, %edi
+ movl 28(%eax), %ecx
+ movl %ecx, -152(%ebp) # 4-byte Spill
+ sbbl %ecx, -128(%ebp) # 4-byte Folded Spill
+ movl 32(%eax), %ecx
+ movl %ecx, -156(%ebp) # 4-byte Spill
+ sbbl %ecx, -120(%ebp) # 4-byte Folded Spill
+ movl 36(%eax), %ecx
+ movl %ecx, -160(%ebp) # 4-byte Spill
+ sbbl %ecx, -116(%ebp) # 4-byte Folded Spill
+ movl 40(%eax), %ecx
+ movl %ecx, -164(%ebp) # 4-byte Spill
+ sbbl %ecx, -112(%ebp) # 4-byte Folded Spill
+ movl 44(%eax), %ecx
+ movl %ecx, -168(%ebp) # 4-byte Spill
+ sbbl %ecx, -124(%ebp) # 4-byte Folded Spill
+ sbbl $0, -132(%ebp) # 4-byte Folded Spill
+ movl 48(%eax), %ecx
+ movl %ecx, -192(%ebp) # 4-byte Spill
+ subl %ecx, -144(%ebp) # 4-byte Folded Spill
+ movl 52(%eax), %ecx
+ movl %ecx, -196(%ebp) # 4-byte Spill
+ sbbl %ecx, %esi
+ movl 56(%eax), %ecx
+ movl %ecx, -200(%ebp) # 4-byte Spill
+ sbbl %ecx, -136(%ebp) # 4-byte Folded Spill
+ movl 60(%eax), %ecx
+ movl %ecx, -204(%ebp) # 4-byte Spill
+ sbbl %ecx, %edx
+ movl 64(%eax), %ecx
+ movl %ecx, -208(%ebp) # 4-byte Spill
+ sbbl %ecx, %ebx
+ movl 68(%eax), %ecx
+ movl %ecx, -212(%ebp) # 4-byte Spill
+ sbbl %ecx, -140(%ebp) # 4-byte Folded Spill
+ movl 72(%eax), %ecx
+ movl %ecx, -216(%ebp) # 4-byte Spill
+ sbbl %ecx, %edi
+ movl 76(%eax), %ecx
+ movl %ecx, -172(%ebp) # 4-byte Spill
+ sbbl %ecx, -128(%ebp) # 4-byte Folded Spill
+ movl 80(%eax), %ecx
+ movl %ecx, -176(%ebp) # 4-byte Spill
+ sbbl %ecx, -120(%ebp) # 4-byte Folded Spill
+ movl 84(%eax), %ecx
+ movl %ecx, -180(%ebp) # 4-byte Spill
+ sbbl %ecx, -116(%ebp) # 4-byte Folded Spill
+ movl 88(%eax), %ecx
+ movl %ecx, -184(%ebp) # 4-byte Spill
+ sbbl %ecx, -112(%ebp) # 4-byte Folded Spill
+ movl 92(%eax), %ecx
+ movl %ecx, -188(%ebp) # 4-byte Spill
+ sbbl %ecx, -124(%ebp) # 4-byte Folded Spill
+ sbbl $0, -132(%ebp) # 4-byte Folded Spill
+ movl -144(%ebp), %ecx # 4-byte Reload
+ addl -148(%ebp), %ecx # 4-byte Folded Reload
+ adcl -152(%ebp), %esi # 4-byte Folded Reload
+ movl %ecx, 24(%eax)
+ movl -136(%ebp), %ecx # 4-byte Reload
+ adcl -156(%ebp), %ecx # 4-byte Folded Reload
+ movl %esi, 28(%eax)
+ adcl -160(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 32(%eax)
+ adcl -164(%ebp), %ebx # 4-byte Folded Reload
+ movl %edx, 36(%eax)
+ movl -140(%ebp), %ecx # 4-byte Reload
+ adcl -168(%ebp), %ecx # 4-byte Folded Reload
+ movl %ebx, 40(%eax)
+ adcl -192(%ebp), %edi # 4-byte Folded Reload
+ movl %ecx, 44(%eax)
+ movl -128(%ebp), %ecx # 4-byte Reload
+ adcl -196(%ebp), %ecx # 4-byte Folded Reload
+ movl %edi, 48(%eax)
+ movl -120(%ebp), %edx # 4-byte Reload
+ adcl -200(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 52(%eax)
+ movl -116(%ebp), %ecx # 4-byte Reload
+ adcl -204(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 56(%eax)
+ movl -112(%ebp), %edx # 4-byte Reload
+ adcl -208(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 60(%eax)
+ movl -124(%ebp), %ecx # 4-byte Reload
+ adcl -212(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 64(%eax)
+ movl -132(%ebp), %edx # 4-byte Reload
+ adcl -216(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 68(%eax)
+ movl %edx, 72(%eax)
+ movl -172(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 76(%eax)
+ movl -176(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 80(%eax)
+ movl -180(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 84(%eax)
+ movl -184(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 88(%eax)
+ movl -188(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 92(%eax)
+ addl $220, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end176:
+ .size mcl_fpDbl_mulPre12Lbmi2, .Lfunc_end176-mcl_fpDbl_mulPre12Lbmi2
+
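+# mcl_fpDbl_sqrPre12Lbmi2: 12-limb squaring (no reduction), using the same
+# half-splitting scheme as mcl_fpDbl_mulPre12Lbmi2.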
+ .globl mcl_fpDbl_sqrPre12Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre12Lbmi2,@function
+mcl_fpDbl_sqrPre12Lbmi2: # @mcl_fpDbl_sqrPre12Lbmi2
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $220, %esp
+ calll .L177$pb
+.L177$pb:
+ popl %ebx
+.Ltmp28:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp28-.L177$pb), %ebx
+ movl %ebx, -152(%ebp) # 4-byte Spill
+ movl 12(%ebp), %edi
+ movl %edi, 8(%esp)
+ movl %edi, 4(%esp)
+ movl 8(%ebp), %esi
+ movl %esi, (%esp)
+ calll mcl_fpDbl_mulPre6Lbmi2@PLT
+ leal 24(%edi), %eax
+ movl %eax, 8(%esp)
+ movl %eax, 4(%esp)
+ leal 48(%esi), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre6Lbmi2@PLT
+ movl 44(%edi), %eax
+ movl %eax, -136(%ebp) # 4-byte Spill
+ movl 40(%edi), %edx
+ movl 36(%edi), %eax
+ movl (%edi), %ebx
+ movl 4(%edi), %esi
+ addl 24(%edi), %ebx
+ adcl 28(%edi), %esi
+ movl 32(%edi), %ecx
+ adcl 8(%edi), %ecx
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ adcl 12(%edi), %eax
+ movl %eax, -140(%ebp) # 4-byte Spill
+ adcl 16(%edi), %edx
+ movl %edx, %ecx
+ movl -136(%ebp), %eax # 4-byte Reload
+ adcl 20(%edi), %eax
+ movl %eax, -136(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %edx
+ movl %edx, -156(%ebp) # 4-byte Spill
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edx
+ popl %eax
+ movl %edx, -124(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -120(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %edx
+ sbbl %edi, %edi
+ movl %edi, -148(%ebp) # 4-byte Spill
+ movl %ebx, %edi
+ addl %edi, %edi
+ movl %edi, -112(%ebp) # 4-byte Spill
+ movl %esi, %edi
+ movl %esi, %eax
+ adcl %edi, %edi
+ movl %edi, -132(%ebp) # 4-byte Spill
+ pushl %eax
+ movl %edx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB177_2
+# BB#1:
+ movl $0, -132(%ebp) # 4-byte Folded Spill
+ movl $0, -112(%ebp) # 4-byte Folded Spill
+.LBB177_2:
+ movl -144(%ebp), %esi # 4-byte Reload
+ addl %esi, %esi
+ movl -140(%ebp), %edx # 4-byte Reload
+ adcl %edx, %edx
+ movl %edx, -116(%ebp) # 4-byte Spill
+ movl -120(%ebp), %edx # 4-byte Reload
+ pushl %eax
+ movl %edx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB177_3
+# BB#4:
+ movl $0, -116(%ebp) # 4-byte Folded Spill
+ movl $0, -120(%ebp) # 4-byte Folded Spill
+ jmp .LBB177_5
+.LBB177_3:
+ movl %eax, %edx
+ shrl $31, %edx
+ orl %esi, %edx
+ movl %edx, -120(%ebp) # 4-byte Spill
+.LBB177_5:
+ movl -136(%ebp), %edx # 4-byte Reload
+ movl %ecx, %esi
+ addl %esi, %esi
+ adcl %edx, %edx
+ movl -124(%ebp), %edi # 4-byte Reload
+ pushl %eax
+ movl %edi, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB177_6
+# BB#7:
+ xorl %edx, %edx
+ movl $0, -128(%ebp) # 4-byte Folded Spill
+ movl -140(%ebp), %edi # 4-byte Reload
+ jmp .LBB177_8
+.LBB177_6:
+ movl %ecx, -124(%ebp) # 4-byte Spill
+ movl -140(%ebp), %edi # 4-byte Reload
+ movl %edi, %ecx
+ shrl $31, %ecx
+ orl %esi, %ecx
+ movl %ecx, -128(%ebp) # 4-byte Spill
+ movl -124(%ebp), %ecx # 4-byte Reload
+.LBB177_8:
+ movl %edx, -124(%ebp) # 4-byte Spill
+ movl %ebx, -84(%ebp)
+ movl %eax, -80(%ebp)
+ movl -144(%ebp), %esi # 4-byte Reload
+ movl %esi, -76(%ebp)
+ movl %edi, -72(%ebp)
+ movl %ecx, -68(%ebp)
+ movl -136(%ebp), %edx # 4-byte Reload
+ movl %edx, -64(%ebp)
+ movl %ebx, -108(%ebp)
+ movl %eax, -104(%ebp)
+ movl %esi, -100(%ebp)
+ movl %edi, -96(%ebp)
+ movl %ecx, -92(%ebp)
+ movl %edx, -88(%ebp)
+ movl -156(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB177_9
+# BB#10:
+ movl $0, -136(%ebp) # 4-byte Folded Spill
+ jmp .LBB177_11
+.LBB177_9:
+ shrl $31, %edx
+ movl %edx, -136(%ebp) # 4-byte Spill
+.LBB177_11:
+ leal -108(%ebp), %eax
+ movl %eax, 8(%esp)
+ leal -84(%ebp), %eax
+ movl %eax, 4(%esp)
+ leal -60(%ebp), %eax
+ movl %eax, (%esp)
+ movl -148(%ebp), %esi # 4-byte Reload
+ andl $1, %esi
+ movl -152(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre6Lbmi2@PLT
+ movl -112(%ebp), %eax # 4-byte Reload
+ addl -36(%ebp), %eax
+ movl %eax, -112(%ebp) # 4-byte Spill
+ movl -132(%ebp), %edi # 4-byte Reload
+ adcl -32(%ebp), %edi
+ movl -120(%ebp), %eax # 4-byte Reload
+ adcl -28(%ebp), %eax
+ movl %eax, -120(%ebp) # 4-byte Spill
+ movl -116(%ebp), %eax # 4-byte Reload
+ adcl -24(%ebp), %eax
+ movl %eax, -116(%ebp) # 4-byte Spill
+ movl -128(%ebp), %eax # 4-byte Reload
+ adcl -20(%ebp), %eax
+ movl %eax, -128(%ebp) # 4-byte Spill
+ movl -124(%ebp), %eax # 4-byte Reload
+ adcl -16(%ebp), %eax
+ movl %eax, -124(%ebp) # 4-byte Spill
+ adcl -136(%ebp), %esi # 4-byte Folded Reload
+ movl -60(%ebp), %edx
+ movl 8(%ebp), %eax
+ subl (%eax), %edx
+ movl -56(%ebp), %ebx
+ sbbl 4(%eax), %ebx
+ movl -52(%ebp), %ecx
+ sbbl 8(%eax), %ecx
+ movl %ecx, -136(%ebp) # 4-byte Spill
+ movl -48(%ebp), %ecx
+ sbbl 12(%eax), %ecx
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ movl -44(%ebp), %ecx
+ sbbl 16(%eax), %ecx
+ movl %ecx, -172(%ebp) # 4-byte Spill
+ movl -40(%ebp), %ecx
+ sbbl 20(%eax), %ecx
+ movl %ecx, -140(%ebp) # 4-byte Spill
+ movl 24(%eax), %ecx
+ movl %ecx, -148(%ebp) # 4-byte Spill
+ sbbl %ecx, -112(%ebp) # 4-byte Folded Spill
+ movl 28(%eax), %ecx
+ movl %ecx, -152(%ebp) # 4-byte Spill
+ sbbl %ecx, %edi
+ movl %edi, -132(%ebp) # 4-byte Spill
+ movl 32(%eax), %ecx
+ movl %ecx, -156(%ebp) # 4-byte Spill
+ sbbl %ecx, -120(%ebp) # 4-byte Folded Spill
+ movl 36(%eax), %ecx
+ movl %ecx, -160(%ebp) # 4-byte Spill
+ sbbl %ecx, -116(%ebp) # 4-byte Folded Spill
+ movl 40(%eax), %ecx
+ movl %ecx, -164(%ebp) # 4-byte Spill
+ sbbl %ecx, -128(%ebp) # 4-byte Folded Spill
+ movl 44(%eax), %ecx
+ movl %ecx, -168(%ebp) # 4-byte Spill
+ sbbl %ecx, -124(%ebp) # 4-byte Folded Spill
+ sbbl $0, %esi
+ movl 48(%eax), %ecx
+ movl %ecx, -192(%ebp) # 4-byte Spill
+ subl %ecx, %edx
+ movl 52(%eax), %ecx
+ movl %ecx, -196(%ebp) # 4-byte Spill
+ sbbl %ecx, %ebx
+ movl 56(%eax), %ecx
+ movl %ecx, -200(%ebp) # 4-byte Spill
+ sbbl %ecx, -136(%ebp) # 4-byte Folded Spill
+ movl 60(%eax), %ecx
+ movl %ecx, -204(%ebp) # 4-byte Spill
+ sbbl %ecx, -144(%ebp) # 4-byte Folded Spill
+ movl 64(%eax), %ecx
+ movl %ecx, -208(%ebp) # 4-byte Spill
+ movl -172(%ebp), %edi # 4-byte Reload
+ sbbl %ecx, %edi
+ movl 68(%eax), %ecx
+ movl %ecx, -212(%ebp) # 4-byte Spill
+ sbbl %ecx, -140(%ebp) # 4-byte Folded Spill
+ movl 72(%eax), %ecx
+ movl %ecx, -216(%ebp) # 4-byte Spill
+ sbbl %ecx, -112(%ebp) # 4-byte Folded Spill
+ movl 76(%eax), %ecx
+ movl %ecx, -172(%ebp) # 4-byte Spill
+ sbbl %ecx, -132(%ebp) # 4-byte Folded Spill
+ movl 80(%eax), %ecx
+ movl %ecx, -176(%ebp) # 4-byte Spill
+ sbbl %ecx, -120(%ebp) # 4-byte Folded Spill
+ movl 84(%eax), %ecx
+ movl %ecx, -180(%ebp) # 4-byte Spill
+ sbbl %ecx, -116(%ebp) # 4-byte Folded Spill
+ movl 88(%eax), %ecx
+ movl %ecx, -184(%ebp) # 4-byte Spill
+ sbbl %ecx, -128(%ebp) # 4-byte Folded Spill
+ movl 92(%eax), %ecx
+ movl %ecx, -188(%ebp) # 4-byte Spill
+ sbbl %ecx, -124(%ebp) # 4-byte Folded Spill
+ sbbl $0, %esi
+ addl -148(%ebp), %edx # 4-byte Folded Reload
+ adcl -152(%ebp), %ebx # 4-byte Folded Reload
+ movl %edx, 24(%eax)
+ movl -136(%ebp), %ecx # 4-byte Reload
+ adcl -156(%ebp), %ecx # 4-byte Folded Reload
+ movl %ebx, 28(%eax)
+ movl -144(%ebp), %edx # 4-byte Reload
+ adcl -160(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 32(%eax)
+ adcl -164(%ebp), %edi # 4-byte Folded Reload
+ movl %edx, 36(%eax)
+ movl -140(%ebp), %edx # 4-byte Reload
+ adcl -168(%ebp), %edx # 4-byte Folded Reload
+ movl %edi, 40(%eax)
+ movl -112(%ebp), %ecx # 4-byte Reload
+ adcl -192(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 44(%eax)
+ movl -132(%ebp), %edi # 4-byte Reload
+ adcl -196(%ebp), %edi # 4-byte Folded Reload
+ movl %ecx, 48(%eax)
+ movl -120(%ebp), %edx # 4-byte Reload
+ adcl -200(%ebp), %edx # 4-byte Folded Reload
+ movl %edi, 52(%eax)
+ movl -116(%ebp), %ecx # 4-byte Reload
+ adcl -204(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 56(%eax)
+ movl -128(%ebp), %edx # 4-byte Reload
+ adcl -208(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 60(%eax)
+ movl -124(%ebp), %ecx # 4-byte Reload
+ adcl -212(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 64(%eax)
+ adcl -216(%ebp), %esi # 4-byte Folded Reload
+ movl %ecx, 68(%eax)
+ movl %esi, 72(%eax)
+ movl -172(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 76(%eax)
+ movl -176(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 80(%eax)
+ movl -180(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 84(%eax)
+ movl -184(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 88(%eax)
+ movl -188(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 92(%eax)
+ addl $220, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end177:
+ .size mcl_fpDbl_sqrPre12Lbmi2, .Lfunc_end177-mcl_fpDbl_sqrPre12Lbmi2
+
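+# mcl_fp_mont12Lbmi2: 12-limb Montgomery multiplication; interleaves
+# .LmulPv384x32 calls with word-by-word reduction using the precomputed
+# constant stored just below the modulus (loaded from -4(p)).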
+ .globl mcl_fp_mont12Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont12Lbmi2,@function
+mcl_fp_mont12Lbmi2: # @mcl_fp_mont12Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1436, %esp # imm = 0x59C
+ calll .L178$pb
+.L178$pb:
+ popl %ebx
+.Ltmp29:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp29-.L178$pb), %ebx
+ movl 1468(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1384(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 1384(%esp), %ebp
+ movl 1388(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 1432(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 1428(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 1424(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 1420(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 1416(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1412(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1408(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1404(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1400(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1396(%esp), %edi
+ movl 1392(%esp), %esi
+ movl %eax, (%esp)
+ leal 1328(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ addl 1328(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1332(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1336(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ adcl 1340(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1344(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1348(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1352(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1356(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1360(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1364(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1368(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 1372(%esp), %esi
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 1376(%esp), %ebp
+ sbbl %edi, %edi
+ movl 1464(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1272(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ andl $1, %edi
+ movl 84(%esp), %ecx # 4-byte Reload
+ addl 1272(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1276(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1280(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1284(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1288(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1292(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1296(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1300(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1304(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1308(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 1312(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl 1316(%esp), %ebp
+ adcl 1320(%esp), %edi
+ sbbl %eax, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1216(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ movl 84(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1216(%esp), %esi
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1220(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 1224(%esp), %esi
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1228(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1232(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1236(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1240(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1244(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1248(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1252(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1256(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 1260(%esp), %ebp
+ adcl 1264(%esp), %edi
+ adcl $0, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1160(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 1160(%esp), %ecx
+ adcl 1164(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1168(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1180(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1184(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1188(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1192(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1196(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 1200(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ adcl 1204(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1208(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1104(%esp), %ecx
+ movl 1468(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv384x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 1104(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1108(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1112(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1116(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1120(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1124(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1128(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 1140(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1144(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1148(%esp), %edi
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 1152(%esp), %ebp
+ adcl $0, %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1048(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 68(%esp), %ecx # 4-byte Reload
+ addl 1048(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1068(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1072(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1076(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 1080(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1084(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1088(%esp), %edi
+ adcl 1092(%esp), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1096(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 992(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ andl $1, %esi
+ movl %esi, %eax
+ addl 992(%esp), %ebp
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 996(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1000(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 1004(%esp), %ebp
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1008(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1012(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1016(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 1020(%esp), %esi
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1024(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1028(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 1032(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1036(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1040(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl %eax, %edi
+ adcl $0, %edi
+ movl 1464(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 936(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 936(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 940(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 944(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 948(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 952(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 960(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 980(%esp), %esi
+ adcl 984(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 880(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ andl $1, %edi
+ movl %edi, %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 880(%esp), %eax
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 892(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 912(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 924(%esp), %esi
+ movl %esi, %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 928(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 824(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 824(%esp), %ecx
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 840(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 852(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 856(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 864(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 768(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 768(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 780(%esp), %ebp
+ adcl 784(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 800(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 808(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 712(%esp), %ecx
+ movl 1460(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv384x32
+ movl 44(%esp), %eax # 4-byte Reload
+ addl 712(%esp), %eax
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 716(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 720(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 724(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 728(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 732(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 736(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 740(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 744(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl 748(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 752(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 756(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 760(%esp), %edi
+ sbbl %ebp, %ebp
+ movl %eax, %esi
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 656(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ andl $1, %ebp
+ movl %ebp, %eax
+ addl 656(%esp), %esi
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 660(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 664(%esp), %esi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 668(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 672(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 676(%esp), %ebp
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 680(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 684(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 688(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 692(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 696(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 700(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 704(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl %eax, %edi
+ adcl $0, %edi
+ movl 1464(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 600(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 52(%esp), %ecx # 4-byte Reload
+ addl 600(%esp), %ecx
+ adcl 604(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 616(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 620(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 636(%esp), %esi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 648(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 544(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ movl 44(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 544(%esp), %edi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 548(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 552(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 556(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 560(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 564(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 568(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 572(%esp), %edi
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 576(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 580(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 584(%esp), %esi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 588(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 592(%esp), %ebp
+ adcl $0, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 488(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 488(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 512(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 524(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 532(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 536(%esp), %ebp
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 432(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 432(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ adcl 440(%esp), %edi
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 444(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 480(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 376(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 376(%esp), %ecx
+ adcl 380(%esp), %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ adcl 384(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 392(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 416(%esp), %ebp
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 320(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ movl 76(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 320(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 332(%esp), %esi
+ adcl 336(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 344(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 360(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 88(%esp), %ecx # 4-byte Reload
+ addl 264(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 272(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 284(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 288(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 296(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 208(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ movl 88(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 208(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 224(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 232(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 236(%esp), %edi
+ adcl 240(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 248(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 152(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 152(%esp), %ecx
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 164(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 176(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 188(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 196(%esp), %ebp
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 96(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ andl $1, %esi
+ addl 96(%esp), %edi
+ movl 84(%esp), %ebx # 4-byte Reload
+ movl 92(%esp), %eax # 4-byte Reload
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 100(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %edx, %edi
+ adcl 108(%esp), %ebx
+ adcl 112(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 116(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 120(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 124(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 128(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 132(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 136(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 140(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 144(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ adcl $0, %esi
+ movl 1468(%esp), %edx
+ subl (%edx), %eax
+ sbbl 4(%edx), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl %ebx, %edi
+ sbbl 8(%edx), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ sbbl 12(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ sbbl 16(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ sbbl 20(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ sbbl 24(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ sbbl 28(%edx), %ecx
+ movl 44(%esp), %edi # 4-byte Reload
+ sbbl 32(%edx), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ sbbl 36(%edx), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ sbbl 40(%edx), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ sbbl 44(%edx), %ebp
+ movl %ebp, %edx
+ sbbl $0, %esi
+ andl $1, %esi
+ jne .LBB178_2
+# BB#1:
+ movl %ecx, 52(%esp) # 4-byte Spill
+.LBB178_2:
+ movl %esi, %ecx
+ testb %cl, %cl
+ movl 92(%esp), %ecx # 4-byte Reload
+ jne .LBB178_4
+# BB#3:
+ movl %eax, %ecx
+.LBB178_4:
+ movl 1456(%esp), %eax
+ movl %ecx, (%eax)
+ movl 68(%esp), %edi # 4-byte Reload
+ jne .LBB178_6
+# BB#5:
+ movl 16(%esp), %edi # 4-byte Reload
+.LBB178_6:
+ movl %edi, 4(%eax)
+ movl 64(%esp), %ebp # 4-byte Reload
+ jne .LBB178_8
+# BB#7:
+ movl 20(%esp), %ebx # 4-byte Reload
+.LBB178_8:
+ movl %ebx, 8(%eax)
+ jne .LBB178_10
+# BB#9:
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 72(%esp) # 4-byte Spill
+.LBB178_10:
+ movl 72(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%eax)
+ jne .LBB178_12
+# BB#11:
+ movl 28(%esp), %ebp # 4-byte Reload
+.LBB178_12:
+ movl %ebp, 16(%eax)
+ movl 56(%esp), %ecx # 4-byte Reload
+ jne .LBB178_14
+# BB#13:
+ movl 32(%esp), %ecx # 4-byte Reload
+.LBB178_14:
+ movl %ecx, 20(%eax)
+ movl 48(%esp), %ecx # 4-byte Reload
+ jne .LBB178_16
+# BB#15:
+ movl 36(%esp), %ecx # 4-byte Reload
+.LBB178_16:
+ movl %ecx, 24(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ jne .LBB178_18
+# BB#17:
+ movl 40(%esp), %ecx # 4-byte Reload
+.LBB178_18:
+ movl %ecx, 32(%eax)
+ movl 60(%esp), %ecx # 4-byte Reload
+ jne .LBB178_20
+# BB#19:
+ movl 80(%esp), %ecx # 4-byte Reload
+.LBB178_20:
+ movl %ecx, 36(%eax)
+ movl 76(%esp), %ecx # 4-byte Reload
+ jne .LBB178_22
+# BB#21:
+ movl 84(%esp), %ecx # 4-byte Reload
+.LBB178_22:
+ movl %ecx, 40(%eax)
+ movl 88(%esp), %ecx # 4-byte Reload
+ jne .LBB178_24
+# BB#23:
+ movl %edx, %ecx
+.LBB178_24:
+ movl %ecx, 44(%eax)
+ addl $1436, %esp # imm = 0x59C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end178:
+ .size mcl_fp_mont12Lbmi2, .Lfunc_end178-mcl_fp_mont12Lbmi2
+
+ .globl mcl_fp_montNF12Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF12Lbmi2,@function
+mcl_fp_montNF12Lbmi2: # @mcl_fp_montNF12Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1420, %esp # imm = 0x58C
+ calll .L179$pb
+.L179$pb:
+ popl %ebx
+.Ltmp30:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp30-.L179$pb), %ebx
+ movl 1452(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 1448(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1368(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 1368(%esp), %ebp
+ movl 1372(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 1416(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 1412(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1408(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 1404(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 1400(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1396(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1392(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 1388(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 1384(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 1380(%esp), %edi
+ movl 1376(%esp), %esi
+ movl %eax, (%esp)
+ leal 1312(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 1312(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1316(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1320(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ adcl 1324(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1328(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1332(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1336(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1340(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 1344(%esp), %edi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1348(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1352(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 1356(%esp), %esi
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 1360(%esp), %ebp
+ movl 1448(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1256(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 1304(%esp), %eax
+ movl 56(%esp), %edx # 4-byte Reload
+ addl 1256(%esp), %edx
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1260(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1264(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1268(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 1272(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 1276(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1280(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 1284(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1288(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1292(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 1296(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ adcl 1300(%esp), %ebp
+ adcl $0, %eax
+ movl %eax, %edi
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1200(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 1200(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1204(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 1208(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1212(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1216(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1220(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1224(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1228(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1232(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1236(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1240(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1244(%esp), %ebp
+ adcl 1248(%esp), %edi
+ movl 1448(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1144(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 1192(%esp), %eax
+ movl 68(%esp), %edx # 4-byte Reload
+ addl 1144(%esp), %edx
+ adcl 1148(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1152(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 1156(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 1160(%esp), %esi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1164(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1168(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1172(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1176(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1180(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 1184(%esp), %ebp
+ adcl 1188(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1088(%esp), %ecx
+ movl 1452(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv384x32
+ addl 1088(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1092(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1096(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1100(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %esi, %edi
+ adcl 1104(%esp), %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1108(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1112(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1116(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1120(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 1124(%esp), %esi
+ adcl 1128(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 1136(%esp), %ebp
+ movl 1448(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1032(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 1080(%esp), %eax
+ movl 52(%esp), %edx # 4-byte Reload
+ addl 1032(%esp), %edx
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1036(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 1040(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 1044(%esp), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1048(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1052(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1056(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1060(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 1064(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1068(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1072(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 1076(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %edi
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 976(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 976(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 980(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 984(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1000(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 1004(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 1012(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1024(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 1448(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 920(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 968(%esp), %eax
+ movl 40(%esp), %edx # 4-byte Reload
+ addl 920(%esp), %edx
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 924(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 928(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 932(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 936(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 940(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 944(%esp), %esi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 948(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 952(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 956(%esp), %edi
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 960(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 964(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ movl %ebp, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 864(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 864(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 876(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 884(%esp), %ebp
+ adcl 888(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 900(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 912(%esp), %edi
+ movl 1448(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 808(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 856(%esp), %edx
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 808(%esp), %ecx
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 824(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ adcl 828(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 832(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 852(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 752(%esp), %ecx
+ movl 1452(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv384x32
+ addl 752(%esp), %esi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 760(%esp), %edi
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 764(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 776(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 780(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 792(%esp), %ebp
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1448(%esp), %ecx
+ movl %ecx, %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 696(%esp), %ecx
+ movl 1444(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv384x32
+ movl 744(%esp), %ecx
+ movl 32(%esp), %eax # 4-byte Reload
+ addl 696(%esp), %eax
+ adcl 700(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 704(%esp), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 708(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 712(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 716(%esp), %esi
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 720(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 724(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 728(%esp), %edi
+ adcl 732(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 736(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 740(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 640(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 640(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 648(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 660(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 668(%esp), %esi
+ adcl 672(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 676(%esp), %edi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1448(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 584(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 632(%esp), %edx
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 584(%esp), %ecx
+ adcl 588(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 596(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 608(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 616(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 620(%esp), %edi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 528(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 528(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 540(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 564(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 568(%esp), %edi
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 572(%esp), %esi
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 576(%esp), %ebp
+ movl 1448(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 472(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 520(%esp), %edx
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 472(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 508(%esp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 512(%esp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 516(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 416(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 416(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 432(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 440(%esp), %ebp
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 444(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1448(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 360(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 408(%esp), %edx
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 360(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 372(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 380(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ adcl 384(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 304(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 312(%esp), %edi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 320(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 328(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %esi # 4-byte Reload
+ adcl 332(%esp), %esi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1448(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 248(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 296(%esp), %edx
+ movl 64(%esp), %ecx # 4-byte Reload
+ addl 248(%esp), %ecx
+ adcl 252(%esp), %edi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 260(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %esi, %ebp
+ adcl 272(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 192(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 192(%esp), %esi
+ adcl 196(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 200(%esp), %edi
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 204(%esp), %esi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 216(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %ebp # 4-byte Reload
+ adcl 224(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 1448(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 136(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 184(%esp), %edx
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 136(%esp), %ecx
+ adcl 140(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ adcl 144(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 152(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 160(%esp), %edi
+ adcl 164(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 168(%esp), %ebp
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 80(%esp), %ecx
+ movl 1452(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv384x32
+ addl 80(%esp), %esi
+ movl 56(%esp), %esi # 4-byte Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 88(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 92(%esp), %esi
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 100(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 104(%esp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %edx # 4-byte Reload
+ adcl 108(%esp), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl 112(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 116(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 120(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 124(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 128(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 1452(%esp), %ebp
+ subl (%ebp), %edx
+ movl %ecx, %eax
+ sbbl 4(%ebp), %eax
+ movl %esi, %ebx
+ sbbl 8(%ebp), %ebx
+ movl 52(%esp), %ecx # 4-byte Reload
+ sbbl 12(%ebp), %ecx
+ movl 40(%esp), %edi # 4-byte Reload
+ sbbl 16(%ebp), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ sbbl 20(%ebp), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 32(%esp), %edi # 4-byte Reload
+ sbbl 24(%ebp), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ sbbl 28(%ebp), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ sbbl 32(%ebp), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ sbbl 36(%ebp), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ sbbl 40(%ebp), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ sbbl 44(%ebp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl %edi, %ebp
+ sarl $31, %ebp
+ testl %ebp, %ebp
+ movl 76(%esp), %ebp # 4-byte Reload
+ js .LBB179_2
+# BB#1:
+ movl %edx, %ebp
+.LBB179_2:
+ movl 1440(%esp), %edx
+ movl %ebp, (%edx)
+ movl 68(%esp), %edi # 4-byte Reload
+ js .LBB179_4
+# BB#3:
+ movl %eax, %edi
+.LBB179_4:
+ movl %edi, 4(%edx)
+ js .LBB179_6
+# BB#5:
+ movl %ebx, %esi
+.LBB179_6:
+ movl %esi, 8(%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB179_8
+# BB#7:
+ movl %ecx, %eax
+.LBB179_8:
+ movl %eax, 12(%edx)
+ movl 40(%esp), %eax # 4-byte Reload
+ js .LBB179_10
+# BB#9:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB179_10:
+ movl %eax, 16(%edx)
+ movl 36(%esp), %eax # 4-byte Reload
+ js .LBB179_12
+# BB#11:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB179_12:
+ movl %eax, 20(%edx)
+ movl 32(%esp), %eax # 4-byte Reload
+ js .LBB179_14
+# BB#13:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB179_14:
+ movl %eax, 24(%edx)
+ movl 48(%esp), %eax # 4-byte Reload
+ js .LBB179_16
+# BB#15:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB179_16:
+ movl %eax, 28(%edx)
+ movl 44(%esp), %eax # 4-byte Reload
+ js .LBB179_18
+# BB#17:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB179_18:
+ movl %eax, 32(%edx)
+ movl 72(%esp), %eax # 4-byte Reload
+ js .LBB179_20
+# BB#19:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB179_20:
+ movl %eax, 36(%edx)
+ movl 64(%esp), %eax # 4-byte Reload
+ js .LBB179_22
+# BB#21:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB179_22:
+ movl %eax, 40(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB179_24
+# BB#23:
+ movl 56(%esp), %eax # 4-byte Reload
+.LBB179_24:
+ movl %eax, 44(%edx)
+ addl $1420, %esp # imm = 0x58C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end179:
+ .size mcl_fp_montNF12Lbmi2, .Lfunc_end179-mcl_fp_montNF12Lbmi2
+
+ .globl mcl_fp_montRed12Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed12Lbmi2,@function
+mcl_fp_montRed12Lbmi2: # @mcl_fp_montRed12Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $828, %esp # imm = 0x33C
+ calll .L180$pb
+.L180$pb:
+ popl %eax
+.Ltmp31:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp31-.L180$pb), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 856(%esp), %edx
+ movl -4(%edx), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 852(%esp), %ecx
+ movl (%ecx), %ebx
+ movl %ebx, 88(%esp) # 4-byte Spill
+ movl 4(%ecx), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ imull %esi, %ebx
+ movl 92(%ecx), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 88(%ecx), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 84(%ecx), %esi
+ movl %esi, 120(%esp) # 4-byte Spill
+ movl 80(%ecx), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 76(%ecx), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 72(%ecx), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 68(%ecx), %edi
+ movl %edi, 140(%esp) # 4-byte Spill
+ movl 64(%ecx), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 60(%ecx), %esi
+ movl %esi, 148(%esp) # 4-byte Spill
+ movl 56(%ecx), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 52(%ecx), %esi
+ movl %esi, 156(%esp) # 4-byte Spill
+ movl 48(%ecx), %edi
+ movl %edi, 152(%esp) # 4-byte Spill
+ movl 44(%ecx), %edi
+ movl %edi, 132(%esp) # 4-byte Spill
+ movl 40(%ecx), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 32(%ecx), %edi
+ movl 28(%ecx), %esi
+ movl 24(%ecx), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 20(%ecx), %ebp
+ movl 16(%ecx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 12(%ecx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 8(%ecx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl (%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 44(%edx), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 40(%edx), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 36(%edx), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 32(%edx), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 28(%edx), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 24(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 16(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 12(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 8(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 4(%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl %ebx, (%esp)
+ leal 776(%esp), %ecx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ movl 88(%esp), %eax # 4-byte Reload
+ addl 776(%esp), %eax
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 780(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 796(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 804(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ adcl 808(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ movl 136(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 720(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 720(%esp), %esi
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 724(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 96(%esp), %esi # 4-byte Reload
+ adcl 752(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ adcl $0, %edi
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 664(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 664(%esp), %ebp
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 668(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 692(%esp), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl 696(%esp), %ebp
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ adcl 712(%esp), %edi
+ movl %edi, 136(%esp) # 4-byte Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ movl 144(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 608(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 608(%esp), %esi
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 612(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 636(%esp), %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 144(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl 108(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ movl %esi, %eax
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 552(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 552(%esp), %esi
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 556(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, %esi
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 496(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 496(%esp), %edi
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 500(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %ebp # 4-byte Reload
+ adcl 528(%esp), %ebp
+ movl 136(%esp), %edi # 4-byte Reload
+ adcl 532(%esp), %edi
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 440(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 440(%esp), %esi
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 444(%esp), %ecx
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ adcl 468(%esp), %ebp
+ movl %ebp, 156(%esp) # 4-byte Spill
+ adcl 472(%esp), %edi
+ movl %edi, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %esi # 4-byte Reload
+ adcl 476(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 384(%esp), %ecx
+ movl 856(%esp), %eax
+ movl %eax, %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 384(%esp), %edi
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 388(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %ebp # 4-byte Reload
+ adcl 400(%esp), %ebp
+ movl 152(%esp), %edi # 4-byte Reload
+ adcl 404(%esp), %edi
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ adcl 416(%esp), %esi
+ movl %esi, 148(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 140(%esp), %esi # 4-byte Reload
+ adcl 424(%esp), %esi
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 328(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ movl 100(%esp), %eax # 4-byte Reload
+ addl 328(%esp), %eax
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 336(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ adcl 340(%esp), %ebp
+ movl %ebp, 132(%esp) # 4-byte Spill
+ adcl 344(%esp), %edi
+ movl %edi, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %ecx # 4-byte Reload
+ adcl 348(%esp), %ecx
+ movl %ecx, 156(%esp) # 4-byte Spill
+ movl 136(%esp), %ecx # 4-byte Reload
+ adcl 352(%esp), %ecx
+ movl %ecx, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %ecx # 4-byte Reload
+ adcl 356(%esp), %ecx
+ movl %ecx, 148(%esp) # 4-byte Spill
+ movl 116(%esp), %ebp # 4-byte Reload
+ adcl 360(%esp), %ebp
+ adcl 364(%esp), %esi
+ movl %esi, 140(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 368(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 144(%esp), %ecx # 4-byte Reload
+ adcl 372(%esp), %ecx
+ movl %ecx, 144(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 376(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl %eax, %esi
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 272(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 280(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 152(%esp), %ecx # 4-byte Reload
+ adcl 284(%esp), %ecx
+ movl %ecx, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %ecx # 4-byte Reload
+ adcl 288(%esp), %ecx
+ movl %ecx, 156(%esp) # 4-byte Spill
+ movl 136(%esp), %ecx # 4-byte Reload
+ adcl 292(%esp), %ecx
+ movl %ecx, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %ecx # 4-byte Reload
+ adcl 296(%esp), %ecx
+ movl %ecx, 148(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ adcl 300(%esp), %esi
+ movl 140(%esp), %ecx # 4-byte Reload
+ adcl 304(%esp), %ecx
+ movl %ecx, 140(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 308(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 144(%esp), %ecx # 4-byte Reload
+ adcl 312(%esp), %ecx
+ movl %ecx, 144(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 316(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 320(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, %ebp
+ movl %eax, %edi
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 216(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 216(%esp), %edi
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 220(%esp), %ecx
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ adcl 240(%esp), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 124(%esp), %esi # 4-byte Reload
+ adcl 248(%esp), %esi
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 160(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 160(%esp), %edi
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 136(%esp), %edx # 4-byte Reload
+ adcl 172(%esp), %edx
+ movl %edx, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %ebx # 4-byte Reload
+ adcl 176(%esp), %ebx
+ movl %ebx, 148(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 180(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ adcl 188(%esp), %esi
+ movl %esi, 124(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ subl 24(%esp), %edi # 4-byte Folded Reload
+ movl 156(%esp), %esi # 4-byte Reload
+ sbbl 16(%esp), %esi # 4-byte Folded Reload
+ sbbl 20(%esp), %edx # 4-byte Folded Reload
+ sbbl 28(%esp), %ebx # 4-byte Folded Reload
+ sbbl 32(%esp), %ecx # 4-byte Folded Reload
+ movl 140(%esp), %eax # 4-byte Reload
+ sbbl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ sbbl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ sbbl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ sbbl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ sbbl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ sbbl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ sbbl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 132(%esp) # 4-byte Spill
+ sbbl $0, %ebp
+ andl $1, %ebp
+ jne .LBB180_2
+# BB#1:
+ movl %ebx, 148(%esp) # 4-byte Spill
+.LBB180_2:
+ movl %ebp, %ebx
+ testb %bl, %bl
+ movl 152(%esp), %ebx # 4-byte Reload
+ jne .LBB180_4
+# BB#3:
+ movl %edi, %ebx
+.LBB180_4:
+ movl 848(%esp), %edi
+ movl %ebx, (%edi)
+ movl 144(%esp), %ebx # 4-byte Reload
+ jne .LBB180_6
+# BB#5:
+ movl %esi, 156(%esp) # 4-byte Spill
+.LBB180_6:
+ movl 156(%esp), %esi # 4-byte Reload
+ movl %esi, 4(%edi)
+ movl 136(%esp), %esi # 4-byte Reload
+ jne .LBB180_8
+# BB#7:
+ movl %edx, %esi
+.LBB180_8:
+ movl %esi, 8(%edi)
+ movl 148(%esp), %edx # 4-byte Reload
+ movl %edx, 12(%edi)
+ movl 128(%esp), %esi # 4-byte Reload
+ movl 116(%esp), %edx # 4-byte Reload
+ jne .LBB180_10
+# BB#9:
+ movl %ecx, %edx
+.LBB180_10:
+ movl %edx, 16(%edi)
+ movl 120(%esp), %edx # 4-byte Reload
+ movl 140(%esp), %ecx # 4-byte Reload
+ jne .LBB180_12
+# BB#11:
+ movl 84(%esp), %ecx # 4-byte Reload
+.LBB180_12:
+ movl %ecx, 20(%edi)
+ movl 108(%esp), %ecx # 4-byte Reload
+ movl 124(%esp), %eax # 4-byte Reload
+ jne .LBB180_14
+# BB#13:
+ movl 88(%esp), %eax # 4-byte Reload
+.LBB180_14:
+ movl %eax, 24(%edi)
+ movl 104(%esp), %eax # 4-byte Reload
+ jne .LBB180_16
+# BB#15:
+ movl 92(%esp), %ebx # 4-byte Reload
+.LBB180_16:
+ movl %ebx, 28(%edi)
+ jne .LBB180_18
+# BB#17:
+ movl 96(%esp), %esi # 4-byte Reload
+.LBB180_18:
+ movl %esi, 32(%edi)
+ jne .LBB180_20
+# BB#19:
+ movl 100(%esp), %edx # 4-byte Reload
+.LBB180_20:
+ movl %edx, 36(%edi)
+ jne .LBB180_22
+# BB#21:
+ movl 112(%esp), %ecx # 4-byte Reload
+.LBB180_22:
+ movl %ecx, 40(%edi)
+ jne .LBB180_24
+# BB#23:
+ movl 132(%esp), %eax # 4-byte Reload
+.LBB180_24:
+ movl %eax, 44(%edi)
+ addl $828, %esp # imm = 0x33C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end180:
+ .size mcl_fp_montRed12Lbmi2, .Lfunc_end180-mcl_fp_montRed12Lbmi2
+
+ .globl mcl_fp_addPre12Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre12Lbmi2,@function
+mcl_fp_addPre12Lbmi2: # @mcl_fp_addPre12Lbmi2
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 20(%esp), %ecx
+ addl (%ecx), %edx
+ adcl 4(%ecx), %esi
+ movl 8(%eax), %edi
+ adcl 8(%ecx), %edi
+ movl 16(%esp), %ebx
+ movl %edx, (%ebx)
+ movl 12(%ecx), %edx
+ movl %esi, 4(%ebx)
+ movl 16(%ecx), %esi
+ adcl 12(%eax), %edx
+ adcl 16(%eax), %esi
+ movl %edi, 8(%ebx)
+ movl 20(%eax), %edi
+ movl %edx, 12(%ebx)
+ movl 20(%ecx), %edx
+ adcl %edi, %edx
+ movl 24(%eax), %edi
+ movl %esi, 16(%ebx)
+ movl 24(%ecx), %esi
+ adcl %edi, %esi
+ movl 28(%eax), %edi
+ movl %edx, 20(%ebx)
+ movl 28(%ecx), %edx
+ adcl %edi, %edx
+ movl 32(%eax), %edi
+ movl %esi, 24(%ebx)
+ movl 32(%ecx), %esi
+ adcl %edi, %esi
+ movl 36(%eax), %edi
+ movl %edx, 28(%ebx)
+ movl 36(%ecx), %edx
+ adcl %edi, %edx
+ movl 40(%eax), %edi
+ movl %esi, 32(%ebx)
+ movl 40(%ecx), %esi
+ adcl %edi, %esi
+ movl %edx, 36(%ebx)
+ movl %esi, 40(%ebx)
+ movl 44(%eax), %eax
+ movl 44(%ecx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 44(%ebx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end181:
+ .size mcl_fp_addPre12Lbmi2, .Lfunc_end181-mcl_fp_addPre12Lbmi2
+
+ .globl mcl_fp_subPre12Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre12Lbmi2,@function
+mcl_fp_subPre12Lbmi2: # @mcl_fp_subPre12Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %eax, %eax
+ movl 28(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edx), %ebx
+ movl 20(%esp), %ebp
+ movl %esi, (%ebp)
+ movl 12(%ecx), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ebp)
+ movl 16(%ecx), %edi
+ sbbl 16(%edx), %edi
+ movl %ebx, 8(%ebp)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%ebp)
+ movl 20(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %edi, 16(%ebp)
+ movl 24(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 28(%edx), %ebx
+ movl %esi, 20(%ebp)
+ movl 28(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 32(%edx), %ebx
+ movl %edi, 24(%ebp)
+ movl 32(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 36(%edx), %ebx
+ movl %esi, 28(%ebp)
+ movl 36(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 40(%edx), %ebx
+ movl %edi, 32(%ebp)
+ movl 40(%ecx), %edi
+ sbbl %ebx, %edi
+ movl %esi, 36(%ebp)
+ movl %edi, 40(%ebp)
+ movl 44(%edx), %edx
+ movl 44(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 44(%ebp)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end182:
+ .size mcl_fp_subPre12Lbmi2, .Lfunc_end182-mcl_fp_subPre12Lbmi2
+
+ .globl mcl_fp_shr1_12Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_12Lbmi2,@function
+mcl_fp_shr1_12Lbmi2: # @mcl_fp_shr1_12Lbmi2
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl 8(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 8(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 4(%ecx)
+ movl 12(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 8(%ecx)
+ movl 16(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 12(%ecx)
+ movl 20(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 16(%ecx)
+ movl 24(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 20(%ecx)
+ movl 28(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 24(%ecx)
+ movl 32(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 28(%ecx)
+ movl 36(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 32(%ecx)
+ movl 40(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 36(%ecx)
+ movl 44(%eax), %eax
+ shrdl $1, %eax, %edx
+ movl %edx, 40(%ecx)
+ shrl %eax
+ movl %eax, 44(%ecx)
+ popl %esi
+ retl
+.Lfunc_end183:
+ .size mcl_fp_shr1_12Lbmi2, .Lfunc_end183-mcl_fp_shr1_12Lbmi2
+
+ .globl mcl_fp_add12Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add12Lbmi2,@function
+mcl_fp_add12Lbmi2: # @mcl_fp_add12Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $36, %esp
+ movl 64(%esp), %ebx
+ movl (%ebx), %edx
+ movl 4(%ebx), %ecx
+ movl 60(%esp), %eax
+ addl (%eax), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl 4(%eax), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 8(%ebx), %ecx
+ adcl 8(%eax), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 12(%eax), %edx
+ movl 16(%eax), %ecx
+ adcl 12(%ebx), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 16(%ebx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 20(%eax), %ecx
+ adcl 20(%ebx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 24(%eax), %ecx
+ adcl 24(%ebx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 28(%eax), %ecx
+ adcl 28(%ebx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 32(%eax), %ebp
+ adcl 32(%ebx), %ebp
+ movl %ebp, (%esp) # 4-byte Spill
+ movl 36(%eax), %edi
+ adcl 36(%ebx), %edi
+ movl 40(%eax), %esi
+ adcl 40(%ebx), %esi
+ movl 44(%eax), %edx
+ adcl 44(%ebx), %edx
+ movl 56(%esp), %ebx
+ movl 4(%esp), %eax # 4-byte Reload
+ movl %eax, (%ebx)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%ebx)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 8(%ebx)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%ebx)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 16(%ebx)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 20(%ebx)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%ebx)
+ movl 8(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%ebx)
+ movl %ebp, 32(%ebx)
+ movl %edi, 36(%ebx)
+ movl %esi, 40(%ebx)
+ movl %edx, 44(%ebx)
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl 68(%esp), %ebp
+ subl (%ebp), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ sbbl 4(%ebp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ sbbl 8(%ebp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ sbbl 12(%ebp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ sbbl 16(%ebp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ sbbl 20(%ebp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ sbbl 24(%ebp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 8(%esp), %eax # 4-byte Reload
+ sbbl 28(%ebp), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl (%esp), %eax # 4-byte Reload
+ sbbl 32(%ebp), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ sbbl 36(%ebp), %edi
+ sbbl 40(%ebp), %esi
+ sbbl 44(%ebp), %edx
+ sbbl $0, %ecx
+ testb $1, %cl
+ jne .LBB184_2
+# BB#1: # %nocarry
+ movl 4(%esp), %eax # 4-byte Reload
+ movl %eax, (%ebx)
+ movl 32(%esp), %eax # 4-byte Reload
+ movl %eax, 4(%ebx)
+ movl 28(%esp), %eax # 4-byte Reload
+ movl %eax, 8(%ebx)
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 16(%ebx)
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 20(%ebx)
+ movl 12(%esp), %eax # 4-byte Reload
+ movl %eax, 24(%ebx)
+ movl 8(%esp), %eax # 4-byte Reload
+ movl %eax, 28(%ebx)
+ movl (%esp), %eax # 4-byte Reload
+ movl %eax, 32(%ebx)
+ movl %edi, 36(%ebx)
+ movl %esi, 40(%ebx)
+ movl %edx, 44(%ebx)
+.LBB184_2: # %carry
+ addl $36, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end184:
+ .size mcl_fp_add12Lbmi2, .Lfunc_end184-mcl_fp_add12Lbmi2
+
+ .globl mcl_fp_addNF12Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF12Lbmi2,@function
+mcl_fp_addNF12Lbmi2: # @mcl_fp_addNF12Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $88, %esp
+ movl 116(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+ movl 112(%esp), %edx
+ addl (%edx), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 4(%edx), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 44(%esi), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 40(%esi), %ebp
+ movl 36(%esi), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 32(%esi), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 28(%esi), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 20(%esi), %ebx
+ movl 16(%esi), %edi
+ movl 12(%esi), %ecx
+ movl 8(%esi), %eax
+ adcl 8(%edx), %eax
+ adcl 12(%edx), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 16(%edx), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl 20(%edx), %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 24(%edx), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 28(%edx), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 32(%edx), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 36(%edx), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl %eax, %esi
+ adcl 40(%edx), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 44(%edx), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 120(%esp), %ebp
+ movl 60(%esp), %edx # 4-byte Reload
+ subl (%ebp), %edx
+ movl 64(%esp), %eax # 4-byte Reload
+ sbbl 4(%ebp), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl %esi, %eax
+ sbbl 8(%ebp), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ sbbl 12(%ebp), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 16(%ebp), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ sbbl 20(%ebp), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ movl %eax, %ecx
+ sbbl 24(%ebp), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ sbbl 28(%ebp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ sbbl 32(%ebp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ sbbl 36(%ebp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl %ecx, %edi
+ sbbl 40(%ebp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ sbbl 44(%ebp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl %edi, %ebp
+ movl 60(%esp), %edi # 4-byte Reload
+ sarl $31, %ebp
+ testl %ebp, %ebp
+ js .LBB185_2
+# BB#1:
+ movl %edx, %edi
+.LBB185_2:
+ movl 108(%esp), %edx
+ movl %edi, (%edx)
+ movl 64(%esp), %edi # 4-byte Reload
+ js .LBB185_4
+# BB#3:
+ movl (%esp), %edi # 4-byte Reload
+.LBB185_4:
+ movl %edi, 4(%edx)
+ movl %eax, %ebp
+ js .LBB185_6
+# BB#5:
+ movl 4(%esp), %esi # 4-byte Reload
+.LBB185_6:
+ movl %esi, 8(%edx)
+ movl %ecx, %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ movl 48(%esp), %ecx # 4-byte Reload
+ js .LBB185_8
+# BB#7:
+ movl 8(%esp), %ecx # 4-byte Reload
+.LBB185_8:
+ movl %ecx, 12(%edx)
+ movl 76(%esp), %ebx # 4-byte Reload
+ movl 84(%esp), %edi # 4-byte Reload
+ js .LBB185_10
+# BB#9:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB185_10:
+ movl %eax, 16(%edx)
+ movl 80(%esp), %ecx # 4-byte Reload
+ js .LBB185_12
+# BB#11:
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+.LBB185_12:
+ movl 56(%esp), %eax # 4-byte Reload
+ movl %eax, 20(%edx)
+ js .LBB185_14
+# BB#13:
+ movl 20(%esp), %ebp # 4-byte Reload
+.LBB185_14:
+ movl %ebp, 24(%edx)
+ js .LBB185_16
+# BB#15:
+ movl 24(%esp), %edi # 4-byte Reload
+.LBB185_16:
+ movl %edi, 28(%edx)
+ js .LBB185_18
+# BB#17:
+ movl 28(%esp), %ebx # 4-byte Reload
+.LBB185_18:
+ movl %ebx, 32(%edx)
+ movl 72(%esp), %eax # 4-byte Reload
+ js .LBB185_20
+# BB#19:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB185_20:
+ movl %eax, 36(%edx)
+ js .LBB185_22
+# BB#21:
+ movl 36(%esp), %esi # 4-byte Reload
+.LBB185_22:
+ movl %esi, 40(%edx)
+ js .LBB185_24
+# BB#23:
+ movl 40(%esp), %ecx # 4-byte Reload
+.LBB185_24:
+ movl %ecx, 44(%edx)
+ addl $88, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end185:
+ .size mcl_fp_addNF12Lbmi2, .Lfunc_end185-mcl_fp_addNF12Lbmi2
+
+ .globl mcl_fp_sub12Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub12Lbmi2,@function
+mcl_fp_sub12Lbmi2: # @mcl_fp_sub12Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $40, %esp
+ movl 64(%esp), %esi
+ movl (%esi), %ecx
+ movl 4(%esi), %eax
+ xorl %ebx, %ebx
+ movl 68(%esp), %edi
+ subl (%edi), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ sbbl 4(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ sbbl 12(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ sbbl 16(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ sbbl 20(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ sbbl 24(%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 28(%esi), %edx
+ sbbl 28(%edi), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 32(%esi), %ecx
+ sbbl 32(%edi), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 36(%esi), %eax
+ sbbl 36(%edi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 40(%esi), %ebp
+ sbbl 40(%edi), %ebp
+ movl 44(%esi), %esi
+ sbbl 44(%edi), %esi
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 60(%esp), %ebx
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, (%ebx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%ebx)
+ movl %edx, 28(%ebx)
+ movl %ecx, 32(%ebx)
+ movl %eax, 36(%ebx)
+ movl %ebp, 40(%ebx)
+ movl %esi, 44(%ebx)
+ je .LBB186_2
+# BB#1: # %carry
+ movl %esi, %edi
+ movl 72(%esp), %esi
+ movl 12(%esp), %ecx # 4-byte Reload
+ addl (%esi), %ecx
+ movl %ecx, (%ebx)
+ movl 24(%esp), %edx # 4-byte Reload
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 8(%esi), %ecx
+ movl 12(%esi), %eax
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl 24(%esi), %ecx
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl 28(%esi), %eax
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%ebx)
+ movl 32(%esi), %ecx
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+ movl 36(%esi), %eax
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%ebx)
+ movl %eax, 36(%ebx)
+ movl 40(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 40(%ebx)
+ movl 44(%esi), %eax
+ adcl %edi, %eax
+ movl %eax, 44(%ebx)
+.LBB186_2: # %nocarry
+ addl $40, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end186:
+ .size mcl_fp_sub12Lbmi2, .Lfunc_end186-mcl_fp_sub12Lbmi2
+
+ .globl mcl_fp_subNF12Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF12Lbmi2,@function
+mcl_fp_subNF12Lbmi2: # @mcl_fp_subNF12Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $72, %esp
+ movl 96(%esp), %ecx
+ movl (%ecx), %edx
+ movl 4(%ecx), %eax
+ movl 100(%esp), %edi
+ subl (%edi), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ sbbl 4(%edi), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%ecx), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 40(%ecx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 32(%ecx), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 28(%ecx), %ebp
+ movl 24(%ecx), %ebx
+ movl 20(%ecx), %esi
+ movl 16(%ecx), %edx
+ movl 12(%ecx), %eax
+ movl 8(%ecx), %ecx
+ sbbl 8(%edi), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ sbbl 12(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ sbbl 16(%edi), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ sbbl 20(%edi), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ sbbl 24(%edi), %ebx
+ movl %ebx, 40(%esp) # 4-byte Spill
+ sbbl 28(%edi), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ sbbl 32(%edi), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ sbbl 36(%edi), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ sbbl 44(%edi), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ sarl $31, %eax
+ movl %eax, %edx
+ addl %edx, %edx
+ movl %eax, %edi
+ adcl %edi, %edi
+ movl %eax, %ebp
+ adcl %ebp, %ebp
+ movl %eax, %esi
+ adcl %esi, %esi
+ shrl $31, %ecx
+ orl %edx, %ecx
+ movl 104(%esp), %edx
+ andl 12(%edx), %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ andl 8(%edx), %ebp
+ andl 4(%edx), %edi
+ andl (%edx), %ecx
+ movl 44(%edx), %esi
+ andl %eax, %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl 40(%edx), %esi
+ andl %eax, %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 36(%edx), %esi
+ andl %eax, %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ movl 32(%edx), %esi
+ andl %eax, %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl 28(%edx), %esi
+ andl %eax, %esi
+ movl %esi, (%esp) # 4-byte Spill
+ movl 24(%edx), %ebx
+ andl %eax, %ebx
+ movl 20(%edx), %esi
+ andl %eax, %esi
+ andl 16(%edx), %eax
+ addl 48(%esp), %ecx # 4-byte Folded Reload
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl 92(%esp), %edx
+ movl %ecx, (%edx)
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl %edi, 4(%edx)
+ movl 8(%esp), %ecx # 4-byte Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ebp, 8(%edx)
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 12(%edx)
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %eax, 16(%edx)
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, 20(%edx)
+ movl (%esp), %ecx # 4-byte Reload
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ebx, 24(%edx)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 28(%edx)
+ movl 12(%esp), %ecx # 4-byte Reload
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 32(%edx)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 36(%edx)
+ movl %eax, 40(%edx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%edx)
+ addl $72, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end187:
+ .size mcl_fp_subNF12Lbmi2, .Lfunc_end187-mcl_fp_subNF12Lbmi2
+
+ .globl mcl_fpDbl_add12Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add12Lbmi2,@function
+mcl_fpDbl_add12Lbmi2: # @mcl_fpDbl_add12Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $88, %esp
+ movl 116(%esp), %ecx
+ movl 112(%esp), %edi
+ movl 12(%edi), %esi
+ movl 16(%edi), %edx
+ movl 8(%ecx), %ebx
+ movl (%ecx), %ebp
+ addl (%edi), %ebp
+ movl 108(%esp), %eax
+ movl %ebp, (%eax)
+ movl 4(%ecx), %ebp
+ adcl 4(%edi), %ebp
+ adcl 8(%edi), %ebx
+ adcl 12(%ecx), %esi
+ adcl 16(%ecx), %edx
+ movl %ebp, 4(%eax)
+ movl 56(%ecx), %ebp
+ movl %ebx, 8(%eax)
+ movl 20(%ecx), %ebx
+ movl %esi, 12(%eax)
+ movl 20(%edi), %esi
+ adcl %ebx, %esi
+ movl 24(%ecx), %ebx
+ movl %edx, 16(%eax)
+ movl 24(%edi), %edx
+ adcl %ebx, %edx
+ movl 28(%ecx), %ebx
+ movl %esi, 20(%eax)
+ movl 28(%edi), %esi
+ adcl %ebx, %esi
+ movl 32(%ecx), %ebx
+ movl %edx, 24(%eax)
+ movl 32(%edi), %edx
+ adcl %ebx, %edx
+ movl 36(%ecx), %ebx
+ movl %esi, 28(%eax)
+ movl 36(%edi), %esi
+ adcl %ebx, %esi
+ movl 40(%ecx), %ebx
+ movl %edx, 32(%eax)
+ movl 40(%edi), %edx
+ adcl %ebx, %edx
+ movl 44(%ecx), %ebx
+ movl %esi, 36(%eax)
+ movl 44(%edi), %esi
+ adcl %ebx, %esi
+ movl 48(%ecx), %ebx
+ movl %edx, 40(%eax)
+ movl 48(%edi), %edx
+ adcl %ebx, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 52(%ecx), %ebx
+ movl %esi, 44(%eax)
+ movl 52(%edi), %eax
+ adcl %ebx, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 56(%edi), %eax
+ adcl %ebp, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%ecx), %eax
+ movl 60(%edi), %edx
+ adcl %eax, %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 64(%ecx), %eax
+ movl 64(%edi), %edx
+ adcl %eax, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 68(%ecx), %eax
+ movl 68(%edi), %edx
+ adcl %eax, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 72(%ecx), %eax
+ movl 72(%edi), %edx
+ adcl %eax, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 76(%ecx), %eax
+ movl 76(%edi), %edx
+ adcl %eax, %edx
+ movl 80(%ecx), %esi
+ movl 80(%edi), %eax
+ adcl %esi, %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 84(%ecx), %ebx
+ movl 84(%edi), %esi
+ adcl %ebx, %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 88(%ecx), %ebp
+ movl 88(%edi), %ebx
+ adcl %ebp, %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 92(%ecx), %ecx
+ movl 92(%edi), %edi
+ adcl %ecx, %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl 120(%esp), %ebp
+ movl 72(%esp), %edi # 4-byte Reload
+ subl (%ebp), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ sbbl 4(%ebp), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ sbbl 8(%ebp), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ sbbl 12(%ebp), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ sbbl 16(%ebp), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ sbbl 20(%ebp), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ sbbl 24(%ebp), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl %edx, %edi
+ sbbl 28(%ebp), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ sbbl 32(%ebp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ sbbl 36(%ebp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ movl 44(%esp), %ebx # 4-byte Reload
+ sbbl 40(%ebp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebx, %edi
+ sbbl 44(%ebp), %edi
+ sbbl $0, %ecx
+ andl $1, %ecx
+ jne .LBB188_2
+# BB#1:
+ movl %edi, %ebx
+.LBB188_2:
+ testb %cl, %cl
+ movl 72(%esp), %ecx # 4-byte Reload
+ movl 68(%esp), %esi # 4-byte Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ movl 60(%esp), %ebp # 4-byte Reload
+ jne .LBB188_4
+# BB#3:
+ movl (%esp), %edx # 4-byte Reload
+ movl 4(%esp), %esi # 4-byte Reload
+ movl 8(%esp), %edi # 4-byte Reload
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB188_4:
+ movl 108(%esp), %eax
+ movl %ecx, 48(%eax)
+ movl 76(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%eax)
+ movl 80(%esp), %ecx # 4-byte Reload
+ movl %ecx, 56(%eax)
+ movl 84(%esp), %ecx # 4-byte Reload
+ movl %ecx, 60(%eax)
+ movl %ebp, 64(%eax)
+ movl %edi, 68(%eax)
+ movl %esi, 72(%eax)
+ movl %edx, 76(%eax)
+ movl 56(%esp), %ecx # 4-byte Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ jne .LBB188_6
+# BB#5:
+ movl 32(%esp), %edx # 4-byte Reload
+.LBB188_6:
+ movl %edx, 80(%eax)
+ movl 52(%esp), %edx # 4-byte Reload
+ jne .LBB188_8
+# BB#7:
+ movl 36(%esp), %edx # 4-byte Reload
+.LBB188_8:
+ movl %edx, 84(%eax)
+ jne .LBB188_10
+# BB#9:
+ movl 40(%esp), %ecx # 4-byte Reload
+.LBB188_10:
+ movl %ecx, 88(%eax)
+ movl %ebx, 92(%eax)
+ addl $88, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end188:
+ .size mcl_fpDbl_add12Lbmi2, .Lfunc_end188-mcl_fpDbl_add12Lbmi2
+
+ .globl mcl_fpDbl_sub12Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub12Lbmi2,@function
+mcl_fpDbl_sub12Lbmi2: # @mcl_fpDbl_sub12Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $76, %esp
+ movl 100(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %edx
+ movl 104(%esp), %ebx
+ subl (%ebx), %eax
+ sbbl 4(%ebx), %edx
+ movl 8(%esi), %edi
+ sbbl 8(%ebx), %edi
+ movl 96(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 12(%esi), %eax
+ sbbl 12(%ebx), %eax
+ movl %edx, 4(%ecx)
+ movl 16(%esi), %edx
+ sbbl 16(%ebx), %edx
+ movl %edi, 8(%ecx)
+ movl 20(%ebx), %edi
+ movl %eax, 12(%ecx)
+ movl 20(%esi), %eax
+ sbbl %edi, %eax
+ movl 24(%ebx), %edi
+ movl %edx, 16(%ecx)
+ movl 24(%esi), %edx
+ sbbl %edi, %edx
+ movl 28(%ebx), %edi
+ movl %eax, 20(%ecx)
+ movl 28(%esi), %eax
+ sbbl %edi, %eax
+ movl 32(%ebx), %edi
+ movl %edx, 24(%ecx)
+ movl 32(%esi), %edx
+ sbbl %edi, %edx
+ movl 36(%ebx), %edi
+ movl %eax, 28(%ecx)
+ movl 36(%esi), %eax
+ sbbl %edi, %eax
+ movl 40(%ebx), %edi
+ movl %edx, 32(%ecx)
+ movl 40(%esi), %edx
+ sbbl %edi, %edx
+ movl 44(%ebx), %edi
+ movl %eax, 36(%ecx)
+ movl 44(%esi), %eax
+ sbbl %edi, %eax
+ movl 48(%ebx), %edi
+ movl %edx, 40(%ecx)
+ movl 48(%esi), %edx
+ sbbl %edi, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 52(%ebx), %edx
+ movl %eax, 44(%ecx)
+ movl 52(%esi), %eax
+ sbbl %edx, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 56(%ebx), %eax
+ movl 56(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 60(%ebx), %eax
+ movl 60(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 64(%ebx), %eax
+ movl 64(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 68(%ebx), %eax
+ movl 68(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 72(%ebx), %eax
+ movl 72(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 76(%ebx), %eax
+ movl 76(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 80(%ebx), %eax
+ movl 80(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 84(%ebx), %eax
+ movl 84(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 88(%ebx), %eax
+ movl 88(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 92(%ebx), %eax
+ movl 92(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 108(%esp), %ebp
+ jne .LBB189_1
+# BB#2:
+ movl $0, 36(%esp) # 4-byte Folded Spill
+ jmp .LBB189_3
+.LBB189_1:
+ movl 44(%ebp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+.LBB189_3:
+ testb %al, %al
+ jne .LBB189_4
+# BB#5:
+ movl $0, 12(%esp) # 4-byte Folded Spill
+ movl $0, %esi
+ jmp .LBB189_6
+.LBB189_4:
+ movl (%ebp), %esi
+ movl 4(%ebp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+.LBB189_6:
+ jne .LBB189_7
+# BB#8:
+ movl $0, 20(%esp) # 4-byte Folded Spill
+ jmp .LBB189_9
+.LBB189_7:
+ movl 40(%ebp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+.LBB189_9:
+ jne .LBB189_10
+# BB#11:
+ movl $0, 16(%esp) # 4-byte Folded Spill
+ jmp .LBB189_12
+.LBB189_10:
+ movl 36(%ebp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+.LBB189_12:
+ jne .LBB189_13
+# BB#14:
+ movl $0, 8(%esp) # 4-byte Folded Spill
+ jmp .LBB189_15
+.LBB189_13:
+ movl 32(%ebp), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+.LBB189_15:
+ jne .LBB189_16
+# BB#17:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB189_18
+.LBB189_16:
+ movl 28(%ebp), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+.LBB189_18:
+ jne .LBB189_19
+# BB#20:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB189_21
+.LBB189_19:
+ movl 24(%ebp), %eax
+ movl %eax, (%esp) # 4-byte Spill
+.LBB189_21:
+ jne .LBB189_22
+# BB#23:
+ movl $0, %ebx
+ jmp .LBB189_24
+.LBB189_22:
+ movl 20(%ebp), %ebx
+.LBB189_24:
+ jne .LBB189_25
+# BB#26:
+ movl $0, %eax
+ jmp .LBB189_27
+.LBB189_25:
+ movl 16(%ebp), %eax
+.LBB189_27:
+ jne .LBB189_28
+# BB#29:
+ movl %ebp, %edx
+ movl $0, %ebp
+ jmp .LBB189_30
+.LBB189_28:
+ movl %ebp, %edx
+ movl 12(%edx), %ebp
+.LBB189_30:
+ jne .LBB189_31
+# BB#32:
+ xorl %edx, %edx
+ jmp .LBB189_33
+.LBB189_31:
+ movl 8(%edx), %edx
+.LBB189_33:
+ addl 32(%esp), %esi # 4-byte Folded Reload
+ movl 12(%esp), %edi # 4-byte Reload
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 48(%ecx)
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edi, 52(%ecx)
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %edx, 56(%ecx)
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %ebp, 60(%ecx)
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %eax, 64(%ecx)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %ebx, 68(%ecx)
+ movl 4(%esp), %edx # 4-byte Reload
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 72(%ecx)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 76(%ecx)
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 80(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 84(%ecx)
+ movl %eax, 88(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%ecx)
+ addl $76, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end189:
+ .size mcl_fpDbl_sub12Lbmi2, .Lfunc_end189-mcl_fpDbl_sub12Lbmi2
+
+ .align 16, 0x90
+ .type .LmulPv416x32,@function
+.LmulPv416x32: # @mulPv416x32
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $44, %esp
+ movl %edx, %eax
+ movl 64(%esp), %ebx
+ movl %ebx, %edx
+ mulxl 4(%eax), %esi, %ebp
+ movl %ebx, %edx
+ mulxl (%eax), %edi, %edx
+ movl %edi, 40(%esp) # 4-byte Spill
+ addl %esi, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl 8(%eax), %edx, %esi
+ adcl %ebp, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl 12(%eax), %edx, %edi
+ adcl %esi, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl 16(%eax), %edx, %esi
+ adcl %edi, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl 20(%eax), %edx, %edi
+ adcl %esi, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl 24(%eax), %edx, %esi
+ adcl %edi, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl 28(%eax), %edx, %edi
+ adcl %esi, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl 32(%eax), %edx, %esi
+ adcl %edi, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl 36(%eax), %edi, %ebp
+ adcl %esi, %edi
+ movl %ebx, %edx
+ mulxl 40(%eax), %esi, %edx
+ movl %edx, (%esp) # 4-byte Spill
+ adcl %ebp, %esi
+ movl %ebx, %edx
+ mulxl 44(%eax), %edx, %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ adcl (%esp), %edx # 4-byte Folded Reload
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl %ebp, (%ecx)
+ movl 36(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%ecx)
+ movl 32(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%ecx)
+ movl 28(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%ecx)
+ movl 24(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%ecx)
+ movl 20(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%ecx)
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%ecx)
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%ecx)
+ movl 8(%esp), %ebp # 4-byte Reload
+ movl %ebp, 32(%ecx)
+ movl %edi, 36(%ecx)
+ movl %esi, 40(%ecx)
+ movl %edx, 44(%ecx)
+ movl %ebx, %edx
+ mulxl 48(%eax), %eax, %edx
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%ecx)
+ adcl $0, %edx
+ movl %edx, 52(%ecx)
+ movl %ecx, %eax
+ addl $44, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end190:
+ .size .LmulPv416x32, .Lfunc_end190-.LmulPv416x32
+
+ .globl mcl_fp_mulUnitPre13Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre13Lbmi2,@function
+mcl_fp_mulUnitPre13Lbmi2: # @mcl_fp_mulUnitPre13Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $108, %esp
+ calll .L191$pb
+.L191$pb:
+ popl %ebx
+.Ltmp32:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp32-.L191$pb), %ebx
+ movl 136(%esp), %eax
+ movl %eax, (%esp)
+ leal 48(%esp), %ecx
+ movl 132(%esp), %edx
+ calll .LmulPv416x32
+ movl 100(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 88(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 80(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 76(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 72(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp
+ movl 64(%esp), %ebx
+ movl 60(%esp), %edi
+ movl 56(%esp), %esi
+ movl 48(%esp), %edx
+ movl 52(%esp), %ecx
+ movl 128(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %edi, 12(%eax)
+ movl %ebx, 16(%eax)
+ movl %ebp, 20(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%eax)
+ addl $108, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end191:
+ .size mcl_fp_mulUnitPre13Lbmi2, .Lfunc_end191-mcl_fp_mulUnitPre13Lbmi2
+
+ .globl mcl_fpDbl_mulPre13Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre13Lbmi2,@function
+mcl_fpDbl_mulPre13Lbmi2: # @mcl_fpDbl_mulPre13Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $844, %esp # imm = 0x34C
+ calll .L192$pb
+.L192$pb:
+ popl %edi
+.Ltmp33:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp33-.L192$pb), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ movl 872(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 784(%esp), %ecx
+ movl 868(%esp), %edx
+ movl %edx, %esi
+ movl %edi, %ebx
+ calll .LmulPv416x32
+ movl 836(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 832(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 828(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 820(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 816(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 812(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 808(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 804(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 800(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 796(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 792(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 784(%esp), %eax
+ movl 788(%esp), %ebp
+ movl 864(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 872(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 728(%esp), %ecx
+ movl %esi, %edx
+ movl %edi, %ebx
+ calll .LmulPv416x32
+ addl 728(%esp), %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 780(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 776(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 772(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 768(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 764(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 760(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 756(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 752(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 748(%esp), %edi
+ movl 744(%esp), %esi
+ movl 740(%esp), %edx
+ movl 732(%esp), %eax
+ movl 736(%esp), %ecx
+ movl 864(%esp), %ebp
+ movl 24(%esp), %ebx # 4-byte Reload
+ movl %ebx, 4(%ebp)
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 672(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 60(%esp), %eax # 4-byte Reload
+ addl 672(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 724(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 720(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 716(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 712(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 708(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 704(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 700(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 696(%esp), %ebx
+ movl 692(%esp), %edi
+ movl 688(%esp), %esi
+ movl 684(%esp), %edx
+ movl 676(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 680(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 60(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%eax)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl %eax, 104(%esp) # 4-byte Folded Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 616(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 616(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 668(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 664(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 660(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 656(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 652(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 648(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 644(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 640(%esp), %ebx
+ movl 636(%esp), %edi
+ movl 632(%esp), %esi
+ movl 628(%esp), %edx
+ movl 620(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 624(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 104(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 560(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 560(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 612(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 608(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 604(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 600(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 596(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 592(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 588(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 584(%esp), %ebx
+ movl 580(%esp), %edi
+ movl 576(%esp), %esi
+ movl 572(%esp), %edx
+ movl 564(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 568(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%eax)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 104(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 504(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 556(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 552(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 548(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 544(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 540(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 536(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 532(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 528(%esp), %ebx
+ movl 524(%esp), %edi
+ movl 520(%esp), %esi
+ movl 516(%esp), %edx
+ movl 508(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 512(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 104(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 448(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 448(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 500(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 496(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 492(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 488(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 484(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 480(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 476(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 472(%esp), %ebp
+ movl 468(%esp), %edi
+ movl 464(%esp), %esi
+ movl 460(%esp), %edx
+ movl 452(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 456(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 56(%esp), %ebx # 4-byte Reload
+ movl %ebx, 24(%eax)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 104(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 392(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 392(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 444(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 440(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 436(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 432(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 428(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 424(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 420(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 416(%esp), %ebx
+ movl 412(%esp), %edi
+ movl 408(%esp), %esi
+ movl 404(%esp), %edx
+ movl 396(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 400(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 104(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 336(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 336(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 388(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 384(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 380(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 376(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 364(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 360(%esp), %ebp
+ movl 356(%esp), %edi
+ movl 352(%esp), %esi
+ movl 348(%esp), %edx
+ movl 340(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 344(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 60(%esp), %ebx # 4-byte Reload
+ movl %ebx, 32(%eax)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl %eax, 104(%esp) # 4-byte Folded Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 280(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 280(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 332(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 328(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 324(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 320(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 316(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 312(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 308(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 304(%esp), %ebx
+ movl 300(%esp), %edi
+ movl 296(%esp), %esi
+ movl 292(%esp), %edx
+ movl 284(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 288(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 104(%esp), %ebp # 4-byte Reload
+ movl %ebp, 36(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 224(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 276(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 272(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 268(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 264(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 260(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 256(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 252(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 248(%esp), %ebx
+ movl 244(%esp), %edi
+ movl 240(%esp), %esi
+ movl 236(%esp), %edx
+ movl 228(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 232(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 40(%eax)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 40(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %edi
+ movl 44(%edi), %eax
+ movl %eax, (%esp)
+ leal 168(%esp), %ecx
+ movl 868(%esp), %eax
+ movl %eax, %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 60(%esp), %esi # 4-byte Reload
+ addl 168(%esp), %esi
+ movl 220(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 212(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 208(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 204(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 200(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 196(%esp), %ebp
+ movl 192(%esp), %ebx
+ movl 188(%esp), %edi
+ movl 184(%esp), %edx
+ movl 180(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 176(%esp), %ecx
+ movl 864(%esp), %eax
+ movl %esi, 44(%eax)
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ adcl 104(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 52(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 112(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 112(%esp), %esi
+ movl %esi, %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 120(%esp), %edi
+ movl 164(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 156(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 152(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 148(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 144(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 140(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 136(%esp), %ebx
+ movl 132(%esp), %esi
+ movl 128(%esp), %edx
+ movl 124(%esp), %ecx
+ movl 864(%esp), %eax
+ movl %ebp, 48(%eax)
+ movl 68(%esp), %ebp # 4-byte Reload
+ movl %ebp, 52(%eax)
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %edi, 56(%eax)
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 60(%eax)
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 64(%eax)
+ adcl 104(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, 68(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ebx, 72(%eax)
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 76(%eax)
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 88(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 80(%eax)
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 80(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 84(%eax)
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 100(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 88(%eax)
+ movl %ecx, 92(%eax)
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 96(%eax)
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 100(%eax)
+ addl $844, %esp # imm = 0x34C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end192:
+ .size mcl_fpDbl_mulPre13Lbmi2, .Lfunc_end192-mcl_fpDbl_mulPre13Lbmi2
+
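+# What follows appears to be the squaring counterpart of the routine above: under the
+# usual mcl naming (fpDbl = double-width result, Pre = plain product with no modular
+# reduction, 13L = 13 x 32-bit limbs, bmi2 = code path built for BMI2 mulx), the
+# routine forms the full 26-limb square of one 416-bit operand by calling the
+# .LmulPv416x32 helper once per source limb and folding each 14-limb partial product
+# into the accumulator with adcl carry chains — the same schoolbook column pattern as
+# mcl_fpDbl_mulPre13Lbmi2, just with both multiplicands taken from the same operand.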
+ .globl mcl_fpDbl_sqrPre13Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre13Lbmi2,@function
+mcl_fpDbl_sqrPre13Lbmi2: # @mcl_fpDbl_sqrPre13Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $844, %esp # imm = 0x34C
+ calll .L193$pb
+.L193$pb:
+ popl %ebx
+.Ltmp34:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp34-.L193$pb), %ebx
+ movl %ebx, 108(%esp) # 4-byte Spill
+ movl 868(%esp), %edx
+ movl (%edx), %eax
+ movl %eax, (%esp)
+ leal 784(%esp), %ecx
+ movl %edx, %edi
+ movl %ebx, %esi
+ calll .LmulPv416x32
+ movl 836(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 832(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 828(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 820(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 816(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 812(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 808(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 804(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 800(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 796(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 792(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 784(%esp), %eax
+ movl 788(%esp), %ebp
+ movl 864(%esp), %ecx
+ movl %eax, (%ecx)
+ movl %edi, %edx
+ movl 4(%edx), %eax
+ movl %eax, (%esp)
+ leal 728(%esp), %ecx
+ movl %esi, %ebx
+ calll .LmulPv416x32
+ addl 728(%esp), %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 780(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 776(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 772(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 768(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 764(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 760(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 756(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 752(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 748(%esp), %edi
+ movl 744(%esp), %esi
+ movl 740(%esp), %edx
+ movl 732(%esp), %eax
+ movl 736(%esp), %ecx
+ movl 864(%esp), %ebp
+ movl 24(%esp), %ebx # 4-byte Reload
+ movl %ebx, 4(%ebp)
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 8(%edx), %eax
+ movl %eax, (%esp)
+ leal 672(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 60(%esp), %eax # 4-byte Reload
+ addl 672(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 724(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 720(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 716(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 712(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 708(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 704(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 700(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 696(%esp), %ebx
+ movl 692(%esp), %edi
+ movl 688(%esp), %esi
+ movl 684(%esp), %edx
+ movl 676(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 680(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 60(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%eax)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl %eax, 104(%esp) # 4-byte Folded Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 12(%edx), %eax
+ movl %eax, (%esp)
+ leal 616(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 616(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 668(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 664(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 660(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 656(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 652(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 648(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 644(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 640(%esp), %ebx
+ movl 636(%esp), %edi
+ movl 632(%esp), %esi
+ movl 628(%esp), %edx
+ movl 620(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 624(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 104(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 16(%edx), %eax
+ movl %eax, (%esp)
+ leal 560(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 560(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 612(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 608(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 604(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 600(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 596(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 592(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 588(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 584(%esp), %ebx
+ movl 580(%esp), %edi
+ movl 576(%esp), %esi
+ movl 572(%esp), %edx
+ movl 564(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 568(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%eax)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 104(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 20(%edx), %eax
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 504(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 556(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 552(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 548(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 544(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 540(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 536(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 532(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 528(%esp), %ebx
+ movl 524(%esp), %edi
+ movl 520(%esp), %esi
+ movl 516(%esp), %edx
+ movl 508(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 512(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 104(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 24(%edx), %eax
+ movl %eax, (%esp)
+ leal 448(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 448(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 500(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 496(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 492(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 488(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 484(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 480(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 476(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 472(%esp), %ebp
+ movl 468(%esp), %edi
+ movl 464(%esp), %esi
+ movl 460(%esp), %edx
+ movl 452(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 456(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 56(%esp), %ebx # 4-byte Reload
+ movl %ebx, 24(%eax)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 104(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 28(%edx), %eax
+ movl %eax, (%esp)
+ leal 392(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 392(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 444(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 440(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 436(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 432(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 428(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 424(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 420(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 416(%esp), %ebx
+ movl 412(%esp), %edi
+ movl 408(%esp), %esi
+ movl 404(%esp), %edx
+ movl 396(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 400(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 104(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 32(%edx), %eax
+ movl %eax, (%esp)
+ leal 336(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 336(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 388(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 384(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 380(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 376(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 364(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 360(%esp), %ebp
+ movl 356(%esp), %edi
+ movl 352(%esp), %esi
+ movl 348(%esp), %edx
+ movl 340(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 344(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 60(%esp), %ebx # 4-byte Reload
+ movl %ebx, 32(%eax)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl %eax, 104(%esp) # 4-byte Folded Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 36(%edx), %eax
+ movl %eax, (%esp)
+ leal 280(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 280(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 332(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 328(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 324(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 320(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 316(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 312(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 308(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 304(%esp), %ebx
+ movl 300(%esp), %edi
+ movl 296(%esp), %esi
+ movl 292(%esp), %edx
+ movl 284(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 288(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 104(%esp), %ebp # 4-byte Reload
+ movl %ebp, 36(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 40(%edx), %eax
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 224(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 276(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 272(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 268(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 264(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 260(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 256(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 252(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 248(%esp), %ebx
+ movl 244(%esp), %edi
+ movl 240(%esp), %esi
+ movl 236(%esp), %edx
+ movl 228(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 232(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 40(%eax)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 40(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 44(%edx), %eax
+ movl %eax, (%esp)
+ leal 168(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 60(%esp), %esi # 4-byte Reload
+ addl 168(%esp), %esi
+ movl 220(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 212(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 208(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 204(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 200(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 196(%esp), %ebp
+ movl 192(%esp), %ebx
+ movl 188(%esp), %edi
+ movl 184(%esp), %edx
+ movl 180(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 176(%esp), %ecx
+ movl 864(%esp), %eax
+ movl %esi, 44(%eax)
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ adcl 104(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 52(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 48(%edx), %eax
+ movl %eax, (%esp)
+ leal 112(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 112(%esp), %esi
+ movl %esi, %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 120(%esp), %edi
+ movl 164(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 156(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 152(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 148(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 144(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 140(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 136(%esp), %ebx
+ movl 132(%esp), %esi
+ movl 128(%esp), %edx
+ movl 124(%esp), %ecx
+ movl 864(%esp), %eax
+ movl %ebp, 48(%eax)
+ movl 68(%esp), %ebp # 4-byte Reload
+ movl %ebp, 52(%eax)
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %edi, 56(%eax)
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 60(%eax)
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 64(%eax)
+ adcl 104(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, 68(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ebx, 72(%eax)
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 76(%eax)
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 88(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 80(%eax)
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 80(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 84(%eax)
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 100(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 88(%eax)
+ movl %ecx, 92(%eax)
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 96(%eax)
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 100(%eax)
+ addl $844, %esp # imm = 0x34C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end193:
+ .size mcl_fpDbl_sqrPre13Lbmi2, .Lfunc_end193-mcl_fpDbl_sqrPre13Lbmi2
+
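+# The next routine looks like the word-serial (CIOS-style) Montgomery multiplication
+# for the same 13-limb field: assuming the usual mcl calling convention, it computes
+# z = x*y*R^-1 mod p with R = 2^(13*32), loading rp = -p^-1 mod 2^32 from -4(p) and,
+# for each limb y[i], doing roughly
+#     t += x * y[i]                  (first .LmulPv416x32 call)
+#     m  = t[0] * rp mod 2^32        (the imull on the low limb)
+#     t  = (t + m*p) >> 32           (second .LmulPv416x32 call plus carry folds)
+# with a final conditional subtraction of p at the end; this is a sketch of the
+# intent, not a line-by-line annotation of the generated code.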
+ .globl mcl_fp_mont13Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont13Lbmi2,@function
+mcl_fp_mont13Lbmi2: # @mcl_fp_mont13Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1548, %esp # imm = 0x60C
+ calll .L194$pb
+.L194$pb:
+ popl %ebx
+.Ltmp35:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp35-.L194$pb), %ebx
+ movl 1580(%esp), %eax
+ movl -4(%eax), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1488(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 1488(%esp), %esi
+ movl 1492(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %esi, %eax
+ imull %edi, %eax
+ movl 1540(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 1536(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 1532(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 1528(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1524(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1520(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1516(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 1512(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 1508(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1504(%esp), %edi
+ movl 1500(%esp), %ebp
+ movl 1496(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl %eax, (%esp)
+ leal 1432(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 1432(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1436(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1440(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1444(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ adcl 1448(%esp), %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1452(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1456(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1460(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1464(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1468(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 1472(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1476(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1480(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1484(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl 1576(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1376(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ andl $1, %esi
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 1376(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1380(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1384(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1388(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1392(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1396(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1400(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 1404(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1408(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1412(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1416(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 1420(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1424(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 1428(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1320(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ andl $1, %esi
+ movl %esi, %eax
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 1320(%esp), %ecx
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1324(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1328(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1332(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1336(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 1340(%esp), %esi
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 1344(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 1348(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1352(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1356(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1360(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 1364(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 1368(%esp), %ebp
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 1372(%esp), %edi
+ adcl $0, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1264(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 84(%esp), %ecx # 4-byte Reload
+ addl 1264(%esp), %ecx
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1268(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1272(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1276(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 1280(%esp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1284(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1288(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1292(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1296(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 1300(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1304(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1308(%esp), %ebp
+ adcl 1312(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1316(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1208(%esp), %ecx
+ movl 1580(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv416x32
+ movl 84(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1208(%esp), %edi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1212(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1216(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1220(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 1224(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 1228(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1232(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1236(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1240(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 1244(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 1248(%esp), %edi
+ adcl 1252(%esp), %ebp
+ movl %ebp, %esi
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1256(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 1260(%esp), %ebp
+ adcl $0, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1152(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 1152(%esp), %ecx
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1156(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1168(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1180(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1184(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 1188(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ adcl 1192(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1196(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1200(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1204(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1096(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ andl $1, %ebp
+ movl %ebp, %eax
+ addl 1096(%esp), %esi
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1100(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1104(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 1108(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 1112(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1116(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1120(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 1124(%esp), %ebp
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 1128(%esp), %edi
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 1132(%esp), %esi
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1136(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1140(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1144(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1148(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1040(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 40(%esp), %ecx # 4-byte Reload
+ addl 1040(%esp), %ecx
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1044(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1064(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl 1068(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ adcl 1072(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1076(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1080(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 1084(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1088(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1092(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 984(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ andl $1, %edi
+ addl 984(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ebp # 4-byte Reload
+ adcl 996(%esp), %ebp
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1000(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1004(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1028(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl 1576(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 928(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 928(%esp), %ecx
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 932(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 936(%esp), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 940(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 944(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 948(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 952(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 980(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 872(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ andl $1, %edi
+ addl 872(%esp), %ebp
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 876(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 884(%esp), %ebp
+ adcl 888(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 904(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 816(%esp), %ecx
+ movl 1572(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv416x32
+ movl 32(%esp), %ecx # 4-byte Reload
+ addl 816(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 824(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 844(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 848(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 856(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 760(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ andl $1, %esi
+ movl %esi, %eax
+ movl 32(%esp), %ecx # 4-byte Reload
+ addl 760(%esp), %ecx
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 764(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 768(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 772(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 776(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 780(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 784(%esp), %esi
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 788(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 792(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 796(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl 800(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 804(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 808(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 812(%esp), %edi
+ adcl $0, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 704(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 36(%esp), %eax # 4-byte Reload
+ addl 704(%esp), %eax
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 708(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 712(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 716(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 720(%esp), %ebp
+ adcl 724(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 728(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 732(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 736(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 740(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 744(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 748(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 752(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 756(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %eax, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 648(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ movl %edi, %eax
+ andl $1, %eax
+ addl 648(%esp), %esi
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 652(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 656(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 660(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 664(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 668(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 672(%esp), %edi
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 676(%esp), %esi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 680(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 684(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 688(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 692(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 696(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 700(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 592(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 592(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 600(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 612(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ adcl 616(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 620(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 536(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ movl 44(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 536(%esp), %esi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 544(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 560(%esp), %esi
+ adcl 564(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 572(%esp), %edi
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 576(%esp), %ebp
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 480(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 480(%esp), %ecx
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 500(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 512(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 516(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ movl %edi, %ecx
+ andl $1, %ecx
+ addl 424(%esp), %esi
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 428(%esp), %ebp
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 432(%esp), %edi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 444(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 368(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ addl 368(%esp), %ebp
+ adcl 372(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 376(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 384(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 392(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ movl 52(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 312(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 320(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 328(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 336(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 340(%esp), %edi
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 344(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 256(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 68(%esp), %ecx # 4-byte Reload
+ addl 256(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 268(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 280(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 284(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 288(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 200(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 200(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 208(%esp), %ebp
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 212(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 232(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %edi # 4-byte Reload
+ adcl 236(%esp), %edi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 144(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 144(%esp), %ecx
+ adcl 148(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ adcl 152(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 156(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 176(%esp), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl 28(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 88(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ andl $1, %edi
+ addl 88(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 92(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 96(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 100(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ adcl 104(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 108(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ebx # 4-byte Reload
+ adcl 112(%esp), %ebx
+ movl %ebx, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %ebx # 4-byte Reload
+ adcl 116(%esp), %ebx
+ movl %ebx, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %ebx # 4-byte Reload
+ adcl 120(%esp), %ebx
+ movl %ebx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ebx # 4-byte Reload
+ adcl 124(%esp), %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %ebx # 4-byte Reload
+ adcl 128(%esp), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %ebx # 4-byte Reload
+ adcl 132(%esp), %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebx # 4-byte Reload
+ adcl 136(%esp), %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %ebx # 4-byte Reload
+ adcl 140(%esp), %ebx
+ movl %ebx, 68(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl 1580(%esp), %ebx
+ subl (%ebx), %eax
+ sbbl 4(%ebx), %ecx
+ sbbl 8(%ebx), %ebp
+ sbbl 12(%ebx), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ sbbl 16(%ebx), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ sbbl 20(%ebx), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ sbbl 24(%ebx), %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 32(%esp), %edx # 4-byte Reload
+ sbbl 28(%ebx), %edx
+ movl 36(%esp), %esi # 4-byte Reload
+ sbbl 32(%ebx), %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ sbbl 36(%ebx), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ sbbl 40(%ebx), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ sbbl 44(%ebx), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ sbbl 48(%ebx), %esi
+ movl %esi, %ebx
+ sbbl $0, %edi
+ andl $1, %edi
+ jne .LBB194_2
+# BB#1:
+ movl %edx, 32(%esp) # 4-byte Spill
+.LBB194_2:
+ movl %edi, %edx
+ testb %dl, %dl
+ movl 80(%esp), %edx # 4-byte Reload
+ jne .LBB194_4
+# BB#3:
+ movl %eax, %edx
+.LBB194_4:
+ movl 1568(%esp), %eax
+ movl %edx, (%eax)
+ movl 64(%esp), %esi # 4-byte Reload
+ jne .LBB194_6
+# BB#5:
+ movl %ecx, %esi
+.LBB194_6:
+ movl %esi, 4(%eax)
+ jne .LBB194_8
+# BB#7:
+ movl %ebp, 76(%esp) # 4-byte Spill
+.LBB194_8:
+ movl 76(%esp), %ecx # 4-byte Reload
+ movl %ecx, 8(%eax)
+ movl 60(%esp), %ebp # 4-byte Reload
+ jne .LBB194_10
+# BB#9:
+ movl 4(%esp), %ecx # 4-byte Reload
+ movl %ecx, 84(%esp) # 4-byte Spill
+.LBB194_10:
+ movl 84(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%eax)
+ jne .LBB194_12
+# BB#11:
+ movl 8(%esp), %ebp # 4-byte Reload
+.LBB194_12:
+ movl %ebp, 16(%eax)
+ movl 48(%esp), %ecx # 4-byte Reload
+ jne .LBB194_14
+# BB#13:
+ movl 12(%esp), %ecx # 4-byte Reload
+.LBB194_14:
+ movl %ecx, 20(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ jne .LBB194_16
+# BB#15:
+ movl 16(%esp), %ecx # 4-byte Reload
+.LBB194_16:
+ movl %ecx, 24(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ jne .LBB194_18
+# BB#17:
+ movl 20(%esp), %ecx # 4-byte Reload
+.LBB194_18:
+ movl %ecx, 32(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ jne .LBB194_20
+# BB#19:
+ movl 24(%esp), %ecx # 4-byte Reload
+.LBB194_20:
+ movl %ecx, 36(%eax)
+ movl 56(%esp), %ecx # 4-byte Reload
+ jne .LBB194_22
+# BB#21:
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB194_22:
+ movl %ecx, 40(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ jne .LBB194_24
+# BB#23:
+ movl 72(%esp), %ecx # 4-byte Reload
+.LBB194_24:
+ movl %ecx, 44(%eax)
+ movl 68(%esp), %ecx # 4-byte Reload
+ jne .LBB194_26
+# BB#25:
+ movl %ebx, %ecx
+.LBB194_26:
+ movl %ecx, 48(%eax)
+ addl $1548, %esp # imm = 0x60C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end194:
+ .size mcl_fp_mont13Lbmi2, .Lfunc_end194-mcl_fp_mont13Lbmi2
+
+ .globl mcl_fp_montNF13Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF13Lbmi2,@function
+mcl_fp_montNF13Lbmi2: # @mcl_fp_montNF13Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1548, %esp # imm = 0x60C
+ calll .L195$pb
+.L195$pb:
+ popl %ebx
+.Ltmp36:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp36-.L195$pb), %ebx
+ movl 1580(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1488(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 1488(%esp), %edi
+ movl 1492(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull %esi, %eax
+ movl 1540(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 1536(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 1532(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 1528(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1524(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1520(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1516(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1512(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 1508(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1504(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 1500(%esp), %esi
+ movl 1496(%esp), %ebp
+ movl %eax, (%esp)
+ leal 1432(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 1432(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1436(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 1440(%esp), %ebp
+ adcl 1444(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1448(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1452(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1456(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1460(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1464(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1468(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 1472(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1476(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1480(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 1484(%esp), %edi
+ movl 1576(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1376(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 1428(%esp), %ecx
+ movl 80(%esp), %edx # 4-byte Reload
+ addl 1376(%esp), %edx
+ adcl 1380(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1384(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1388(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1392(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1396(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1400(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1404(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1408(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1412(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1416(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1420(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1424(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1320(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 1320(%esp), %esi
+ adcl 1324(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1328(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1332(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1336(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 1340(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1344(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1348(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1352(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1356(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 1360(%esp), %edi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1364(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1368(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1372(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1264(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 1316(%esp), %eax
+ addl 1264(%esp), %ebp
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1268(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1272(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1276(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 1280(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 1284(%esp), %esi
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1288(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1292(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1296(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 1300(%esp), %edi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1304(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1308(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1312(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1208(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 1208(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1212(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1216(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1220(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1224(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 1228(%esp), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1232(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1236(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1240(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1244(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 1248(%esp), %esi
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 1252(%esp), %edi
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 1256(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1260(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1152(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 1204(%esp), %eax
+ movl 64(%esp), %edx # 4-byte Reload
+ addl 1152(%esp), %edx
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1156(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1160(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 1164(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1168(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1172(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1176(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1180(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1184(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 1188(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ adcl 1192(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ adcl 1196(%esp), %ebp
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1200(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1096(%esp), %ecx
+ movl 1580(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv416x32
+ addl 1096(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1100(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1104(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1108(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1112(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 1116(%esp), %esi
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 1120(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1124(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1128(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1140(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1144(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 1148(%esp), %ebp
+ movl 1576(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1040(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 1092(%esp), %eax
+ movl 40(%esp), %edx # 4-byte Reload
+ addl 1040(%esp), %edx
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1044(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 1048(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1052(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 1056(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 1060(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1064(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1068(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1072(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1076(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1080(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1084(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 1088(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl %eax, %esi
+ adcl $0, %esi
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 984(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 984(%esp), %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 996(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1000(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1004(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 1008(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1028(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1036(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 928(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 980(%esp), %eax
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 928(%esp), %ecx
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 932(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl 936(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 940(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 944(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 948(%esp), %ebp
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 952(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 956(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 960(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 964(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 968(%esp), %esi
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 972(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 976(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 872(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 872(%esp), %edi
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 876(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 892(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 912(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 816(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 868(%esp), %edx
+ addl 816(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 832(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 836(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 856(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 860(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 760(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 760(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 780(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 784(%esp), %esi
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 788(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 804(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 704(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 756(%esp), %eax
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 704(%esp), %ecx
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 708(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 712(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 716(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 720(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ adcl 724(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ adcl 728(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 732(%esp), %esi
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 736(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 740(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl 744(%esp), %ebp
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 748(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 752(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 648(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 648(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 676(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 688(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 696(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 592(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 644(%esp), %edx
+ movl 52(%esp), %ecx # 4-byte Reload
+ addl 592(%esp), %ecx
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 596(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 624(%esp), %ebp
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 636(%esp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 536(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 536(%esp), %edi
+ adcl 540(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 556(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 564(%esp), %esi
+ adcl 568(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 572(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 480(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 532(%esp), %edx
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 480(%esp), %ecx
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 496(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 504(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 508(%esp), %edi
+ adcl 512(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 528(%esp), %ebp
+ adcl $0, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 424(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edi, %esi
+ adcl 452(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 460(%esp), %edi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 472(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 368(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 420(%esp), %edx
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 368(%esp), %ecx
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 372(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 392(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 400(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 404(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 312(%esp), %esi
+ adcl 316(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 320(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 332(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 348(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 256(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 308(%esp), %edx
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 256(%esp), %ecx
+ adcl 260(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 272(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 288(%esp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 200(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 200(%esp), %esi
+ adcl 204(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 212(%esp), %ebp
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 216(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 228(%esp), %edi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 144(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 196(%esp), %edx
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 144(%esp), %ecx
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 152(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ adcl 156(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 164(%esp), %ebp
+ adcl 168(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 88(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 88(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 92(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 96(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 100(%esp), %edi
+ movl 64(%esp), %ebx # 4-byte Reload
+ adcl 104(%esp), %ebx
+ movl %ebx, 64(%esp) # 4-byte Spill
+ adcl 108(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 112(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 116(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 120(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 124(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 128(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 132(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 136(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 140(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 1580(%esp), %eax
+ subl (%eax), %edx
+ movl %ecx, %ebp
+ sbbl 4(%eax), %ebp
+ movl %edi, %ecx
+ sbbl 8(%eax), %ecx
+ sbbl 12(%eax), %ebx
+ sbbl 16(%eax), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ sbbl 20(%eax), %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ sbbl 24(%eax), %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ sbbl 28(%eax), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ sbbl 32(%eax), %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ sbbl 36(%eax), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ sbbl 40(%eax), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ sbbl 44(%eax), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ sbbl 48(%eax), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl %esi, %eax
+ sarl $31, %eax
+ testl %eax, %eax
+ movl 84(%esp), %eax # 4-byte Reload
+ js .LBB195_2
+# BB#1:
+ movl %edx, %eax
+.LBB195_2:
+ movl 1568(%esp), %edx
+ movl %eax, (%edx)
+ movl 80(%esp), %esi # 4-byte Reload
+ js .LBB195_4
+# BB#3:
+ movl %ebp, %esi
+.LBB195_4:
+ movl %esi, 4(%edx)
+ movl 64(%esp), %eax # 4-byte Reload
+ js .LBB195_6
+# BB#5:
+ movl %ecx, %edi
+.LBB195_6:
+ movl %edi, 8(%edx)
+ js .LBB195_8
+# BB#7:
+ movl %ebx, %eax
+.LBB195_8:
+ movl %eax, 12(%edx)
+ movl 40(%esp), %eax # 4-byte Reload
+ js .LBB195_10
+# BB#9:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB195_10:
+ movl %eax, 16(%edx)
+ movl 48(%esp), %eax # 4-byte Reload
+ js .LBB195_12
+# BB#11:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB195_12:
+ movl %eax, 20(%edx)
+ movl 36(%esp), %eax # 4-byte Reload
+ js .LBB195_14
+# BB#13:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB195_14:
+ movl %eax, 24(%edx)
+ movl 44(%esp), %eax # 4-byte Reload
+ js .LBB195_16
+# BB#15:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB195_16:
+ movl %eax, 28(%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB195_18
+# BB#17:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB195_18:
+ movl %eax, 32(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB195_20
+# BB#19:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB195_20:
+ movl %eax, 36(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB195_22
+# BB#21:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB195_22:
+ movl %eax, 40(%edx)
+ movl 72(%esp), %eax # 4-byte Reload
+ js .LBB195_24
+# BB#23:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB195_24:
+ movl %eax, 44(%edx)
+ movl 76(%esp), %eax # 4-byte Reload
+ js .LBB195_26
+# BB#25:
+ movl 68(%esp), %eax # 4-byte Reload
+.LBB195_26:
+ movl %eax, 48(%edx)
+ addl $1548, %esp # imm = 0x60C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end195:
+ .size mcl_fp_montNF13Lbmi2, .Lfunc_end195-mcl_fp_montNF13Lbmi2
+
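+# mcl_fp_montRed13Lbmi2(r, xy, p): appears to perform Montgomery reduction of the
+# double-width input xy modulo a 13-limb (416-bit) prime p, folding in the inverse
+# word stored at -4(p) once per limb via .LmulPv416x32 and finally subtracting p
+# conditionally. (Descriptive comment inferred from the generated code below.)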
+ .globl mcl_fp_montRed13Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed13Lbmi2,@function
+mcl_fp_montRed13Lbmi2: # @mcl_fp_montRed13Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $892, %esp # imm = 0x37C
+ calll .L196$pb
+.L196$pb:
+ popl %eax
+.Ltmp37:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp37-.L196$pb), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 920(%esp), %edx
+ movl -4(%edx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 916(%esp), %ecx
+ movl (%ecx), %ebx
+ movl %ebx, 76(%esp) # 4-byte Spill
+ movl 4(%ecx), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ imull %eax, %ebx
+ movl 100(%ecx), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 96(%ecx), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%ecx), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 88(%ecx), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 84(%ecx), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 80(%ecx), %esi
+ movl %esi, 136(%esp) # 4-byte Spill
+ movl 76(%ecx), %esi
+ movl %esi, 144(%esp) # 4-byte Spill
+ movl 72(%ecx), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 68(%ecx), %esi
+ movl %esi, 128(%esp) # 4-byte Spill
+ movl 64(%ecx), %esi
+ movl %esi, 148(%esp) # 4-byte Spill
+ movl 60(%ecx), %esi
+ movl %esi, 152(%esp) # 4-byte Spill
+ movl 56(%ecx), %esi
+ movl %esi, 140(%esp) # 4-byte Spill
+ movl 52(%ecx), %esi
+ movl %esi, 156(%esp) # 4-byte Spill
+ movl 48(%ecx), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 44(%ecx), %ebp
+ movl %ebp, 124(%esp) # 4-byte Spill
+ movl 40(%ecx), %ebp
+ movl %ebp, 108(%esp) # 4-byte Spill
+ movl 36(%ecx), %ebp
+ movl %ebp, 104(%esp) # 4-byte Spill
+ movl 32(%ecx), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 28(%ecx), %ebp
+ movl 24(%ecx), %edi
+ movl 20(%ecx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 16(%ecx), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 12(%ecx), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 8(%ecx), %esi
+ movl (%edx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 48(%edx), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 44(%edx), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 40(%edx), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 36(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 32(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 28(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 24(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 16(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 12(%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 8(%edx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 4(%edx), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl %ebx, (%esp)
+ leal 832(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 76(%esp), %eax # 4-byte Reload
+ addl 832(%esp), %eax
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 836(%esp), %ecx
+ adcl 840(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 856(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ adcl 860(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 876(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 776(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ andl $1, %esi
+ addl 776(%esp), %edi
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 780(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, %edi
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 720(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 720(%esp), %esi
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 724(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 132(%esp) # 4-byte Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl %esi, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 664(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 664(%esp), %esi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 668(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 696(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 716(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 608(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 608(%esp), %edi
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 612(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ movl 144(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 552(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 552(%esp), %esi
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 556(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 144(%esp) # 4-byte Spill
+ movl 136(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 496(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 496(%esp), %edi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 500(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 152(%esp), %ebp # 4-byte Reload
+ adcl 532(%esp), %ebp
+ movl 148(%esp), %edi # 4-byte Reload
+ adcl 536(%esp), %edi
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 136(%esp) # 4-byte Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 440(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 440(%esp), %esi
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 444(%esp), %ecx
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ adcl 472(%esp), %ebp
+ movl %ebp, 152(%esp) # 4-byte Spill
+ adcl 476(%esp), %edi
+ movl %edi, 148(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 384(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 384(%esp), %esi
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 388(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 156(%esp), %ebp # 4-byte Reload
+ adcl 404(%esp), %ebp
+ movl 140(%esp), %edi # 4-byte Reload
+ adcl 408(%esp), %edi
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 128(%esp), %esi # 4-byte Reload
+ adcl 420(%esp), %esi
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 328(%esp), %ecx
+ movl 920(%esp), %eax
+ movl %eax, %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 328(%esp), %eax
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 332(%esp), %ecx
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 344(%esp), %ebp
+ movl %ebp, 156(%esp) # 4-byte Spill
+ adcl 348(%esp), %edi
+ movl %edi, 140(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ adcl 360(%esp), %esi
+ movl %esi, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl %ecx, %edi
+ movl %edi, %eax
+ movl 72(%esp), %esi # 4-byte Reload
+ imull %esi, %eax
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 272(%esp), %edi
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl 120(%esp), %edi # 4-byte Reload
+ adcl 280(%esp), %edi
+ movl 156(%esp), %ecx # 4-byte Reload
+ adcl 284(%esp), %ecx
+ movl %ecx, 156(%esp) # 4-byte Spill
+ movl 140(%esp), %ecx # 4-byte Reload
+ adcl 288(%esp), %ecx
+ movl %ecx, 140(%esp) # 4-byte Spill
+ movl 152(%esp), %ecx # 4-byte Reload
+ adcl 292(%esp), %ecx
+ movl %ecx, 152(%esp) # 4-byte Spill
+ movl 148(%esp), %ecx # 4-byte Reload
+ adcl 296(%esp), %ecx
+ movl %ecx, 148(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 300(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 304(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %ecx # 4-byte Reload
+ adcl 308(%esp), %ecx
+ movl %ecx, 144(%esp) # 4-byte Spill
+ movl 136(%esp), %ecx # 4-byte Reload
+ adcl 312(%esp), %ecx
+ movl %ecx, 136(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 316(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 320(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 324(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ imull %esi, %eax
+ movl %eax, (%esp)
+ leal 216(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 216(%esp), %ebp
+ movl %edi, %ecx
+ adcl 220(%esp), %ecx
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 140(%esp), %ebp # 4-byte Reload
+ adcl 228(%esp), %ebp
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %edi # 4-byte Reload
+ adcl 244(%esp), %edi
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl %esi, %eax
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 160(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 160(%esp), %esi
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ adcl 168(%esp), %ebp
+ movl %ebp, 140(%esp) # 4-byte Spill
+ movl %ebp, %ebx
+ movl 152(%esp), %ecx # 4-byte Reload
+ adcl 172(%esp), %ecx
+ movl %ecx, 152(%esp) # 4-byte Spill
+ movl 148(%esp), %ebp # 4-byte Reload
+ adcl 176(%esp), %ebp
+ movl %ebp, 148(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl %edi, %eax
+ adcl 184(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ movl 156(%esp), %edi # 4-byte Reload
+ subl 12(%esp), %edi # 4-byte Folded Reload
+ sbbl 4(%esp), %ebx # 4-byte Folded Reload
+ sbbl 8(%esp), %ecx # 4-byte Folded Reload
+ sbbl 16(%esp), %ebp # 4-byte Folded Reload
+ sbbl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 132(%esp), %edx # 4-byte Reload
+ sbbl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 144(%esp), %edx # 4-byte Reload
+ sbbl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 136(%esp), %edx # 4-byte Reload
+ sbbl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 116(%esp), %edx # 4-byte Reload
+ sbbl 36(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 112(%esp), %edx # 4-byte Reload
+ sbbl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ sbbl 44(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ sbbl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 120(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl %esi, %eax
+ sbbl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 124(%esp) # 4-byte Spill
+ sbbl $0, %eax
+ andl $1, %eax
+ jne .LBB196_2
+# BB#1:
+ movl %ebp, 148(%esp) # 4-byte Spill
+.LBB196_2:
+ testb %al, %al
+ movl 156(%esp), %ebp # 4-byte Reload
+ jne .LBB196_4
+# BB#3:
+ movl %edi, %ebp
+.LBB196_4:
+ movl 912(%esp), %edi
+ movl %ebp, (%edi)
+ movl 140(%esp), %ebp # 4-byte Reload
+ jne .LBB196_6
+# BB#5:
+ movl %ebx, %ebp
+.LBB196_6:
+ movl %ebp, 4(%edi)
+ movl 152(%esp), %ebx # 4-byte Reload
+ jne .LBB196_8
+# BB#7:
+ movl %ecx, %ebx
+.LBB196_8:
+ movl %ebx, 8(%edi)
+ movl 148(%esp), %esi # 4-byte Reload
+ movl %esi, 12(%edi)
+ movl 116(%esp), %ebx # 4-byte Reload
+ movl 128(%esp), %esi # 4-byte Reload
+ jne .LBB196_10
+# BB#9:
+ movl 72(%esp), %esi # 4-byte Reload
+.LBB196_10:
+ movl %esi, 16(%edi)
+ movl 112(%esp), %esi # 4-byte Reload
+ movl 132(%esp), %edx # 4-byte Reload
+ jne .LBB196_12
+# BB#11:
+ movl 76(%esp), %edx # 4-byte Reload
+.LBB196_12:
+ movl %edx, 20(%edi)
+ movl 96(%esp), %edx # 4-byte Reload
+ movl 144(%esp), %ecx # 4-byte Reload
+ jne .LBB196_14
+# BB#13:
+ movl 80(%esp), %ecx # 4-byte Reload
+.LBB196_14:
+ movl %ecx, 24(%edi)
+ movl 100(%esp), %ecx # 4-byte Reload
+ movl 136(%esp), %eax # 4-byte Reload
+ jne .LBB196_16
+# BB#15:
+ movl 84(%esp), %eax # 4-byte Reload
+.LBB196_16:
+ movl %eax, 28(%edi)
+ movl 92(%esp), %eax # 4-byte Reload
+ jne .LBB196_18
+# BB#17:
+ movl 88(%esp), %ebx # 4-byte Reload
+.LBB196_18:
+ movl %ebx, 32(%edi)
+ jne .LBB196_20
+# BB#19:
+ movl 104(%esp), %esi # 4-byte Reload
+.LBB196_20:
+ movl %esi, 36(%edi)
+ jne .LBB196_22
+# BB#21:
+ movl 108(%esp), %edx # 4-byte Reload
+.LBB196_22:
+ movl %edx, 40(%edi)
+ jne .LBB196_24
+# BB#23:
+ movl 120(%esp), %ecx # 4-byte Reload
+.LBB196_24:
+ movl %ecx, 44(%edi)
+ jne .LBB196_26
+# BB#25:
+ movl 124(%esp), %eax # 4-byte Reload
+.LBB196_26:
+ movl %eax, 48(%edi)
+ addl $892, %esp # imm = 0x37C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end196:
+ .size mcl_fp_montRed13Lbmi2, .Lfunc_end196-mcl_fp_montRed13Lbmi2
+
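+# mcl_fp_addPre13Lbmi2(z, x, y): plain 13-limb (416-bit) addition z = x + y with no
+# modular reduction; the final carry is returned in %eax. (Inferred from the code.)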
+ .globl mcl_fp_addPre13Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre13Lbmi2,@function
+mcl_fp_addPre13Lbmi2: # @mcl_fp_addPre13Lbmi2
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 20(%esp), %ecx
+ addl (%ecx), %edx
+ adcl 4(%ecx), %esi
+ movl 8(%eax), %edi
+ adcl 8(%ecx), %edi
+ movl 16(%esp), %ebx
+ movl %edx, (%ebx)
+ movl 12(%ecx), %edx
+ movl %esi, 4(%ebx)
+ movl 16(%ecx), %esi
+ adcl 12(%eax), %edx
+ adcl 16(%eax), %esi
+ movl %edi, 8(%ebx)
+ movl 20(%eax), %edi
+ movl %edx, 12(%ebx)
+ movl 20(%ecx), %edx
+ adcl %edi, %edx
+ movl 24(%eax), %edi
+ movl %esi, 16(%ebx)
+ movl 24(%ecx), %esi
+ adcl %edi, %esi
+ movl 28(%eax), %edi
+ movl %edx, 20(%ebx)
+ movl 28(%ecx), %edx
+ adcl %edi, %edx
+ movl 32(%eax), %edi
+ movl %esi, 24(%ebx)
+ movl 32(%ecx), %esi
+ adcl %edi, %esi
+ movl 36(%eax), %edi
+ movl %edx, 28(%ebx)
+ movl 36(%ecx), %edx
+ adcl %edi, %edx
+ movl 40(%eax), %edi
+ movl %esi, 32(%ebx)
+ movl 40(%ecx), %esi
+ adcl %edi, %esi
+ movl 44(%eax), %edi
+ movl %edx, 36(%ebx)
+ movl 44(%ecx), %edx
+ adcl %edi, %edx
+ movl %esi, 40(%ebx)
+ movl %edx, 44(%ebx)
+ movl 48(%eax), %eax
+ movl 48(%ecx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 48(%ebx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end197:
+ .size mcl_fp_addPre13Lbmi2, .Lfunc_end197-mcl_fp_addPre13Lbmi2
+
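+# mcl_fp_subPre13Lbmi2(z, x, y): plain 13-limb subtraction z = x - y with no modular
+# correction; the final borrow is returned in %eax. (Inferred from the code.)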
+ .globl mcl_fp_subPre13Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre13Lbmi2,@function
+mcl_fp_subPre13Lbmi2: # @mcl_fp_subPre13Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %eax, %eax
+ movl 28(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edx), %ebx
+ movl 20(%esp), %ebp
+ movl %esi, (%ebp)
+ movl 12(%ecx), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ebp)
+ movl 16(%ecx), %edi
+ sbbl 16(%edx), %edi
+ movl %ebx, 8(%ebp)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%ebp)
+ movl 20(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %edi, 16(%ebp)
+ movl 24(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 28(%edx), %ebx
+ movl %esi, 20(%ebp)
+ movl 28(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 32(%edx), %ebx
+ movl %edi, 24(%ebp)
+ movl 32(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 36(%edx), %ebx
+ movl %esi, 28(%ebp)
+ movl 36(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 40(%edx), %ebx
+ movl %edi, 32(%ebp)
+ movl 40(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 44(%edx), %ebx
+ movl %esi, 36(%ebp)
+ movl 44(%ecx), %esi
+ sbbl %ebx, %esi
+ movl %edi, 40(%ebp)
+ movl %esi, 44(%ebp)
+ movl 48(%edx), %edx
+ movl 48(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 48(%ebp)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end198:
+ .size mcl_fp_subPre13Lbmi2, .Lfunc_end198-mcl_fp_subPre13Lbmi2
+
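+# mcl_fp_shr1_13Lbmi2(z, x): logical right shift of a 13-limb value by one bit,
+# implemented as a shrdl chain across adjacent limbs. (Inferred from the code.)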
+ .globl mcl_fp_shr1_13Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_13Lbmi2,@function
+mcl_fp_shr1_13Lbmi2: # @mcl_fp_shr1_13Lbmi2
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl 8(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 8(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 4(%ecx)
+ movl 12(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 8(%ecx)
+ movl 16(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 12(%ecx)
+ movl 20(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 16(%ecx)
+ movl 24(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 20(%ecx)
+ movl 28(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 24(%ecx)
+ movl 32(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 28(%ecx)
+ movl 36(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 32(%ecx)
+ movl 40(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 36(%ecx)
+ movl 44(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 40(%ecx)
+ movl 48(%eax), %eax
+ shrdl $1, %eax, %esi
+ movl %esi, 44(%ecx)
+ shrl %eax
+ movl %eax, 48(%ecx)
+ popl %esi
+ retl
+.Lfunc_end199:
+ .size mcl_fp_shr1_13Lbmi2, .Lfunc_end199-mcl_fp_shr1_13Lbmi2
+
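+# mcl_fp_add13Lbmi2(z, x, y, p): 13-limb addition followed by a conditional
+# subtraction of the modulus p on the no-carry path, i.e. it appears to compute
+# the modular sum z = x + y mod p. (Inferred from the code.)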
+ .globl mcl_fp_add13Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add13Lbmi2,@function
+mcl_fp_add13Lbmi2: # @mcl_fp_add13Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $40, %esp
+ movl 68(%esp), %ebp
+ movl (%ebp), %ecx
+ movl 4(%ebp), %eax
+ movl 64(%esp), %ebx
+ addl (%ebx), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 4(%ebx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 8(%ebp), %eax
+ adcl 8(%ebx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 12(%ebx), %ecx
+ movl 16(%ebx), %eax
+ adcl 12(%ebp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl 16(%ebp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 20(%ebx), %eax
+ adcl 20(%ebp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 24(%ebx), %eax
+ adcl 24(%ebp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 28(%ebx), %eax
+ adcl 28(%ebp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 32(%ebx), %eax
+ adcl 32(%ebp), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 36(%ebx), %ecx
+ adcl 36(%ebp), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 40(%ebx), %edi
+ adcl 40(%ebp), %edi
+ movl 44(%ebx), %edx
+ adcl 44(%ebp), %edx
+ movl 48(%ebx), %esi
+ adcl 48(%ebp), %esi
+ movl 60(%esp), %ebp
+ movl 4(%esp), %ebx # 4-byte Reload
+ movl %ebx, (%ebp)
+ movl 36(%esp), %eax # 4-byte Reload
+ movl %eax, 4(%ebp)
+ movl 32(%esp), %eax # 4-byte Reload
+ movl %eax, 8(%ebp)
+ movl 28(%esp), %eax # 4-byte Reload
+ movl %eax, 12(%ebp)
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 16(%ebp)
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 20(%ebp)
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 24(%ebp)
+ movl 12(%esp), %eax # 4-byte Reload
+ movl %eax, 28(%ebp)
+ movl 8(%esp), %eax # 4-byte Reload
+ movl %eax, 32(%ebp)
+ movl %ecx, 36(%ebp)
+ movl %edi, 40(%ebp)
+ movl %edx, 44(%ebp)
+ movl %esi, 48(%ebp)
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl 72(%esp), %ecx
+ subl (%ecx), %ebx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 36(%esp), %ebx # 4-byte Reload
+ sbbl 4(%ecx), %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %ebx # 4-byte Reload
+ sbbl 8(%ecx), %ebx
+ movl %ebx, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %ebx # 4-byte Reload
+ sbbl 12(%ecx), %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 24(%esp), %ebx # 4-byte Reload
+ sbbl 16(%ecx), %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 20(%esp), %ebx # 4-byte Reload
+ sbbl 20(%ecx), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 16(%esp), %ebx # 4-byte Reload
+ sbbl 24(%ecx), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %ebx # 4-byte Reload
+ sbbl 28(%ecx), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 8(%esp), %ebx # 4-byte Reload
+ sbbl 32(%ecx), %ebx
+ movl %ebx, 8(%esp) # 4-byte Spill
+ movl (%esp), %ebx # 4-byte Reload
+ sbbl 36(%ecx), %ebx
+ sbbl 40(%ecx), %edi
+ sbbl 44(%ecx), %edx
+ sbbl 48(%ecx), %esi
+ sbbl $0, %eax
+ testb $1, %al
+ jne .LBB200_2
+# BB#1: # %nocarry
+ movl 4(%esp), %eax # 4-byte Reload
+ movl %eax, (%ebp)
+ movl 36(%esp), %eax # 4-byte Reload
+ movl %eax, 4(%ebp)
+ movl 32(%esp), %eax # 4-byte Reload
+ movl %eax, 8(%ebp)
+ movl 28(%esp), %eax # 4-byte Reload
+ movl %eax, 12(%ebp)
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 16(%ebp)
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 20(%ebp)
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 24(%ebp)
+ movl 12(%esp), %eax # 4-byte Reload
+ movl %eax, 28(%ebp)
+ movl 8(%esp), %eax # 4-byte Reload
+ movl %eax, 32(%ebp)
+ movl %ebx, 36(%ebp)
+ movl %edi, 40(%ebp)
+ movl %edx, 44(%ebp)
+ movl %esi, 48(%ebp)
+.LBB200_2: # %carry
+ addl $40, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end200:
+ .size mcl_fp_add13Lbmi2, .Lfunc_end200-mcl_fp_add13Lbmi2
+
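+# mcl_fp_addNF13Lbmi2(z, x, y, p): modular-addition variant that subtracts p and
+# selects the unreduced sum when the difference is negative (sign test via sarl),
+# evidently the "NF" path mcl uses for primes with a free top bit. (Inferred.)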
+ .globl mcl_fp_addNF13Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF13Lbmi2,@function
+mcl_fp_addNF13Lbmi2: # @mcl_fp_addNF13Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $100, %esp
+ movl 128(%esp), %esi
+ movl (%esi), %ecx
+ movl 4(%esi), %eax
+ movl 124(%esp), %edx
+ addl (%edx), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 4(%edx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 48(%esi), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 44(%esi), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 40(%esi), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 36(%esi), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 32(%esi), %ebp
+ movl 28(%esi), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ movl 20(%esi), %ebx
+ movl 16(%esi), %edi
+ movl 12(%esi), %ecx
+ movl 8(%esi), %esi
+ adcl 8(%edx), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 12(%edx), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 16(%edx), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ adcl 20(%edx), %ebx
+ movl %ebx, 60(%esp) # 4-byte Spill
+ adcl 24(%edx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 28(%edx), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 32(%edx), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 36(%edx), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 40(%edx), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%edx), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 48(%edx), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 132(%esp), %edx
+ movl 64(%esp), %eax # 4-byte Reload
+ subl (%edx), %eax
+ movl 68(%esp), %ebp # 4-byte Reload
+ sbbl 4(%edx), %ebp
+ movl %ebp, (%esp) # 4-byte Spill
+ sbbl 8(%edx), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ sbbl 12(%edx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 16(%edx), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ sbbl 20(%edx), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ sbbl 24(%edx), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ sbbl 28(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ sbbl 32(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ movl %esi, %ecx
+ movl %esi, %ebp
+ sbbl 36(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ movl %esi, %ecx
+ movl %esi, %edi
+ sbbl 40(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ sbbl 44(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 88(%esp), %ebx # 4-byte Reload
+ sbbl 48(%edx), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ sarl $31, %ebx
+ testl %ebx, %ebx
+ movl 64(%esp), %edx # 4-byte Reload
+ js .LBB201_2
+# BB#1:
+ movl %eax, %edx
+.LBB201_2:
+ movl 120(%esp), %esi
+ movl %edx, (%esi)
+ movl 68(%esp), %edx # 4-byte Reload
+ js .LBB201_4
+# BB#3:
+ movl (%esp), %edx # 4-byte Reload
+.LBB201_4:
+ movl %edx, 4(%esi)
+ movl %edi, %edx
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl 48(%esp), %eax # 4-byte Reload
+ js .LBB201_6
+# BB#5:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB201_6:
+ movl %eax, 8(%esi)
+ movl %ebp, %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB201_8
+# BB#7:
+ movl 8(%esp), %ebx # 4-byte Reload
+.LBB201_8:
+ movl %ebx, 12(%esi)
+ movl 96(%esp), %ebp # 4-byte Reload
+ movl 56(%esp), %ecx # 4-byte Reload
+ js .LBB201_10
+# BB#9:
+ movl 12(%esp), %ecx # 4-byte Reload
+.LBB201_10:
+ movl %ecx, 16(%esi)
+ movl 92(%esp), %ecx # 4-byte Reload
+ js .LBB201_12
+# BB#11:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB201_12:
+ movl %eax, 20(%esi)
+ movl 72(%esp), %ebx # 4-byte Reload
+ js .LBB201_14
+# BB#13:
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+.LBB201_14:
+ movl 76(%esp), %eax # 4-byte Reload
+ movl %eax, 24(%esi)
+ js .LBB201_16
+# BB#15:
+ movl 24(%esp), %ebp # 4-byte Reload
+.LBB201_16:
+ movl %ebp, 28(%esi)
+ js .LBB201_18
+# BB#17:
+ movl 28(%esp), %ebx # 4-byte Reload
+.LBB201_18:
+ movl %ebx, 32(%esi)
+ js .LBB201_20
+# BB#19:
+ movl 32(%esp), %edi # 4-byte Reload
+.LBB201_20:
+ movl %edi, 36(%esi)
+ js .LBB201_22
+# BB#21:
+ movl 36(%esp), %edx # 4-byte Reload
+.LBB201_22:
+ movl %edx, 40(%esi)
+ js .LBB201_24
+# BB#23:
+ movl 40(%esp), %ecx # 4-byte Reload
+.LBB201_24:
+ movl %ecx, 44(%esi)
+ movl 88(%esp), %eax # 4-byte Reload
+ js .LBB201_26
+# BB#25:
+ movl 44(%esp), %eax # 4-byte Reload
+.LBB201_26:
+ movl %eax, 48(%esi)
+ addl $100, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end201:
+ .size mcl_fp_addNF13Lbmi2, .Lfunc_end201-mcl_fp_addNF13Lbmi2
+
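+# mcl_fp_sub13Lbmi2(z, x, y, p): 13-limb subtraction; when a borrow occurs, p is
+# added back, giving the modular difference z = x - y mod p. (Inferred from the code.)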
+ .globl mcl_fp_sub13Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub13Lbmi2,@function
+mcl_fp_sub13Lbmi2: # @mcl_fp_sub13Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $44, %esp
+ movl 68(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+ xorl %ebx, %ebx
+ movl 72(%esp), %edi
+ subl (%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ sbbl 4(%edi), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ sbbl 12(%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ sbbl 16(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ sbbl 20(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ sbbl 24(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 28(%esi), %eax
+ sbbl 28(%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 32(%esi), %edx
+ sbbl 32(%edi), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 36(%esi), %ecx
+ sbbl 36(%edi), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 40(%esi), %eax
+ sbbl 40(%edi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 44(%esi), %ebp
+ sbbl 44(%edi), %ebp
+ movl 48(%esi), %esi
+ sbbl 48(%edi), %esi
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 64(%esp), %ebx
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, (%ebx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 40(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%ebx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 28(%ebx)
+ movl %edx, 32(%ebx)
+ movl %ecx, 36(%ebx)
+ movl %eax, 40(%ebx)
+ movl %ebp, 44(%ebx)
+ movl %esi, 48(%ebx)
+ je .LBB202_2
+# BB#1: # %carry
+ movl %esi, %edi
+ movl 76(%esp), %esi
+ movl 12(%esp), %ecx # 4-byte Reload
+ addl (%esi), %ecx
+ movl %ecx, (%ebx)
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 8(%esi), %ecx
+ movl 12(%esi), %eax
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl 24(%esi), %ecx
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl 28(%esi), %eax
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%ebx)
+ movl 32(%esi), %ecx
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+ movl 36(%esi), %eax
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%ebx)
+ movl 40(%esi), %ecx
+ adcl (%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%ebx)
+ movl %ecx, 40(%ebx)
+ movl 44(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 44(%ebx)
+ movl 48(%esi), %eax
+ adcl %edi, %eax
+ movl %eax, 48(%ebx)
+.LBB202_2: # %nocarry
+ addl $44, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end202:
+ .size mcl_fp_sub13Lbmi2, .Lfunc_end202-mcl_fp_sub13Lbmi2
+
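+# mcl_fp_subNF13Lbmi2(z, x, y, p): branchless modular-subtraction variant that adds
+# back p masked by the sign of the raw difference instead of testing a borrow flag.
+# (Inferred from the generated code below.)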
+ .globl mcl_fp_subNF13Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF13Lbmi2,@function
+mcl_fp_subNF13Lbmi2: # @mcl_fp_subNF13Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $80, %esp
+ movl 104(%esp), %ecx
+ movl (%ecx), %edx
+ movl 4(%ecx), %eax
+ movl 108(%esp), %edi
+ subl (%edi), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ sbbl 4(%edi), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%ecx), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 44(%ecx), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 40(%ecx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 32(%ecx), %ebp
+ movl 28(%ecx), %ebx
+ movl 24(%ecx), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 20(%ecx), %esi
+ movl 16(%ecx), %edx
+ movl 12(%ecx), %eax
+ movl 8(%ecx), %ecx
+ sbbl 8(%edi), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ sbbl 12(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ sbbl 16(%edi), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ sbbl 20(%edi), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ sbbl 24(%edi), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ sbbl 28(%edi), %ebx
+ movl %ebx, 40(%esp) # 4-byte Spill
+ sbbl 32(%edi), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ sbbl 36(%edi), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ sbbl 44(%edi), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ sbbl 48(%edi), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %edx, %esi
+ sarl $31, %esi
+ movl %esi, %ecx
+ shldl $1, %edx, %ecx
+ movl 112(%esp), %edi
+ movl 4(%edi), %eax
+ andl %ecx, %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ andl (%edi), %ecx
+ movl 48(%edi), %eax
+ andl %esi, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 44(%edi), %eax
+ andl %esi, %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 40(%edi), %eax
+ andl %esi, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 36(%edi), %eax
+ andl %esi, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 32(%edi), %eax
+ andl %esi, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 28(%edi), %eax
+ andl %esi, %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 24(%edi), %ebp
+ andl %esi, %ebp
+ movl 20(%edi), %ebx
+ andl %esi, %ebx
+ movl 16(%edi), %edx
+ andl %esi, %edx
+ rorxl $31, %esi, %eax
+ andl 12(%edi), %esi
+ andl 8(%edi), %eax
+ addl 48(%esp), %ecx # 4-byte Folded Reload
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 100(%esp), %edi
+ movl %ecx, (%edi)
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl 56(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%edi)
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %eax, 8(%edi)
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %esi, 12(%edi)
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %edx, 16(%edi)
+ adcl 76(%esp), %ebp # 4-byte Folded Reload
+ movl %ebx, 20(%edi)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %ebp, 24(%edi)
+ movl 4(%esp), %ecx # 4-byte Reload
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%edi)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%edi)
+ movl 12(%esp), %ecx # 4-byte Reload
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%edi)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 40(%edi)
+ movl %eax, 44(%edi)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%edi)
+ addl $80, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end203:
+ .size mcl_fp_subNF13Lbmi2, .Lfunc_end203-mcl_fp_subNF13Lbmi2
+
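+# mcl_fpDbl_add13Lbmi2(z, x, y, p): 26-limb (832-bit) addition of double-width
+# values; the upper 13 limbs are conditionally reduced by p. (Inferred from the code.)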
+ .globl mcl_fpDbl_add13Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add13Lbmi2,@function
+mcl_fpDbl_add13Lbmi2: # @mcl_fpDbl_add13Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $96, %esp
+ movl 124(%esp), %ecx
+ movl 120(%esp), %esi
+ movl 12(%esi), %edi
+ movl 16(%esi), %edx
+ movl 8(%ecx), %ebx
+ movl (%ecx), %ebp
+ addl (%esi), %ebp
+ movl 116(%esp), %eax
+ movl %ebp, (%eax)
+ movl 4(%ecx), %ebp
+ adcl 4(%esi), %ebp
+ adcl 8(%esi), %ebx
+ adcl 12(%ecx), %edi
+ adcl 16(%ecx), %edx
+ movl %ebp, 4(%eax)
+ movl 60(%ecx), %ebp
+ movl %ebx, 8(%eax)
+ movl 20(%ecx), %ebx
+ movl %edi, 12(%eax)
+ movl 20(%esi), %edi
+ adcl %ebx, %edi
+ movl 24(%ecx), %ebx
+ movl %edx, 16(%eax)
+ movl 24(%esi), %edx
+ adcl %ebx, %edx
+ movl 28(%ecx), %ebx
+ movl %edi, 20(%eax)
+ movl 28(%esi), %edi
+ adcl %ebx, %edi
+ movl 32(%ecx), %ebx
+ movl %edx, 24(%eax)
+ movl 32(%esi), %edx
+ adcl %ebx, %edx
+ movl 36(%ecx), %ebx
+ movl %edi, 28(%eax)
+ movl 36(%esi), %edi
+ adcl %ebx, %edi
+ movl 40(%ecx), %ebx
+ movl %edx, 32(%eax)
+ movl 40(%esi), %edx
+ adcl %ebx, %edx
+ movl 44(%ecx), %ebx
+ movl %edi, 36(%eax)
+ movl 44(%esi), %edi
+ adcl %ebx, %edi
+ movl 48(%ecx), %ebx
+ movl %edx, 40(%eax)
+ movl 48(%esi), %edx
+ adcl %ebx, %edx
+ movl 52(%ecx), %ebx
+ movl %edi, 44(%eax)
+ movl 52(%esi), %edi
+ adcl %ebx, %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 56(%ecx), %edi
+ movl %edx, 48(%eax)
+ movl 56(%esi), %eax
+ adcl %edi, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 64(%ecx), %edx
+ movl 64(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 68(%ecx), %edx
+ movl 68(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 72(%ecx), %edx
+ movl 72(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%ecx), %edx
+ movl 76(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%ecx), %edx
+ movl 80(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%ecx), %edx
+ movl 84(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%ecx), %edx
+ movl 88(%esi), %edi
+ adcl %edx, %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 92(%ecx), %edx
+ movl 92(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 96(%ecx), %edx
+ movl 96(%esi), %ebx
+ adcl %edx, %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 100(%ecx), %ecx
+ movl 100(%esi), %esi
+ adcl %ecx, %esi
+ sbbl %edx, %edx
+ andl $1, %edx
+ movl 128(%esp), %ebp
+ movl 76(%esp), %ecx # 4-byte Reload
+ subl (%ebp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ sbbl 4(%ebp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ sbbl 8(%ebp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ sbbl 12(%ebp), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ sbbl 16(%ebp), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ sbbl 20(%ebp), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ sbbl 24(%ebp), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ sbbl 28(%ebp), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ sbbl 32(%ebp), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ sbbl 36(%ebp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ sbbl 40(%ebp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ movl %esi, %ebx
+ sbbl 44(%ebp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ebx, %ecx
+ sbbl 48(%ebp), %ecx
+ sbbl $0, %edx
+ andl $1, %edx
+ jne .LBB204_2
+# BB#1:
+ movl %ecx, %ebx
+.LBB204_2:
+ testb %dl, %dl
+ movl 76(%esp), %ecx # 4-byte Reload
+ movl 72(%esp), %edx # 4-byte Reload
+ movl 68(%esp), %esi # 4-byte Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ movl 60(%esp), %ebp # 4-byte Reload
+ jne .LBB204_4
+# BB#3:
+ movl (%esp), %edx # 4-byte Reload
+ movl 4(%esp), %esi # 4-byte Reload
+ movl 8(%esp), %edi # 4-byte Reload
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+.LBB204_4:
+ movl 116(%esp), %eax
+ movl %ecx, 52(%eax)
+ movl 80(%esp), %ecx # 4-byte Reload
+ movl %ecx, 56(%eax)
+ movl 84(%esp), %ecx # 4-byte Reload
+ movl %ecx, 60(%eax)
+ movl 88(%esp), %ecx # 4-byte Reload
+ movl %ecx, 64(%eax)
+ movl 92(%esp), %ecx # 4-byte Reload
+ movl %ecx, 68(%eax)
+ movl %ebp, 72(%eax)
+ movl %edi, 76(%eax)
+ movl %esi, 80(%eax)
+ movl %edx, 84(%eax)
+ movl 56(%esp), %ecx # 4-byte Reload
+ movl 52(%esp), %edx # 4-byte Reload
+ movl 48(%esp), %esi # 4-byte Reload
+ jne .LBB204_6
+# BB#5:
+ movl 36(%esp), %esi # 4-byte Reload
+.LBB204_6:
+ movl %esi, 88(%eax)
+ jne .LBB204_8
+# BB#7:
+ movl 40(%esp), %edx # 4-byte Reload
+.LBB204_8:
+ movl %edx, 92(%eax)
+ jne .LBB204_10
+# BB#9:
+ movl 44(%esp), %ecx # 4-byte Reload
+.LBB204_10:
+ movl %ecx, 96(%eax)
+ movl %ebx, 100(%eax)
+ addl $96, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end204:
+ .size mcl_fpDbl_add13Lbmi2, .Lfunc_end204-mcl_fpDbl_add13Lbmi2
+
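+# mcl_fpDbl_sub13Lbmi2(z, x, y, p): appears to compute the 26-limb difference
+# x - y, storing the low 13 limbs into z and, when the subtraction borrows,
+# adding the 13-limb modulus p back into the high half.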
+ .globl mcl_fpDbl_sub13Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub13Lbmi2,@function
+mcl_fpDbl_sub13Lbmi2: # @mcl_fpDbl_sub13Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $84, %esp
+ movl 108(%esp), %edi
+ movl (%edi), %eax
+ movl 4(%edi), %edx
+ movl 112(%esp), %ebx
+ subl (%ebx), %eax
+ sbbl 4(%ebx), %edx
+ movl 8(%edi), %esi
+ sbbl 8(%ebx), %esi
+ movl 104(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 12(%edi), %eax
+ sbbl 12(%ebx), %eax
+ movl %edx, 4(%ecx)
+ movl 16(%edi), %edx
+ sbbl 16(%ebx), %edx
+ movl %esi, 8(%ecx)
+ movl 20(%ebx), %esi
+ movl %eax, 12(%ecx)
+ movl 20(%edi), %eax
+ sbbl %esi, %eax
+ movl 24(%ebx), %esi
+ movl %edx, 16(%ecx)
+ movl 24(%edi), %edx
+ sbbl %esi, %edx
+ movl 28(%ebx), %esi
+ movl %eax, 20(%ecx)
+ movl 28(%edi), %eax
+ sbbl %esi, %eax
+ movl 32(%ebx), %esi
+ movl %edx, 24(%ecx)
+ movl 32(%edi), %edx
+ sbbl %esi, %edx
+ movl 36(%ebx), %esi
+ movl %eax, 28(%ecx)
+ movl 36(%edi), %eax
+ sbbl %esi, %eax
+ movl 40(%ebx), %esi
+ movl %edx, 32(%ecx)
+ movl 40(%edi), %edx
+ sbbl %esi, %edx
+ movl 44(%ebx), %esi
+ movl %eax, 36(%ecx)
+ movl 44(%edi), %eax
+ sbbl %esi, %eax
+ movl 48(%ebx), %esi
+ movl %edx, 40(%ecx)
+ movl 48(%edi), %edx
+ sbbl %esi, %edx
+ movl 52(%ebx), %esi
+ movl %eax, 44(%ecx)
+ movl 52(%edi), %eax
+ sbbl %esi, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 56(%ebx), %eax
+ movl %edx, 48(%ecx)
+ movl 56(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 60(%ebx), %eax
+ movl 60(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 64(%ebx), %eax
+ movl 64(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 68(%ebx), %eax
+ movl 68(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 72(%ebx), %eax
+ movl 72(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 76(%ebx), %eax
+ movl 76(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 80(%ebx), %eax
+ movl 80(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 84(%ebx), %eax
+ movl 84(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 88(%ebx), %eax
+ movl 88(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 92(%ebx), %eax
+ movl 92(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 96(%ebx), %eax
+ movl 96(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 100(%ebx), %eax
+ movl 100(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 116(%esp), %edi
+ jne .LBB205_1
+# BB#2:
+ movl $0, 44(%esp) # 4-byte Folded Spill
+ jmp .LBB205_3
+.LBB205_1:
+ movl 48(%edi), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+.LBB205_3:
+ testb %al, %al
+ jne .LBB205_4
+# BB#5:
+ movl $0, 16(%esp) # 4-byte Folded Spill
+ movl $0, %ebx
+ jmp .LBB205_6
+.LBB205_4:
+ movl (%edi), %ebx
+ movl 4(%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+.LBB205_6:
+ jne .LBB205_7
+# BB#8:
+ movl $0, 24(%esp) # 4-byte Folded Spill
+ jmp .LBB205_9
+.LBB205_7:
+ movl 44(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+.LBB205_9:
+ jne .LBB205_10
+# BB#11:
+ movl $0, 20(%esp) # 4-byte Folded Spill
+ jmp .LBB205_12
+.LBB205_10:
+ movl 40(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+.LBB205_12:
+ jne .LBB205_13
+# BB#14:
+ movl $0, 12(%esp) # 4-byte Folded Spill
+ jmp .LBB205_15
+.LBB205_13:
+ movl 36(%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+.LBB205_15:
+ jne .LBB205_16
+# BB#17:
+ movl $0, 8(%esp) # 4-byte Folded Spill
+ jmp .LBB205_18
+.LBB205_16:
+ movl 32(%edi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+.LBB205_18:
+ jne .LBB205_19
+# BB#20:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB205_21
+.LBB205_19:
+ movl 28(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+.LBB205_21:
+ jne .LBB205_22
+# BB#23:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB205_24
+.LBB205_22:
+ movl 24(%edi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+.LBB205_24:
+ jne .LBB205_25
+# BB#26:
+ movl $0, %eax
+ jmp .LBB205_27
+.LBB205_25:
+ movl 20(%edi), %eax
+.LBB205_27:
+ jne .LBB205_28
+# BB#29:
+ movl $0, %edx
+ jmp .LBB205_30
+.LBB205_28:
+ movl 16(%edi), %edx
+.LBB205_30:
+ jne .LBB205_31
+# BB#32:
+ movl $0, %esi
+ jmp .LBB205_33
+.LBB205_31:
+ movl 12(%edi), %esi
+.LBB205_33:
+ jne .LBB205_34
+# BB#35:
+ xorl %edi, %edi
+ jmp .LBB205_36
+.LBB205_34:
+ movl 8(%edi), %edi
+.LBB205_36:
+ addl 36(%esp), %ebx # 4-byte Folded Reload
+ movl 16(%esp), %ebp # 4-byte Reload
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ movl %ebx, 52(%ecx)
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %ebp, 56(%ecx)
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %edi, 60(%ecx)
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %esi, 64(%ecx)
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 68(%ecx)
+ movl (%esp), %edx # 4-byte Reload
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 72(%ecx)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 76(%ecx)
+ movl 8(%esp), %edx # 4-byte Reload
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 80(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 84(%ecx)
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 88(%ecx)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 92(%ecx)
+ movl %eax, 96(%ecx)
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%ecx)
+ addl $84, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end205:
+ .size mcl_fpDbl_sub13Lbmi2, .Lfunc_end205-mcl_fpDbl_sub13Lbmi2
+
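+# .LmulPv448x32: local helper that multiplies the 14-limb (448-bit) operand
+# pointed to by %edx by the 32-bit scalar passed on the stack, using BMI2
+# mulx, and writes the 15-limb product to the buffer in %ecx (address
+# returned in %eax).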
+ .align 16, 0x90
+ .type .LmulPv448x32,@function
+.LmulPv448x32: # @mulPv448x32
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $48, %esp
+ movl %edx, %eax
+ movl 68(%esp), %ebx
+ movl %ebx, %edx
+ mulxl 4(%eax), %edi, %esi
+ movl %ebx, %edx
+ mulxl (%eax), %ebp, %edx
+ movl %ebp, 44(%esp) # 4-byte Spill
+ addl %edi, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl 8(%eax), %edx, %edi
+ adcl %esi, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl 12(%eax), %edx, %esi
+ adcl %edi, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl 16(%eax), %edx, %edi
+ adcl %esi, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl 20(%eax), %edx, %esi
+ adcl %edi, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl 24(%eax), %edx, %edi
+ adcl %esi, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl 28(%eax), %edx, %esi
+ adcl %edi, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl 32(%eax), %edx, %edi
+ adcl %esi, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl 36(%eax), %edx, %esi
+ adcl %edi, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl 40(%eax), %edi, %ebp
+ adcl %esi, %edi
+ movl %ebx, %edx
+ mulxl 44(%eax), %esi, %edx
+ movl %edx, (%esp) # 4-byte Spill
+ adcl %ebp, %esi
+ movl %ebx, %edx
+ mulxl 48(%eax), %edx, %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ adcl (%esp), %edx # 4-byte Folded Reload
+ movl 44(%esp), %ebp # 4-byte Reload
+ movl %ebp, (%ecx)
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%ecx)
+ movl 36(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%ecx)
+ movl 32(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%ecx)
+ movl 28(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%ecx)
+ movl 24(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%ecx)
+ movl 20(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%ecx)
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%ecx)
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl %ebp, 32(%ecx)
+ movl 8(%esp), %ebp # 4-byte Reload
+ movl %ebp, 36(%ecx)
+ movl %edi, 40(%ecx)
+ movl %esi, 44(%ecx)
+ movl %edx, 48(%ecx)
+ movl %ebx, %edx
+ mulxl 52(%eax), %eax, %edx
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%ecx)
+ adcl $0, %edx
+ movl %edx, 56(%ecx)
+ movl %ecx, %eax
+ addl $48, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end206:
+ .size .LmulPv448x32, .Lfunc_end206-.LmulPv448x32
+
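+# mcl_fp_mulUnitPre14Lbmi2(z, x, y): appears to multiply the 14-limb operand x
+# by the 32-bit unit y via .LmulPv448x32 and copy the 15-limb product to z.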
+ .globl mcl_fp_mulUnitPre14Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre14Lbmi2,@function
+mcl_fp_mulUnitPre14Lbmi2: # @mcl_fp_mulUnitPre14Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $108, %esp
+ calll .L207$pb
+.L207$pb:
+ popl %ebx
+.Ltmp38:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp38-.L207$pb), %ebx
+ movl 136(%esp), %eax
+ movl %eax, (%esp)
+ leal 48(%esp), %ecx
+ movl 132(%esp), %edx
+ calll .LmulPv448x32
+ movl 104(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 88(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 80(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 76(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 72(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp
+ movl 64(%esp), %ebx
+ movl 60(%esp), %edi
+ movl 56(%esp), %esi
+ movl 48(%esp), %edx
+ movl 52(%esp), %ecx
+ movl 128(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %edi, 12(%eax)
+ movl %ebx, 16(%eax)
+ movl %ebp, 20(%eax)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl %ecx, 56(%eax)
+ addl $108, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end207:
+ .size mcl_fp_mulUnitPre14Lbmi2, .Lfunc_end207-mcl_fp_mulUnitPre14Lbmi2
+
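+# mcl_fpDbl_mulPre14Lbmi2(z, x, y): appears to form the full 28-limb product of
+# two 14-limb operands with one level of Karatsuba: mcl_fpDbl_mulPre7Lbmi2 is
+# called on the low halves, the high halves, and the half-sums, and the middle
+# term is then recombined with the required additions and subtractions.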
+ .globl mcl_fpDbl_mulPre14Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre14Lbmi2,@function
+mcl_fpDbl_mulPre14Lbmi2: # @mcl_fpDbl_mulPre14Lbmi2
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $268, %esp # imm = 0x10C
+ calll .L208$pb
+.L208$pb:
+ popl %ebx
+.Ltmp39:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp39-.L208$pb), %ebx
+ movl %ebx, -192(%ebp) # 4-byte Spill
+ movl 16(%ebp), %esi
+ movl %esi, 8(%esp)
+ movl 12(%ebp), %edi
+ movl %edi, 4(%esp)
+ movl 8(%ebp), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre7Lbmi2@PLT
+ leal 28(%esi), %eax
+ movl %eax, 8(%esp)
+ leal 28(%edi), %eax
+ movl %eax, 4(%esp)
+ movl 8(%ebp), %eax
+ leal 56(%eax), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre7Lbmi2@PLT
+ movl 44(%edi), %eax
+ movl %eax, -156(%ebp) # 4-byte Spill
+ movl 40(%edi), %eax
+ movl 36(%edi), %edx
+ movl (%edi), %edi
+ movl 12(%ebp), %ecx
+ movl 4(%ecx), %ecx
+ movl 12(%ebp), %ebx
+ addl 28(%ebx), %edi
+ movl %edi, -180(%ebp) # 4-byte Spill
+ movl 12(%ebp), %edi
+ adcl 32(%edi), %ecx
+ movl %ecx, -200(%ebp) # 4-byte Spill
+ adcl 8(%edi), %edx
+ movl %edx, -212(%ebp) # 4-byte Spill
+ adcl 12(%edi), %eax
+ movl %eax, -196(%ebp) # 4-byte Spill
+ movl -156(%ebp), %eax # 4-byte Reload
+ adcl 16(%edi), %eax
+ movl %eax, -156(%ebp) # 4-byte Spill
+ movl %eax, %ebx
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -128(%ebp) # 4-byte Spill
+ movl (%esi), %eax
+ addl 28(%esi), %eax
+ movl %eax, -216(%ebp) # 4-byte Spill
+ movl 4(%esi), %eax
+ adcl 32(%esi), %eax
+ movl %eax, -164(%ebp) # 4-byte Spill
+ movl 36(%esi), %eax
+ adcl 8(%esi), %eax
+ movl %eax, -168(%ebp) # 4-byte Spill
+ movl 40(%esi), %eax
+ adcl 12(%esi), %eax
+ movl %eax, -172(%ebp) # 4-byte Spill
+ movl 44(%esi), %eax
+ adcl 16(%esi), %eax
+ movl %eax, -176(%ebp) # 4-byte Spill
+ movl 48(%esi), %ecx
+ adcl 20(%esi), %ecx
+ movl 52(%esi), %eax
+ adcl 24(%esi), %eax
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %esi
+ popl %eax
+ movl %esi, -220(%ebp) # 4-byte Spill
+ movl %ebx, %esi
+ movl %edx, -184(%ebp) # 4-byte Spill
+ movl -180(%ebp), %edx # 4-byte Reload
+ movl %edx, -188(%ebp) # 4-byte Spill
+ jb .LBB208_2
+# BB#1:
+ xorl %esi, %esi
+ movl $0, -184(%ebp) # 4-byte Folded Spill
+ movl $0, -188(%ebp) # 4-byte Folded Spill
+.LBB208_2:
+ movl %esi, -204(%ebp) # 4-byte Spill
+ movl 52(%edi), %esi
+ movl 48(%edi), %ebx
+ movl -128(%ebp), %edx # 4-byte Reload
+ pushl %eax
+ movl %edx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ adcl 20(%edi), %ebx
+ movl %ebx, -160(%ebp) # 4-byte Spill
+ adcl 24(%edi), %esi
+ movl %esi, -208(%ebp) # 4-byte Spill
+ movl %eax, -148(%ebp) # 4-byte Spill
+ movl %ecx, -152(%ebp) # 4-byte Spill
+ movl -176(%ebp), %esi # 4-byte Reload
+ movl %esi, -128(%ebp) # 4-byte Spill
+ movl -172(%ebp), %esi # 4-byte Reload
+ movl %esi, -132(%ebp) # 4-byte Spill
+ movl -168(%ebp), %esi # 4-byte Reload
+ movl %esi, -136(%ebp) # 4-byte Spill
+ movl -164(%ebp), %esi # 4-byte Reload
+ movl %esi, -140(%ebp) # 4-byte Spill
+ movl -216(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -144(%ebp) # 4-byte Spill
+ jb .LBB208_4
+# BB#3:
+ movl $0, -148(%ebp) # 4-byte Folded Spill
+ movl $0, -152(%ebp) # 4-byte Folded Spill
+ movl $0, -128(%ebp) # 4-byte Folded Spill
+ movl $0, -132(%ebp) # 4-byte Folded Spill
+ movl $0, -136(%ebp) # 4-byte Folded Spill
+ movl $0, -140(%ebp) # 4-byte Folded Spill
+ movl $0, -144(%ebp) # 4-byte Folded Spill
+.LBB208_4:
+ movl -180(%ebp), %edx # 4-byte Reload
+ movl %edx, -96(%ebp)
+ movl -200(%ebp), %esi # 4-byte Reload
+ movl %esi, -92(%ebp)
+ movl -212(%ebp), %edx # 4-byte Reload
+ movl %edx, -88(%ebp)
+ movl -196(%ebp), %edi # 4-byte Reload
+ movl %edi, -84(%ebp)
+ movl -156(%ebp), %edx # 4-byte Reload
+ movl %edx, -80(%ebp)
+ movl %ebx, -124(%ebp)
+ movl -164(%ebp), %edx # 4-byte Reload
+ movl %edx, -120(%ebp)
+ movl -168(%ebp), %edx # 4-byte Reload
+ movl %edx, -116(%ebp)
+ movl -172(%ebp), %edx # 4-byte Reload
+ movl %edx, -112(%ebp)
+ movl -176(%ebp), %edx # 4-byte Reload
+ movl %edx, -108(%ebp)
+ movl %ecx, -104(%ebp)
+ movl %edi, %ebx
+ movl %esi, %edi
+ movl %eax, -100(%ebp)
+ sbbl %edx, %edx
+ movl -160(%ebp), %eax # 4-byte Reload
+ movl %eax, -76(%ebp)
+ movl -208(%ebp), %esi # 4-byte Reload
+ movl %esi, -72(%ebp)
+ movl -220(%ebp), %ecx # 4-byte Reload
+ pushl %eax
+ movl %ecx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB208_6
+# BB#5:
+ movl $0, %esi
+ movl $0, %eax
+ movl $0, %ebx
+ movl $0, %edi
+.LBB208_6:
+ movl %eax, -160(%ebp) # 4-byte Spill
+ sbbl %eax, %eax
+ leal -124(%ebp), %ecx
+ movl %ecx, 8(%esp)
+ leal -96(%ebp), %ecx
+ movl %ecx, 4(%esp)
+ leal -68(%ebp), %ecx
+ movl %ecx, (%esp)
+ andl %eax, %edx
+ movl -188(%ebp), %eax # 4-byte Reload
+ addl %eax, -144(%ebp) # 4-byte Folded Spill
+ adcl %edi, -140(%ebp) # 4-byte Folded Spill
+ movl -184(%ebp), %eax # 4-byte Reload
+ adcl %eax, -136(%ebp) # 4-byte Folded Spill
+ adcl %ebx, -132(%ebp) # 4-byte Folded Spill
+ movl -204(%ebp), %eax # 4-byte Reload
+ adcl %eax, -128(%ebp) # 4-byte Folded Spill
+ movl -152(%ebp), %edi # 4-byte Reload
+ adcl -160(%ebp), %edi # 4-byte Folded Reload
+ adcl %esi, -148(%ebp) # 4-byte Folded Spill
+ sbbl %esi, %esi
+ andl $1, %esi
+ andl $1, %edx
+ movl %edx, -156(%ebp) # 4-byte Spill
+ movl -192(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre7Lbmi2@PLT
+ movl -144(%ebp), %eax # 4-byte Reload
+ addl -40(%ebp), %eax
+ movl %eax, -144(%ebp) # 4-byte Spill
+ movl -140(%ebp), %eax # 4-byte Reload
+ adcl -36(%ebp), %eax
+ movl %eax, -140(%ebp) # 4-byte Spill
+ movl -136(%ebp), %eax # 4-byte Reload
+ adcl -32(%ebp), %eax
+ movl %eax, -136(%ebp) # 4-byte Spill
+ movl -132(%ebp), %eax # 4-byte Reload
+ adcl -28(%ebp), %eax
+ movl %eax, -132(%ebp) # 4-byte Spill
+ movl -128(%ebp), %eax # 4-byte Reload
+ adcl -24(%ebp), %eax
+ movl %eax, -128(%ebp) # 4-byte Spill
+ adcl -20(%ebp), %edi
+ movl -148(%ebp), %eax # 4-byte Reload
+ adcl -16(%ebp), %eax
+ movl %eax, -148(%ebp) # 4-byte Spill
+ adcl %esi, -156(%ebp) # 4-byte Folded Spill
+ movl -68(%ebp), %eax
+ movl 8(%ebp), %esi
+ subl (%esi), %eax
+ movl %eax, -172(%ebp) # 4-byte Spill
+ movl -64(%ebp), %ecx
+ sbbl 4(%esi), %ecx
+ movl -60(%ebp), %eax
+ sbbl 8(%esi), %eax
+ movl %eax, -160(%ebp) # 4-byte Spill
+ movl -56(%ebp), %edx
+ sbbl 12(%esi), %edx
+ movl -52(%ebp), %ebx
+ sbbl 16(%esi), %ebx
+ movl -48(%ebp), %eax
+ sbbl 20(%esi), %eax
+ movl %eax, -164(%ebp) # 4-byte Spill
+ movl -44(%ebp), %eax
+ sbbl 24(%esi), %eax
+ movl %eax, -168(%ebp) # 4-byte Spill
+ movl 28(%esi), %eax
+ movl %eax, -176(%ebp) # 4-byte Spill
+ sbbl %eax, -144(%ebp) # 4-byte Folded Spill
+ movl 32(%esi), %eax
+ movl %eax, -180(%ebp) # 4-byte Spill
+ sbbl %eax, -140(%ebp) # 4-byte Folded Spill
+ movl 36(%esi), %eax
+ movl %eax, -184(%ebp) # 4-byte Spill
+ sbbl %eax, -136(%ebp) # 4-byte Folded Spill
+ movl 40(%esi), %eax
+ movl %eax, -188(%ebp) # 4-byte Spill
+ sbbl %eax, -132(%ebp) # 4-byte Folded Spill
+ movl 44(%esi), %eax
+ movl %eax, -192(%ebp) # 4-byte Spill
+ sbbl %eax, -128(%ebp) # 4-byte Folded Spill
+ movl 48(%esi), %eax
+ movl %eax, -196(%ebp) # 4-byte Spill
+ sbbl %eax, %edi
+ movl %edi, -152(%ebp) # 4-byte Spill
+ movl 52(%esi), %eax
+ movl %eax, -200(%ebp) # 4-byte Spill
+ movl -148(%ebp), %edi # 4-byte Reload
+ sbbl %eax, %edi
+ sbbl $0, -156(%ebp) # 4-byte Folded Spill
+ movl 56(%esi), %eax
+ movl %eax, -228(%ebp) # 4-byte Spill
+ subl %eax, -172(%ebp) # 4-byte Folded Spill
+ movl 60(%esi), %eax
+ movl %eax, -232(%ebp) # 4-byte Spill
+ sbbl %eax, %ecx
+ movl 64(%esi), %eax
+ movl %eax, -236(%ebp) # 4-byte Spill
+ sbbl %eax, -160(%ebp) # 4-byte Folded Spill
+ movl 68(%esi), %eax
+ movl %eax, -240(%ebp) # 4-byte Spill
+ sbbl %eax, %edx
+ movl 72(%esi), %eax
+ movl %eax, -244(%ebp) # 4-byte Spill
+ sbbl %eax, %ebx
+ movl 76(%esi), %eax
+ movl %eax, -248(%ebp) # 4-byte Spill
+ sbbl %eax, -164(%ebp) # 4-byte Folded Spill
+ movl 80(%esi), %eax
+ movl %eax, -252(%ebp) # 4-byte Spill
+ sbbl %eax, -168(%ebp) # 4-byte Folded Spill
+ movl 84(%esi), %eax
+ movl %eax, -256(%ebp) # 4-byte Spill
+ sbbl %eax, -144(%ebp) # 4-byte Folded Spill
+ movl 88(%esi), %eax
+ movl %eax, -208(%ebp) # 4-byte Spill
+ sbbl %eax, -140(%ebp) # 4-byte Folded Spill
+ movl 92(%esi), %eax
+ movl %eax, -212(%ebp) # 4-byte Spill
+ sbbl %eax, -136(%ebp) # 4-byte Folded Spill
+ movl 96(%esi), %eax
+ movl %eax, -216(%ebp) # 4-byte Spill
+ sbbl %eax, -132(%ebp) # 4-byte Folded Spill
+ movl 100(%esi), %eax
+ movl %eax, -220(%ebp) # 4-byte Spill
+ sbbl %eax, -128(%ebp) # 4-byte Folded Spill
+ movl 104(%esi), %eax
+ movl %eax, -224(%ebp) # 4-byte Spill
+ sbbl %eax, -152(%ebp) # 4-byte Folded Spill
+ movl 108(%esi), %eax
+ movl %eax, -204(%ebp) # 4-byte Spill
+ sbbl %eax, %edi
+ movl %edi, -148(%ebp) # 4-byte Spill
+ movl -156(%ebp), %edi # 4-byte Reload
+ sbbl $0, %edi
+ movl -172(%ebp), %eax # 4-byte Reload
+ addl -176(%ebp), %eax # 4-byte Folded Reload
+ adcl -180(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%esi)
+ movl -160(%ebp), %eax # 4-byte Reload
+ adcl -184(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%esi)
+ adcl -188(%ebp), %edx # 4-byte Folded Reload
+ movl %eax, 36(%esi)
+ adcl -192(%ebp), %ebx # 4-byte Folded Reload
+ movl %edx, 40(%esi)
+ movl -164(%ebp), %eax # 4-byte Reload
+ adcl -196(%ebp), %eax # 4-byte Folded Reload
+ movl %ebx, 44(%esi)
+ movl -168(%ebp), %ecx # 4-byte Reload
+ adcl -200(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 48(%esi)
+ movl -144(%ebp), %eax # 4-byte Reload
+ adcl -228(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 52(%esi)
+ movl -140(%ebp), %ecx # 4-byte Reload
+ adcl -232(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 56(%esi)
+ movl -136(%ebp), %eax # 4-byte Reload
+ adcl -236(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 60(%esi)
+ movl -132(%ebp), %ecx # 4-byte Reload
+ adcl -240(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 64(%esi)
+ movl -128(%ebp), %eax # 4-byte Reload
+ adcl -244(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 68(%esi)
+ movl -152(%ebp), %ecx # 4-byte Reload
+ adcl -248(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 72(%esi)
+ movl -148(%ebp), %eax # 4-byte Reload
+ adcl -252(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 76(%esi)
+ adcl -256(%ebp), %edi # 4-byte Folded Reload
+ movl %eax, 80(%esi)
+ movl %edi, 84(%esi)
+ movl -208(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 88(%esi)
+ movl -212(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 92(%esi)
+ movl -216(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 96(%esi)
+ movl -220(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 100(%esi)
+ movl -224(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 104(%esi)
+ movl -204(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 108(%esi)
+ addl $268, %esp # imm = 0x10C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end208:
+ .size mcl_fpDbl_mulPre14Lbmi2, .Lfunc_end208-mcl_fpDbl_mulPre14Lbmi2
+
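+# mcl_fpDbl_sqrPre14Lbmi2(z, x): appears to square a 14-limb operand into a
+# 28-limb result using the same 7-limb split as mulPre14 (three calls to
+# mcl_fpDbl_mulPre7Lbmi2), with the cross term handled by the shifted sums below.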
+ .globl mcl_fpDbl_sqrPre14Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre14Lbmi2,@function
+mcl_fpDbl_sqrPre14Lbmi2: # @mcl_fpDbl_sqrPre14Lbmi2
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $268, %esp # imm = 0x10C
+ calll .L209$pb
+.L209$pb:
+ popl %ebx
+.Ltmp40:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp40-.L209$pb), %ebx
+ movl %ebx, -172(%ebp) # 4-byte Spill
+ movl 12(%ebp), %edi
+ movl %edi, 8(%esp)
+ movl %edi, 4(%esp)
+ movl 8(%ebp), %esi
+ movl %esi, (%esp)
+ calll mcl_fpDbl_mulPre7Lbmi2@PLT
+ leal 28(%edi), %eax
+ movl %eax, 8(%esp)
+ movl %eax, 4(%esp)
+ leal 56(%esi), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre7Lbmi2@PLT
+ movl 48(%edi), %eax
+ movl 44(%edi), %ecx
+ movl 36(%edi), %edx
+ movl (%edi), %esi
+ movl 4(%edi), %ebx
+ addl 28(%edi), %esi
+ adcl 32(%edi), %ebx
+ movl %ebx, -164(%ebp) # 4-byte Spill
+ adcl 8(%edi), %edx
+ movl %edx, -160(%ebp) # 4-byte Spill
+ movl 40(%edi), %edx
+ adcl 12(%edi), %edx
+ adcl 16(%edi), %ecx
+ movl %ecx, -180(%ebp) # 4-byte Spill
+ adcl 20(%edi), %eax
+ movl %eax, -176(%ebp) # 4-byte Spill
+ movl 52(%edi), %ecx
+ adcl 24(%edi), %ecx
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -184(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -152(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -148(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -144(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -140(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -136(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %edi
+ seto %al
+ lahf
+ movl %eax, %eax
+ sbbl %ebx, %ebx
+ movl %ebx, -128(%ebp) # 4-byte Spill
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB209_1
+# BB#2:
+ movl %esi, -168(%ebp) # 4-byte Spill
+ movl $0, -132(%ebp) # 4-byte Folded Spill
+ jmp .LBB209_3
+.LBB209_1:
+ leal (%esi,%esi), %eax
+ movl %esi, -168(%ebp) # 4-byte Spill
+ movl %eax, -132(%ebp) # 4-byte Spill
+.LBB209_3:
+ movl %edi, %eax
+ addb $127, %al
+ sahf
+ movl -180(%ebp), %ebx # 4-byte Reload
+ jb .LBB209_4
+# BB#5:
+ movl $0, -156(%ebp) # 4-byte Folded Spill
+ jmp .LBB209_6
+.LBB209_4:
+ movl -164(%ebp), %eax # 4-byte Reload
+ movl -168(%ebp), %esi # 4-byte Reload
+ shldl $1, %esi, %eax
+ movl %eax, -156(%ebp) # 4-byte Spill
+.LBB209_6:
+ movl -176(%ebp), %edi # 4-byte Reload
+ movl -136(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB209_7
+# BB#8:
+ movl $0, -136(%ebp) # 4-byte Folded Spill
+ jmp .LBB209_9
+.LBB209_7:
+ movl -160(%ebp), %eax # 4-byte Reload
+ movl -164(%ebp), %esi # 4-byte Reload
+ shldl $1, %esi, %eax
+ movl %eax, -136(%ebp) # 4-byte Spill
+.LBB209_9:
+ movl %ebx, %esi
+ movl -140(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB209_10
+# BB#11:
+ movl $0, -140(%ebp) # 4-byte Folded Spill
+ jmp .LBB209_12
+.LBB209_10:
+ movl %edx, %eax
+ movl -160(%ebp), %ebx # 4-byte Reload
+ shldl $1, %ebx, %eax
+ movl %eax, -140(%ebp) # 4-byte Spill
+.LBB209_12:
+ movl -144(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB209_13
+# BB#14:
+ movl $0, -144(%ebp) # 4-byte Folded Spill
+ jmp .LBB209_15
+.LBB209_13:
+ movl %esi, %eax
+ shldl $1, %edx, %eax
+ movl %eax, -144(%ebp) # 4-byte Spill
+.LBB209_15:
+ movl -148(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB209_16
+# BB#17:
+ movl $0, -148(%ebp) # 4-byte Folded Spill
+ jmp .LBB209_18
+.LBB209_16:
+ movl %edi, %eax
+ shldl $1, %esi, %eax
+ movl %eax, -148(%ebp) # 4-byte Spill
+.LBB209_18:
+ movl -152(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB209_19
+# BB#20:
+ movl $0, -152(%ebp) # 4-byte Folded Spill
+ jmp .LBB209_21
+.LBB209_19:
+ movl %ecx, %eax
+ shldl $1, %edi, %eax
+ movl %eax, -152(%ebp) # 4-byte Spill
+.LBB209_21:
+ movl -168(%ebp), %eax # 4-byte Reload
+ movl %eax, -96(%ebp)
+ movl %eax, -124(%ebp)
+ movl -164(%ebp), %eax # 4-byte Reload
+ movl %eax, -92(%ebp)
+ movl %eax, -120(%ebp)
+ movl -160(%ebp), %eax # 4-byte Reload
+ movl %eax, -88(%ebp)
+ movl %eax, -116(%ebp)
+ movl %edx, -84(%ebp)
+ movl %edx, -112(%ebp)
+ movl %esi, -80(%ebp)
+ movl %esi, -108(%ebp)
+ movl %edi, -76(%ebp)
+ movl %edi, -104(%ebp)
+ movl %ecx, -72(%ebp)
+ movl %ecx, -100(%ebp)
+ movl -184(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB209_22
+# BB#23:
+ xorl %edi, %edi
+ jmp .LBB209_24
+.LBB209_22:
+ shrl $31, %ecx
+ movl %ecx, %edi
+.LBB209_24:
+ leal -68(%ebp), %eax
+ movl %eax, (%esp)
+ leal -96(%ebp), %eax
+ movl %eax, 4(%esp)
+ leal -124(%ebp), %eax
+ movl %eax, 8(%esp)
+ movl -128(%ebp), %esi # 4-byte Reload
+ andl $1, %esi
+ movl -172(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre7Lbmi2@PLT
+ movl -132(%ebp), %eax # 4-byte Reload
+ addl -40(%ebp), %eax
+ movl %eax, -132(%ebp) # 4-byte Spill
+ movl -156(%ebp), %eax # 4-byte Reload
+ adcl -36(%ebp), %eax
+ movl -136(%ebp), %ecx # 4-byte Reload
+ adcl -32(%ebp), %ecx
+ movl %ecx, -136(%ebp) # 4-byte Spill
+ movl -140(%ebp), %ecx # 4-byte Reload
+ adcl -28(%ebp), %ecx
+ movl %ecx, -140(%ebp) # 4-byte Spill
+ movl -144(%ebp), %ecx # 4-byte Reload
+ adcl -24(%ebp), %ecx
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ movl -148(%ebp), %ecx # 4-byte Reload
+ adcl -20(%ebp), %ecx
+ movl %ecx, -148(%ebp) # 4-byte Spill
+ movl -152(%ebp), %ecx # 4-byte Reload
+ adcl -16(%ebp), %ecx
+ movl %ecx, -152(%ebp) # 4-byte Spill
+ adcl %edi, %esi
+ movl %esi, -128(%ebp) # 4-byte Spill
+ movl -68(%ebp), %ecx
+ movl 8(%ebp), %esi
+ subl (%esi), %ecx
+ movl %ecx, -204(%ebp) # 4-byte Spill
+ movl -64(%ebp), %edi
+ sbbl 4(%esi), %edi
+ movl -60(%ebp), %edx
+ sbbl 8(%esi), %edx
+ movl %edx, -160(%ebp) # 4-byte Spill
+ movl -56(%ebp), %edx
+ sbbl 12(%esi), %edx
+ movl %edx, -168(%ebp) # 4-byte Spill
+ movl -52(%ebp), %ebx
+ sbbl 16(%esi), %ebx
+ movl -48(%ebp), %ecx
+ sbbl 20(%esi), %ecx
+ movl %ecx, -172(%ebp) # 4-byte Spill
+ movl -44(%ebp), %edx
+ sbbl 24(%esi), %edx
+ movl %edx, -164(%ebp) # 4-byte Spill
+ movl 28(%esi), %edx
+ movl %edx, -176(%ebp) # 4-byte Spill
+ sbbl %edx, -132(%ebp) # 4-byte Folded Spill
+ movl 32(%esi), %ecx
+ movl %ecx, -180(%ebp) # 4-byte Spill
+ sbbl %ecx, %eax
+ movl %eax, -156(%ebp) # 4-byte Spill
+ movl 36(%esi), %eax
+ movl %eax, -184(%ebp) # 4-byte Spill
+ sbbl %eax, -136(%ebp) # 4-byte Folded Spill
+ movl 40(%esi), %eax
+ movl %eax, -188(%ebp) # 4-byte Spill
+ sbbl %eax, -140(%ebp) # 4-byte Folded Spill
+ movl 44(%esi), %eax
+ movl %eax, -192(%ebp) # 4-byte Spill
+ sbbl %eax, -144(%ebp) # 4-byte Folded Spill
+ movl 48(%esi), %eax
+ movl %eax, -196(%ebp) # 4-byte Spill
+ sbbl %eax, -148(%ebp) # 4-byte Folded Spill
+ movl 52(%esi), %eax
+ movl %eax, -200(%ebp) # 4-byte Spill
+ sbbl %eax, -152(%ebp) # 4-byte Folded Spill
+ movl -128(%ebp), %ecx # 4-byte Reload
+ sbbl $0, %ecx
+ movl 56(%esi), %eax
+ movl %eax, -228(%ebp) # 4-byte Spill
+ movl -204(%ebp), %edx # 4-byte Reload
+ subl %eax, %edx
+ movl 60(%esi), %eax
+ movl %eax, -232(%ebp) # 4-byte Spill
+ sbbl %eax, %edi
+ movl 64(%esi), %eax
+ movl %eax, -236(%ebp) # 4-byte Spill
+ sbbl %eax, -160(%ebp) # 4-byte Folded Spill
+ movl 68(%esi), %eax
+ movl %eax, -240(%ebp) # 4-byte Spill
+ sbbl %eax, -168(%ebp) # 4-byte Folded Spill
+ movl 72(%esi), %eax
+ movl %eax, -244(%ebp) # 4-byte Spill
+ sbbl %eax, %ebx
+ movl 76(%esi), %eax
+ movl %eax, -248(%ebp) # 4-byte Spill
+ sbbl %eax, -172(%ebp) # 4-byte Folded Spill
+ movl 80(%esi), %eax
+ movl %eax, -252(%ebp) # 4-byte Spill
+ sbbl %eax, -164(%ebp) # 4-byte Folded Spill
+ movl 84(%esi), %eax
+ movl %eax, -256(%ebp) # 4-byte Spill
+ sbbl %eax, -132(%ebp) # 4-byte Folded Spill
+ movl 88(%esi), %eax
+ movl %eax, -204(%ebp) # 4-byte Spill
+ sbbl %eax, -156(%ebp) # 4-byte Folded Spill
+ movl 92(%esi), %eax
+ movl %eax, -208(%ebp) # 4-byte Spill
+ sbbl %eax, -136(%ebp) # 4-byte Folded Spill
+ movl 96(%esi), %eax
+ movl %eax, -212(%ebp) # 4-byte Spill
+ sbbl %eax, -140(%ebp) # 4-byte Folded Spill
+ movl 100(%esi), %eax
+ movl %eax, -216(%ebp) # 4-byte Spill
+ sbbl %eax, -144(%ebp) # 4-byte Folded Spill
+ movl 104(%esi), %eax
+ movl %eax, -220(%ebp) # 4-byte Spill
+ sbbl %eax, -148(%ebp) # 4-byte Folded Spill
+ movl 108(%esi), %eax
+ movl %eax, -224(%ebp) # 4-byte Spill
+ sbbl %eax, -152(%ebp) # 4-byte Folded Spill
+ sbbl $0, %ecx
+ movl %ecx, -128(%ebp) # 4-byte Spill
+ movl %edx, %eax
+ addl -176(%ebp), %eax # 4-byte Folded Reload
+ adcl -180(%ebp), %edi # 4-byte Folded Reload
+ movl %eax, 28(%esi)
+ movl -160(%ebp), %eax # 4-byte Reload
+ adcl -184(%ebp), %eax # 4-byte Folded Reload
+ movl %edi, 32(%esi)
+ movl -168(%ebp), %ecx # 4-byte Reload
+ adcl -188(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%esi)
+ adcl -192(%ebp), %ebx # 4-byte Folded Reload
+ movl %ecx, 40(%esi)
+ movl -172(%ebp), %eax # 4-byte Reload
+ adcl -196(%ebp), %eax # 4-byte Folded Reload
+ movl %ebx, 44(%esi)
+ movl -164(%ebp), %ecx # 4-byte Reload
+ adcl -200(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 48(%esi)
+ movl -132(%ebp), %eax # 4-byte Reload
+ adcl -228(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 52(%esi)
+ movl -156(%ebp), %edx # 4-byte Reload
+ adcl -232(%ebp), %edx # 4-byte Folded Reload
+ movl %eax, 56(%esi)
+ movl -136(%ebp), %ecx # 4-byte Reload
+ adcl -236(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 60(%esi)
+ movl -140(%ebp), %eax # 4-byte Reload
+ adcl -240(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 64(%esi)
+ movl -144(%ebp), %ecx # 4-byte Reload
+ adcl -244(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 68(%esi)
+ movl -148(%ebp), %eax # 4-byte Reload
+ adcl -248(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 72(%esi)
+ movl -152(%ebp), %ecx # 4-byte Reload
+ adcl -252(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 76(%esi)
+ movl -128(%ebp), %eax # 4-byte Reload
+ adcl -256(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 80(%esi)
+ movl %eax, 84(%esi)
+ movl -204(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 88(%esi)
+ movl -208(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 92(%esi)
+ movl -212(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 96(%esi)
+ movl -216(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 100(%esi)
+ movl -220(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 104(%esi)
+ movl -224(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 108(%esi)
+ addl $268, %esp # imm = 0x10C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end209:
+ .size mcl_fpDbl_sqrPre14Lbmi2, .Lfunc_end209-mcl_fpDbl_sqrPre14Lbmi2
+
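+# mcl_fp_mont14Lbmi2(z, x, y, p): appears to be a word-serial Montgomery
+# multiplication for 14-limb operands: each iteration calls .LmulPv448x32 for
+# x * y[i], derives the reduction multiplier from the precomputed constant at
+# p[-1] (kept in 48(%esp)), and calls .LmulPv448x32 again to fold in q * p.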
+ .globl mcl_fp_mont14Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont14Lbmi2,@function
+mcl_fp_mont14Lbmi2: # @mcl_fp_mont14Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1900, %esp # imm = 0x76C
+ calll .L210$pb
+.L210$pb:
+ popl %ebx
+.Ltmp41:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp41-.L210$pb), %ebx
+ movl 1932(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1840(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 1840(%esp), %edi
+ movl 1844(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull %esi, %eax
+ movl 1896(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 1892(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 1888(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 1884(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 1880(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 1876(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 1872(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1868(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1864(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1860(%esp), %esi
+ movl 1856(%esp), %ebp
+ movl 1852(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 1848(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl %eax, (%esp)
+ leal 1776(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ addl 1776(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1780(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1784(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1788(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1792(%esp), %ebp
+ adcl 1796(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1800(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1804(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1808(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1812(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1816(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1820(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1824(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1828(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1832(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl 1928(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1712(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %edi
+ movl %edi, %edx
+ movl 100(%esp), %ecx # 4-byte Reload
+ addl 1712(%esp), %ecx
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1716(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1720(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1724(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ adcl 1728(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 1732(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1736(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1740(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 1744(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1748(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1752(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1756(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1760(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1764(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 1768(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1648(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ movl 100(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1648(%esp), %ebp
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1652(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1656(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1660(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1664(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 1668(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 1672(%esp), %ebp
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1676(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 1680(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1684(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1688(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1692(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1696(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1700(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 1704(%esp), %esi
+ adcl $0, %eax
+ movl %eax, %edi
+ movl 1928(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1584(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 84(%esp), %ecx # 4-byte Reload
+ addl 1584(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1588(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1592(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1596(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1600(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1604(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 1608(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1612(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1616(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1620(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1624(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1628(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1632(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 1636(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ adcl 1640(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1520(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 1520(%esp), %edi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1524(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1528(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1532(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1536(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1540(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 1544(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1548(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1552(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1556(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1560(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ebp # 4-byte Reload
+ adcl 1564(%esp), %ebp
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 1568(%esp), %esi
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 1572(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1576(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1456(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 1456(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1460(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1464(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1468(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1472(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1476(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1480(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1484(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1488(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1492(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 1496(%esp), %ebp
+ movl %ebp, 104(%esp) # 4-byte Spill
+ adcl 1500(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ adcl 1504(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1508(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1512(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1392(%esp), %ecx
+ movl 1932(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv448x32
+ andl $1, %edi
+ movl %edi, %eax
+ addl 1392(%esp), %esi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1396(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1400(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1404(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1408(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1412(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1416(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 1420(%esp), %esi
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 1424(%esp), %ebp
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 1428(%esp), %edi
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1432(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1436(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1440(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1444(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1448(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1328(%esp), %ecx
+ movl 1924(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv448x32
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 1328(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1332(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1336(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1340(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1344(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1348(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1352(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl 1356(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ adcl 1360(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1364(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1368(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1372(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1376(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1380(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 1384(%esp), %edi
+ sbbl %esi, %esi
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1264(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 1264(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1268(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1272(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1276(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1280(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 1284(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1288(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1292(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1296(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1300(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1304(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1308(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1312(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 1316(%esp), %esi
+ adcl 1320(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 1200(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 68(%esp), %eax # 4-byte Reload
+ addl 1200(%esp), %eax
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1204(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1208(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 1212(%esp), %edi
+ adcl 1216(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1220(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1224(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1228(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1232(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1236(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1240(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1244(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ adcl 1248(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1252(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1256(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %eax, %ebp
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1136(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 1136(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1140(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 1144(%esp), %ebp
+ adcl 1148(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1152(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1156(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1168(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 1172(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 1180(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1184(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1188(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1192(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 1072(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 60(%esp), %eax # 4-byte Reload
+ addl 1072(%esp), %eax
+ adcl 1076(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1080(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1084(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1088(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1092(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1096(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1100(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ adcl 1104(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 1108(%esp), %ebp
+ adcl 1112(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1116(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1120(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1124(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1128(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %eax, %edi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1008(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 1008(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 1020(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1028(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl 1036(%esp), %edi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1040(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 1044(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 1052(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 944(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 52(%esp), %eax # 4-byte Reload
+ addl 944(%esp), %eax
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 948(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 952(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 956(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 960(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %esi # 4-byte Reload
+ adcl 964(%esp), %esi
+ adcl 968(%esp), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 972(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 976(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 980(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ adcl 984(%esp), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 988(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 992(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 996(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1000(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %eax, %edi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 880(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %ebp
+ addl 880(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ adcl 896(%esp), %edi
+ adcl 900(%esp), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 924(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 928(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 932(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 936(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 816(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 816(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 824(%esp), %ebp
+ adcl 828(%esp), %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 856(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 872(%esp), %esi
+ sbbl %eax, %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 752(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ movl 56(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 752(%esp), %edi
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 756(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 760(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 764(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 768(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 772(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 776(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 780(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 784(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 788(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 792(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 796(%esp), %ebp
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 800(%esp), %edi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 804(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 808(%esp), %esi
+ adcl $0, %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 688(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 64(%esp), %ecx # 4-byte Reload
+ addl 688(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 696(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 716(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 728(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ adcl 732(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 740(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 624(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ movl %edi, %ecx
+ andl $1, %ecx
+ addl 624(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl 636(%esp), %ebp
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 648(%esp), %esi
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 652(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 560(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 560(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 568(%esp), %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 580(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ adcl 584(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 592(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 496(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 496(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl 508(%esp), %edi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 520(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 528(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 540(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 432(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 88(%esp), %ecx # 4-byte Reload
+ addl 432(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 440(%esp), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %edi # 4-byte Reload
+ adcl 444(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 452(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 472(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 368(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 368(%esp), %ebp
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 376(%esp), %esi
+ adcl 380(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 392(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 96(%esp), %ecx # 4-byte Reload
+ addl 304(%esp), %ecx
+ adcl 308(%esp), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ adcl 312(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 324(%esp), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 328(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 240(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ movl 96(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 240(%esp), %ebp
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 248(%esp), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 252(%esp), %edi
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 256(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 264(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 268(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 176(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 104(%esp), %ecx # 4-byte Reload
+ addl 176(%esp), %ecx
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 184(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ adcl 188(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 192(%esp), %edi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 200(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 112(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %ebp
+ addl 112(%esp), %esi
+ movl 100(%esp), %esi # 4-byte Reload
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 120(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ adcl 124(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ adcl 128(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 132(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl %ecx, %ebx
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 136(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 140(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 144(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 148(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 152(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 156(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 160(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 164(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 168(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl 1932(%esp), %ecx
+ subl (%ecx), %eax
+ sbbl 4(%ecx), %edx
+ sbbl 8(%ecx), %esi
+ sbbl 12(%ecx), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ sbbl 16(%ecx), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ sbbl 20(%ecx), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ sbbl 24(%ecx), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 60(%esp), %ebx # 4-byte Reload
+ sbbl 28(%ecx), %ebx
+ movl 52(%esp), %edi # 4-byte Reload
+ sbbl 32(%ecx), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ sbbl 36(%ecx), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ sbbl 40(%ecx), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ sbbl 44(%ecx), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ sbbl 48(%ecx), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ sbbl 52(%ecx), %edi
+ movl %ebp, %ecx
+ movl %edi, 104(%esp) # 4-byte Spill
+ sbbl $0, %ecx
+ andl $1, %ecx
+ jne .LBB210_2
+# BB#1:
+ movl %ebx, 60(%esp) # 4-byte Spill
+.LBB210_2:
+ testb %cl, %cl
+ movl 108(%esp), %ebx # 4-byte Reload
+ jne .LBB210_4
+# BB#3:
+ movl %eax, %ebx
+.LBB210_4:
+ movl 1920(%esp), %eax
+ movl %ebx, (%eax)
+ movl 92(%esp), %edi # 4-byte Reload
+ movl 72(%esp), %ecx # 4-byte Reload
+ jne .LBB210_6
+# BB#5:
+ movl %edx, %edi
+.LBB210_6:
+ movl %edi, 4(%eax)
+ jne .LBB210_8
+# BB#7:
+ movl %esi, 100(%esp) # 4-byte Spill
+.LBB210_8:
+ movl 100(%esp), %edx # 4-byte Reload
+ movl %edx, 8(%eax)
+ jne .LBB210_10
+# BB#9:
+ movl 16(%esp), %edx # 4-byte Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+.LBB210_10:
+ movl 84(%esp), %edx # 4-byte Reload
+ movl %edx, 12(%eax)
+ jne .LBB210_12
+# BB#11:
+ movl 20(%esp), %ecx # 4-byte Reload
+.LBB210_12:
+ movl %ecx, 16(%eax)
+ movl 76(%esp), %ecx # 4-byte Reload
+ jne .LBB210_14
+# BB#13:
+ movl 24(%esp), %ecx # 4-byte Reload
+.LBB210_14:
+ movl %ecx, 20(%eax)
+ movl 68(%esp), %ecx # 4-byte Reload
+ jne .LBB210_16
+# BB#15:
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB210_16:
+ movl %ecx, 24(%eax)
+ movl 60(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ jne .LBB210_18
+# BB#17:
+ movl 32(%esp), %ecx # 4-byte Reload
+.LBB210_18:
+ movl %ecx, 32(%eax)
+ movl 56(%esp), %ecx # 4-byte Reload
+ jne .LBB210_20
+# BB#19:
+ movl 36(%esp), %ecx # 4-byte Reload
+.LBB210_20:
+ movl %ecx, 36(%eax)
+ movl 64(%esp), %ecx # 4-byte Reload
+ jne .LBB210_22
+# BB#21:
+ movl 40(%esp), %ecx # 4-byte Reload
+.LBB210_22:
+ movl %ecx, 40(%eax)
+ movl 80(%esp), %ecx # 4-byte Reload
+ jne .LBB210_24
+# BB#23:
+ movl 44(%esp), %ecx # 4-byte Reload
+.LBB210_24:
+ movl %ecx, 44(%eax)
+ movl 88(%esp), %ecx # 4-byte Reload
+ jne .LBB210_26
+# BB#25:
+ movl 48(%esp), %ecx # 4-byte Reload
+.LBB210_26:
+ movl %ecx, 48(%eax)
+ movl 96(%esp), %ecx # 4-byte Reload
+ jne .LBB210_28
+# BB#27:
+ movl 104(%esp), %ecx # 4-byte Reload
+.LBB210_28:
+ movl %ecx, 52(%eax)
+ addl $1900, %esp # imm = 0x76C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end210:
+ .size mcl_fp_mont14Lbmi2, .Lfunc_end210-mcl_fp_mont14Lbmi2
+
+ .globl mcl_fp_montNF14Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF14Lbmi2,@function
+mcl_fp_montNF14Lbmi2: # @mcl_fp_montNF14Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1884, %esp # imm = 0x75C
+ calll .L211$pb
+.L211$pb:
+ popl %ebx
+.Ltmp42:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp42-.L211$pb), %ebx
+ movl 1916(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1824(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 1824(%esp), %edi
+ movl 1828(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull %esi, %eax
+ movl 1880(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 1876(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 1872(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 1868(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 1864(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1860(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1856(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1852(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1848(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 1844(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1840(%esp), %esi
+ movl 1836(%esp), %ebp
+ movl 1832(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl %eax, (%esp)
+ leal 1760(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 1760(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1764(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1768(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 1772(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ adcl 1776(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1780(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1784(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1788(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1792(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1796(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1800(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1804(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 1808(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1812(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 1816(%esp), %ebp
+ movl 1912(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1696(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 1752(%esp), %eax
+ movl 68(%esp), %edx # 4-byte Reload
+ addl 1696(%esp), %edx
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1700(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1704(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 1708(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1712(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1716(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1720(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1724(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1728(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1732(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1736(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl 1740(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 1744(%esp), %edi
+ adcl 1748(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %ebp
+ movl %edx, %eax
+ movl %edx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1632(%esp), %ecx
+ movl 1916(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv448x32
+ addl 1632(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1636(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1640(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1644(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1648(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1652(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1656(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1660(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 1664(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1668(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1672(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1676(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1680(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1684(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1688(%esp), %ebp
+ movl 1912(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1568(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 1624(%esp), %eax
+ movl 88(%esp), %edx # 4-byte Reload
+ addl 1568(%esp), %edx
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1572(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1576(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1580(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1584(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1588(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1592(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 1596(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1600(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1604(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1608(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl 1612(%esp), %edi
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1616(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 1620(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %ebp
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1504(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 1504(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1508(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1512(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1516(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1520(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1524(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1528(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1532(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1536(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1540(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 1544(%esp), %esi
+ adcl 1548(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1552(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1556(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 1560(%esp), %ebp
+ movl 1912(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1440(%esp), %ecx
+ movl 1908(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv448x32
+ movl 1496(%esp), %eax
+ movl 72(%esp), %edx # 4-byte Reload
+ addl 1440(%esp), %edx
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1444(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1448(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1452(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1456(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1460(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 1464(%esp), %edi
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1468(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1472(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl 1476(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1480(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1484(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 1488(%esp), %esi
+ adcl 1492(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %edx, %eax
+ movl %edx, %ebp
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1376(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 1376(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1380(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1384(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1388(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1392(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1396(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 1400(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1404(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1408(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1412(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1416(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1420(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1424(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1428(%esp), %eax
+ movl %eax, %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1432(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1312(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 1368(%esp), %eax
+ movl 68(%esp), %edx # 4-byte Reload
+ addl 1312(%esp), %edx
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1316(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1320(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1324(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 1328(%esp), %edi
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1332(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1336(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 1340(%esp), %esi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1344(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1348(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1352(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1356(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl 1360(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1364(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ movl %ebp, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1248(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 1248(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1252(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1256(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1260(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 1264(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1268(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 1272(%esp), %ebp
+ adcl 1276(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1280(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 1284(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1288(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1292(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1296(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 1300(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1304(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 1184(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 1240(%esp), %edx
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 1184(%esp), %ecx
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1188(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1192(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1196(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1200(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1204(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1208(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1212(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1216(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1220(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1224(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1228(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1232(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1236(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1120(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 1120(%esp), %esi
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 1124(%esp), %ebp
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 1128(%esp), %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1140(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1144(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1148(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1152(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 1156(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1168(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 1056(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 1112(%esp), %eax
+ movl %ebp, %ecx
+ addl 1056(%esp), %ecx
+ adcl 1060(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 1064(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 1068(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 1072(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 1076(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 1080(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 1084(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ adcl 1088(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 1092(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 1096(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 1100(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 1104(%esp), %ebp
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 1108(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %esi
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 992(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 992(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1000(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1004(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 1008(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1028(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1040(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 1044(%esp), %ebp
+ adcl 1048(%esp), %esi
+ movl 1912(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 928(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 984(%esp), %eax
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 928(%esp), %ecx
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 932(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 936(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 940(%esp), %edi
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 944(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 948(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 952(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 956(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 960(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 964(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 968(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 972(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 976(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ adcl 980(%esp), %esi
+ movl %esi, %ebp
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 864(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 864(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 876(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 884(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 916(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 920(%esp), %ebp
+ movl 1912(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 800(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 856(%esp), %edx
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 800(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 808(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 816(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 828(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 852(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 736(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 736(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 764(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 768(%esp), %ebp
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 772(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 780(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 672(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 728(%esp), %edx
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 672(%esp), %ecx
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 696(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 700(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ adcl 704(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 712(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 716(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 608(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 608(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 616(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 624(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 644(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 544(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 600(%esp), %edx
+ movl 52(%esp), %ecx # 4-byte Reload
+ addl 544(%esp), %ecx
+ adcl 548(%esp), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 556(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 568(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 576(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 480(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 480(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 488(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 496(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 504(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 532(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 416(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 472(%esp), %edx
+ movl 84(%esp), %ecx # 4-byte Reload
+ addl 416(%esp), %ecx
+ adcl 420(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 424(%esp), %edi
+ adcl 428(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 464(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 352(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 352(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 360(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 364(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 388(%esp), %edi
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 392(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 288(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 344(%esp), %edx
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 288(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 296(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 320(%esp), %edi
+ adcl 324(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 328(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 224(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 232(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 256(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 260(%esp), %edi
+ adcl 264(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 160(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 216(%esp), %edx
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 160(%esp), %ecx
+ adcl 164(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 168(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 192(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 96(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 96(%esp), %esi
+ movl 64(%esp), %esi # 4-byte Reload
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 104(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl %ebp, %ebx
+ adcl 108(%esp), %esi
+ adcl 112(%esp), %edi
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 116(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 120(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 124(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 128(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 132(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 136(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 140(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 144(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 148(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 152(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 1916(%esp), %ebp
+ subl (%ebp), %edx
+ sbbl 4(%ebp), %ebx
+ movl %esi, %eax
+ sbbl 8(%ebp), %eax
+ movl %edi, %ecx
+ sbbl 12(%ebp), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ sbbl 16(%ebp), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ sbbl 20(%ebp), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ sbbl 24(%ebp), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ sbbl 28(%ebp), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ sbbl 32(%ebp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ sbbl 36(%ebp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ sbbl 40(%ebp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ sbbl 44(%ebp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ sbbl 48(%ebp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ sbbl 52(%ebp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ sarl $31, %ecx
+ testl %ecx, %ecx
+ movl 92(%esp), %ebp # 4-byte Reload
+ js .LBB211_2
+# BB#1:
+ movl %edx, %ebp
+.LBB211_2:
+ movl 1904(%esp), %edx
+ movl %ebp, (%edx)
+ movl 88(%esp), %ebp # 4-byte Reload
+ js .LBB211_4
+# BB#3:
+ movl %ebx, %ebp
+.LBB211_4:
+ movl %ebp, 4(%edx)
+ js .LBB211_6
+# BB#5:
+ movl %eax, %esi
+.LBB211_6:
+ movl %esi, 8(%edx)
+ js .LBB211_8
+# BB#7:
+ movl 4(%esp), %edi # 4-byte Reload
+.LBB211_8:
+ movl %edi, 12(%edx)
+ movl 68(%esp), %eax # 4-byte Reload
+ js .LBB211_10
+# BB#9:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB211_10:
+ movl %eax, 16(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB211_12
+# BB#11:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB211_12:
+ movl %eax, 20(%edx)
+ movl 44(%esp), %eax # 4-byte Reload
+ js .LBB211_14
+# BB#13:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB211_14:
+ movl %eax, 24(%edx)
+ movl 40(%esp), %eax # 4-byte Reload
+ js .LBB211_16
+# BB#15:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB211_16:
+ movl %eax, 28(%edx)
+ movl 48(%esp), %eax # 4-byte Reload
+ js .LBB211_18
+# BB#17:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB211_18:
+ movl %eax, 32(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB211_20
+# BB#19:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB211_20:
+ movl %eax, 36(%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB211_22
+# BB#21:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB211_22:
+ movl %eax, 40(%edx)
+ movl 84(%esp), %eax # 4-byte Reload
+ js .LBB211_24
+# BB#23:
+ movl 36(%esp), %eax # 4-byte Reload
+.LBB211_24:
+ movl %eax, 44(%edx)
+ movl 76(%esp), %eax # 4-byte Reload
+ js .LBB211_26
+# BB#25:
+ movl 64(%esp), %eax # 4-byte Reload
+.LBB211_26:
+ movl %eax, 48(%edx)
+ movl 80(%esp), %eax # 4-byte Reload
+ js .LBB211_28
+# BB#27:
+ movl 72(%esp), %eax # 4-byte Reload
+.LBB211_28:
+ movl %eax, 52(%edx)
+ addl $1884, %esp # imm = 0x75C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end211:
+ .size mcl_fp_montNF14Lbmi2, .Lfunc_end211-mcl_fp_montNF14Lbmi2
+
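+# mcl_fp_montRed14Lbmi2 (sketch of intent, inferred from the code below):
+# Montgomery reduction of a double-width (28 x 32-bit limb) value modulo a
+# 14-limb prime. Each of the 14 rounds multiplies the running low limb by the
+# constant read from -4(modulus), calls .LmulPv448x32, and folds the partial
+# product in with adcl chains; the epilogue conditionally subtracts the modulus
+# to bring the result into [0, p).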
+ .globl mcl_fp_montRed14Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed14Lbmi2,@function
+mcl_fp_montRed14Lbmi2: # @mcl_fp_montRed14Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1068, %esp # imm = 0x42C
+ calll .L212$pb
+.L212$pb:
+ popl %eax
+.Ltmp43:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp43-.L212$pb), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1096(%esp), %edx
+ movl -4(%edx), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 1092(%esp), %ecx
+ movl (%ecx), %ebx
+ movl %ebx, 92(%esp) # 4-byte Spill
+ movl 4(%ecx), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ imull %eax, %ebx
+ movl 108(%ecx), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 104(%ecx), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 100(%ecx), %esi
+ movl %esi, 120(%esp) # 4-byte Spill
+ movl 96(%ecx), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 92(%ecx), %esi
+ movl %esi, 140(%esp) # 4-byte Spill
+ movl 88(%ecx), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 84(%ecx), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 80(%ecx), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 76(%ecx), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 72(%ecx), %esi
+ movl %esi, 136(%esp) # 4-byte Spill
+ movl 68(%ecx), %esi
+ movl %esi, 168(%esp) # 4-byte Spill
+ movl 64(%ecx), %esi
+ movl %esi, 164(%esp) # 4-byte Spill
+ movl 60(%ecx), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 56(%ecx), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 52(%ecx), %edi
+ movl %edi, 144(%esp) # 4-byte Spill
+ movl 48(%ecx), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 44(%ecx), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 40(%ecx), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ movl 36(%ecx), %ebp
+ movl 32(%ecx), %edi
+ movl 28(%ecx), %esi
+ movl 24(%ecx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 20(%ecx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 16(%ecx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 12(%ecx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 8(%ecx), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl (%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 52(%edx), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 48(%edx), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 44(%edx), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 40(%edx), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 36(%edx), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 32(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 28(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 24(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 16(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 12(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 8(%edx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 4(%edx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl %ebx, (%esp)
+ leal 1008(%esp), %ecx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ movl 92(%esp), %eax # 4-byte Reload
+ addl 1008(%esp), %eax
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1012(%esp), %ecx
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1028(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1036(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl 1040(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ adcl 1044(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl 1052(%esp), %ebp
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, 164(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ sbbl %edi, %edi
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 944(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ movl %edi, %ecx
+ andl $1, %ecx
+ addl 944(%esp), %esi
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 948(%esp), %edx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 952(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 976(%esp), %edi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 980(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 984(%esp), %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %esi # 4-byte Reload
+ adcl 1000(%esp), %esi
+ adcl $0, 164(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl %edx, %eax
+ movl %edx, %ebp
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 880(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 880(%esp), %ebp
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 884(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 908(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %edi # 4-byte Reload
+ adcl 920(%esp), %edi
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 928(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ adcl 932(%esp), %esi
+ movl %esi, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 936(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ movl 152(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 816(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 816(%esp), %esi
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 820(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 852(%esp), %edi
+ movl %edi, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 856(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 152(%esp) # 4-byte Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ movl 128(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 752(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 752(%esp), %ebp
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 756(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 780(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ movl 156(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 128(%esp) # 4-byte Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 688(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 688(%esp), %esi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 692(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 696(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 716(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 156(%esp) # 4-byte Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ movl 140(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 624(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 624(%esp), %ebp
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 628(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %ebp # 4-byte Reload
+ adcl 664(%esp), %ebp
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 140(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 560(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 560(%esp), %esi
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 564(%esp), %ecx
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ adcl 596(%esp), %ebp
+ movl %ebp, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %edi # 4-byte Reload
+ adcl 600(%esp), %edi
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ movl 120(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 496(%esp), %ecx
+ movl 1096(%esp), %eax
+ movl %eax, %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 496(%esp), %ebp
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 500(%esp), %ecx
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %ebp # 4-byte Reload
+ adcl 516(%esp), %ebp
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ adcl 532(%esp), %edi
+ movl %edi, 168(%esp) # 4-byte Spill
+ movl 136(%esp), %edi # 4-byte Reload
+ adcl 536(%esp), %edi
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 120(%esp) # 4-byte Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 432(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 432(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 440(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 444(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ adcl 448(%esp), %ebp
+ movl %ebp, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %ecx # 4-byte Reload
+ adcl 452(%esp), %ecx
+ movl %ecx, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %ebp # 4-byte Reload
+ adcl 456(%esp), %ebp
+ movl 164(%esp), %ecx # 4-byte Reload
+ adcl 460(%esp), %ecx
+ movl %ecx, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %ecx # 4-byte Reload
+ adcl 464(%esp), %ecx
+ movl %ecx, 168(%esp) # 4-byte Spill
+ adcl 468(%esp), %edi
+ movl %edi, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %ecx # 4-byte Reload
+ adcl 472(%esp), %ecx
+ movl %ecx, 148(%esp) # 4-byte Spill
+ movl 156(%esp), %ecx # 4-byte Reload
+ adcl 476(%esp), %ecx
+ movl %ecx, 156(%esp) # 4-byte Spill
+ movl 152(%esp), %ecx # 4-byte Reload
+ adcl 480(%esp), %ecx
+ movl %ecx, 152(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 484(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 140(%esp), %ecx # 4-byte Reload
+ adcl 488(%esp), %ecx
+ movl %ecx, 140(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %eax, %esi
+ movl 88(%esp), %edi # 4-byte Reload
+ imull %edi, %eax
+ movl %eax, (%esp)
+ leal 368(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 368(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 376(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %ecx # 4-byte Reload
+ adcl 380(%esp), %ecx
+ movl %ecx, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %esi # 4-byte Reload
+ adcl 384(%esp), %esi
+ adcl 388(%esp), %ebp
+ movl %ebp, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %ecx # 4-byte Reload
+ adcl 392(%esp), %ecx
+ movl %ecx, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %ecx # 4-byte Reload
+ adcl 396(%esp), %ecx
+ movl %ecx, 168(%esp) # 4-byte Spill
+ movl 136(%esp), %ecx # 4-byte Reload
+ adcl 400(%esp), %ecx
+ movl %ecx, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %ecx # 4-byte Reload
+ adcl 404(%esp), %ecx
+ movl %ecx, 148(%esp) # 4-byte Spill
+ movl 156(%esp), %ecx # 4-byte Reload
+ adcl 408(%esp), %ecx
+ movl %ecx, 156(%esp) # 4-byte Spill
+ movl 152(%esp), %ecx # 4-byte Reload
+ adcl 412(%esp), %ecx
+ movl %ecx, 152(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 416(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 140(%esp), %ecx # 4-byte Reload
+ adcl 420(%esp), %ecx
+ movl %ecx, 140(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 424(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %eax, %ebp
+ imull %edi, %eax
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 304(%esp), %ebp
+ movl 132(%esp), %edi # 4-byte Reload
+ adcl 308(%esp), %edi
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl %esi, %ebp
+ adcl 316(%esp), %ebp
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 136(%esp), %esi # 4-byte Reload
+ adcl 332(%esp), %esi
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %edi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 240(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 240(%esp), %edi
+ movl 144(%esp), %ecx # 4-byte Reload
+ adcl 244(%esp), %ecx
+ adcl 248(%esp), %ebp
+ movl %ebp, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ adcl 264(%esp), %esi
+ movl %esi, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %edi # 4-byte Reload
+ adcl 268(%esp), %edi
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 124(%esp), %ebp # 4-byte Reload
+ adcl 280(%esp), %ebp
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 176(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 176(%esp), %esi
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %ebx # 4-byte Reload
+ adcl 188(%esp), %ebx
+ movl %ebx, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 136(%esp), %edx # 4-byte Reload
+ adcl 196(%esp), %edx
+ movl %edx, 136(%esp) # 4-byte Spill
+ movl %edi, %eax
+ adcl 200(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ adcl 212(%esp), %ebp
+ movl %ebp, 124(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 232(%esp), %ecx
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ movl 172(%esp), %edi # 4-byte Reload
+ subl 16(%esp), %edi # 4-byte Folded Reload
+ movl 160(%esp), %ebp # 4-byte Reload
+ sbbl 8(%esp), %ebp # 4-byte Folded Reload
+ sbbl 12(%esp), %ebx # 4-byte Folded Reload
+ movl 168(%esp), %eax # 4-byte Reload
+ sbbl 20(%esp), %eax # 4-byte Folded Reload
+ sbbl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 148(%esp), %edx # 4-byte Reload
+ sbbl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 156(%esp), %edx # 4-byte Reload
+ sbbl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 152(%esp), %edx # 4-byte Reload
+ sbbl 36(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 124(%esp), %edx # 4-byte Reload
+ sbbl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 140(%esp), %edx # 4-byte Reload
+ sbbl 44(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 128(%esp), %edx # 4-byte Reload
+ sbbl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %edx # 4-byte Reload
+ sbbl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 116(%esp) # 4-byte Spill
+ movl 108(%esp), %edx # 4-byte Reload
+ sbbl 56(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 132(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ sbbl 60(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 144(%esp) # 4-byte Spill
+ sbbl $0, %esi
+ andl $1, %esi
+ jne .LBB212_2
+# BB#1:
+ movl %eax, 168(%esp) # 4-byte Spill
+.LBB212_2:
+ movl %esi, %edx
+ testb %dl, %dl
+ movl 172(%esp), %eax # 4-byte Reload
+ jne .LBB212_4
+# BB#3:
+ movl %edi, %eax
+.LBB212_4:
+ movl 1088(%esp), %edi
+ movl %eax, (%edi)
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ movl 160(%esp), %ecx # 4-byte Reload
+ jne .LBB212_6
+# BB#5:
+ movl %ebp, %ecx
+.LBB212_6:
+ movl %ecx, 4(%edi)
+ movl 108(%esp), %ecx # 4-byte Reload
+ movl 164(%esp), %ebp # 4-byte Reload
+ jne .LBB212_8
+# BB#7:
+ movl %ebx, %ebp
+.LBB212_8:
+ movl %ebp, 8(%edi)
+ movl 168(%esp), %ebx # 4-byte Reload
+ movl %ebx, 12(%edi)
+ movl 124(%esp), %ebp # 4-byte Reload
+ movl 136(%esp), %ebx # 4-byte Reload
+ jne .LBB212_10
+# BB#9:
+ movl 80(%esp), %ebx # 4-byte Reload
+.LBB212_10:
+ movl %ebx, 16(%edi)
+ movl 140(%esp), %ebx # 4-byte Reload
+ movl 148(%esp), %esi # 4-byte Reload
+ jne .LBB212_12
+# BB#11:
+ movl 84(%esp), %esi # 4-byte Reload
+.LBB212_12:
+ movl %esi, 20(%edi)
+ movl 128(%esp), %esi # 4-byte Reload
+ jne .LBB212_14
+# BB#13:
+ movl 88(%esp), %eax # 4-byte Reload
+.LBB212_14:
+ movl %eax, 24(%edi)
+ movl 120(%esp), %edx # 4-byte Reload
+ jne .LBB212_16
+# BB#15:
+ movl 92(%esp), %eax # 4-byte Reload
+ movl %eax, 152(%esp) # 4-byte Spill
+.LBB212_16:
+ movl 152(%esp), %eax # 4-byte Reload
+ movl %eax, 28(%edi)
+ jne .LBB212_18
+# BB#17:
+ movl 96(%esp), %ebp # 4-byte Reload
+.LBB212_18:
+ movl %ebp, 32(%edi)
+ jne .LBB212_20
+# BB#19:
+ movl 100(%esp), %ebx # 4-byte Reload
+.LBB212_20:
+ movl %ebx, 36(%edi)
+ jne .LBB212_22
+# BB#21:
+ movl 112(%esp), %esi # 4-byte Reload
+.LBB212_22:
+ movl %esi, 40(%edi)
+ jne .LBB212_24
+# BB#23:
+ movl 116(%esp), %edx # 4-byte Reload
+.LBB212_24:
+ movl %edx, 44(%edi)
+ jne .LBB212_26
+# BB#25:
+ movl 132(%esp), %ecx # 4-byte Reload
+.LBB212_26:
+ movl %ecx, 48(%edi)
+ movl 104(%esp), %eax # 4-byte Reload
+ jne .LBB212_28
+# BB#27:
+ movl 144(%esp), %eax # 4-byte Reload
+.LBB212_28:
+ movl %eax, 52(%edi)
+ addl $1068, %esp # imm = 0x42C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end212:
+ .size mcl_fp_montRed14Lbmi2, .Lfunc_end212-mcl_fp_montRed14Lbmi2
+
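+# mcl_fp_addPre14Lbmi2: 14-limb (448-bit) addition with no modular reduction;
+# the limbs are added with an addl/adcl chain and the carry-out is returned
+# in %eax via the final sbbl/andl pair.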
+ .globl mcl_fp_addPre14Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre14Lbmi2,@function
+mcl_fp_addPre14Lbmi2: # @mcl_fp_addPre14Lbmi2
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 20(%esp), %ecx
+ addl (%ecx), %edx
+ adcl 4(%ecx), %esi
+ movl 8(%eax), %edi
+ adcl 8(%ecx), %edi
+ movl 16(%esp), %ebx
+ movl %edx, (%ebx)
+ movl 12(%ecx), %edx
+ movl %esi, 4(%ebx)
+ movl 16(%ecx), %esi
+ adcl 12(%eax), %edx
+ adcl 16(%eax), %esi
+ movl %edi, 8(%ebx)
+ movl 20(%eax), %edi
+ movl %edx, 12(%ebx)
+ movl 20(%ecx), %edx
+ adcl %edi, %edx
+ movl 24(%eax), %edi
+ movl %esi, 16(%ebx)
+ movl 24(%ecx), %esi
+ adcl %edi, %esi
+ movl 28(%eax), %edi
+ movl %edx, 20(%ebx)
+ movl 28(%ecx), %edx
+ adcl %edi, %edx
+ movl 32(%eax), %edi
+ movl %esi, 24(%ebx)
+ movl 32(%ecx), %esi
+ adcl %edi, %esi
+ movl 36(%eax), %edi
+ movl %edx, 28(%ebx)
+ movl 36(%ecx), %edx
+ adcl %edi, %edx
+ movl 40(%eax), %edi
+ movl %esi, 32(%ebx)
+ movl 40(%ecx), %esi
+ adcl %edi, %esi
+ movl 44(%eax), %edi
+ movl %edx, 36(%ebx)
+ movl 44(%ecx), %edx
+ adcl %edi, %edx
+ movl 48(%eax), %edi
+ movl %esi, 40(%ebx)
+ movl 48(%ecx), %esi
+ adcl %edi, %esi
+ movl %edx, 44(%ebx)
+ movl %esi, 48(%ebx)
+ movl 52(%eax), %eax
+ movl 52(%ecx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 52(%ebx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end213:
+ .size mcl_fp_addPre14Lbmi2, .Lfunc_end213-mcl_fp_addPre14Lbmi2
+
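+# mcl_fp_subPre14Lbmi2: 14-limb subtraction with no modular reduction;
+# the limbs are subtracted with a subl/sbbl chain and the borrow is returned
+# in %eax via the final sbbl/andl pair.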
+ .globl mcl_fp_subPre14Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre14Lbmi2,@function
+mcl_fp_subPre14Lbmi2: # @mcl_fp_subPre14Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %eax, %eax
+ movl 28(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edx), %ebx
+ movl 20(%esp), %ebp
+ movl %esi, (%ebp)
+ movl 12(%ecx), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ebp)
+ movl 16(%ecx), %edi
+ sbbl 16(%edx), %edi
+ movl %ebx, 8(%ebp)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%ebp)
+ movl 20(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %edi, 16(%ebp)
+ movl 24(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 28(%edx), %ebx
+ movl %esi, 20(%ebp)
+ movl 28(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 32(%edx), %ebx
+ movl %edi, 24(%ebp)
+ movl 32(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 36(%edx), %ebx
+ movl %esi, 28(%ebp)
+ movl 36(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 40(%edx), %ebx
+ movl %edi, 32(%ebp)
+ movl 40(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 44(%edx), %ebx
+ movl %esi, 36(%ebp)
+ movl 44(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 48(%edx), %ebx
+ movl %edi, 40(%ebp)
+ movl 48(%ecx), %edi
+ sbbl %ebx, %edi
+ movl %esi, 44(%ebp)
+ movl %edi, 48(%ebp)
+ movl 52(%edx), %edx
+ movl 52(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 52(%ebp)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end214:
+ .size mcl_fp_subPre14Lbmi2, .Lfunc_end214-mcl_fp_subPre14Lbmi2
+
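+# mcl_fp_shr1_14Lbmi2: logical right shift by one bit of a 14-limb value;
+# each shrdl $1 pulls the next limb's low bit into the current limb, and the
+# top limb is finished with a plain shrl.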
+ .globl mcl_fp_shr1_14Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_14Lbmi2,@function
+mcl_fp_shr1_14Lbmi2: # @mcl_fp_shr1_14Lbmi2
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl 8(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 8(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 4(%ecx)
+ movl 12(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 8(%ecx)
+ movl 16(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 12(%ecx)
+ movl 20(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 16(%ecx)
+ movl 24(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 20(%ecx)
+ movl 28(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 24(%ecx)
+ movl 32(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 28(%ecx)
+ movl 36(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 32(%ecx)
+ movl 40(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 36(%ecx)
+ movl 44(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 40(%ecx)
+ movl 48(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 44(%ecx)
+ movl 52(%eax), %eax
+ shrdl $1, %eax, %edx
+ movl %edx, 48(%ecx)
+ shrl %eax
+ movl %eax, 52(%ecx)
+ popl %esi
+ retl
+.Lfunc_end215:
+ .size mcl_fp_shr1_14Lbmi2, .Lfunc_end215-mcl_fp_shr1_14Lbmi2
+
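+# mcl_fp_add14Lbmi2: modular addition. The raw 14-limb sum is stored first,
+# the modulus is then subtracted from a copy, and the reduced value overwrites
+# the output only when that subtraction does not borrow (the %nocarry path).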
+ .globl mcl_fp_add14Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add14Lbmi2,@function
+mcl_fp_add14Lbmi2: # @mcl_fp_add14Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $44, %esp
+ movl 72(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %ecx
+ movl 68(%esp), %ebp
+ addl (%ebp), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl 4(%ebp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 8(%eax), %ecx
+ adcl 8(%ebp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 12(%ebp), %edx
+ movl 16(%ebp), %ecx
+ adcl 12(%eax), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl 16(%eax), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 20(%ebp), %ecx
+ adcl 20(%eax), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 24(%ebp), %ecx
+ adcl 24(%eax), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 28(%ebp), %ecx
+ adcl 28(%eax), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 32(%ebp), %ecx
+ adcl 32(%eax), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 36(%ebp), %ecx
+ adcl 36(%eax), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 40(%ebp), %edx
+ adcl 40(%eax), %edx
+ movl %edx, (%esp) # 4-byte Spill
+ movl 44(%ebp), %ebx
+ adcl 44(%eax), %ebx
+ movl 48(%ebp), %esi
+ adcl 48(%eax), %esi
+ movl 52(%ebp), %edi
+ adcl 52(%eax), %edi
+ movl 64(%esp), %eax
+ movl 4(%esp), %ebp # 4-byte Reload
+ movl %ebp, (%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 8(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 16(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 20(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 8(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl %edx, 40(%eax)
+ movl %ebx, 44(%eax)
+ movl %esi, 48(%eax)
+ movl %edi, 52(%eax)
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl 76(%esp), %edx
+ subl (%edx), %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ sbbl 4(%edx), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ebp # 4-byte Reload
+ sbbl 8(%edx), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %ebp # 4-byte Reload
+ sbbl 12(%edx), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %ebp # 4-byte Reload
+ sbbl 16(%edx), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl 24(%esp), %ebp # 4-byte Reload
+ sbbl 20(%edx), %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 20(%esp), %ebp # 4-byte Reload
+ sbbl 24(%edx), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 16(%esp), %ebp # 4-byte Reload
+ sbbl 28(%edx), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %ebp # 4-byte Reload
+ sbbl 32(%edx), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 8(%esp), %ebp # 4-byte Reload
+ sbbl 36(%edx), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl (%esp), %ebp # 4-byte Reload
+ sbbl 40(%edx), %ebp
+ sbbl 44(%edx), %ebx
+ sbbl 48(%edx), %esi
+ sbbl 52(%edx), %edi
+ sbbl $0, %ecx
+ testb $1, %cl
+ jne .LBB216_2
+# BB#1: # %nocarry
+ movl 4(%esp), %ecx # 4-byte Reload
+ movl %ecx, (%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 8(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 16(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 20(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 8(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl %ebp, 40(%eax)
+ movl %ebx, 44(%eax)
+ movl %esi, 48(%eax)
+ movl %edi, 52(%eax)
+.LBB216_2: # %carry
+ addl $44, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end216:
+ .size mcl_fp_add14Lbmi2, .Lfunc_end216-mcl_fp_add14Lbmi2
+
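+# mcl_fp_addNF14Lbmi2: addition variant that avoids the explicit carry test:
+# after the add, sum - modulus is computed, and the sign of its top word
+# (sarl $31) selects, limb by limb via js branches, between the raw sum and
+# the subtracted value.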
+ .globl mcl_fp_addNF14Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF14Lbmi2,@function
+mcl_fp_addNF14Lbmi2: # @mcl_fp_addNF14Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $112, %esp
+ movl 140(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 136(%esp), %ecx
+ addl (%ecx), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ adcl 4(%ecx), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 52(%eax), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 48(%eax), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 44(%eax), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 40(%eax), %ebp
+ movl 36(%eax), %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 32(%eax), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 28(%eax), %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl 24(%eax), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 20(%eax), %ebx
+ movl 16(%eax), %edi
+ movl 12(%eax), %esi
+ movl 8(%eax), %edx
+ adcl 8(%ecx), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 12(%ecx), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ adcl 16(%ecx), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 20(%ecx), %ebx
+ movl %ebx, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 24(%ecx), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 28(%ecx), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 32(%ecx), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 36(%ecx), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 40(%ecx), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 44(%ecx), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl 48(%ecx), %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 52(%ecx), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 144(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ subl (%ecx), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ sbbl 4(%ecx), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ sbbl 8(%ecx), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ sbbl 12(%ecx), %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ sbbl 16(%ecx), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ sbbl 20(%ecx), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ movl %edx, %eax
+ sbbl 24(%ecx), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ sbbl 28(%ecx), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ sbbl 32(%ecx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ sbbl 36(%ecx), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ sbbl 40(%ecx), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ movl %eax, %esi
+ movl %eax, %ebp
+ sbbl 44(%ecx), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ movl %eax, %esi
+ sbbl 48(%ecx), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ movl %eax, %edi
+ sbbl 52(%ecx), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl %edi, %ecx
+ sarl $31, %ecx
+ testl %ecx, %ecx
+ movl 72(%esp), %ecx # 4-byte Reload
+ js .LBB217_2
+# BB#1:
+ movl (%esp), %ecx # 4-byte Reload
+.LBB217_2:
+ movl 132(%esp), %edi
+ movl %ecx, (%edi)
+ movl 76(%esp), %eax # 4-byte Reload
+ js .LBB217_4
+# BB#3:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB217_4:
+ movl %eax, 4(%edi)
+ movl %edx, %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ movl 56(%esp), %edx # 4-byte Reload
+ js .LBB217_6
+# BB#5:
+ movl 8(%esp), %edx # 4-byte Reload
+.LBB217_6:
+ movl %edx, 8(%edi)
+ movl %ebp, %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ movl 60(%esp), %ebp # 4-byte Reload
+ js .LBB217_8
+# BB#7:
+ movl 12(%esp), %ebp # 4-byte Reload
+.LBB217_8:
+ movl %ebp, 12(%edi)
+ movl 100(%esp), %ebp # 4-byte Reload
+ js .LBB217_10
+# BB#9:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB217_10:
+ movl %eax, 16(%edi)
+ movl 80(%esp), %esi # 4-byte Reload
+ js .LBB217_12
+# BB#11:
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+.LBB217_12:
+ movl 68(%esp), %eax # 4-byte Reload
+ movl %eax, 20(%edi)
+ js .LBB217_14
+# BB#13:
+ movl 24(%esp), %ecx # 4-byte Reload
+.LBB217_14:
+ movl %ecx, 24(%edi)
+ js .LBB217_16
+# BB#15:
+ movl 28(%esp), %eax # 4-byte Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+.LBB217_16:
+ movl 108(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%edi)
+ js .LBB217_18
+# BB#17:
+ movl 32(%esp), %ebp # 4-byte Reload
+.LBB217_18:
+ movl %ebp, 32(%edi)
+ js .LBB217_20
+# BB#19:
+ movl 36(%esp), %ebx # 4-byte Reload
+.LBB217_20:
+ movl %ebx, 36(%edi)
+ js .LBB217_22
+# BB#21:
+ movl 40(%esp), %esi # 4-byte Reload
+.LBB217_22:
+ movl %esi, 40(%edi)
+ movl 96(%esp), %eax # 4-byte Reload
+ js .LBB217_24
+# BB#23:
+ movl 44(%esp), %edx # 4-byte Reload
+.LBB217_24:
+ movl %edx, 44(%edi)
+ movl 92(%esp), %ecx # 4-byte Reload
+ js .LBB217_26
+# BB#25:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB217_26:
+ movl %eax, 48(%edi)
+ js .LBB217_28
+# BB#27:
+ movl 52(%esp), %ecx # 4-byte Reload
+.LBB217_28:
+ movl %ecx, 52(%edi)
+ addl $112, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end217:
+ .size mcl_fp_addNF14Lbmi2, .Lfunc_end217-mcl_fp_addNF14Lbmi2
+
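+# mcl_fp_sub14Lbmi2: modular subtraction. The 14-limb difference is stored,
+# and when the subtraction borrows (the %carry path) the modulus is added
+# back in before returning.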
+ .globl mcl_fp_sub14Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub14Lbmi2,@function
+mcl_fp_sub14Lbmi2: # @mcl_fp_sub14Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $52, %esp
+ movl 76(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+ xorl %ebx, %ebx
+ movl 80(%esp), %edi
+ subl (%edi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ sbbl 4(%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ sbbl 12(%edi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ sbbl 16(%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ sbbl 20(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ sbbl 24(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 28(%esi), %eax
+ sbbl 28(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 32(%esi), %eax
+ sbbl 32(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 36(%esi), %edx
+ sbbl 36(%edi), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 40(%esi), %ecx
+ sbbl 40(%edi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 44(%esi), %eax
+ sbbl 44(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 48(%esi), %ebp
+ sbbl 48(%edi), %ebp
+ movl 52(%esi), %esi
+ sbbl 52(%edi), %esi
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 72(%esp), %ebx
+ movl 44(%esp), %edi # 4-byte Reload
+ movl %edi, (%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 48(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl 40(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%ebx)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%ebx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%ebx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 28(%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 32(%ebx)
+ movl %edx, 36(%ebx)
+ movl %ecx, 40(%ebx)
+ movl %eax, 44(%ebx)
+ movl %ebp, 48(%ebx)
+ movl %esi, 52(%ebx)
+ je .LBB218_2
+# BB#1: # %carry
+ movl %esi, (%esp) # 4-byte Spill
+ movl 84(%esp), %esi
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl (%esi), %ecx
+ movl %ecx, (%ebx)
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 8(%esi), %edi
+ movl 12(%esi), %eax
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %edi, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl 24(%esi), %ecx
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl 28(%esi), %eax
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%ebx)
+ movl 32(%esi), %ecx
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+ movl 36(%esi), %eax
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%ebx)
+ movl 40(%esi), %ecx
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%ebx)
+ movl 44(%esi), %eax
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 40(%ebx)
+ movl %eax, 44(%ebx)
+ movl 48(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 48(%ebx)
+ movl 52(%esi), %eax
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%ebx)
+.LBB218_2: # %nocarry
+ addl $52, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end218:
+ .size mcl_fp_sub14Lbmi2, .Lfunc_end218-mcl_fp_sub14Lbmi2
+
+ .globl mcl_fp_subNF14Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF14Lbmi2,@function
+mcl_fp_subNF14Lbmi2: # @mcl_fp_subNF14Lbmi2
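+# Editorial note (inferred, not generated): branchless variant of the 14-limb
+# subtraction; after the limb-wise sbbl chain, sarl $31 of the top limb builds an
+# all-ones/all-zeros mask that selects the modulus, which is then added
+# unconditionally instead of taking a conditional branch.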
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $88, %esp
+ movl 112(%esp), %ecx
+ movl 52(%ecx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl (%ecx), %edx
+ movl 4(%ecx), %eax
+ movl 116(%esp), %edi
+ subl (%edi), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ sbbl 4(%edi), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%ecx), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 44(%ecx), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 40(%ecx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 32(%ecx), %ebp
+ movl 28(%ecx), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 24(%ecx), %ebx
+ movl 20(%ecx), %esi
+ movl 16(%ecx), %edx
+ movl 12(%ecx), %eax
+ movl 8(%ecx), %ecx
+ sbbl 8(%edi), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ sbbl 12(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl 16(%edi), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ sbbl 20(%edi), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ sbbl 24(%edi), %ebx
+ movl %ebx, 48(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ sbbl 28(%edi), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ sbbl 32(%edi), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ sbbl 36(%edi), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ sbbl 44(%edi), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ sbbl 48(%edi), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ sbbl 52(%edi), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %eax, %esi
+ sarl $31, %esi
+ movl %esi, %ecx
+ addl %ecx, %ecx
+ movl %esi, %ebp
+ adcl %ebp, %ebp
+ shrl $31, %eax
+ orl %ecx, %eax
+ movl 120(%esp), %edi
+ andl 4(%edi), %ebp
+ andl (%edi), %eax
+ movl 52(%edi), %ecx
+ andl %esi, %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 48(%edi), %ecx
+ andl %esi, %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 44(%edi), %ecx
+ andl %esi, %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 40(%edi), %ecx
+ andl %esi, %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 36(%edi), %ecx
+ andl %esi, %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 32(%edi), %ecx
+ andl %esi, %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 28(%edi), %ecx
+ andl %esi, %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 24(%edi), %ecx
+ andl %esi, %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 20(%edi), %ebx
+ andl %esi, %ebx
+ movl 16(%edi), %edx
+ andl %esi, %edx
+ movl 12(%edi), %ecx
+ andl %esi, %ecx
+ andl 8(%edi), %esi
+ addl 56(%esp), %eax # 4-byte Folded Reload
+ adcl 60(%esp), %ebp # 4-byte Folded Reload
+ movl 108(%esp), %edi
+ movl %eax, (%edi)
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %ebp, 4(%edi)
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %esi, 8(%edi)
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 12(%edi)
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %edx, 16(%edi)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %ebx, 20(%edi)
+ movl 4(%esp), %ecx # 4-byte Reload
+ adcl 84(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 24(%edi)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 28(%edi)
+ movl 12(%esp), %ecx # 4-byte Reload
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 32(%edi)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 36(%edi)
+ movl 20(%esp), %ecx # 4-byte Reload
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 40(%edi)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 44(%edi)
+ movl %eax, 48(%edi)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%edi)
+ addl $88, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end219:
+ .size mcl_fp_subNF14Lbmi2, .Lfunc_end219-mcl_fp_subNF14Lbmi2
+
+ .globl mcl_fpDbl_add14Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add14Lbmi2,@function
+mcl_fpDbl_add14Lbmi2: # @mcl_fpDbl_add14Lbmi2
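+# Editorial note (inferred, not generated): double-width (28-limb) addition; the
+# low 14 limbs are stored directly, while the high half is speculatively reduced
+# by subtracting the modulus and the non-borrowing value is kept.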
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $100, %esp
+ movl 128(%esp), %ecx
+ movl 124(%esp), %esi
+ movl 12(%esi), %edi
+ movl 16(%esi), %edx
+ movl 8(%ecx), %ebx
+ movl (%ecx), %ebp
+ addl (%esi), %ebp
+ movl 120(%esp), %eax
+ movl %ebp, (%eax)
+ movl 4(%ecx), %ebp
+ adcl 4(%esi), %ebp
+ adcl 8(%esi), %ebx
+ adcl 12(%ecx), %edi
+ adcl 16(%ecx), %edx
+ movl %ebp, 4(%eax)
+ movl 64(%ecx), %ebp
+ movl %ebx, 8(%eax)
+ movl 20(%ecx), %ebx
+ movl %edi, 12(%eax)
+ movl 20(%esi), %edi
+ adcl %ebx, %edi
+ movl 24(%ecx), %ebx
+ movl %edx, 16(%eax)
+ movl 24(%esi), %edx
+ adcl %ebx, %edx
+ movl 28(%ecx), %ebx
+ movl %edi, 20(%eax)
+ movl 28(%esi), %edi
+ adcl %ebx, %edi
+ movl 32(%ecx), %ebx
+ movl %edx, 24(%eax)
+ movl 32(%esi), %edx
+ adcl %ebx, %edx
+ movl 36(%ecx), %ebx
+ movl %edi, 28(%eax)
+ movl 36(%esi), %edi
+ adcl %ebx, %edi
+ movl 40(%ecx), %ebx
+ movl %edx, 32(%eax)
+ movl 40(%esi), %edx
+ adcl %ebx, %edx
+ movl 44(%ecx), %ebx
+ movl %edi, 36(%eax)
+ movl 44(%esi), %edi
+ adcl %ebx, %edi
+ movl 48(%ecx), %ebx
+ movl %edx, 40(%eax)
+ movl 48(%esi), %edx
+ adcl %ebx, %edx
+ movl 52(%ecx), %ebx
+ movl %edi, 44(%eax)
+ movl 52(%esi), %edi
+ adcl %ebx, %edi
+ movl 56(%ecx), %ebx
+ movl %edx, 48(%eax)
+ movl 56(%esi), %edx
+ adcl %ebx, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 60(%ecx), %edx
+ movl %edi, 52(%eax)
+ movl 60(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 64(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%ecx), %edx
+ movl 68(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%ecx), %edx
+ movl 72(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 76(%ecx), %edx
+ movl 76(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%ecx), %edx
+ movl 80(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%ecx), %edx
+ movl 84(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 88(%ecx), %edx
+ movl 88(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 92(%ecx), %edx
+ movl 92(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 96(%ecx), %edx
+ movl 96(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 100(%ecx), %edx
+ movl 100(%esi), %edi
+ adcl %edx, %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 104(%ecx), %edx
+ movl 104(%esi), %ebx
+ adcl %edx, %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 108(%ecx), %ecx
+ movl 108(%esi), %esi
+ adcl %ecx, %esi
+ sbbl %edx, %edx
+ andl $1, %edx
+ movl 132(%esp), %ebp
+ movl 72(%esp), %ecx # 4-byte Reload
+ subl (%ebp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ sbbl 4(%ebp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ sbbl 8(%ebp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ sbbl 12(%ebp), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ sbbl 16(%ebp), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ sbbl 20(%ebp), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ sbbl 24(%ebp), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ sbbl 28(%ebp), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ sbbl 32(%ebp), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ sbbl 36(%ebp), %ecx
+ sbbl 40(%ebp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl 44(%ebp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ movl %esi, %ebx
+ sbbl 48(%ebp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ sbbl 52(%ebp), %esi
+ sbbl $0, %edx
+ andl $1, %edx
+ jne .LBB220_2
+# BB#1:
+ movl %esi, %ebx
+.LBB220_2:
+ testb %dl, %dl
+ movl 72(%esp), %eax # 4-byte Reload
+ movl 68(%esp), %edx # 4-byte Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ movl 60(%esp), %ebp # 4-byte Reload
+ jne .LBB220_4
+# BB#3:
+ movl %ecx, %edx
+ movl (%esp), %edi # 4-byte Reload
+ movl 4(%esp), %ebp # 4-byte Reload
+ movl 8(%esp), %eax # 4-byte Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB220_4:
+ movl 120(%esp), %esi
+ movl %eax, 56(%esi)
+ movl 76(%esp), %eax # 4-byte Reload
+ movl %eax, 60(%esi)
+ movl 80(%esp), %eax # 4-byte Reload
+ movl %eax, 64(%esi)
+ movl 84(%esp), %eax # 4-byte Reload
+ movl %eax, 68(%esi)
+ movl 88(%esp), %eax # 4-byte Reload
+ movl %eax, 72(%esi)
+ movl 92(%esp), %eax # 4-byte Reload
+ movl %eax, 76(%esi)
+ movl 96(%esp), %eax # 4-byte Reload
+ movl %eax, 80(%esi)
+ movl %ebp, 84(%esi)
+ movl %edi, 88(%esi)
+ movl %edx, 92(%esi)
+ movl 52(%esp), %edx # 4-byte Reload
+ movl 48(%esp), %eax # 4-byte Reload
+ jne .LBB220_6
+# BB#5:
+ movl 36(%esp), %eax # 4-byte Reload
+.LBB220_6:
+ movl %eax, 96(%esi)
+ movl 56(%esp), %ecx # 4-byte Reload
+ jne .LBB220_8
+# BB#7:
+ movl 40(%esp), %edx # 4-byte Reload
+.LBB220_8:
+ movl %edx, 100(%esi)
+ jne .LBB220_10
+# BB#9:
+ movl 44(%esp), %ecx # 4-byte Reload
+.LBB220_10:
+ movl %ecx, 104(%esi)
+ movl %ebx, 108(%esi)
+ addl $100, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end220:
+ .size mcl_fpDbl_add14Lbmi2, .Lfunc_end220-mcl_fpDbl_add14Lbmi2
+
+ .globl mcl_fpDbl_sub14Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub14Lbmi2,@function
+mcl_fpDbl_sub14Lbmi2: # @mcl_fpDbl_sub14Lbmi2
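+# Editorial note (inferred, not generated): double-width (28-limb) subtraction;
+# if the limb-wise sbbl chain borrows, the modulus (or zero otherwise) is added
+# back into the high 14 limbs before they are stored.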
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $96, %esp
+ movl 120(%esp), %ebx
+ movl (%ebx), %eax
+ movl 4(%ebx), %edx
+ movl 124(%esp), %ebp
+ subl (%ebp), %eax
+ sbbl 4(%ebp), %edx
+ movl 8(%ebx), %esi
+ sbbl 8(%ebp), %esi
+ movl 116(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 12(%ebx), %eax
+ sbbl 12(%ebp), %eax
+ movl %edx, 4(%ecx)
+ movl 16(%ebx), %edx
+ sbbl 16(%ebp), %edx
+ movl %esi, 8(%ecx)
+ movl 20(%ebp), %esi
+ movl %eax, 12(%ecx)
+ movl 20(%ebx), %eax
+ sbbl %esi, %eax
+ movl 24(%ebp), %esi
+ movl %edx, 16(%ecx)
+ movl 24(%ebx), %edx
+ sbbl %esi, %edx
+ movl 28(%ebp), %esi
+ movl %eax, 20(%ecx)
+ movl 28(%ebx), %eax
+ sbbl %esi, %eax
+ movl 32(%ebp), %esi
+ movl %edx, 24(%ecx)
+ movl 32(%ebx), %edx
+ sbbl %esi, %edx
+ movl 36(%ebp), %esi
+ movl %eax, 28(%ecx)
+ movl 36(%ebx), %eax
+ sbbl %esi, %eax
+ movl 40(%ebp), %esi
+ movl %edx, 32(%ecx)
+ movl 40(%ebx), %edx
+ sbbl %esi, %edx
+ movl 44(%ebp), %esi
+ movl %eax, 36(%ecx)
+ movl 44(%ebx), %eax
+ sbbl %esi, %eax
+ movl 48(%ebp), %esi
+ movl %edx, 40(%ecx)
+ movl 48(%ebx), %edx
+ sbbl %esi, %edx
+ movl 52(%ebp), %esi
+ movl %eax, 44(%ecx)
+ movl 52(%ebx), %eax
+ sbbl %esi, %eax
+ movl 56(%ebp), %esi
+ movl %edx, 48(%ecx)
+ movl 56(%ebx), %edx
+ sbbl %esi, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 60(%ebp), %edx
+ movl %eax, 52(%ecx)
+ movl 60(%ebx), %eax
+ sbbl %edx, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 64(%ebp), %eax
+ movl 64(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 68(%ebp), %eax
+ movl 68(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 72(%ebp), %eax
+ movl 72(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 76(%ebp), %eax
+ movl 76(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 80(%ebp), %eax
+ movl 80(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 84(%ebp), %eax
+ movl 84(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 88(%ebp), %eax
+ movl 88(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 92(%ebp), %eax
+ movl 92(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 96(%ebp), %eax
+ movl 96(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 100(%ebp), %eax
+ movl 100(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 104(%ebp), %eax
+ movl 104(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 108(%ebp), %eax
+ movl 108(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 128(%esp), %ebp
+ jne .LBB221_1
+# BB#2:
+ movl $0, 56(%esp) # 4-byte Folded Spill
+ jmp .LBB221_3
+.LBB221_1:
+ movl 52(%ebp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+.LBB221_3:
+ testb %al, %al
+ jne .LBB221_4
+# BB#5:
+ movl $0, 24(%esp) # 4-byte Folded Spill
+ movl $0, 20(%esp) # 4-byte Folded Spill
+ jmp .LBB221_6
+.LBB221_4:
+ movl (%ebp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 4(%ebp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+.LBB221_6:
+ jne .LBB221_7
+# BB#8:
+ movl $0, 32(%esp) # 4-byte Folded Spill
+ jmp .LBB221_9
+.LBB221_7:
+ movl 48(%ebp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+.LBB221_9:
+ jne .LBB221_10
+# BB#11:
+ movl $0, 28(%esp) # 4-byte Folded Spill
+ jmp .LBB221_12
+.LBB221_10:
+ movl 44(%ebp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+.LBB221_12:
+ jne .LBB221_13
+# BB#14:
+ movl $0, 16(%esp) # 4-byte Folded Spill
+ jmp .LBB221_15
+.LBB221_13:
+ movl 40(%ebp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+.LBB221_15:
+ jne .LBB221_16
+# BB#17:
+ movl $0, 12(%esp) # 4-byte Folded Spill
+ jmp .LBB221_18
+.LBB221_16:
+ movl 36(%ebp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+.LBB221_18:
+ jne .LBB221_19
+# BB#20:
+ movl $0, 8(%esp) # 4-byte Folded Spill
+ jmp .LBB221_21
+.LBB221_19:
+ movl 32(%ebp), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+.LBB221_21:
+ jne .LBB221_22
+# BB#23:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB221_24
+.LBB221_22:
+ movl 28(%ebp), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+.LBB221_24:
+ jne .LBB221_25
+# BB#26:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB221_27
+.LBB221_25:
+ movl 24(%ebp), %eax
+ movl %eax, (%esp) # 4-byte Spill
+.LBB221_27:
+ jne .LBB221_28
+# BB#29:
+ movl $0, %esi
+ jmp .LBB221_30
+.LBB221_28:
+ movl 20(%ebp), %esi
+.LBB221_30:
+ jne .LBB221_31
+# BB#32:
+ movl $0, %edi
+ jmp .LBB221_33
+.LBB221_31:
+ movl 16(%ebp), %edi
+.LBB221_33:
+ jne .LBB221_34
+# BB#35:
+ movl $0, %ebx
+ jmp .LBB221_36
+.LBB221_34:
+ movl 12(%ebp), %ebx
+.LBB221_36:
+ jne .LBB221_37
+# BB#38:
+ xorl %ebp, %ebp
+ jmp .LBB221_39
+.LBB221_37:
+ movl 8(%ebp), %ebp
+.LBB221_39:
+ movl 20(%esp), %edx # 4-byte Reload
+ addl 44(%esp), %edx # 4-byte Folded Reload
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 56(%ecx)
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 60(%ecx)
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, 64(%ecx)
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 68(%ecx)
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %edi, 72(%ecx)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %esi, 76(%ecx)
+ movl 4(%esp), %edx # 4-byte Reload
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 80(%ecx)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 84(%ecx)
+ movl 12(%esp), %edx # 4-byte Reload
+ adcl 76(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 88(%ecx)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 92(%ecx)
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl 84(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 96(%ecx)
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 100(%ecx)
+ movl %eax, 104(%ecx)
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%ecx)
+ addl $96, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end221:
+ .size mcl_fpDbl_sub14Lbmi2, .Lfunc_end221-mcl_fpDbl_sub14Lbmi2
+
+ .align 16, 0x90
+ .type .LmulPv480x32,@function
+.LmulPv480x32: # @mulPv480x32
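+# Editorial note (inferred, not generated): local helper that multiplies the
+# 15-limb (480-bit) operand pointed to by %edx with the 32-bit scalar passed on
+# the stack, using mulx, and writes the 16-limb product to the buffer in %ecx.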
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $52, %esp
+ movl %edx, %eax
+ movl 72(%esp), %edi
+ movl %edi, %edx
+ mulxl 4(%eax), %ebx, %esi
+ movl %edi, %edx
+ mulxl (%eax), %ebp, %edx
+ movl %ebp, 48(%esp) # 4-byte Spill
+ addl %ebx, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 8(%eax), %edx, %ebx
+ adcl %esi, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 12(%eax), %edx, %esi
+ adcl %ebx, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 16(%eax), %edx, %ebx
+ adcl %esi, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 20(%eax), %edx, %esi
+ adcl %ebx, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 24(%eax), %edx, %ebx
+ adcl %esi, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 28(%eax), %edx, %esi
+ adcl %ebx, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 32(%eax), %edx, %ebx
+ adcl %esi, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 36(%eax), %edx, %esi
+ adcl %ebx, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 40(%eax), %edx, %ebp
+ adcl %esi, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 44(%eax), %ebx, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl %ebp, %ebx
+ movl %edi, %edx
+ mulxl 48(%eax), %esi, %edx
+ movl %edx, (%esp) # 4-byte Spill
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ movl %edi, %edx
+ mulxl 52(%eax), %edx, %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ adcl (%esp), %edx # 4-byte Folded Reload
+ movl 48(%esp), %ebp # 4-byte Reload
+ movl %ebp, (%ecx)
+ movl 44(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%ecx)
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%ecx)
+ movl 36(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%ecx)
+ movl 32(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%ecx)
+ movl 28(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%ecx)
+ movl 24(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%ecx)
+ movl 20(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%ecx)
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, 32(%ecx)
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl %ebp, 36(%ecx)
+ movl 8(%esp), %ebp # 4-byte Reload
+ movl %ebp, 40(%ecx)
+ movl %ebx, 44(%ecx)
+ movl %esi, 48(%ecx)
+ movl %edx, 52(%ecx)
+ movl %edi, %edx
+ mulxl 56(%eax), %eax, %edx
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%ecx)
+ adcl $0, %edx
+ movl %edx, 60(%ecx)
+ movl %ecx, %eax
+ addl $52, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end222:
+ .size .LmulPv480x32, .Lfunc_end222-.LmulPv480x32
+
+ .globl mcl_fp_mulUnitPre15Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre15Lbmi2,@function
+mcl_fp_mulUnitPre15Lbmi2: # @mcl_fp_mulUnitPre15Lbmi2
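+# Editorial note (inferred, not generated): multiplies a 15-limb value by a
+# 32-bit unit via .LmulPv480x32 and copies the resulting 16 limbs to the
+# destination buffer.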
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $124, %esp
+ calll .L223$pb
+.L223$pb:
+ popl %ebx
+.Ltmp44:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp44-.L223$pb), %ebx
+ movl 152(%esp), %eax
+ movl %eax, (%esp)
+ leal 56(%esp), %ecx
+ movl 148(%esp), %edx
+ calll .LmulPv480x32
+ movl 116(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 112(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 108(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 104(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 88(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 80(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp
+ movl 72(%esp), %ebx
+ movl 68(%esp), %edi
+ movl 64(%esp), %esi
+ movl 56(%esp), %edx
+ movl 60(%esp), %ecx
+ movl 144(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %edi, 12(%eax)
+ movl %ebx, 16(%eax)
+ movl %ebp, 20(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%eax)
+ movl 48(%esp), %ecx # 4-byte Reload
+ movl %ecx, 56(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ movl %ecx, 60(%eax)
+ addl $124, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end223:
+ .size mcl_fp_mulUnitPre15Lbmi2, .Lfunc_end223-mcl_fp_mulUnitPre15Lbmi2
+
+ .globl mcl_fpDbl_mulPre15Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre15Lbmi2,@function
+mcl_fpDbl_mulPre15Lbmi2: # @mcl_fpDbl_mulPre15Lbmi2
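+# Editorial note (inferred, not generated): schoolbook 15x15-limb multiplication;
+# each limb of the second operand is fed to .LmulPv480x32 and the partial
+# products are accumulated with adcl into the 30-limb result.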
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1084, %esp # imm = 0x43C
+ calll .L224$pb
+.L224$pb:
+ popl %esi
+.Ltmp45:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp45-.L224$pb), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 1112(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1016(%esp), %ecx
+ movl 1108(%esp), %edi
+ movl %edi, %edx
+ movl %esi, %ebx
+ calll .LmulPv480x32
+ movl 1076(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 1072(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 1068(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 1060(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 1056(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1052(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1044(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1040(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1036(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 1032(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 1028(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 1024(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1016(%esp), %eax
+ movl 1020(%esp), %ebp
+ movl 1104(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 1112(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 952(%esp), %ecx
+ movl %edi, %edx
+ movl %esi, %ebx
+ calll .LmulPv480x32
+ addl 952(%esp), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 1012(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1008(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 1004(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1000(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 996(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 992(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 988(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 984(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 980(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 976(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 972(%esp), %edi
+ movl 968(%esp), %esi
+ movl 964(%esp), %edx
+ movl 956(%esp), %eax
+ movl 960(%esp), %ecx
+ movl 1104(%esp), %ebp
+ movl 16(%esp), %ebx # 4-byte Reload
+ movl %ebx, 4(%ebp)
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 888(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 72(%esp), %eax # 4-byte Reload
+ addl 888(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 948(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 944(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 940(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 936(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 932(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 928(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 924(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 920(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 916(%esp), %ebx
+ movl 912(%esp), %edi
+ movl 908(%esp), %esi
+ movl 904(%esp), %edx
+ movl 900(%esp), %ecx
+ movl 892(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 896(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 72(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%eax)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 112(%esp) # 4-byte Folded Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 68(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 824(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 824(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 884(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 880(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 876(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 872(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 868(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 864(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 860(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 856(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 852(%esp), %ebx
+ movl 848(%esp), %edi
+ movl 844(%esp), %esi
+ movl 840(%esp), %edx
+ movl 836(%esp), %ecx
+ movl 828(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 832(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 760(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 760(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 820(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 816(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 812(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 808(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 804(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 800(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 796(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 792(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 788(%esp), %ebx
+ movl 784(%esp), %edi
+ movl 780(%esp), %esi
+ movl 776(%esp), %edx
+ movl 772(%esp), %ecx
+ movl 764(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 768(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 60(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%eax)
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 696(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 696(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 756(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 752(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 748(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 744(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 740(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 736(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 732(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 724(%esp), %ebx
+ movl 720(%esp), %edi
+ movl 716(%esp), %esi
+ movl 712(%esp), %edx
+ movl 708(%esp), %ecx
+ movl 700(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 704(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 632(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 692(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 688(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 684(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 680(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 676(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 672(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 668(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 664(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 660(%esp), %ebx
+ movl 656(%esp), %edi
+ movl 652(%esp), %esi
+ movl 648(%esp), %edx
+ movl 644(%esp), %ecx
+ movl 636(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 640(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%eax)
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 64(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 568(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 628(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 624(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 620(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 616(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 612(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 608(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 604(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 600(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 596(%esp), %ebx
+ movl 592(%esp), %edi
+ movl 588(%esp), %esi
+ movl 584(%esp), %edx
+ movl 580(%esp), %ecx
+ movl 572(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 576(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 504(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 564(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 560(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 556(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 552(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 548(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 544(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 540(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 536(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 532(%esp), %ebx
+ movl 528(%esp), %edi
+ movl 524(%esp), %esi
+ movl 520(%esp), %edx
+ movl 516(%esp), %ecx
+ movl 508(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 512(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 32(%eax)
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 440(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 440(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 500(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 496(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 492(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 488(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 484(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 480(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 476(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 472(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 468(%esp), %ebx
+ movl 464(%esp), %edi
+ movl 460(%esp), %esi
+ movl 456(%esp), %edx
+ movl 452(%esp), %ecx
+ movl 444(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 448(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 36(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 376(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 376(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 436(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 432(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 428(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 424(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 420(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 416(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 412(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 408(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 404(%esp), %ebx
+ movl 400(%esp), %edi
+ movl 396(%esp), %esi
+ movl 392(%esp), %edx
+ movl 388(%esp), %ecx
+ movl 380(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 384(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 40(%eax)
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 312(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 364(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 360(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 356(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 352(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 348(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 344(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 340(%esp), %ebx
+ movl 336(%esp), %edi
+ movl 332(%esp), %esi
+ movl 328(%esp), %edx
+ movl 324(%esp), %ecx
+ movl 316(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 320(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 44(%eax)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl %eax, 108(%esp) # 4-byte Folded Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 48(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 248(%esp), %ecx
+ movl 1108(%esp), %eax
+ movl %eax, %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 108(%esp), %eax # 4-byte Reload
+ addl 248(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 308(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 304(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 300(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 296(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 292(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 288(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 284(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 280(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 276(%esp), %ebx
+ movl 272(%esp), %edi
+ movl 268(%esp), %edx
+ movl 264(%esp), %ecx
+ movl 260(%esp), %eax
+ movl 252(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 256(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ movl 1104(%esp), %ebp
+ movl %esi, 48(%ebp)
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 20(%esp), %esi # 4-byte Reload
+ adcl %esi, 36(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 32(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 184(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 244(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 240(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 236(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 232(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 228(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 224(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 220(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 212(%esp), %ebx
+ movl 208(%esp), %edx
+ movl 204(%esp), %ecx
+ movl 200(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 196(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 188(%esp), %eax
+ movl 192(%esp), %esi
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl 1104(%esp), %edi
+ movl %ebp, 52(%edi)
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl %esi, %ebp
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl 24(%esp), %edi # 4-byte Reload
+ adcl %edi, 40(%esp) # 4-byte Folded Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 52(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 64(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 56(%eax), %eax
+ movl %eax, (%esp)
+ leal 120(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 120(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 124(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ adcl 128(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 176(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 168(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 164(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 156(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 152(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 148(%esp), %ebp
+ movl 144(%esp), %edi
+ movl 140(%esp), %esi
+ movl 136(%esp), %edx
+ movl 132(%esp), %ecx
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebx # 4-byte Reload
+ movl %ebx, 56(%eax)
+ movl 32(%esp), %ebx # 4-byte Reload
+ movl %ebx, 60(%eax)
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl 72(%esp), %ebx # 4-byte Reload
+ movl %ebx, 64(%eax)
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 68(%eax)
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 72(%eax)
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 76(%eax)
+ adcl 76(%esp), %ebp # 4-byte Folded Reload
+ movl %edi, 80(%eax)
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %ebp, 84(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 84(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 88(%eax)
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 92(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 92(%eax)
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 104(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 96(%eax)
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 108(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 100(%eax)
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 104(%eax)
+ movl %ecx, 108(%eax)
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 112(%eax)
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 116(%eax)
+ addl $1084, %esp # imm = 0x43C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end224:
+ .size mcl_fpDbl_mulPre15Lbmi2, .Lfunc_end224-mcl_fpDbl_mulPre15Lbmi2
+
+ .globl mcl_fpDbl_sqrPre15Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre15Lbmi2,@function
+mcl_fpDbl_sqrPre15Lbmi2: # @mcl_fpDbl_sqrPre15Lbmi2
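+# Presumed semantics (from mcl naming conventions, not stated in this file):
+# double-width squaring of a 15-limb (480-bit) operand into a 30-limb result,
+# without modular reduction; the "Lbmi2" suffix marks the BMI2 (mulx) code path.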
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1084, %esp # imm = 0x43C
+ calll .L225$pb
+.L225$pb:
+ popl %ebx
+.Ltmp46:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp46-.L225$pb), %ebx
+ movl %ebx, 116(%esp) # 4-byte Spill
+ movl 1108(%esp), %edx
+ movl (%edx), %eax
+ movl %eax, (%esp)
+ leal 1016(%esp), %ecx
+ movl %edx, %edi
+ movl %ebx, %esi
+ calll .LmulPv480x32
+ movl 1076(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 1072(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 1068(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 1060(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 1056(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1052(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1044(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1040(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1036(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 1032(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 1028(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 1024(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1016(%esp), %eax
+ movl 1020(%esp), %ebp
+ movl 1104(%esp), %ecx
+ movl %eax, (%ecx)
+ movl %edi, %edx
+ movl 4(%edx), %eax
+ movl %eax, (%esp)
+ leal 952(%esp), %ecx
+ movl %esi, %ebx
+ calll .LmulPv480x32
+ addl 952(%esp), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 1012(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1008(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 1004(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1000(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 996(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 992(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 988(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 984(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 980(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 976(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 972(%esp), %edi
+ movl 968(%esp), %esi
+ movl 964(%esp), %edx
+ movl 956(%esp), %eax
+ movl 960(%esp), %ecx
+ movl 1104(%esp), %ebp
+ movl 16(%esp), %ebx # 4-byte Reload
+ movl %ebx, 4(%ebp)
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 8(%edx), %eax
+ movl %eax, (%esp)
+ leal 888(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 72(%esp), %eax # 4-byte Reload
+ addl 888(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 948(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 944(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 940(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 936(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 932(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 928(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 924(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 920(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 916(%esp), %ebx
+ movl 912(%esp), %edi
+ movl 908(%esp), %esi
+ movl 904(%esp), %edx
+ movl 900(%esp), %ecx
+ movl 892(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 896(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 72(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%eax)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 112(%esp) # 4-byte Folded Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 68(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 12(%edx), %eax
+ movl %eax, (%esp)
+ leal 824(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 824(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 884(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 880(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 876(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 872(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 868(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 864(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 860(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 856(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 852(%esp), %ebx
+ movl 848(%esp), %edi
+ movl 844(%esp), %esi
+ movl 840(%esp), %edx
+ movl 836(%esp), %ecx
+ movl 828(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 832(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 16(%edx), %eax
+ movl %eax, (%esp)
+ leal 760(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 760(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 820(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 816(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 812(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 808(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 804(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 800(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 796(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 792(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 788(%esp), %ebx
+ movl 784(%esp), %edi
+ movl 780(%esp), %esi
+ movl 776(%esp), %edx
+ movl 772(%esp), %ecx
+ movl 764(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 768(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 60(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%eax)
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 20(%edx), %eax
+ movl %eax, (%esp)
+ leal 696(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 696(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 756(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 752(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 748(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 744(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 740(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 736(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 732(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 724(%esp), %ebx
+ movl 720(%esp), %edi
+ movl 716(%esp), %esi
+ movl 712(%esp), %edx
+ movl 708(%esp), %ecx
+ movl 700(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 704(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 24(%edx), %eax
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 632(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 692(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 688(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 684(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 680(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 676(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 672(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 668(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 664(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 660(%esp), %ebx
+ movl 656(%esp), %edi
+ movl 652(%esp), %esi
+ movl 648(%esp), %edx
+ movl 644(%esp), %ecx
+ movl 636(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 640(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%eax)
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 64(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 28(%edx), %eax
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 568(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 628(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 624(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 620(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 616(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 612(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 608(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 604(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 600(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 596(%esp), %ebx
+ movl 592(%esp), %edi
+ movl 588(%esp), %esi
+ movl 584(%esp), %edx
+ movl 580(%esp), %ecx
+ movl 572(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 576(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 32(%edx), %eax
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 504(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 564(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 560(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 556(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 552(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 548(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 544(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 540(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 536(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 532(%esp), %ebx
+ movl 528(%esp), %edi
+ movl 524(%esp), %esi
+ movl 520(%esp), %edx
+ movl 516(%esp), %ecx
+ movl 508(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 512(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 32(%eax)
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 36(%edx), %eax
+ movl %eax, (%esp)
+ leal 440(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 440(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 500(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 496(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 492(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 488(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 484(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 480(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 476(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 472(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 468(%esp), %ebx
+ movl 464(%esp), %edi
+ movl 460(%esp), %esi
+ movl 456(%esp), %edx
+ movl 452(%esp), %ecx
+ movl 444(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 448(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 36(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 40(%edx), %eax
+ movl %eax, (%esp)
+ leal 376(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 376(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 436(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 432(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 428(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 424(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 420(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 416(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 412(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 408(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 404(%esp), %ebx
+ movl 400(%esp), %edi
+ movl 396(%esp), %esi
+ movl 392(%esp), %edx
+ movl 388(%esp), %ecx
+ movl 380(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 384(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 40(%eax)
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 44(%edx), %eax
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 312(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 364(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 360(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 356(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 352(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 348(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 344(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 340(%esp), %ebx
+ movl 336(%esp), %edi
+ movl 332(%esp), %esi
+ movl 328(%esp), %edx
+ movl 324(%esp), %ecx
+ movl 316(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 320(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 44(%eax)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl %eax, 108(%esp) # 4-byte Folded Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 48(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 48(%edx), %eax
+ movl %eax, (%esp)
+ leal 248(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 108(%esp), %eax # 4-byte Reload
+ addl 248(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 308(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 304(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 300(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 296(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 292(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 288(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 284(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 280(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 276(%esp), %ebx
+ movl 272(%esp), %edi
+ movl 268(%esp), %edx
+ movl 264(%esp), %ecx
+ movl 260(%esp), %eax
+ movl 252(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 256(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ movl 1104(%esp), %ebp
+ movl %esi, 48(%ebp)
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 20(%esp), %esi # 4-byte Reload
+ adcl %esi, 36(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 32(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 52(%edx), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 184(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 244(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 240(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 236(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 232(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 228(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 224(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 220(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 212(%esp), %ebx
+ movl 208(%esp), %edx
+ movl 204(%esp), %ecx
+ movl 200(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 196(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 188(%esp), %eax
+ movl 192(%esp), %esi
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl 1104(%esp), %edi
+ movl %ebp, 52(%edi)
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl %esi, %ebp
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl 24(%esp), %edi # 4-byte Reload
+ adcl %edi, 40(%esp) # 4-byte Folded Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 52(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 64(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 56(%edx), %eax
+ movl %eax, (%esp)
+ leal 120(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 120(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 124(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ adcl 128(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 176(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 168(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 164(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 156(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 152(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 148(%esp), %ebp
+ movl 144(%esp), %edi
+ movl 140(%esp), %esi
+ movl 136(%esp), %edx
+ movl 132(%esp), %ecx
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebx # 4-byte Reload
+ movl %ebx, 56(%eax)
+ movl 32(%esp), %ebx # 4-byte Reload
+ movl %ebx, 60(%eax)
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl 72(%esp), %ebx # 4-byte Reload
+ movl %ebx, 64(%eax)
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 68(%eax)
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 72(%eax)
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 76(%eax)
+ adcl 76(%esp), %ebp # 4-byte Folded Reload
+ movl %edi, 80(%eax)
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %ebp, 84(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 84(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 88(%eax)
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 92(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 92(%eax)
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 104(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 96(%eax)
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 108(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 100(%eax)
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 104(%eax)
+ movl %ecx, 108(%eax)
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 112(%eax)
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 116(%eax)
+ addl $1084, %esp # imm = 0x43C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end225:
+ .size mcl_fpDbl_sqrPre15Lbmi2, .Lfunc_end225-mcl_fpDbl_sqrPre15Lbmi2
+
+ .globl mcl_fp_mont15Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont15Lbmi2,@function
+mcl_fp_mont15Lbmi2: # @mcl_fp_mont15Lbmi2
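+# Presumed semantics (from mcl naming conventions, not stated in this file):
+# Montgomery multiplication over a 15-limb (480-bit) modulus; the word at
+# offset -4 of the modulus pointer appears to hold the precomputed Montgomery
+# constant consumed by the imull/.LmulPv480x32 reduction steps below.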
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $2044, %esp # imm = 0x7FC
+ calll .L226$pb
+.L226$pb:
+ popl %ebx
+.Ltmp47:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp47-.L226$pb), %ebx
+ movl 2076(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1976(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 1976(%esp), %ebp
+ movl 1980(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 2036(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 2032(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 2028(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 2024(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 2020(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 2016(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 2012(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 2008(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 2004(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 2000(%esp), %edi
+ movl 1996(%esp), %esi
+ movl 1992(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 1988(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 1984(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl %eax, (%esp)
+ leal 1912(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ addl 1912(%esp), %ebp
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1916(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1920(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1924(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1928(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1932(%esp), %esi
+ adcl 1936(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1940(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1944(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1948(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1952(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1956(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1960(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1964(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1968(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ebp # 4-byte Reload
+ adcl 1972(%esp), %ebp
+ sbbl %eax, %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1848(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 116(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ movl 88(%esp), %edx # 4-byte Reload
+ addl 1848(%esp), %edx
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1852(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1856(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1860(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 1864(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl 1868(%esp), %edi
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1872(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1876(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1880(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1884(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1888(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1892(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1896(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1900(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ adcl 1904(%esp), %ebp
+ movl %ebp, 108(%esp) # 4-byte Spill
+ adcl 1908(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %edx, %eax
+ movl %edx, %esi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1784(%esp), %ecx
+ movl 2076(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv480x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 1784(%esp), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1788(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1792(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1796(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1800(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 1804(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1808(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 1812(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1816(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1820(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1824(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1828(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1832(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 1836(%esp), %esi
+ movl 108(%esp), %ebp # 4-byte Reload
+ adcl 1840(%esp), %ebp
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1844(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1720(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 96(%esp), %ecx # 4-byte Reload
+ addl 1720(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1724(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1728(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1732(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1736(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1740(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1744(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1748(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1752(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 1756(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1760(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1764(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 1768(%esp), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ adcl 1772(%esp), %ebp
+ movl %ebp, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1776(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 1780(%esp), %esi
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1656(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ addl 1656(%esp), %eax
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1660(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1664(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1668(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1672(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1676(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1680(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1684(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 1688(%esp), %ebp
+ adcl 1692(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1696(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1700(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1704(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1708(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl 1712(%esp), %edi
+ adcl 1716(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1592(%esp), %ecx
+ movl 2068(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv480x32
+ movl 84(%esp), %ecx # 4-byte Reload
+ addl 1592(%esp), %ecx
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1596(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1600(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1604(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1608(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1612(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1616(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 1620(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1624(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 1628(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1632(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1636(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1640(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 1644(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1648(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1652(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1528(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 1528(%esp), %ebp
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1532(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1536(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1540(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 1544(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1548(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1552(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1556(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1560(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1564(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %edi # 4-byte Reload
+ adcl 1568(%esp), %edi
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 1572(%esp), %esi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1576(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1580(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1584(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1588(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1464(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 92(%esp), %ecx # 4-byte Reload
+ addl 1464(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1468(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1472(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1476(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1480(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 1484(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1488(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1492(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1496(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 1500(%esp), %edi
+ movl %edi, 112(%esp) # 4-byte Spill
+ adcl 1504(%esp), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1508(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %esi # 4-byte Reload
+ adcl 1512(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1516(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1520(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1524(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1400(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ movl 92(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1400(%esp), %edi
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1404(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1408(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1412(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1416(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 1420(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1424(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1428(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 1432(%esp), %edi
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1436(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1440(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1444(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ adcl 1448(%esp), %esi
+ movl %esi, %ebp
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 1452(%esp), %esi
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1456(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1460(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 1336(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 1336(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1340(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1344(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1348(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1352(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1356(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1360(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1364(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1368(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1372(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1376(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 1380(%esp), %ebp
+ movl %ebp, 116(%esp) # 4-byte Spill
+ adcl 1384(%esp), %esi
+ movl %esi, %ebp
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1388(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 1392(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1396(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1272(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ movl 80(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1272(%esp), %edi
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1276(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1280(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1284(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1288(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1292(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1296(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1300(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1304(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1308(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1312(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1316(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ adcl 1320(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1324(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl 1328(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1332(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %edi
+ movl 2072(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 1208(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 64(%esp), %ecx # 4-byte Reload
+ addl 1208(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1212(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1216(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1220(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1224(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1228(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 1232(%esp), %ebp
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1236(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1240(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 1244(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1248(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1252(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1256(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1260(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1264(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1268(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1144(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ andl $1, %edi
+ movl %edi, %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ addl 1144(%esp), %eax
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1148(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1152(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 1156(%esp), %edi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1168(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 1180(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1184(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1188(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1192(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 1196(%esp), %ebp
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1200(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1204(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 1080(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 1080(%esp), %ecx
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1084(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1088(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 1092(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1096(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1100(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1104(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1108(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1112(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1116(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1120(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1124(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 1128(%esp), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1140(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1016(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 1016(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 1028(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 1032(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1040(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 1044(%esp), %esi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl 1060(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1068(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1072(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1076(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 952(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 952(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 964(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 976(%esp), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 980(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 984(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 992(%esp), %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1000(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1004(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 888(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ movl %ebp, %eax
+ andl $1, %eax
+ addl 888(%esp), %esi
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 892(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 896(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 900(%esp), %edi
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 904(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 908(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 912(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 916(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 920(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 924(%esp), %ebp
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 928(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 932(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 936(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 940(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 944(%esp), %esi
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 948(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 824(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 68(%esp), %ecx # 4-byte Reload
+ addl 824(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 832(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 856(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 864(%esp), %ebp
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 872(%esp), %edi
+ adcl 876(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 760(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ movl 68(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 760(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 776(%esp), %esi
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 780(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 800(%esp), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 804(%esp), %ebp
+ adcl 808(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 816(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 696(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 696(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 708(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 716(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 736(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 748(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 752(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 632(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl 656(%esp), %edi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 672(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 688(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 568(%esp), %ecx
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 588(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl 596(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 604(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 504(%esp), %esi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl 516(%esp), %edi
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 520(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 532(%esp), %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 560(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 440(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 100(%esp), %ecx # 4-byte Reload
+ addl 440(%esp), %ecx
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 448(%esp), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ adcl 452(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ adcl 460(%esp), %edi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 492(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 376(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 376(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 388(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 396(%esp), %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 404(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 416(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 112(%esp), %ecx # 4-byte Reload
+ addl 312(%esp), %ecx
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 320(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 336(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 348(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 352(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 248(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 248(%esp), %edi
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 252(%esp), %esi
+ movl 108(%esp), %edi # 4-byte Reload
+ adcl 256(%esp), %edi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 288(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 56(%eax), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl %esi, %ecx
+ movl 96(%esp), %esi # 4-byte Reload
+ addl 184(%esp), %ecx
+ adcl 188(%esp), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 196(%esp), %ebp
+ adcl 200(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 120(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ movl 104(%esp), %ebx # 4-byte Reload
+ andl $1, %ebx
+ addl 120(%esp), %edi
+ movl %ebp, %edi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 124(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 128(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ adcl 132(%esp), %edi
+ adcl 136(%esp), %esi
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 140(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 144(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 148(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 152(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 156(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 160(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 164(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 168(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 172(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 176(%esp), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %edx # 4-byte Reload
+ adcl 180(%esp), %edx
+ movl %edx, 112(%esp) # 4-byte Spill
+ adcl $0, %ebx
+ movl %ebx, 104(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 2076(%esp), %ebp
+ subl (%ebp), %edx
+ sbbl 4(%ebp), %ecx
+ movl %edi, %eax
+ sbbl 8(%ebp), %eax
+ movl %esi, %ebx
+ sbbl 12(%ebp), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 84(%esp), %ebx # 4-byte Reload
+ sbbl 16(%ebp), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 92(%esp), %ebx # 4-byte Reload
+ sbbl 20(%ebp), %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 80(%esp), %ebx # 4-byte Reload
+ sbbl 24(%ebp), %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 64(%esp), %ebx # 4-byte Reload
+ sbbl 28(%ebp), %ebx
+ movl %ebx, 32(%esp) # 4-byte Spill
+ movl 56(%esp), %ebx # 4-byte Reload
+ sbbl 32(%ebp), %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 60(%esp), %ebx # 4-byte Reload
+ sbbl 36(%ebp), %ebx
+ movl %ebx, 40(%esp) # 4-byte Spill
+ movl 68(%esp), %ebx # 4-byte Reload
+ sbbl 40(%ebp), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %ebx # 4-byte Reload
+ sbbl 44(%ebp), %ebx
+ movl %ebx, 48(%esp) # 4-byte Spill
+ movl 76(%esp), %ebx # 4-byte Reload
+ sbbl 48(%ebp), %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 100(%esp), %ebx # 4-byte Reload
+ sbbl 52(%ebp), %ebx
+ movl %ebx, 88(%esp) # 4-byte Spill
+ movl 112(%esp), %ebx # 4-byte Reload
+ sbbl 56(%ebp), %ebx
+ movl %ebx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ebx # 4-byte Reload
+ movl 108(%esp), %ebp # 4-byte Reload
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB226_2
+# BB#1:
+ movl %edx, %ebp
+.LBB226_2:
+ movl 2064(%esp), %edx
+ movl %ebp, (%edx)
+ testb %bl, %bl
+ movl 116(%esp), %ebp # 4-byte Reload
+ jne .LBB226_4
+# BB#3:
+ movl %ecx, %ebp
+.LBB226_4:
+ movl %ebp, 4(%edx)
+ jne .LBB226_6
+# BB#5:
+ movl %eax, %edi
+.LBB226_6:
+ movl %edi, 8(%edx)
+ jne .LBB226_8
+# BB#7:
+ movl 16(%esp), %esi # 4-byte Reload
+.LBB226_8:
+ movl %esi, 12(%edx)
+ movl 84(%esp), %eax # 4-byte Reload
+ jne .LBB226_10
+# BB#9:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB226_10:
+ movl %eax, 16(%edx)
+ movl 92(%esp), %eax # 4-byte Reload
+ jne .LBB226_12
+# BB#11:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB226_12:
+ movl %eax, 20(%edx)
+ movl 80(%esp), %eax # 4-byte Reload
+ jne .LBB226_14
+# BB#13:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB226_14:
+ movl %eax, 24(%edx)
+ movl 64(%esp), %eax # 4-byte Reload
+ jne .LBB226_16
+# BB#15:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB226_16:
+ movl %eax, 28(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ jne .LBB226_18
+# BB#17:
+ movl 36(%esp), %eax # 4-byte Reload
+.LBB226_18:
+ movl %eax, 32(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ jne .LBB226_20
+# BB#19:
+ movl 40(%esp), %eax # 4-byte Reload
+.LBB226_20:
+ movl %eax, 36(%edx)
+ movl 68(%esp), %eax # 4-byte Reload
+ jne .LBB226_22
+# BB#21:
+ movl 44(%esp), %eax # 4-byte Reload
+.LBB226_22:
+ movl %eax, 40(%edx)
+ movl 72(%esp), %eax # 4-byte Reload
+ jne .LBB226_24
+# BB#23:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB226_24:
+ movl %eax, 44(%edx)
+ movl 76(%esp), %eax # 4-byte Reload
+ jne .LBB226_26
+# BB#25:
+ movl 52(%esp), %eax # 4-byte Reload
+.LBB226_26:
+ movl %eax, 48(%edx)
+ movl 100(%esp), %eax # 4-byte Reload
+ jne .LBB226_28
+# BB#27:
+ movl 88(%esp), %eax # 4-byte Reload
+.LBB226_28:
+ movl %eax, 52(%edx)
+ movl 112(%esp), %eax # 4-byte Reload
+ jne .LBB226_30
+# BB#29:
+ movl 96(%esp), %eax # 4-byte Reload
+.LBB226_30:
+ movl %eax, 56(%edx)
+ addl $2044, %esp # imm = 0x7FC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end226:
+ .size mcl_fp_mont15Lbmi2, .Lfunc_end226-mcl_fp_mont15Lbmi2
+
+ .globl mcl_fp_montNF15Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF15Lbmi2,@function
+mcl_fp_montNF15Lbmi2: # @mcl_fp_montNF15Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $2028, %esp # imm = 0x7EC
+ calll .L227$pb
+.L227$pb:
+ popl %ebx
+.Ltmp48:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp48-.L227$pb), %ebx
+ movl 2060(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1960(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 1960(%esp), %ebp
+ movl 1964(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 2020(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 2016(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 2012(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 2008(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 2004(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 2000(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1996(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1992(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1988(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1984(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1980(%esp), %esi
+ movl 1976(%esp), %edi
+ movl 1972(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 1968(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl %eax, (%esp)
+ leal 1896(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 1896(%esp), %ebp
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1900(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1904(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1908(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 1912(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ adcl 1916(%esp), %esi
+ movl %esi, %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1920(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1924(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1928(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1932(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1936(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1940(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 1944(%esp), %ebp
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 1948(%esp), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1952(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1956(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1832(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 1892(%esp), %eax
+ movl 92(%esp), %edx # 4-byte Reload
+ addl 1832(%esp), %edx
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1836(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1840(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1844(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 1848(%esp), %edi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1852(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1856(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1860(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1864(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1868(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1872(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl 1876(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ adcl 1880(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl 1884(%esp), %ebp
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1888(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl %edx, %eax
+ movl %edx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1768(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 1768(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1772(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1776(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1780(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1784(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1788(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1792(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1796(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1800(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 1804(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1808(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1812(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1816(%esp), %eax
+ movl %eax, %esi
+ adcl 1820(%esp), %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 1824(%esp), %ebp
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1828(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1704(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 1764(%esp), %eax
+ movl 68(%esp), %edx # 4-byte Reload
+ addl 1704(%esp), %edx
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1708(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1712(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1716(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1720(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1724(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1728(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1732(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 1736(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1740(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 1744(%esp), %edi
+ adcl 1748(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1752(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl 1756(%esp), %ebp
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1760(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edx, %eax
+ movl %edx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1640(%esp), %ecx
+ movl 2060(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv480x32
+ addl 1640(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1644(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1648(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1652(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1656(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1660(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1664(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1668(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1672(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1676(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1680(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1684(%esp), %eax
+ movl %eax, %esi
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 1688(%esp), %edi
+ adcl 1692(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1696(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 1700(%esp), %ebp
+ movl 2056(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1576(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 1636(%esp), %eax
+ movl 88(%esp), %edx # 4-byte Reload
+ addl 1576(%esp), %edx
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1580(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1584(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1588(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1592(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1596(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1600(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1604(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1608(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1612(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 1616(%esp), %esi
+ adcl 1620(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1624(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1628(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 1632(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1512(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 1512(%esp), %edi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1516(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1520(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1524(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1528(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 1532(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1536(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1540(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1544(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 1548(%esp), %ebp
+ adcl 1552(%esp), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1556(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1560(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1564(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1568(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1572(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1448(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 1508(%esp), %eax
+ movl 72(%esp), %edx # 4-byte Reload
+ addl 1448(%esp), %edx
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1452(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1456(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1460(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 1464(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1468(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 1472(%esp), %edi
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1476(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl 1480(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ adcl 1484(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1488(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1492(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1496(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1500(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 1504(%esp), %ebp
+ adcl $0, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %edx, %eax
+ movl %edx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1384(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 1384(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1388(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1392(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1396(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1400(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1404(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1408(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1412(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1416(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1420(%esp), %eax
+ movl %eax, %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1424(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1428(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1432(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1436(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 1440(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1444(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 1320(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 1380(%esp), %edx
+ movl 40(%esp), %ecx # 4-byte Reload
+ addl 1320(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 1324(%esp), %ebp
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 1328(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1332(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1336(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1340(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1344(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1348(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 1352(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1356(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1360(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1364(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 1368(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1372(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1376(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1256(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 1256(%esp), %eax
+ adcl 1260(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ adcl 1264(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1268(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 1272(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1276(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1280(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1284(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1288(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1292(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 1296(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1300(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1304(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1308(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 1312(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1316(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 1192(%esp), %ecx
+ movl 2052(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv480x32
+ movl 1252(%esp), %eax
+ movl 48(%esp), %edx # 4-byte Reload
+ addl 1192(%esp), %edx
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1196(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1200(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 1204(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1208(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 1212(%esp), %ebp
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1216(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1220(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1224(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl 1228(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1232(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1236(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1240(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl 1244(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 1248(%esp), %esi
+ adcl $0, %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1128(%esp), %ecx
+ movl 2060(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv480x32
+ addl 1128(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 1140(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1144(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1148(%esp), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1152(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1156(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 1168(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1180(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1184(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 1188(%esp), %esi
+ movl 2056(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 1064(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 1124(%esp), %eax
+ movl 44(%esp), %edx # 4-byte Reload
+ addl 1064(%esp), %edx
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1068(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 1072(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1076(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1080(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 1084(%esp), %edi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1088(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1092(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1096(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ adcl 1100(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1104(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1108(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1112(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1116(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 1120(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %esi
+ movl %edx, %ebp
+ movl %ebp, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1000(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 1000(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1004(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 1012(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1020(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 1028(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1040(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1044(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 1060(%esp), %esi
+ movl 2056(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 936(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 996(%esp), %eax
+ movl 52(%esp), %edx # 4-byte Reload
+ addl 936(%esp), %edx
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 940(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 944(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 948(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 952(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 956(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl 960(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 964(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 968(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 972(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 976(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 980(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 984(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 988(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 992(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %esi
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 872(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 872(%esp), %edi
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 876(%esp), %ebp
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 880(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 928(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 932(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 808(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 868(%esp), %eax
+ movl %ebp, %ecx
+ addl 808(%esp), %ecx
+ adcl 812(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 816(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 820(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 824(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ adcl 828(%esp), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 832(%esp), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 836(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 840(%esp), %edi
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 844(%esp), %esi
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 848(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 852(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 856(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 860(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 864(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 744(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 744(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 768(%esp), %ebp
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 776(%esp), %edi
+ adcl 780(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 792(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 680(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 740(%esp), %eax
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 680(%esp), %ecx
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 684(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 688(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 692(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ adcl 696(%esp), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ adcl 700(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 704(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ adcl 708(%esp), %edi
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 712(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 716(%esp), %ebp
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 720(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl 724(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 728(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 732(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 736(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 616(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 616(%esp), %esi
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 620(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 644(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ adcl 648(%esp), %edi
+ adcl 652(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 656(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 552(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 612(%esp), %edx
+ movl %esi, %ecx
+ addl 552(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 572(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 580(%esp), %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 588(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 488(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 488(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl %esi, %ebp
+ adcl 508(%esp), %ebp
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 512(%esp), %edi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 528(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 484(%esp), %edx
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 424(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 440(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ adcl 444(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %esi, %edi
+ adcl 460(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 360(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 360(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %esi # 4-byte Reload
+ adcl 368(%esp), %esi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 376(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 396(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 400(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 296(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 356(%esp), %edx
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 296(%esp), %ecx
+ adcl 300(%esp), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 308(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 332(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 336(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 232(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 232(%esp), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 240(%esp), %ebp
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 244(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 272(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 276(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 56(%eax), %eax
+ movl %eax, (%esp)
+ leal 168(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 228(%esp), %edx
+ movl 96(%esp), %ecx # 4-byte Reload
+ addl 168(%esp), %ecx
+ adcl 172(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ adcl 176(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 188(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 208(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 212(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 104(%esp), %ecx
+ movl 2060(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv480x32
+ addl 104(%esp), %edi
+ movl 68(%esp), %edi # 4-byte Reload
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 112(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl %ecx, %ebx
+ adcl 116(%esp), %edi
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 120(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl 124(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 128(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 132(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 136(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 140(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 144(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 148(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 152(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 156(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 160(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 164(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 2060(%esp), %ecx
+ subl (%ecx), %edx
+ movl %ebx, %ebp
+ sbbl 4(%ecx), %ebp
+ movl %edi, %ebx
+ sbbl 8(%ecx), %ebx
+ movl 88(%esp), %eax # 4-byte Reload
+ sbbl 12(%ecx), %eax
+ sbbl 16(%ecx), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ sbbl 20(%ecx), %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ sbbl 24(%ecx), %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ sbbl 28(%ecx), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ sbbl 32(%ecx), %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ sbbl 36(%ecx), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ sbbl 40(%ecx), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ sbbl 44(%ecx), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ sbbl 48(%ecx), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ sbbl 52(%ecx), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 96(%esp), %esi # 4-byte Reload
+ sbbl 56(%ecx), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
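+# Final conditional reduction: the modulus limbs at 2060(%esp) have been
+# subtracted into spill slots above; the sign of the last borrow, tested
+# with sarl/js below, selects per limb whether the reduced difference or
+# the original sum is written to the result pointed to by 2048(%esp).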
+ movl %esi, %ecx
+ sarl $31, %ecx
+ testl %ecx, %ecx
+ movl 100(%esp), %ecx # 4-byte Reload
+ js .LBB227_2
+# BB#1:
+ movl %edx, %ecx
+.LBB227_2:
+ movl 2048(%esp), %edx
+ movl %ecx, (%edx)
+ movl 92(%esp), %esi # 4-byte Reload
+ js .LBB227_4
+# BB#3:
+ movl %ebp, %esi
+.LBB227_4:
+ movl %esi, 4(%edx)
+ movl 88(%esp), %ecx # 4-byte Reload
+ js .LBB227_6
+# BB#5:
+ movl %ebx, %edi
+.LBB227_6:
+ movl %edi, 8(%edx)
+ js .LBB227_8
+# BB#7:
+ movl %eax, %ecx
+.LBB227_8:
+ movl %ecx, 12(%edx)
+ movl 72(%esp), %eax # 4-byte Reload
+ js .LBB227_10
+# BB#9:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB227_10:
+ movl %eax, 16(%edx)
+ movl 64(%esp), %eax # 4-byte Reload
+ js .LBB227_12
+# BB#11:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB227_12:
+ movl %eax, 20(%edx)
+ movl 48(%esp), %eax # 4-byte Reload
+ js .LBB227_14
+# BB#13:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB227_14:
+ movl %eax, 24(%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB227_16
+# BB#15:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB227_16:
+ movl %eax, 28(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB227_18
+# BB#17:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB227_18:
+ movl %eax, 32(%edx)
+ movl 44(%esp), %eax # 4-byte Reload
+ js .LBB227_20
+# BB#19:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB227_20:
+ movl %eax, 36(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB227_22
+# BB#21:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB227_22:
+ movl %eax, 40(%edx)
+ movl 84(%esp), %eax # 4-byte Reload
+ js .LBB227_24
+# BB#23:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB227_24:
+ movl %eax, 44(%edx)
+ movl 80(%esp), %eax # 4-byte Reload
+ js .LBB227_26
+# BB#25:
+ movl 36(%esp), %eax # 4-byte Reload
+.LBB227_26:
+ movl %eax, 48(%edx)
+ movl 76(%esp), %eax # 4-byte Reload
+ js .LBB227_28
+# BB#27:
+ movl 40(%esp), %eax # 4-byte Reload
+.LBB227_28:
+ movl %eax, 52(%edx)
+ movl 96(%esp), %eax # 4-byte Reload
+ js .LBB227_30
+# BB#29:
+ movl 68(%esp), %eax # 4-byte Reload
+.LBB227_30:
+ movl %eax, 56(%edx)
+ addl $2028, %esp # imm = 0x7EC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end227:
+ .size mcl_fp_montNF15Lbmi2, .Lfunc_end227-mcl_fp_montNF15Lbmi2
+
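+# mcl_fp_montRed15Lbmi2 below performs Montgomery reduction for a 15-limb
+# (15 x 32 bits = 480-bit) modulus: the double-width input pointed to by
+# 1172(%esp) is folded one low word at a time using the reduction constant
+# read from offset -4 of the modulus argument at 1176(%esp) (presumably
+# n' = -p^(-1) mod 2^32 in this code base), and the result is stored
+# through 1168(%esp) after a final conditional subtraction of the modulus.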
+ .globl mcl_fp_montRed15Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed15Lbmi2,@function
+mcl_fp_montRed15Lbmi2: # @mcl_fp_montRed15Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1148, %esp # imm = 0x47C
+ calll .L228$pb
+.L228$pb:
+ popl %eax
+.Ltmp49:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp49-.L228$pb), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 1176(%esp), %edx
+ movl -4(%edx), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 1172(%esp), %ecx
+ movl (%ecx), %ebx
+ movl %ebx, 80(%esp) # 4-byte Spill
+ movl 4(%ecx), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ imull %esi, %ebx
+ movl 116(%ecx), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%ecx), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%ecx), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 104(%ecx), %esi
+ movl %esi, 128(%esp) # 4-byte Spill
+ movl 100(%ecx), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 96(%ecx), %esi
+ movl %esi, 152(%esp) # 4-byte Spill
+ movl 92(%ecx), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 88(%ecx), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 84(%ecx), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 80(%ecx), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 76(%ecx), %esi
+ movl %esi, 168(%esp) # 4-byte Spill
+ movl 72(%ecx), %esi
+ movl %esi, 164(%esp) # 4-byte Spill
+ movl 68(%ecx), %esi
+ movl %esi, 176(%esp) # 4-byte Spill
+ movl 64(%ecx), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 60(%ecx), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 56(%ecx), %esi
+ movl %esi, 156(%esp) # 4-byte Spill
+ movl 52(%ecx), %esi
+ movl %esi, 140(%esp) # 4-byte Spill
+ movl 48(%ecx), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 44(%ecx), %esi
+ movl %esi, 124(%esp) # 4-byte Spill
+ movl 40(%ecx), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 36(%ecx), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 32(%ecx), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 28(%ecx), %ebp
+ movl 24(%ecx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 20(%ecx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 16(%ecx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 12(%ecx), %edi
+ movl 8(%ecx), %esi
+ movl (%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 56(%edx), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%edx), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 48(%edx), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 44(%edx), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 40(%edx), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 36(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 32(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 28(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 24(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 16(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 12(%edx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 8(%edx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 4(%edx), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl %ebx, (%esp)
+ leal 1080(%esp), %ecx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 80(%esp), %eax # 4-byte Reload
+ addl 1080(%esp), %eax
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1084(%esp), %ecx
+ adcl 1088(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ adcl 1092(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1096(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1100(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1104(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1108(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1112(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1116(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1120(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1124(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1128(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 1140(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ adcl $0, 180(%esp) # 4-byte Folded Spill
+ adcl $0, 176(%esp) # 4-byte Folded Spill
+ adcl $0, 164(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ movl 148(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1016(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 1016(%esp), %esi
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 1020(%esp), %edx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1028(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1040(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1044(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl 1060(%esp), %ebp
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 1068(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 1072(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 1076(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ adcl $0, 176(%esp) # 4-byte Folded Spill
+ adcl $0, 164(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, %esi
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 148(%esp) # 4-byte Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
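+# Each remaining reduction round follows the same shape: multiply the
+# current low word by the constant at 88(%esp), call .LmulPv480x32 with the
+# modulus reloaded from 1176(%esp), add the 480-bit product so the low word
+# cancels, and propagate the carries into the spill slots one limb higher.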
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 952(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 952(%esp), %edi
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 956(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 980(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 984(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl 992(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 1000(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 172(%esp), %ebp # 4-byte Reload
+ adcl 1004(%esp), %ebp
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ adcl $0, 164(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 144(%esp) # 4-byte Spill
+ movl 160(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 888(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 888(%esp), %esi
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 892(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 928(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 932(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ adcl 936(%esp), %ebp
+ movl %ebp, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 940(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 944(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 948(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 160(%esp) # 4-byte Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ movl 132(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %esi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 824(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 824(%esp), %esi
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 828(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 856(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 876(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 132(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 760(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 760(%esp), %esi
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 764(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 780(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ movl 152(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ movl 128(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %esi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 696(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 696(%esp), %esi
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 700(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 716(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 152(%esp) # 4-byte Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 128(%esp) # 4-byte Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 632(%esp), %edi
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 636(%esp), %ecx
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %ebp # 4-byte Reload
+ adcl 672(%esp), %ebp
+ movl 164(%esp), %edi # 4-byte Reload
+ adcl 676(%esp), %edi
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 568(%esp), %esi
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 572(%esp), %ecx
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ adcl 604(%esp), %ebp
+ movl %ebp, 176(%esp) # 4-byte Spill
+ adcl 608(%esp), %edi
+ movl %edi, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 144(%esp), %edi # 4-byte Reload
+ adcl 616(%esp), %edi
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 1176(%esp), %eax
+ movl %eax, %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 504(%esp), %esi
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 508(%esp), %ecx
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %esi # 4-byte Reload
+ adcl 524(%esp), %esi
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ adcl 548(%esp), %edi
+ movl %edi, 144(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 440(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 440(%esp), %edi
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 444(%esp), %ecx
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %edi # 4-byte Reload
+ adcl 452(%esp), %edi
+ adcl 456(%esp), %esi
+ movl %esi, 156(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %esi # 4-byte Reload
+ adcl 464(%esp), %esi
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 376(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 376(%esp), %ebp
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl 380(%esp), %ebp
+ adcl 384(%esp), %edi
+ movl %edi, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 172(%esp), %edi # 4-byte Reload
+ adcl 392(%esp), %edi
+ adcl 396(%esp), %esi
+ movl %esi, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 144(%esp), %esi # 4-byte Reload
+ adcl 412(%esp), %esi
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ebp, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 312(%esp), %ebp
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl 156(%esp), %ecx # 4-byte Reload
+ adcl 320(%esp), %ecx
+ movl %ecx, 156(%esp) # 4-byte Spill
+ adcl 324(%esp), %edi
+ movl %edi, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %ecx # 4-byte Reload
+ adcl 328(%esp), %ecx
+ movl %ecx, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %ecx # 4-byte Reload
+ adcl 332(%esp), %ecx
+ movl %ecx, 176(%esp) # 4-byte Spill
+ movl 164(%esp), %ecx # 4-byte Reload
+ adcl 336(%esp), %ecx
+ movl %ecx, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %ecx # 4-byte Reload
+ adcl 340(%esp), %ecx
+ movl %ecx, 168(%esp) # 4-byte Spill
+ adcl 344(%esp), %esi
+ movl %esi, 144(%esp) # 4-byte Spill
+ movl 160(%esp), %ecx # 4-byte Reload
+ adcl 348(%esp), %ecx
+ movl %ecx, 160(%esp) # 4-byte Spill
+ movl 148(%esp), %ebp # 4-byte Reload
+ adcl 352(%esp), %ebp
+ movl 136(%esp), %ecx # 4-byte Reload
+ adcl 356(%esp), %ecx
+ movl %ecx, 136(%esp) # 4-byte Spill
+ movl 152(%esp), %ecx # 4-byte Reload
+ adcl 360(%esp), %ecx
+ movl %ecx, 152(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 364(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 368(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 372(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ movl %eax, %edi
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 248(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 248(%esp), %edi
+ movl 156(%esp), %ecx # 4-byte Reload
+ adcl 252(%esp), %ecx
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ adcl 284(%esp), %ebp
+ movl %ebp, 148(%esp) # 4-byte Spill
+ movl 136(%esp), %edi # 4-byte Reload
+ adcl 288(%esp), %edi
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 104(%esp), %ebp # 4-byte Reload
+ adcl 308(%esp), %ebp
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 184(%esp), %esi
+ movl 172(%esp), %edx # 4-byte Reload
+ adcl 188(%esp), %edx
+ movl %edx, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %ecx # 4-byte Reload
+ adcl 192(%esp), %ecx
+ movl %ecx, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %esi # 4-byte Reload
+ adcl 204(%esp), %esi
+ movl %esi, 168(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ adcl 220(%esp), %edi
+ movl %edi, 136(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 240(%esp), %ebp
+ movl %ebp, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %ebx # 4-byte Reload
+ adcl $0, %ebx
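+# All fifteen reduction rounds are complete; the modulus limbs spilled at
+# function entry (4(%esp)..60(%esp), limb 0 at 16(%esp)) are now subtracted
+# from the accumulator, and the final borrow kept in %ebx selects per limb
+# whether the subtracted or unsubtracted value is stored through the result
+# pointer loaded from 1168(%esp).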
+ movl %edx, %eax
+ subl 16(%esp), %edx # 4-byte Folded Reload
+ sbbl 4(%esp), %ecx # 4-byte Folded Reload
+ movl 176(%esp), %eax # 4-byte Reload
+ sbbl 8(%esp), %eax # 4-byte Folded Reload
+ movl 164(%esp), %ebp # 4-byte Reload
+ sbbl 12(%esp), %ebp # 4-byte Folded Reload
+ sbbl 20(%esp), %esi # 4-byte Folded Reload
+ movl 144(%esp), %edi # 4-byte Reload
+ sbbl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 160(%esp), %edi # 4-byte Reload
+ sbbl 28(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 148(%esp), %edi # 4-byte Reload
+ sbbl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 136(%esp), %edi # 4-byte Reload
+ sbbl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 152(%esp), %edi # 4-byte Reload
+ sbbl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 132(%esp), %edi # 4-byte Reload
+ sbbl 44(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 112(%esp) # 4-byte Spill
+ movl 128(%esp), %edi # 4-byte Reload
+ sbbl 48(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ sbbl 52(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 124(%esp) # 4-byte Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ sbbl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 140(%esp) # 4-byte Spill
+ movl 108(%esp), %edi # 4-byte Reload
+ sbbl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 156(%esp) # 4-byte Spill
+ sbbl $0, %ebx
+ andl $1, %ebx
+ movl %ebx, %edi
+ jne .LBB228_2
+# BB#1:
+ movl %edx, 172(%esp) # 4-byte Spill
+.LBB228_2:
+ movl 1168(%esp), %edx
+ movl 172(%esp), %ebx # 4-byte Reload
+ movl %ebx, (%edx)
+ movl %edi, %ebx
+ testb %bl, %bl
+ jne .LBB228_4
+# BB#3:
+ movl %ecx, 180(%esp) # 4-byte Spill
+.LBB228_4:
+ movl 180(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%edx)
+ movl 176(%esp), %ecx # 4-byte Reload
+ jne .LBB228_6
+# BB#5:
+ movl %eax, %ecx
+.LBB228_6:
+ movl %ecx, 8(%edx)
+ movl 164(%esp), %eax # 4-byte Reload
+ jne .LBB228_8
+# BB#7:
+ movl %ebp, %eax
+.LBB228_8:
+ movl %eax, 12(%edx)
+ movl 108(%esp), %ecx # 4-byte Reload
+ movl 148(%esp), %eax # 4-byte Reload
+ movl 168(%esp), %ebp # 4-byte Reload
+ jne .LBB228_10
+# BB#9:
+ movl %esi, %ebp
+.LBB228_10:
+ movl %ebp, 16(%edx)
+ movl 152(%esp), %ebp # 4-byte Reload
+ movl 144(%esp), %ebx # 4-byte Reload
+ jne .LBB228_12
+# BB#11:
+ movl 84(%esp), %ebx # 4-byte Reload
+.LBB228_12:
+ movl %ebx, 20(%edx)
+ movl 132(%esp), %ebx # 4-byte Reload
+ movl 160(%esp), %edi # 4-byte Reload
+ jne .LBB228_14
+# BB#13:
+ movl 88(%esp), %edi # 4-byte Reload
+.LBB228_14:
+ movl %edi, 24(%edx)
+ movl 128(%esp), %edi # 4-byte Reload
+ jne .LBB228_16
+# BB#15:
+ movl 92(%esp), %eax # 4-byte Reload
+.LBB228_16:
+ movl %eax, 28(%edx)
+ movl 116(%esp), %esi # 4-byte Reload
+ jne .LBB228_18
+# BB#17:
+ movl 96(%esp), %eax # 4-byte Reload
+ movl %eax, 136(%esp) # 4-byte Spill
+.LBB228_18:
+ movl 136(%esp), %eax # 4-byte Reload
+ movl %eax, 32(%edx)
+ jne .LBB228_20
+# BB#19:
+ movl 100(%esp), %ebp # 4-byte Reload
+.LBB228_20:
+ movl %ebp, 36(%edx)
+ movl 104(%esp), %eax # 4-byte Reload
+ jne .LBB228_22
+# BB#21:
+ movl 112(%esp), %ebx # 4-byte Reload
+.LBB228_22:
+ movl %ebx, 40(%edx)
+ jne .LBB228_24
+# BB#23:
+ movl 120(%esp), %edi # 4-byte Reload
+.LBB228_24:
+ movl %edi, 44(%edx)
+ jne .LBB228_26
+# BB#25:
+ movl 124(%esp), %esi # 4-byte Reload
+.LBB228_26:
+ movl %esi, 48(%edx)
+ jne .LBB228_28
+# BB#27:
+ movl 140(%esp), %eax # 4-byte Reload
+.LBB228_28:
+ movl %eax, 52(%edx)
+ jne .LBB228_30
+# BB#29:
+ movl 156(%esp), %ecx # 4-byte Reload
+.LBB228_30:
+ movl %ecx, 56(%edx)
+ addl $1148, %esp # imm = 0x47C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end228:
+ .size mcl_fp_montRed15Lbmi2, .Lfunc_end228-mcl_fp_montRed15Lbmi2
+
+ .globl mcl_fp_addPre15Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre15Lbmi2,@function
+mcl_fp_addPre15Lbmi2: # @mcl_fp_addPre15Lbmi2
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 20(%esp), %ecx
+ addl (%ecx), %edx
+ adcl 4(%ecx), %esi
+ movl 8(%eax), %ebx
+ adcl 8(%ecx), %ebx
+ movl 16(%esp), %edi
+ movl %edx, (%edi)
+ movl 12(%ecx), %edx
+ movl %esi, 4(%edi)
+ movl 16(%ecx), %esi
+ adcl 12(%eax), %edx
+ adcl 16(%eax), %esi
+ movl %ebx, 8(%edi)
+ movl 20(%eax), %ebx
+ movl %edx, 12(%edi)
+ movl 20(%ecx), %edx
+ adcl %ebx, %edx
+ movl 24(%eax), %ebx
+ movl %esi, 16(%edi)
+ movl 24(%ecx), %esi
+ adcl %ebx, %esi
+ movl 28(%eax), %ebx
+ movl %edx, 20(%edi)
+ movl 28(%ecx), %edx
+ adcl %ebx, %edx
+ movl 32(%eax), %ebx
+ movl %esi, 24(%edi)
+ movl 32(%ecx), %esi
+ adcl %ebx, %esi
+ movl 36(%eax), %ebx
+ movl %edx, 28(%edi)
+ movl 36(%ecx), %edx
+ adcl %ebx, %edx
+ movl 40(%eax), %ebx
+ movl %esi, 32(%edi)
+ movl 40(%ecx), %esi
+ adcl %ebx, %esi
+ movl 44(%eax), %ebx
+ movl %edx, 36(%edi)
+ movl 44(%ecx), %edx
+ adcl %ebx, %edx
+ movl 48(%eax), %ebx
+ movl %esi, 40(%edi)
+ movl 48(%ecx), %esi
+ adcl %ebx, %esi
+ movl 52(%eax), %ebx
+ movl %edx, 44(%edi)
+ movl 52(%ecx), %edx
+ adcl %ebx, %edx
+ movl %esi, 48(%edi)
+ movl %edx, 52(%edi)
+ movl 56(%eax), %eax
+ movl 56(%ecx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 56(%edi)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end229:
+ .size mcl_fp_addPre15Lbmi2, .Lfunc_end229-mcl_fp_addPre15Lbmi2
+
+ .globl mcl_fp_subPre15Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre15Lbmi2,@function
+mcl_fp_subPre15Lbmi2: # @mcl_fp_subPre15Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %eax, %eax
+ movl 28(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %ebp
+ sbbl 8(%edx), %ebp
+ movl 20(%esp), %ebx
+ movl %esi, (%ebx)
+ movl 12(%ecx), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ebx)
+ movl 16(%ecx), %edi
+ sbbl 16(%edx), %edi
+ movl %ebp, 8(%ebx)
+ movl 20(%edx), %ebp
+ movl %esi, 12(%ebx)
+ movl 20(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 24(%edx), %ebp
+ movl %edi, 16(%ebx)
+ movl 24(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 28(%edx), %ebp
+ movl %esi, 20(%ebx)
+ movl 28(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 32(%edx), %ebp
+ movl %edi, 24(%ebx)
+ movl 32(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 36(%edx), %ebp
+ movl %esi, 28(%ebx)
+ movl 36(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 40(%edx), %ebp
+ movl %edi, 32(%ebx)
+ movl 40(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 44(%edx), %ebp
+ movl %esi, 36(%ebx)
+ movl 44(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 48(%edx), %ebp
+ movl %edi, 40(%ebx)
+ movl 48(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 52(%edx), %ebp
+ movl %esi, 44(%ebx)
+ movl 52(%ecx), %esi
+ sbbl %ebp, %esi
+ movl %edi, 48(%ebx)
+ movl %esi, 52(%ebx)
+ movl 56(%edx), %edx
+ movl 56(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 56(%ebx)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end230:
+ .size mcl_fp_subPre15Lbmi2, .Lfunc_end230-mcl_fp_subPre15Lbmi2
+
+ .globl mcl_fp_shr1_15Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_15Lbmi2,@function
+mcl_fp_shr1_15Lbmi2: # @mcl_fp_shr1_15Lbmi2
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl 8(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 8(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 4(%ecx)
+ movl 12(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 8(%ecx)
+ movl 16(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 12(%ecx)
+ movl 20(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 16(%ecx)
+ movl 24(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 20(%ecx)
+ movl 28(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 24(%ecx)
+ movl 32(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 28(%ecx)
+ movl 36(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 32(%ecx)
+ movl 40(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 36(%ecx)
+ movl 44(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 40(%ecx)
+ movl 48(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 44(%ecx)
+ movl 52(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 48(%ecx)
+ movl 56(%eax), %eax
+ shrdl $1, %eax, %esi
+ movl %esi, 52(%ecx)
+ shrl %eax
+ movl %eax, 56(%ecx)
+ popl %esi
+ retl
+.Lfunc_end231:
+ .size mcl_fp_shr1_15Lbmi2, .Lfunc_end231-mcl_fp_shr1_15Lbmi2
+
+ .globl mcl_fp_add15Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add15Lbmi2,@function
+mcl_fp_add15Lbmi2: # @mcl_fp_add15Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $48, %esp
+ movl 76(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edx
+ movl 72(%esp), %eax
+ addl (%eax), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ adcl 4(%eax), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 8(%ecx), %edx
+ adcl 8(%eax), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 12(%eax), %esi
+ movl 16(%eax), %edx
+ adcl 12(%ecx), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ adcl 16(%ecx), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 20(%eax), %edx
+ adcl 20(%ecx), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 24(%eax), %edx
+ adcl 24(%ecx), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 28(%eax), %edx
+ adcl 28(%ecx), %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 32(%eax), %edx
+ adcl 32(%ecx), %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 36(%eax), %edx
+ adcl 36(%ecx), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 40(%eax), %edx
+ adcl 40(%ecx), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 44(%eax), %ebx
+ adcl 44(%ecx), %ebx
+ movl %ebx, (%esp) # 4-byte Spill
+ movl 48(%eax), %ebp
+ adcl 48(%ecx), %ebp
+ movl 52(%eax), %edi
+ adcl 52(%ecx), %edi
+ movl 56(%eax), %edx
+ adcl 56(%ecx), %edx
+ movl 68(%esp), %ecx
+ movl 4(%esp), %eax # 4-byte Reload
+ movl %eax, (%ecx)
+ movl 44(%esp), %esi # 4-byte Reload
+ movl %esi, 4(%ecx)
+ movl 40(%esp), %esi # 4-byte Reload
+ movl %esi, 8(%ecx)
+ movl 36(%esp), %esi # 4-byte Reload
+ movl %esi, 12(%ecx)
+ movl 32(%esp), %esi # 4-byte Reload
+ movl %esi, 16(%ecx)
+ movl 28(%esp), %esi # 4-byte Reload
+ movl %esi, 20(%ecx)
+ movl 24(%esp), %esi # 4-byte Reload
+ movl %esi, 24(%ecx)
+ movl 20(%esp), %esi # 4-byte Reload
+ movl %esi, 28(%ecx)
+ movl 16(%esp), %esi # 4-byte Reload
+ movl %esi, 32(%ecx)
+ movl 12(%esp), %esi # 4-byte Reload
+ movl %esi, 36(%ecx)
+ movl 8(%esp), %esi # 4-byte Reload
+ movl %esi, 40(%ecx)
+ movl %ebx, 44(%ecx)
+ movl %ebp, 48(%ecx)
+ movl %edi, 52(%ecx)
+ movl %edx, 56(%ecx)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 80(%esp), %esi
+ subl (%esi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ sbbl 4(%esi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %edx, %eax
+ movl 40(%esp), %edx # 4-byte Reload
+ sbbl 8(%esi), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ sbbl 12(%esi), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %edx # 4-byte Reload
+ sbbl 16(%esi), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %edx # 4-byte Reload
+ sbbl 20(%esi), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 24(%esp), %edx # 4-byte Reload
+ sbbl 24(%esi), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 20(%esp), %edx # 4-byte Reload
+ sbbl 28(%esi), %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 16(%esp), %edx # 4-byte Reload
+ sbbl 32(%esi), %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %edx # 4-byte Reload
+ sbbl 36(%esi), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 8(%esp), %edx # 4-byte Reload
+ sbbl 40(%esi), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl (%esp), %edx # 4-byte Reload
+ sbbl 44(%esi), %edx
+ movl %edx, (%esp) # 4-byte Spill
+ sbbl 48(%esi), %ebp
+ sbbl 52(%esi), %edi
+ sbbl 56(%esi), %eax
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB232_2
+# BB#1: # %nocarry
+ movl 4(%esp), %edx # 4-byte Reload
+ movl %edx, (%ecx)
+ movl 44(%esp), %edx # 4-byte Reload
+ movl %edx, 4(%ecx)
+ movl 40(%esp), %edx # 4-byte Reload
+ movl %edx, 8(%ecx)
+ movl 36(%esp), %edx # 4-byte Reload
+ movl %edx, 12(%ecx)
+ movl 32(%esp), %edx # 4-byte Reload
+ movl %edx, 16(%ecx)
+ movl 28(%esp), %edx # 4-byte Reload
+ movl %edx, 20(%ecx)
+ movl 24(%esp), %edx # 4-byte Reload
+ movl %edx, 24(%ecx)
+ movl 20(%esp), %edx # 4-byte Reload
+ movl %edx, 28(%ecx)
+ movl 16(%esp), %edx # 4-byte Reload
+ movl %edx, 32(%ecx)
+ movl 12(%esp), %edx # 4-byte Reload
+ movl %edx, 36(%ecx)
+ movl 8(%esp), %edx # 4-byte Reload
+ movl %edx, 40(%ecx)
+ movl (%esp), %edx # 4-byte Reload
+ movl %edx, 44(%ecx)
+ movl %ebp, 48(%ecx)
+ movl %edi, 52(%ecx)
+ movl %eax, 56(%ecx)
+.LBB232_2: # %carry
+ addl $48, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end232:
+ .size mcl_fp_add15Lbmi2, .Lfunc_end232-mcl_fp_add15Lbmi2
+
+ .globl mcl_fp_addNF15Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF15Lbmi2,@function
+mcl_fp_addNF15Lbmi2: # @mcl_fp_addNF15Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $120, %esp
+ movl 148(%esp), %ecx
+ movl (%ecx), %eax
+ movl 4(%ecx), %edx
+ movl 144(%esp), %esi
+ addl (%esi), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 4(%esi), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 56(%ecx), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 52(%ecx), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 48(%ecx), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 44(%ecx), %ebp
+ movl 40(%ecx), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 32(%ecx), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 28(%ecx), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 24(%ecx), %eax
+ movl 20(%ecx), %ebx
+ movl 16(%ecx), %edi
+ movl 12(%ecx), %edx
+ movl 8(%ecx), %ecx
+ adcl 8(%esi), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl 12(%esi), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl 16(%esi), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ adcl 20(%esi), %ebx
+ movl %ebx, 72(%esp) # 4-byte Spill
+ adcl 24(%esi), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 28(%esi), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 32(%esi), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 36(%esi), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 40(%esi), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 44(%esi), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl 48(%esi), %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 52(%esi), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 56(%esi), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 152(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ subl (%esi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ sbbl 4(%esi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ sbbl 8(%esi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 12(%esi), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ sbbl 16(%esi), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ sbbl 20(%esi), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 88(%esp), %ebx # 4-byte Reload
+ sbbl 24(%esi), %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 104(%esp), %ebp # 4-byte Reload
+ sbbl 28(%esi), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ sbbl 32(%esi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ sbbl 36(%esi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ sbbl 40(%esi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ movl %edx, %eax
+ sbbl 44(%esi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ movl %eax, %edi
+ sbbl 48(%esi), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ movl %ecx, %edi
+ movl %ecx, %ebx
+ sbbl 52(%esi), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ movl %ecx, %edi
+ sbbl 56(%esi), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl %edi, %esi
+ sarl $31, %esi
+ testl %esi, %esi
+ movl 80(%esp), %esi # 4-byte Reload
+ js .LBB233_2
+# BB#1:
+ movl (%esp), %esi # 4-byte Reload
+.LBB233_2:
+ movl 140(%esp), %edi
+ movl %esi, (%edi)
+ movl 84(%esp), %ecx # 4-byte Reload
+ js .LBB233_4
+# BB#3:
+ movl 4(%esp), %ecx # 4-byte Reload
+.LBB233_4:
+ movl %ecx, 4(%edi)
+ movl 104(%esp), %ecx # 4-byte Reload
+ movl 72(%esp), %esi # 4-byte Reload
+ js .LBB233_6
+# BB#5:
+ movl 8(%esp), %eax # 4-byte Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+.LBB233_6:
+ movl 76(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%edi)
+ movl 64(%esp), %eax # 4-byte Reload
+ js .LBB233_8
+# BB#7:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB233_8:
+ movl %eax, 12(%edi)
+ movl %ebx, %ebp
+ movl %edx, %eax
+ movl 68(%esp), %edx # 4-byte Reload
+ js .LBB233_10
+# BB#9:
+ movl 16(%esp), %edx # 4-byte Reload
+.LBB233_10:
+ movl %edx, 16(%edi)
+ movl 112(%esp), %edx # 4-byte Reload
+ movl 108(%esp), %ebx # 4-byte Reload
+ js .LBB233_12
+# BB#11:
+ movl 20(%esp), %esi # 4-byte Reload
+.LBB233_12:
+ movl %esi, 20(%edi)
+ js .LBB233_14
+# BB#13:
+ movl 24(%esp), %esi # 4-byte Reload
+ movl %esi, 88(%esp) # 4-byte Spill
+.LBB233_14:
+ movl 88(%esp), %esi # 4-byte Reload
+ movl %esi, 24(%edi)
+ js .LBB233_16
+# BB#15:
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB233_16:
+ movl %ecx, 28(%edi)
+ js .LBB233_18
+# BB#17:
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 116(%esp) # 4-byte Spill
+.LBB233_18:
+ movl 116(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%edi)
+ js .LBB233_20
+# BB#19:
+ movl 36(%esp), %ebx # 4-byte Reload
+.LBB233_20:
+ movl %ebx, 36(%edi)
+ js .LBB233_22
+# BB#21:
+ movl 40(%esp), %edx # 4-byte Reload
+.LBB233_22:
+ movl %edx, 40(%edi)
+ js .LBB233_24
+# BB#23:
+ movl 44(%esp), %eax # 4-byte Reload
+.LBB233_24:
+ movl %eax, 44(%edi)
+ movl 96(%esp), %eax # 4-byte Reload
+ js .LBB233_26
+# BB#25:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB233_26:
+ movl %eax, 48(%edi)
+ js .LBB233_28
+# BB#27:
+ movl 52(%esp), %ebp # 4-byte Reload
+.LBB233_28:
+ movl %ebp, 52(%edi)
+ movl 100(%esp), %eax # 4-byte Reload
+ js .LBB233_30
+# BB#29:
+ movl 56(%esp), %eax # 4-byte Reload
+.LBB233_30:
+ movl %eax, 56(%edi)
+ addl $120, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end233:
+ .size mcl_fp_addNF15Lbmi2, .Lfunc_end233-mcl_fp_addNF15Lbmi2
+
+ .globl mcl_fp_sub15Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub15Lbmi2,@function
+mcl_fp_sub15Lbmi2: # @mcl_fp_sub15Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $56, %esp
+ movl 80(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+ xorl %ebx, %ebx
+ movl 84(%esp), %edi
+ subl (%edi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ sbbl 4(%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ sbbl 12(%edi), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ sbbl 16(%edi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ sbbl 20(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ sbbl 24(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 28(%esi), %eax
+ sbbl 28(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esi), %eax
+ sbbl 32(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 36(%esi), %eax
+ sbbl 36(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 40(%esi), %edx
+ sbbl 40(%edi), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 44(%esi), %ecx
+ sbbl 44(%edi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 48(%esi), %eax
+ sbbl 48(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 52(%esi), %ebp
+ sbbl 52(%edi), %ebp
+ movl 56(%esi), %esi
+ sbbl 56(%edi), %esi
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 76(%esp), %ebx
+ movl 48(%esp), %edi # 4-byte Reload
+ movl %edi, (%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 40(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 52(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl 44(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%ebx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%ebx)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%ebx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 28(%ebx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 32(%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 36(%ebx)
+ movl %edx, 40(%ebx)
+ movl %ecx, 44(%ebx)
+ movl %eax, 48(%ebx)
+ movl %ebp, 52(%ebx)
+ movl %esi, 56(%ebx)
+ je .LBB234_2
+# BB#1: # %carry
+ movl %esi, (%esp) # 4-byte Spill
+ movl 88(%esp), %esi
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl (%esi), %ecx
+ movl %ecx, (%ebx)
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 8(%esi), %edi
+ movl 12(%esi), %eax
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %edi, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl 24(%esi), %ecx
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl 28(%esi), %eax
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%ebx)
+ movl 32(%esi), %ecx
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+ movl 36(%esi), %eax
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%ebx)
+ movl 40(%esi), %ecx
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%ebx)
+ movl 44(%esi), %eax
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 40(%ebx)
+ movl 48(%esi), %ecx
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 44(%ebx)
+ movl %ecx, 48(%ebx)
+ movl 52(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 52(%ebx)
+ movl 56(%esi), %eax
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%ebx)
+.LBB234_2: # %nocarry
+ addl $56, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end234:
+ .size mcl_fp_sub15Lbmi2, .Lfunc_end234-mcl_fp_sub15Lbmi2
+
+ .globl mcl_fp_subNF15Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF15Lbmi2,@function
+mcl_fp_subNF15Lbmi2: # @mcl_fp_subNF15Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $96, %esp
+ movl 120(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edx
+ movl 124(%esp), %edi
+ subl (%edi), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ sbbl 4(%edi), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 56(%ecx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 52(%ecx), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 48(%ecx), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 44(%ecx), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 40(%ecx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 32(%ecx), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 28(%ecx), %ebp
+ movl 24(%ecx), %ebx
+ movl 20(%ecx), %esi
+ movl 16(%ecx), %edx
+ movl 12(%ecx), %eax
+ movl 8(%ecx), %ecx
+ sbbl 8(%edi), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ sbbl 12(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl 16(%edi), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ sbbl 20(%edi), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ sbbl 24(%edi), %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ sbbl 28(%edi), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ sbbl 32(%edi), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ sbbl 36(%edi), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ sbbl 44(%edi), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ sbbl 48(%edi), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ sbbl 52(%edi), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ sbbl 56(%edi), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ sarl $31, %ebp
+ movl %ebp, %edi
+ shldl $1, %eax, %edi
+ movl 128(%esp), %edx
+ andl (%edx), %edi
+ movl 56(%edx), %eax
+ andl %ebp, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%edx), %eax
+ andl %ebp, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 48(%edx), %eax
+ andl %ebp, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 44(%edx), %eax
+ andl %ebp, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 40(%edx), %eax
+ andl %ebp, %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 36(%edx), %eax
+ andl %ebp, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 32(%edx), %eax
+ andl %ebp, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 28(%edx), %eax
+ andl %ebp, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 24(%edx), %eax
+ andl %ebp, %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 20(%edx), %ebx
+ andl %ebp, %ebx
+ movl 16(%edx), %esi
+ andl %ebp, %esi
+ movl 12(%edx), %ecx
+ andl %ebp, %ecx
+ movl 8(%edx), %eax
+ andl %ebp, %eax
+ andl 4(%edx), %ebp
+ addl 60(%esp), %edi # 4-byte Folded Reload
+ adcl 64(%esp), %ebp # 4-byte Folded Reload
+ movl 116(%esp), %edx
+ movl %edi, (%edx)
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %ebp, 4(%edx)
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 8(%edx)
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %ecx, 12(%edx)
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, 16(%edx)
+ movl (%esp), %ecx # 4-byte Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ebx, 20(%edx)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%edx)
+ movl 8(%esp), %ecx # 4-byte Reload
+ adcl 92(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%edx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%edx)
+ movl 16(%esp), %ecx # 4-byte Reload
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%edx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 40(%edx)
+ movl 24(%esp), %ecx # 4-byte Reload
+ adcl 84(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 44(%edx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 48(%edx)
+ movl %eax, 52(%edx)
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%edx)
+ addl $96, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end235:
+ .size mcl_fp_subNF15Lbmi2, .Lfunc_end235-mcl_fp_subNF15Lbmi2
+
+ .globl mcl_fpDbl_add15Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add15Lbmi2,@function
+mcl_fpDbl_add15Lbmi2: # @mcl_fpDbl_add15Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $108, %esp
+ movl 136(%esp), %ecx
+ movl 132(%esp), %edx
+ movl 12(%edx), %edi
+ movl 16(%edx), %esi
+ movl 8(%ecx), %ebx
+ movl (%ecx), %ebp
+ addl (%edx), %ebp
+ movl 128(%esp), %eax
+ movl %ebp, (%eax)
+ movl 4(%ecx), %ebp
+ adcl 4(%edx), %ebp
+ adcl 8(%edx), %ebx
+ adcl 12(%ecx), %edi
+ adcl 16(%ecx), %esi
+ movl %ebp, 4(%eax)
+ movl 68(%ecx), %ebp
+ movl %ebx, 8(%eax)
+ movl 20(%ecx), %ebx
+ movl %edi, 12(%eax)
+ movl 20(%edx), %edi
+ adcl %ebx, %edi
+ movl 24(%ecx), %ebx
+ movl %esi, 16(%eax)
+ movl 24(%edx), %esi
+ adcl %ebx, %esi
+ movl 28(%ecx), %ebx
+ movl %edi, 20(%eax)
+ movl 28(%edx), %edi
+ adcl %ebx, %edi
+ movl 32(%ecx), %ebx
+ movl %esi, 24(%eax)
+ movl 32(%edx), %esi
+ adcl %ebx, %esi
+ movl 36(%ecx), %ebx
+ movl %edi, 28(%eax)
+ movl 36(%edx), %edi
+ adcl %ebx, %edi
+ movl 40(%ecx), %ebx
+ movl %esi, 32(%eax)
+ movl 40(%edx), %esi
+ adcl %ebx, %esi
+ movl 44(%ecx), %ebx
+ movl %edi, 36(%eax)
+ movl 44(%edx), %edi
+ adcl %ebx, %edi
+ movl 48(%ecx), %ebx
+ movl %esi, 40(%eax)
+ movl 48(%edx), %esi
+ adcl %ebx, %esi
+ movl 52(%ecx), %ebx
+ movl %edi, 44(%eax)
+ movl 52(%edx), %edi
+ adcl %ebx, %edi
+ movl 56(%ecx), %ebx
+ movl %esi, 48(%eax)
+ movl 56(%edx), %esi
+ adcl %ebx, %esi
+ movl 60(%ecx), %ebx
+ movl %edi, 52(%eax)
+ movl 60(%edx), %edi
+ adcl %ebx, %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 64(%ecx), %edi
+ movl %esi, 56(%eax)
+ movl 64(%edx), %eax
+ adcl %edi, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%edx), %eax
+ adcl %ebp, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%ecx), %esi
+ movl 72(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 76(%ecx), %esi
+ movl 76(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%ecx), %esi
+ movl 80(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%ecx), %esi
+ movl 84(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%ecx), %esi
+ movl 88(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 92(%ecx), %esi
+ movl 92(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 96(%ecx), %esi
+ movl 96(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 100(%ecx), %esi
+ movl 100(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 104(%ecx), %eax
+ movl 104(%edx), %esi
+ adcl %eax, %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 108(%ecx), %edi
+ movl 108(%edx), %eax
+ adcl %edi, %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 112(%ecx), %ebx
+ movl 112(%edx), %edi
+ adcl %ebx, %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 116(%ecx), %ecx
+ movl 116(%edx), %edx
+ adcl %ecx, %edx
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 140(%esp), %ebp
+ movl 76(%esp), %ecx # 4-byte Reload
+ subl (%ebp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ sbbl 4(%ebp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ sbbl 8(%ebp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ sbbl 12(%ebp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ sbbl 16(%ebp), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ sbbl 20(%ebp), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ sbbl 24(%ebp), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ sbbl 28(%ebp), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ sbbl 32(%ebp), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ sbbl 36(%ebp), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ sbbl 40(%ebp), %ecx
+ sbbl 44(%ebp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ sbbl 48(%ebp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %edi, %eax
+ movl %edx, %edi
+ sbbl 52(%ebp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edi, %esi
+ sbbl 56(%ebp), %esi
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB236_2
+# BB#1:
+ movl %esi, %edi
+.LBB236_2:
+ testb %bl, %bl
+ movl 76(%esp), %eax # 4-byte Reload
+ movl 72(%esp), %esi # 4-byte Reload
+ movl 68(%esp), %ebx # 4-byte Reload
+ movl 64(%esp), %ebp # 4-byte Reload
+ jne .LBB236_4
+# BB#3:
+ movl %ecx, %esi
+ movl (%esp), %ebx # 4-byte Reload
+ movl 4(%esp), %ebp # 4-byte Reload
+ movl 8(%esp), %eax # 4-byte Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+.LBB236_4:
+ movl 128(%esp), %edx
+ movl %eax, 60(%edx)
+ movl 80(%esp), %eax # 4-byte Reload
+ movl %eax, 64(%edx)
+ movl 84(%esp), %eax # 4-byte Reload
+ movl %eax, 68(%edx)
+ movl 88(%esp), %eax # 4-byte Reload
+ movl %eax, 72(%edx)
+ movl 92(%esp), %eax # 4-byte Reload
+ movl %eax, 76(%edx)
+ movl 96(%esp), %eax # 4-byte Reload
+ movl %eax, 80(%edx)
+ movl 100(%esp), %eax # 4-byte Reload
+ movl %eax, 84(%edx)
+ movl 104(%esp), %eax # 4-byte Reload
+ movl %eax, 88(%edx)
+ movl %ebp, 92(%edx)
+ movl %ebx, 96(%edx)
+ movl %esi, 100(%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ jne .LBB236_6
+# BB#5:
+ movl 40(%esp), %eax # 4-byte Reload
+.LBB236_6:
+ movl %eax, 104(%edx)
+ movl 60(%esp), %ecx # 4-byte Reload
+ movl 56(%esp), %eax # 4-byte Reload
+ jne .LBB236_8
+# BB#7:
+ movl 44(%esp), %eax # 4-byte Reload
+.LBB236_8:
+ movl %eax, 108(%edx)
+ jne .LBB236_10
+# BB#9:
+ movl 48(%esp), %ecx # 4-byte Reload
+.LBB236_10:
+ movl %ecx, 112(%edx)
+ movl %edi, 116(%edx)
+ addl $108, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end236:
+ .size mcl_fpDbl_add15Lbmi2, .Lfunc_end236-mcl_fpDbl_add15Lbmi2
+
+ .globl mcl_fpDbl_sub15Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub15Lbmi2,@function
+mcl_fpDbl_sub15Lbmi2: # @mcl_fpDbl_sub15Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $100, %esp
+ movl 124(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 128(%esp), %ebp
+ subl (%ebp), %edx
+ sbbl 4(%ebp), %esi
+ movl 8(%eax), %edi
+ sbbl 8(%ebp), %edi
+ movl 120(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 12(%eax), %edx
+ sbbl 12(%ebp), %edx
+ movl %esi, 4(%ecx)
+ movl 16(%eax), %esi
+ sbbl 16(%ebp), %esi
+ movl %edi, 8(%ecx)
+ movl 20(%ebp), %edi
+ movl %edx, 12(%ecx)
+ movl 20(%eax), %edx
+ sbbl %edi, %edx
+ movl 24(%ebp), %edi
+ movl %esi, 16(%ecx)
+ movl 24(%eax), %esi
+ sbbl %edi, %esi
+ movl 28(%ebp), %edi
+ movl %edx, 20(%ecx)
+ movl 28(%eax), %edx
+ sbbl %edi, %edx
+ movl 32(%ebp), %edi
+ movl %esi, 24(%ecx)
+ movl 32(%eax), %esi
+ sbbl %edi, %esi
+ movl 36(%ebp), %edi
+ movl %edx, 28(%ecx)
+ movl 36(%eax), %edx
+ sbbl %edi, %edx
+ movl 40(%ebp), %edi
+ movl %esi, 32(%ecx)
+ movl 40(%eax), %esi
+ sbbl %edi, %esi
+ movl 44(%ebp), %edi
+ movl %edx, 36(%ecx)
+ movl 44(%eax), %edx
+ sbbl %edi, %edx
+ movl 48(%ebp), %edi
+ movl %esi, 40(%ecx)
+ movl 48(%eax), %esi
+ sbbl %edi, %esi
+ movl 52(%ebp), %edi
+ movl %edx, 44(%ecx)
+ movl 52(%eax), %edx
+ sbbl %edi, %edx
+ movl 56(%ebp), %edi
+ movl %esi, 48(%ecx)
+ movl 56(%eax), %esi
+ sbbl %edi, %esi
+ movl 60(%ebp), %edi
+ movl %edx, 52(%ecx)
+ movl 60(%eax), %edx
+ sbbl %edi, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 64(%ebp), %edx
+ movl %esi, 56(%ecx)
+ movl 64(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 68(%ebp), %edx
+ movl 68(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 72(%ebp), %edx
+ movl 72(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 76(%ebp), %edx
+ movl 76(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 80(%ebp), %edx
+ movl 80(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 84(%ebp), %edx
+ movl 84(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 88(%ebp), %edx
+ movl 88(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 92(%ebp), %edx
+ movl 92(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 96(%ebp), %edx
+ movl 96(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 100(%ebp), %edx
+ movl 100(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 104(%ebp), %edx
+ movl 104(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 108(%ebp), %edx
+ movl 108(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 112(%ebp), %edx
+ movl 112(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 116(%ebp), %edx
+ movl 116(%eax), %eax
+ sbbl %edx, %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 132(%esp), %esi
+ jne .LBB237_1
+# BB#2:
+ movl $0, 60(%esp) # 4-byte Folded Spill
+ jmp .LBB237_3
+.LBB237_1:
+ movl 56(%esi), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+.LBB237_3:
+ testb %al, %al
+ jne .LBB237_4
+# BB#5:
+ movl $0, 24(%esp) # 4-byte Folded Spill
+ movl $0, %ebx
+ jmp .LBB237_6
+.LBB237_4:
+ movl (%esi), %ebx
+ movl 4(%esi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+.LBB237_6:
+ jne .LBB237_7
+# BB#8:
+ movl $0, 32(%esp) # 4-byte Folded Spill
+ jmp .LBB237_9
+.LBB237_7:
+ movl 52(%esi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+.LBB237_9:
+ jne .LBB237_10
+# BB#11:
+ movl $0, 28(%esp) # 4-byte Folded Spill
+ jmp .LBB237_12
+.LBB237_10:
+ movl 48(%esi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+.LBB237_12:
+ jne .LBB237_13
+# BB#14:
+ movl $0, 20(%esp) # 4-byte Folded Spill
+ jmp .LBB237_15
+.LBB237_13:
+ movl 44(%esi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+.LBB237_15:
+ jne .LBB237_16
+# BB#17:
+ movl $0, 16(%esp) # 4-byte Folded Spill
+ jmp .LBB237_18
+.LBB237_16:
+ movl 40(%esi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+.LBB237_18:
+ jne .LBB237_19
+# BB#20:
+ movl $0, 12(%esp) # 4-byte Folded Spill
+ jmp .LBB237_21
+.LBB237_19:
+ movl 36(%esi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+.LBB237_21:
+ jne .LBB237_22
+# BB#23:
+ movl $0, 8(%esp) # 4-byte Folded Spill
+ jmp .LBB237_24
+.LBB237_22:
+ movl 32(%esi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+.LBB237_24:
+ jne .LBB237_25
+# BB#26:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB237_27
+.LBB237_25:
+ movl 28(%esi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+.LBB237_27:
+ jne .LBB237_28
+# BB#29:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB237_30
+.LBB237_28:
+ movl 24(%esi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+.LBB237_30:
+ jne .LBB237_31
+# BB#32:
+ movl $0, %edx
+ jmp .LBB237_33
+.LBB237_31:
+ movl 20(%esi), %edx
+.LBB237_33:
+ jne .LBB237_34
+# BB#35:
+ movl $0, %ebp
+ jmp .LBB237_36
+.LBB237_34:
+ movl 16(%esi), %ebp
+.LBB237_36:
+ jne .LBB237_37
+# BB#38:
+ movl $0, %eax
+ jmp .LBB237_39
+.LBB237_37:
+ movl 12(%esi), %eax
+.LBB237_39:
+ jne .LBB237_40
+# BB#41:
+ xorl %esi, %esi
+ jmp .LBB237_42
+.LBB237_40:
+ movl 8(%esi), %esi
+.LBB237_42:
+ addl 44(%esp), %ebx # 4-byte Folded Reload
+ movl 24(%esp), %edi # 4-byte Reload
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 60(%ecx)
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %edi, 64(%ecx)
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %esi, 68(%ecx)
+ adcl 52(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 72(%ecx)
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %ebp, 76(%ecx)
+ movl (%esp), %esi # 4-byte Reload
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 80(%ecx)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %esi, 84(%ecx)
+ movl 8(%esp), %edx # 4-byte Reload
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 88(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 92(%ecx)
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 80(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 96(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 100(%ecx)
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 104(%ecx)
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 108(%ecx)
+ movl %eax, 112(%ecx)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%ecx)
+ addl $100, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end237:
+ .size mcl_fpDbl_sub15Lbmi2, .Lfunc_end237-mcl_fpDbl_sub15Lbmi2
+
+ .align 16, 0x90
+ .type .LmulPv512x32,@function
+.LmulPv512x32: # @mulPv512x32
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $56, %esp
+ movl %edx, %eax
+ movl 76(%esp), %edi
+ movl %edi, %edx
+ mulxl 4(%eax), %ebx, %esi
+ movl %edi, %edx
+ mulxl (%eax), %ebp, %edx
+ movl %ebp, 52(%esp) # 4-byte Spill
+ addl %ebx, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 8(%eax), %edx, %ebx
+ adcl %esi, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 12(%eax), %edx, %esi
+ adcl %ebx, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 16(%eax), %edx, %ebx
+ adcl %esi, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 20(%eax), %edx, %esi
+ adcl %ebx, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 24(%eax), %edx, %ebx
+ adcl %esi, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 28(%eax), %edx, %esi
+ adcl %ebx, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 32(%eax), %edx, %ebx
+ adcl %esi, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 36(%eax), %edx, %esi
+ adcl %ebx, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 40(%eax), %edx, %ebx
+ adcl %esi, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 44(%eax), %edx, %esi
+ adcl %ebx, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 48(%eax), %ebx, %ebp
+ adcl %esi, %ebx
+ movl %edi, %edx
+ mulxl 52(%eax), %esi, %edx
+ movl %edx, (%esp) # 4-byte Spill
+ adcl %ebp, %esi
+ movl %edi, %edx
+ mulxl 56(%eax), %edx, %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ adcl (%esp), %edx # 4-byte Folded Reload
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, (%ecx)
+ movl 48(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%ecx)
+ movl 44(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%ecx)
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%ecx)
+ movl 36(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%ecx)
+ movl 32(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%ecx)
+ movl 28(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%ecx)
+ movl 24(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%ecx)
+ movl 20(%esp), %ebp # 4-byte Reload
+ movl %ebp, 32(%ecx)
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, 36(%ecx)
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl %ebp, 40(%ecx)
+ movl 8(%esp), %ebp # 4-byte Reload
+ movl %ebp, 44(%ecx)
+ movl %ebx, 48(%ecx)
+ movl %esi, 52(%ecx)
+ movl %edx, 56(%ecx)
+ movl %edi, %edx
+ mulxl 60(%eax), %eax, %edx
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%ecx)
+ adcl $0, %edx
+ movl %edx, 64(%ecx)
+ movl %ecx, %eax
+ addl $56, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end238:
+ .size .LmulPv512x32, .Lfunc_end238-.LmulPv512x32
+
+ .globl mcl_fp_mulUnitPre16Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre16Lbmi2,@function
+mcl_fp_mulUnitPre16Lbmi2: # @mcl_fp_mulUnitPre16Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $124, %esp
+ calll .L239$pb
+.L239$pb:
+ popl %ebx
+.Ltmp50:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp50-.L239$pb), %ebx
+ movl 152(%esp), %eax
+ movl %eax, (%esp)
+ leal 56(%esp), %ecx
+ movl 148(%esp), %edx
+ calll .LmulPv512x32
+ movl 120(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 116(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 112(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 108(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 104(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 88(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 80(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp
+ movl 72(%esp), %ebx
+ movl 68(%esp), %edi
+ movl 64(%esp), %esi
+ movl 56(%esp), %edx
+ movl 60(%esp), %ecx
+ movl 144(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %edi, 12(%eax)
+ movl %ebx, 16(%eax)
+ movl %ebp, 20(%eax)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl %ecx, 56(%eax)
+ movl 48(%esp), %ecx # 4-byte Reload
+ movl %ecx, 60(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ movl %ecx, 64(%eax)
+ addl $124, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end239:
+ .size mcl_fp_mulUnitPre16Lbmi2, .Lfunc_end239-mcl_fp_mulUnitPre16Lbmi2
+
+ .globl mcl_fpDbl_mulPre16Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre16Lbmi2,@function
+mcl_fpDbl_mulPre16Lbmi2: # @mcl_fpDbl_mulPre16Lbmi2
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $300, %esp # imm = 0x12C
+ calll .L240$pb
+.L240$pb:
+ popl %ebx
+.Ltmp51:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp51-.L240$pb), %ebx
+ movl %ebx, -224(%ebp) # 4-byte Spill
+ movl 16(%ebp), %edi
+ movl %edi, 8(%esp)
+ movl 12(%ebp), %esi
+ movl %esi, 4(%esp)
+ movl 8(%ebp), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre8Lbmi2@PLT
+ leal 32(%edi), %eax
+ movl %eax, 8(%esp)
+ leal 32(%esi), %eax
+ movl %eax, 4(%esp)
+ movl 8(%ebp), %eax
+ leal 64(%eax), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre8Lbmi2@PLT
+ movl 52(%esi), %ebx
+ movl 48(%esi), %eax
+ movl 44(%esi), %ecx
+ movl 40(%esi), %edx
+ movl %edx, -176(%ebp) # 4-byte Spill
+ movl (%esi), %edi
+ movl 4(%esi), %edx
+ addl 32(%esi), %edi
+ movl %edi, -184(%ebp) # 4-byte Spill
+ movl %esi, %edi
+ adcl 36(%edi), %edx
+ movl %edx, -236(%ebp) # 4-byte Spill
+ movl -176(%ebp), %edx # 4-byte Reload
+ adcl 8(%edi), %edx
+ movl %edx, -176(%ebp) # 4-byte Spill
+ adcl 12(%edi), %ecx
+ movl %ecx, -232(%ebp) # 4-byte Spill
+ adcl 16(%edi), %eax
+ movl %eax, -212(%ebp) # 4-byte Spill
+ adcl 20(%edi), %ebx
+ movl %ebx, -228(%ebp) # 4-byte Spill
+ movl 56(%edi), %eax
+ adcl 24(%edi), %eax
+ movl %eax, -248(%ebp) # 4-byte Spill
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %ecx
+ popl %eax
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ movl 16(%ebp), %esi
+ movl (%esi), %ecx
+ addl 32(%esi), %ecx
+ movl %ecx, -188(%ebp) # 4-byte Spill
+ movl 4(%esi), %ecx
+ adcl 36(%esi), %ecx
+ movl %ecx, -192(%ebp) # 4-byte Spill
+ movl 40(%esi), %ecx
+ adcl 8(%esi), %ecx
+ movl %ecx, -196(%ebp) # 4-byte Spill
+ movl 44(%esi), %ecx
+ adcl 12(%esi), %ecx
+ movl %ecx, -200(%ebp) # 4-byte Spill
+ movl 48(%esi), %ecx
+ adcl 16(%esi), %ecx
+ movl %ecx, -204(%ebp) # 4-byte Spill
+ movl 52(%esi), %ecx
+ adcl 20(%esi), %ecx
+ movl %ecx, -208(%ebp) # 4-byte Spill
+ movl 56(%esi), %edx
+ adcl 24(%esi), %edx
+ movl 60(%esi), %ecx
+ adcl 28(%esi), %ecx
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %ebx
+ popl %eax
+ movl %ebx, -252(%ebp) # 4-byte Spill
+ movl -212(%ebp), %ebx # 4-byte Reload
+ movl -176(%ebp), %esi # 4-byte Reload
+ movl %esi, -216(%ebp) # 4-byte Spill
+ movl -184(%ebp), %esi # 4-byte Reload
+ movl %esi, -220(%ebp) # 4-byte Spill
+ jb .LBB240_2
+# BB#1:
+ xorl %eax, %eax
+ xorl %ebx, %ebx
+ movl $0, -216(%ebp) # 4-byte Folded Spill
+ movl $0, -220(%ebp) # 4-byte Folded Spill
+.LBB240_2:
+ movl %ebx, -244(%ebp) # 4-byte Spill
+ movl %eax, -240(%ebp) # 4-byte Spill
+ movl 60(%edi), %eax
+ movl -144(%ebp), %ebx # 4-byte Reload
+ pushl %eax
+ movl %ebx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ adcl 28(%edi), %eax
+ movl %eax, -180(%ebp) # 4-byte Spill
+ movl %ecx, -172(%ebp) # 4-byte Spill
+ movl %edx, -144(%ebp) # 4-byte Spill
+ movl -208(%ebp), %eax # 4-byte Reload
+ movl %eax, -148(%ebp) # 4-byte Spill
+ movl -204(%ebp), %eax # 4-byte Reload
+ movl %eax, -152(%ebp) # 4-byte Spill
+ movl -200(%ebp), %eax # 4-byte Reload
+ movl %eax, -156(%ebp) # 4-byte Spill
+ movl -196(%ebp), %eax # 4-byte Reload
+ movl %eax, -160(%ebp) # 4-byte Spill
+ movl -192(%ebp), %eax # 4-byte Reload
+ movl %eax, -164(%ebp) # 4-byte Spill
+ movl -188(%ebp), %eax # 4-byte Reload
+ movl %eax, -168(%ebp) # 4-byte Spill
+ jb .LBB240_4
+# BB#3:
+ movl $0, -172(%ebp) # 4-byte Folded Spill
+ movl $0, -144(%ebp) # 4-byte Folded Spill
+ movl $0, -148(%ebp) # 4-byte Folded Spill
+ movl $0, -152(%ebp) # 4-byte Folded Spill
+ movl $0, -156(%ebp) # 4-byte Folded Spill
+ movl $0, -160(%ebp) # 4-byte Folded Spill
+ movl $0, -164(%ebp) # 4-byte Folded Spill
+ movl $0, -168(%ebp) # 4-byte Folded Spill
+.LBB240_4:
+ movl -184(%ebp), %eax # 4-byte Reload
+ movl %eax, -108(%ebp)
+ movl -236(%ebp), %eax # 4-byte Reload
+ movl %eax, -104(%ebp)
+ movl -176(%ebp), %edi # 4-byte Reload
+ movl %edi, -100(%ebp)
+ movl -232(%ebp), %edi # 4-byte Reload
+ movl %edi, -96(%ebp)
+ movl -212(%ebp), %esi # 4-byte Reload
+ movl %esi, -92(%ebp)
+ movl -228(%ebp), %esi # 4-byte Reload
+ movl %esi, -88(%ebp)
+ movl -248(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -84(%ebp)
+ movl -188(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -140(%ebp)
+ movl -192(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -136(%ebp)
+ movl -196(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -132(%ebp)
+ movl -200(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -128(%ebp)
+ movl -204(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -124(%ebp)
+ movl -208(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -120(%ebp)
+ movl %esi, %ebx
+ movl %edi, %esi
+ movl %eax, %edi
+ movl %edx, -116(%ebp)
+ movl %ecx, -112(%ebp)
+ sbbl %edx, %edx
+ movl -180(%ebp), %eax # 4-byte Reload
+ movl %eax, -80(%ebp)
+ movl -252(%ebp), %ecx # 4-byte Reload
+ pushl %eax
+ movl %ecx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB240_6
+# BB#5:
+ movl $0, %eax
+ movl $0, %ebx
+ movl $0, %esi
+ movl $0, %edi
+.LBB240_6:
+ movl %eax, -180(%ebp) # 4-byte Spill
+ sbbl %eax, %eax
+ leal -140(%ebp), %ecx
+ movl %ecx, 8(%esp)
+ leal -108(%ebp), %ecx
+ movl %ecx, 4(%esp)
+ leal -76(%ebp), %ecx
+ movl %ecx, (%esp)
+ andl %eax, %edx
+ movl -220(%ebp), %eax # 4-byte Reload
+ addl %eax, -168(%ebp) # 4-byte Folded Spill
+ adcl %edi, -164(%ebp) # 4-byte Folded Spill
+ movl -216(%ebp), %eax # 4-byte Reload
+ adcl %eax, -160(%ebp) # 4-byte Folded Spill
+ adcl %esi, -156(%ebp) # 4-byte Folded Spill
+ movl -244(%ebp), %eax # 4-byte Reload
+ adcl %eax, -152(%ebp) # 4-byte Folded Spill
+ adcl %ebx, -148(%ebp) # 4-byte Folded Spill
+ movl -144(%ebp), %eax # 4-byte Reload
+ adcl -240(%ebp), %eax # 4-byte Folded Reload
+ movl %eax, -144(%ebp) # 4-byte Spill
+ movl -172(%ebp), %edi # 4-byte Reload
+ adcl -180(%ebp), %edi # 4-byte Folded Reload
+ sbbl %esi, %esi
+ andl $1, %esi
+ andl $1, %edx
+ movl %edx, -176(%ebp) # 4-byte Spill
+ movl -224(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre8Lbmi2@PLT
+ movl -168(%ebp), %eax # 4-byte Reload
+ addl -44(%ebp), %eax
+ movl %eax, -168(%ebp) # 4-byte Spill
+ movl -164(%ebp), %eax # 4-byte Reload
+ adcl -40(%ebp), %eax
+ movl %eax, -164(%ebp) # 4-byte Spill
+ movl -160(%ebp), %eax # 4-byte Reload
+ adcl -36(%ebp), %eax
+ movl %eax, -160(%ebp) # 4-byte Spill
+ movl -156(%ebp), %eax # 4-byte Reload
+ adcl -32(%ebp), %eax
+ movl %eax, -156(%ebp) # 4-byte Spill
+ movl -152(%ebp), %eax # 4-byte Reload
+ adcl -28(%ebp), %eax
+ movl %eax, -152(%ebp) # 4-byte Spill
+ movl -148(%ebp), %eax # 4-byte Reload
+ adcl -24(%ebp), %eax
+ movl %eax, -148(%ebp) # 4-byte Spill
+ movl -144(%ebp), %eax # 4-byte Reload
+ adcl -20(%ebp), %eax
+ movl %eax, -144(%ebp) # 4-byte Spill
+ adcl -16(%ebp), %edi
+ movl %edi, -172(%ebp) # 4-byte Spill
+ adcl %esi, -176(%ebp) # 4-byte Folded Spill
+ movl -76(%ebp), %eax
+ movl 8(%ebp), %esi
+ subl (%esi), %eax
+ movl %eax, -196(%ebp) # 4-byte Spill
+ movl -72(%ebp), %ecx
+ sbbl 4(%esi), %ecx
+ movl -68(%ebp), %eax
+ sbbl 8(%esi), %eax
+ movl %eax, -192(%ebp) # 4-byte Spill
+ movl -64(%ebp), %edx
+ sbbl 12(%esi), %edx
+ movl -60(%ebp), %ebx
+ sbbl 16(%esi), %ebx
+ movl -56(%ebp), %eax
+ sbbl 20(%esi), %eax
+ movl %eax, -180(%ebp) # 4-byte Spill
+ movl -52(%ebp), %eax
+ sbbl 24(%esi), %eax
+ movl %eax, -184(%ebp) # 4-byte Spill
+ movl -48(%ebp), %eax
+ sbbl 28(%esi), %eax
+ movl %eax, -188(%ebp) # 4-byte Spill
+ movl 32(%esi), %eax
+ movl %eax, -200(%ebp) # 4-byte Spill
+ sbbl %eax, -168(%ebp) # 4-byte Folded Spill
+ movl 36(%esi), %eax
+ movl %eax, -204(%ebp) # 4-byte Spill
+ sbbl %eax, -164(%ebp) # 4-byte Folded Spill
+ movl 40(%esi), %eax
+ movl %eax, -208(%ebp) # 4-byte Spill
+ sbbl %eax, -160(%ebp) # 4-byte Folded Spill
+ movl 44(%esi), %eax
+ movl %eax, -212(%ebp) # 4-byte Spill
+ sbbl %eax, -156(%ebp) # 4-byte Folded Spill
+ movl 48(%esi), %eax
+ movl %eax, -216(%ebp) # 4-byte Spill
+ sbbl %eax, -152(%ebp) # 4-byte Folded Spill
+ movl 52(%esi), %eax
+ movl %eax, -220(%ebp) # 4-byte Spill
+ sbbl %eax, -148(%ebp) # 4-byte Folded Spill
+ movl 56(%esi), %eax
+ movl %eax, -224(%ebp) # 4-byte Spill
+ movl -144(%ebp), %edi # 4-byte Reload
+ sbbl %eax, %edi
+ movl 60(%esi), %eax
+ movl %eax, -228(%ebp) # 4-byte Spill
+ sbbl %eax, -172(%ebp) # 4-byte Folded Spill
+ sbbl $0, -176(%ebp) # 4-byte Folded Spill
+ movl 64(%esi), %eax
+ movl %eax, -260(%ebp) # 4-byte Spill
+ subl %eax, -196(%ebp) # 4-byte Folded Spill
+ movl 68(%esi), %eax
+ movl %eax, -264(%ebp) # 4-byte Spill
+ sbbl %eax, %ecx
+ movl 72(%esi), %eax
+ movl %eax, -268(%ebp) # 4-byte Spill
+ sbbl %eax, -192(%ebp) # 4-byte Folded Spill
+ movl 76(%esi), %eax
+ movl %eax, -272(%ebp) # 4-byte Spill
+ sbbl %eax, %edx
+ movl 80(%esi), %eax
+ movl %eax, -276(%ebp) # 4-byte Spill
+ sbbl %eax, %ebx
+ movl 84(%esi), %eax
+ movl %eax, -280(%ebp) # 4-byte Spill
+ sbbl %eax, -180(%ebp) # 4-byte Folded Spill
+ movl 88(%esi), %eax
+ movl %eax, -284(%ebp) # 4-byte Spill
+ sbbl %eax, -184(%ebp) # 4-byte Folded Spill
+ movl 92(%esi), %eax
+ movl %eax, -288(%ebp) # 4-byte Spill
+ sbbl %eax, -188(%ebp) # 4-byte Folded Spill
+ movl 96(%esi), %eax
+ movl %eax, -292(%ebp) # 4-byte Spill
+ sbbl %eax, -168(%ebp) # 4-byte Folded Spill
+ movl 100(%esi), %eax
+ movl %eax, -236(%ebp) # 4-byte Spill
+ sbbl %eax, -164(%ebp) # 4-byte Folded Spill
+ movl 104(%esi), %eax
+ movl %eax, -240(%ebp) # 4-byte Spill
+ sbbl %eax, -160(%ebp) # 4-byte Folded Spill
+ movl 108(%esi), %eax
+ movl %eax, -244(%ebp) # 4-byte Spill
+ sbbl %eax, -156(%ebp) # 4-byte Folded Spill
+ movl 112(%esi), %eax
+ movl %eax, -248(%ebp) # 4-byte Spill
+ sbbl %eax, -152(%ebp) # 4-byte Folded Spill
+ movl 116(%esi), %eax
+ movl %eax, -252(%ebp) # 4-byte Spill
+ sbbl %eax, -148(%ebp) # 4-byte Folded Spill
+ movl 120(%esi), %eax
+ movl %eax, -232(%ebp) # 4-byte Spill
+ sbbl %eax, %edi
+ movl %edi, -144(%ebp) # 4-byte Spill
+ movl 124(%esi), %eax
+ movl %eax, -256(%ebp) # 4-byte Spill
+ sbbl %eax, -172(%ebp) # 4-byte Folded Spill
+ movl -176(%ebp), %edi # 4-byte Reload
+ sbbl $0, %edi
+ movl -196(%ebp), %eax # 4-byte Reload
+ addl -200(%ebp), %eax # 4-byte Folded Reload
+ adcl -204(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 32(%esi)
+ movl -192(%ebp), %eax # 4-byte Reload
+ adcl -208(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 36(%esi)
+ adcl -212(%ebp), %edx # 4-byte Folded Reload
+ movl %eax, 40(%esi)
+ adcl -216(%ebp), %ebx # 4-byte Folded Reload
+ movl %edx, 44(%esi)
+ movl -180(%ebp), %eax # 4-byte Reload
+ adcl -220(%ebp), %eax # 4-byte Folded Reload
+ movl %ebx, 48(%esi)
+ movl -184(%ebp), %ecx # 4-byte Reload
+ adcl -224(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 52(%esi)
+ movl -188(%ebp), %edx # 4-byte Reload
+ adcl -228(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 56(%esi)
+ movl -168(%ebp), %eax # 4-byte Reload
+ adcl -260(%ebp), %eax # 4-byte Folded Reload
+ movl %edx, 60(%esi)
+ movl -164(%ebp), %ecx # 4-byte Reload
+ adcl -264(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 64(%esi)
+ movl -160(%ebp), %eax # 4-byte Reload
+ adcl -268(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 68(%esi)
+ movl -156(%ebp), %ecx # 4-byte Reload
+ adcl -272(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 72(%esi)
+ movl -152(%ebp), %eax # 4-byte Reload
+ adcl -276(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 76(%esi)
+ movl -148(%ebp), %ecx # 4-byte Reload
+ adcl -280(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 80(%esi)
+ movl -144(%ebp), %eax # 4-byte Reload
+ adcl -284(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 84(%esi)
+ movl -172(%ebp), %ecx # 4-byte Reload
+ adcl -288(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 88(%esi)
+ adcl -292(%ebp), %edi # 4-byte Folded Reload
+ movl %ecx, 92(%esi)
+ movl %edi, 96(%esi)
+ movl -236(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 100(%esi)
+ movl -240(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 104(%esi)
+ movl -244(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 108(%esi)
+ movl -248(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 112(%esi)
+ movl -252(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 116(%esi)
+ movl -232(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 120(%esi)
+ movl -256(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 124(%esi)
+ addl $300, %esp # imm = 0x12C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end240:
+ .size mcl_fpDbl_mulPre16Lbmi2, .Lfunc_end240-mcl_fpDbl_mulPre16Lbmi2
+
+ .globl mcl_fpDbl_sqrPre16Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre16Lbmi2,@function
+mcl_fpDbl_sqrPre16Lbmi2: # @mcl_fpDbl_sqrPre16Lbmi2
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $300, %esp # imm = 0x12C
+ calll .L241$pb
+.L241$pb:
+ popl %ebx
+.Ltmp52:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp52-.L241$pb), %ebx
+ movl %ebx, -184(%ebp) # 4-byte Spill
+ movl 12(%ebp), %edi
+ movl %edi, 8(%esp)
+ movl %edi, 4(%esp)
+ movl 8(%ebp), %esi
+ movl %esi, (%esp)
+ calll mcl_fpDbl_mulPre8Lbmi2@PLT
+ leal 32(%edi), %eax
+ movl %eax, 8(%esp)
+ movl %eax, 4(%esp)
+ leal 64(%esi), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre8Lbmi2@PLT
+ movl 52(%edi), %eax
+ movl %eax, -180(%ebp) # 4-byte Spill
+ movl 48(%edi), %eax
+ movl 44(%edi), %ebx
+ movl 40(%edi), %esi
+ movl (%edi), %ecx
+ movl 4(%edi), %edx
+ addl 32(%edi), %ecx
+ movl %ecx, -192(%ebp) # 4-byte Spill
+ adcl 36(%edi), %edx
+ movl %edx, -196(%ebp) # 4-byte Spill
+ adcl 8(%edi), %esi
+ movl %esi, -188(%ebp) # 4-byte Spill
+ adcl 12(%edi), %ebx
+ adcl 16(%edi), %eax
+ movl %eax, -208(%ebp) # 4-byte Spill
+ movl -180(%ebp), %eax # 4-byte Reload
+ adcl 20(%edi), %eax
+ movl %eax, -180(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -168(%ebp) # 4-byte Spill
+ addl %ecx, %ecx
+ movl %ecx, -164(%ebp) # 4-byte Spill
+ adcl %edx, %edx
+ movl %edx, -160(%ebp) # 4-byte Spill
+ adcl %esi, %esi
+ movl %esi, -156(%ebp) # 4-byte Spill
+ movl %ebx, %edx
+ movl %ebx, %esi
+ adcl %edx, %edx
+ movl %edx, -152(%ebp) # 4-byte Spill
+ movl -208(%ebp), %eax # 4-byte Reload
+ movl %eax, %edx
+ movl %eax, %ebx
+ adcl %edx, %edx
+ movl %edx, -148(%ebp) # 4-byte Spill
+ movl -180(%ebp), %edx # 4-byte Reload
+ adcl %edx, %edx
+ movl %edx, -144(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -172(%ebp) # 4-byte Spill
+ movl 56(%edi), %edx
+ movl -168(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ adcl 24(%edi), %edx
+ movl 60(%edi), %ecx
+ adcl 28(%edi), %ecx
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -200(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -204(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %edi
+ sbbl %eax, %eax
+ movl %eax, -168(%ebp) # 4-byte Spill
+ movl %edi, %eax
+ addb $127, %al
+ sahf
+ jb .LBB241_2
+# BB#1:
+ movl $0, -144(%ebp) # 4-byte Folded Spill
+ movl $0, -148(%ebp) # 4-byte Folded Spill
+ movl $0, -152(%ebp) # 4-byte Folded Spill
+ movl $0, -156(%ebp) # 4-byte Folded Spill
+ movl $0, -160(%ebp) # 4-byte Folded Spill
+ movl $0, -164(%ebp) # 4-byte Folded Spill
+.LBB241_2:
+ movl %edx, %eax
+ movl -172(%ebp), %edi # 4-byte Reload
+ pushl %eax
+ movl %edi, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ adcl %eax, %eax
+ movl %ecx, %edi
+ adcl %edi, %edi
+ movl %edi, -176(%ebp) # 4-byte Spill
+ movl -204(%ebp), %edi # 4-byte Reload
+ pushl %eax
+ movl %edi, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB241_4
+# BB#3:
+ movl $0, -176(%ebp) # 4-byte Folded Spill
+ xorl %eax, %eax
+.LBB241_4:
+ movl %eax, -172(%ebp) # 4-byte Spill
+ movl -192(%ebp), %eax # 4-byte Reload
+ movl %eax, -108(%ebp)
+ movl %eax, -140(%ebp)
+ movl -196(%ebp), %eax # 4-byte Reload
+ movl %eax, -104(%ebp)
+ movl %eax, -136(%ebp)
+ movl -188(%ebp), %eax # 4-byte Reload
+ movl %eax, -100(%ebp)
+ movl %eax, -132(%ebp)
+ movl %esi, -96(%ebp)
+ movl %esi, -128(%ebp)
+ movl %ebx, -92(%ebp)
+ movl %ebx, -124(%ebp)
+ movl -180(%ebp), %eax # 4-byte Reload
+ movl %eax, -88(%ebp)
+ movl %eax, -120(%ebp)
+ movl %edx, -84(%ebp)
+ movl %edx, -116(%ebp)
+ movl %ecx, -80(%ebp)
+ movl %ecx, -112(%ebp)
+ movl -200(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB241_5
+# BB#6:
+ xorl %edi, %edi
+ jmp .LBB241_7
+.LBB241_5:
+ shrl $31, %ecx
+ movl %ecx, %edi
+.LBB241_7:
+ leal -140(%ebp), %eax
+ movl %eax, 8(%esp)
+ leal -108(%ebp), %eax
+ movl %eax, 4(%esp)
+ leal -76(%ebp), %eax
+ movl %eax, (%esp)
+ movl -168(%ebp), %esi # 4-byte Reload
+ andl $1, %esi
+ movl -184(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre8Lbmi2@PLT
+ movl -164(%ebp), %eax # 4-byte Reload
+ addl -44(%ebp), %eax
+ movl %eax, -164(%ebp) # 4-byte Spill
+ movl -160(%ebp), %eax # 4-byte Reload
+ adcl -40(%ebp), %eax
+ movl %eax, -160(%ebp) # 4-byte Spill
+ movl -156(%ebp), %eax # 4-byte Reload
+ adcl -36(%ebp), %eax
+ movl %eax, -156(%ebp) # 4-byte Spill
+ movl -152(%ebp), %eax # 4-byte Reload
+ adcl -32(%ebp), %eax
+ movl %eax, -152(%ebp) # 4-byte Spill
+ movl -148(%ebp), %eax # 4-byte Reload
+ adcl -28(%ebp), %eax
+ movl %eax, -148(%ebp) # 4-byte Spill
+ movl -144(%ebp), %eax # 4-byte Reload
+ adcl -24(%ebp), %eax
+ movl %eax, -144(%ebp) # 4-byte Spill
+ movl -172(%ebp), %eax # 4-byte Reload
+ adcl -20(%ebp), %eax
+ movl %eax, -172(%ebp) # 4-byte Spill
+ movl -176(%ebp), %eax # 4-byte Reload
+ adcl -16(%ebp), %eax
+ adcl %edi, %esi
+ movl %esi, -168(%ebp) # 4-byte Spill
+ movl -76(%ebp), %ecx
+ movl 8(%ebp), %esi
+ subl (%esi), %ecx
+ movl %ecx, -180(%ebp) # 4-byte Spill
+ movl -72(%ebp), %edi
+ sbbl 4(%esi), %edi
+ movl -68(%ebp), %edx
+ sbbl 8(%esi), %edx
+ movl %edx, -184(%ebp) # 4-byte Spill
+ movl -64(%ebp), %edx
+ sbbl 12(%esi), %edx
+ movl %edx, -192(%ebp) # 4-byte Spill
+ movl -60(%ebp), %ebx
+ sbbl 16(%esi), %ebx
+ movl %eax, %ecx
+ movl -56(%ebp), %eax
+ sbbl 20(%esi), %eax
+ movl %eax, -196(%ebp) # 4-byte Spill
+ movl -52(%ebp), %edx
+ sbbl 24(%esi), %edx
+ movl %edx, -188(%ebp) # 4-byte Spill
+ movl -48(%ebp), %edx
+ sbbl 28(%esi), %edx
+ movl 32(%esi), %eax
+ movl %eax, -200(%ebp) # 4-byte Spill
+ sbbl %eax, -164(%ebp) # 4-byte Folded Spill
+ movl 36(%esi), %eax
+ movl %eax, -204(%ebp) # 4-byte Spill
+ sbbl %eax, -160(%ebp) # 4-byte Folded Spill
+ movl 40(%esi), %eax
+ movl %eax, -208(%ebp) # 4-byte Spill
+ sbbl %eax, -156(%ebp) # 4-byte Folded Spill
+ movl 44(%esi), %eax
+ movl %eax, -212(%ebp) # 4-byte Spill
+ sbbl %eax, -152(%ebp) # 4-byte Folded Spill
+ movl 48(%esi), %eax
+ movl %eax, -216(%ebp) # 4-byte Spill
+ sbbl %eax, -148(%ebp) # 4-byte Folded Spill
+ movl 52(%esi), %eax
+ movl %eax, -220(%ebp) # 4-byte Spill
+ sbbl %eax, -144(%ebp) # 4-byte Folded Spill
+ movl 56(%esi), %eax
+ movl %eax, -224(%ebp) # 4-byte Spill
+ sbbl %eax, -172(%ebp) # 4-byte Folded Spill
+ movl 60(%esi), %eax
+ movl %eax, -228(%ebp) # 4-byte Spill
+ sbbl %eax, %ecx
+ movl %ecx, -176(%ebp) # 4-byte Spill
+ movl -168(%ebp), %eax # 4-byte Reload
+ sbbl $0, %eax
+ movl 64(%esi), %ecx
+ movl %ecx, -260(%ebp) # 4-byte Spill
+ subl %ecx, -180(%ebp) # 4-byte Folded Spill
+ movl 68(%esi), %ecx
+ movl %ecx, -264(%ebp) # 4-byte Spill
+ sbbl %ecx, %edi
+ movl 72(%esi), %ecx
+ movl %ecx, -268(%ebp) # 4-byte Spill
+ sbbl %ecx, -184(%ebp) # 4-byte Folded Spill
+ movl 76(%esi), %ecx
+ movl %ecx, -272(%ebp) # 4-byte Spill
+ sbbl %ecx, -192(%ebp) # 4-byte Folded Spill
+ movl 80(%esi), %ecx
+ movl %ecx, -276(%ebp) # 4-byte Spill
+ sbbl %ecx, %ebx
+ movl 84(%esi), %ecx
+ movl %ecx, -280(%ebp) # 4-byte Spill
+ sbbl %ecx, -196(%ebp) # 4-byte Folded Spill
+ movl 88(%esi), %ecx
+ movl %ecx, -284(%ebp) # 4-byte Spill
+ sbbl %ecx, -188(%ebp) # 4-byte Folded Spill
+ movl 92(%esi), %ecx
+ movl %ecx, -288(%ebp) # 4-byte Spill
+ sbbl %ecx, %edx
+ movl 96(%esi), %ecx
+ movl %ecx, -292(%ebp) # 4-byte Spill
+ sbbl %ecx, -164(%ebp) # 4-byte Folded Spill
+ movl 100(%esi), %ecx
+ movl %ecx, -232(%ebp) # 4-byte Spill
+ sbbl %ecx, -160(%ebp) # 4-byte Folded Spill
+ movl 104(%esi), %ecx
+ movl %ecx, -236(%ebp) # 4-byte Spill
+ sbbl %ecx, -156(%ebp) # 4-byte Folded Spill
+ movl 108(%esi), %ecx
+ movl %ecx, -240(%ebp) # 4-byte Spill
+ sbbl %ecx, -152(%ebp) # 4-byte Folded Spill
+ movl 112(%esi), %ecx
+ movl %ecx, -244(%ebp) # 4-byte Spill
+ sbbl %ecx, -148(%ebp) # 4-byte Folded Spill
+ movl 116(%esi), %ecx
+ movl %ecx, -248(%ebp) # 4-byte Spill
+ sbbl %ecx, -144(%ebp) # 4-byte Folded Spill
+ movl 120(%esi), %ecx
+ movl %ecx, -252(%ebp) # 4-byte Spill
+ sbbl %ecx, -172(%ebp) # 4-byte Folded Spill
+ movl 124(%esi), %ecx
+ movl %ecx, -256(%ebp) # 4-byte Spill
+ sbbl %ecx, -176(%ebp) # 4-byte Folded Spill
+ sbbl $0, %eax
+ movl %eax, -168(%ebp) # 4-byte Spill
+ movl -180(%ebp), %eax # 4-byte Reload
+ addl -200(%ebp), %eax # 4-byte Folded Reload
+ adcl -204(%ebp), %edi # 4-byte Folded Reload
+ movl %eax, 32(%esi)
+ movl -184(%ebp), %eax # 4-byte Reload
+ adcl -208(%ebp), %eax # 4-byte Folded Reload
+ movl %edi, 36(%esi)
+ movl -192(%ebp), %ecx # 4-byte Reload
+ adcl -212(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 40(%esi)
+ adcl -216(%ebp), %ebx # 4-byte Folded Reload
+ movl %ecx, 44(%esi)
+ movl -196(%ebp), %ecx # 4-byte Reload
+ adcl -220(%ebp), %ecx # 4-byte Folded Reload
+ movl %ebx, 48(%esi)
+ movl -188(%ebp), %eax # 4-byte Reload
+ adcl -224(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 52(%esi)
+ movl %edx, %ecx
+ adcl -228(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 56(%esi)
+ movl -164(%ebp), %eax # 4-byte Reload
+ adcl -260(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 60(%esi)
+ movl -160(%ebp), %ecx # 4-byte Reload
+ adcl -264(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 64(%esi)
+ movl -156(%ebp), %eax # 4-byte Reload
+ adcl -268(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 68(%esi)
+ movl -152(%ebp), %ecx # 4-byte Reload
+ adcl -272(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 72(%esi)
+ movl -148(%ebp), %eax # 4-byte Reload
+ adcl -276(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 76(%esi)
+ movl -144(%ebp), %ecx # 4-byte Reload
+ adcl -280(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 80(%esi)
+ movl -172(%ebp), %eax # 4-byte Reload
+ adcl -284(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 84(%esi)
+ movl -176(%ebp), %ecx # 4-byte Reload
+ adcl -288(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 88(%esi)
+ movl -168(%ebp), %eax # 4-byte Reload
+ adcl -292(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 92(%esi)
+ movl %eax, 96(%esi)
+ movl -232(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 100(%esi)
+ movl -236(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 104(%esi)
+ movl -240(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 108(%esi)
+ movl -244(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 112(%esi)
+ movl -248(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 116(%esi)
+ movl -252(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 120(%esi)
+ movl -256(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 124(%esi)
+ addl $300, %esp # imm = 0x12C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end241:
+ .size mcl_fpDbl_sqrPre16Lbmi2, .Lfunc_end241-mcl_fpDbl_sqrPre16Lbmi2
+
+ .globl mcl_fp_mont16Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont16Lbmi2,@function
+mcl_fp_mont16Lbmi2: # @mcl_fp_mont16Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $2428, %esp # imm = 0x97C
+ calll .L242$pb
+.L242$pb:
+ popl %ebx
+.Ltmp53:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp53-.L242$pb), %ebx
+ movl 2460(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 2360(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 2360(%esp), %ebp
+ movl 2364(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 2424(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 2420(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 2416(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 2412(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 2408(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 2404(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 2400(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 2396(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 2392(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 2388(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 2384(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 2380(%esp), %edi
+ movl 2376(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 2372(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 2368(%esp), %esi
+ movl %eax, (%esp)
+ leal 2288(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ addl 2288(%esp), %ebp
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 2292(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 2296(%esp), %esi
+ movl %esi, %ebp
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2300(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2304(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 2308(%esp), %edi
+ movl %edi, %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 2312(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2316(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2320(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2324(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2328(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2332(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2336(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2340(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 2344(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 2348(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 2352(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl 2456(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 2216(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ andl $1, %edi
+ movl 112(%esp), %ecx # 4-byte Reload
+ addl 2216(%esp), %ecx
+ adcl 2220(%esp), %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2224(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2228(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 2232(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 2236(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2240(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2244(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2248(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2252(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2256(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2260(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2264(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 2268(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 2272(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 2276(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl 2280(%esp), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 2144(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ andl $1, %edi
+ addl 2144(%esp), %ebp
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 2148(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2152(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2156(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2160(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 2164(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 2168(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2172(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2176(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2180(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2184(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2188(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2192(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 2196(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 2200(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 2204(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 2208(%esp), %esi
+ adcl $0, %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 2072(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 112(%esp), %ecx # 4-byte Reload
+ addl 2072(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2076(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2080(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2084(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 2088(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 2092(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2096(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2100(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2104(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 2108(%esp), %ebp
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2112(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 2116(%esp), %edi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 2120(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 2124(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 2128(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl 2132(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2136(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 2000(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ andl $1, %esi
+ movl %esi, %eax
+ movl 112(%esp), %ecx # 4-byte Reload
+ addl 2000(%esp), %ecx
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 2004(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 2008(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 2012(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 2016(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 2020(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 2024(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 2028(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 2032(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl 2036(%esp), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 2040(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 2044(%esp), %edi
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 2048(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl 2052(%esp), %ebp
+ movl 124(%esp), %esi # 4-byte Reload
+ adcl 2056(%esp), %esi
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 2060(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 2064(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1928(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 100(%esp), %ecx # 4-byte Reload
+ addl 1928(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1932(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1936(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1940(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1944(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1948(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1952(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1956(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1960(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1964(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1968(%esp), %edi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1972(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 1976(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ adcl 1980(%esp), %esi
+ movl %esi, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1984(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1988(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1992(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1856(%esp), %ecx
+ movl 2460(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv512x32
+ andl $1, %ebp
+ movl %ebp, %eax
+ addl 1856(%esp), %esi
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1860(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1864(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1868(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1872(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1876(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1880(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1884(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1888(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 1892(%esp), %esi
+ adcl 1896(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %ebp # 4-byte Reload
+ adcl 1900(%esp), %ebp
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 1904(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 1908(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1912(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1916(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1920(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1784(%esp), %ecx
+ movl 2452(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv512x32
+ movl 88(%esp), %ecx # 4-byte Reload
+ addl 1784(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1788(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1792(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1796(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1800(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 1804(%esp), %edi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1808(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1812(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1816(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1820(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 1824(%esp), %ebp
+ movl %ebp, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1828(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1832(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1836(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1840(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1844(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1848(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1712(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 1712(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1716(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1720(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1724(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1728(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1732(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1736(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1740(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 1744(%esp), %edi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1748(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1752(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1756(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1760(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %ebp # 4-byte Reload
+ adcl 1764(%esp), %ebp
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 1768(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1772(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1776(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 1640(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 1640(%esp), %ecx
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1644(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1648(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1652(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1656(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1660(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1664(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1668(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1672(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1676(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1680(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1684(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl 1688(%esp), %ebp
+ movl %ebp, 108(%esp) # 4-byte Spill
+ adcl 1692(%esp), %esi
+ movl %esi, %edi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1696(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1700(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 1704(%esp), %esi
+ sbbl %eax, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1568(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ movl 80(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1568(%esp), %ebp
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1572(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1576(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1580(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1584(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 1588(%esp), %ebp
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1592(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1596(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1600(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1604(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 1608(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 1612(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1616(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ adcl 1620(%esp), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1624(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1628(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ adcl 1632(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 1496(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 1496(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1500(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 1504(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1508(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 1512(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 1516(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1520(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1524(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1528(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1532(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1536(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1540(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1544(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1548(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1552(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1556(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1560(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1424(%esp), %ecx
+ movl 2460(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv512x32
+ andl $1, %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ addl 1424(%esp), %eax
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1428(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1432(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1436(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1440(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1444(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1448(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1452(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1456(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1460(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1464(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1468(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl 1472(%esp), %edi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1476(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 1480(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1484(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1488(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl 2456(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 1352(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 1352(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1356(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1360(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1364(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1368(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1372(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1376(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1380(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1384(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1388(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1392(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 1396(%esp), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1400(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 1404(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1408(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 1412(%esp), %esi
+ adcl 1416(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1280(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ andl $1, %ebp
+ movl %ebp, %eax
+ addl 1280(%esp), %edi
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1284(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 1288(%esp), %ebp
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1292(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1296(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1300(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1304(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1308(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 1312(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 1316(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1320(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1324(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1328(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1332(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1336(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl 1340(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1344(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %edi
+ movl 2456(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 1208(%esp), %ecx
+ movl 2452(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv512x32
+ movl 64(%esp), %ecx # 4-byte Reload
+ addl 1208(%esp), %ecx
+ adcl 1212(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1216(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1220(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1224(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1228(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1232(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1236(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1240(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1244(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1248(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1252(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1256(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 1260(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1264(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1268(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1272(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1136(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 1136(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1140(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1144(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1148(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1152(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1156(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %edi # 4-byte Reload
+ adcl 1164(%esp), %edi
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1168(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1180(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1184(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 1188(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 1192(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1196(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1200(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 1064(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 68(%esp), %ecx # 4-byte Reload
+ addl 1064(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1068(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1072(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1076(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1080(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1084(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 1088(%esp), %edi
+ movl %edi, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %esi # 4-byte Reload
+ adcl 1092(%esp), %esi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1096(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1100(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1104(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1108(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1112(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 1116(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1120(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1124(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1128(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 992(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ movl %edi, %eax
+ andl $1, %eax
+ addl 992(%esp), %ebp
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 996(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1000(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1004(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 1008(%esp), %edi
+ movl 116(%esp), %ebp # 4-byte Reload
+ adcl 1012(%esp), %ebp
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 1016(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ adcl 1020(%esp), %esi
+ movl %esi, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1024(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1028(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 1032(%esp), %esi
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1036(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1040(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1044(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1048(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1052(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1056(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 920(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 920(%esp), %ecx
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 928(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 932(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ adcl 936(%esp), %ebp
+ movl %ebp, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 940(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 944(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 948(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 952(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 956(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 968(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 980(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 984(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 848(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 848(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 856(%esp), %edi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %esi # 4-byte Reload
+ adcl 868(%esp), %esi
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 876(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 896(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 776(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 84(%esp), %ecx # 4-byte Reload
+ addl 776(%esp), %ecx
+ adcl 780(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 784(%esp), %edi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 792(%esp), %esi
+ movl %esi, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %ebp # 4-byte Reload
+ adcl 800(%esp), %ebp
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 704(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ movl 84(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 704(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 712(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 716(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ adcl 728(%esp), %esi
+ movl 104(%esp), %ebp # 4-byte Reload
+ adcl 732(%esp), %ebp
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 752(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 92(%esp), %ecx # 4-byte Reload
+ addl 632(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl 652(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ adcl 656(%esp), %ebp
+ movl %ebp, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 664(%esp), %ebp
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 676(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 680(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 696(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 560(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ movl 92(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 560(%esp), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %esi # 4-byte Reload
+ adcl 576(%esp), %esi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 592(%esp), %ebp
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 608(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 612(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 488(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 96(%esp), %ecx # 4-byte Reload
+ addl 488(%esp), %ecx
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 500(%esp), %esi
+ movl %esi, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 508(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 516(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 520(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 536(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 416(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ movl 96(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 416(%esp), %edi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %edi # 4-byte Reload
+ adcl 432(%esp), %edi
+ adcl 436(%esp), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 440(%esp), %esi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 448(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 56(%eax), %eax
+ movl %eax, (%esp)
+ leal 344(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 116(%esp), %ecx # 4-byte Reload
+ addl 344(%esp), %ecx
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl 348(%esp), %ebp
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl 356(%esp), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl 360(%esp), %edi
+ adcl 364(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ movl 116(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 272(%esp), %esi
+ adcl 276(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 288(%esp), %edi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 296(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 308(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 60(%eax), %eax
+ movl %eax, (%esp)
+ leal 200(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 120(%esp), %ecx # 4-byte Reload
+ addl 200(%esp), %ecx
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 212(%esp), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 220(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 232(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 244(%esp), %edi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 128(%esp), %ecx
+ movl 2460(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv512x32
+ andl $1, %ebp
+ addl 128(%esp), %esi
+ movl 104(%esp), %ebx # 4-byte Reload
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 132(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 136(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ adcl 140(%esp), %ebx
+ movl %ebx, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %edx # 4-byte Reload
+ adcl 144(%esp), %edx
+ movl %edx, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 148(%esp), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 152(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 156(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 160(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 164(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 168(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 172(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 176(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 180(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 184(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ adcl 188(%esp), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %edx # 4-byte Reload
+ adcl 192(%esp), %edx
+ movl %edx, 116(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %eax, %edx
+ movl 2460(%esp), %edi
+ subl (%edi), %edx
+ movl %ecx, %eax
+ sbbl 4(%edi), %eax
+ movl %ebx, %ecx
+ sbbl 8(%edi), %ecx
+ movl 112(%esp), %ebx # 4-byte Reload
+ sbbl 12(%edi), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 100(%esp), %ebx # 4-byte Reload
+ sbbl 16(%edi), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 88(%esp), %ebx # 4-byte Reload
+ sbbl 20(%edi), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ sbbl 24(%edi), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ sbbl 28(%edi), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ sbbl 32(%edi), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ sbbl 36(%edi), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ sbbl 40(%edi), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ sbbl 44(%edi), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ sbbl 48(%edi), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ sbbl 52(%edi), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 96(%esp), %esi # 4-byte Reload
+ sbbl 56(%edi), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 116(%esp), %esi # 4-byte Reload
+ sbbl 60(%edi), %esi
+ movl %esi, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %edi # 4-byte Reload
+ sbbl $0, %ebp
+ andl $1, %ebp
+ movl %ebp, %ebx
+ jne .LBB242_2
+# BB#1:
+ movl %edx, %edi
+.LBB242_2:
+ movl 2448(%esp), %edx
+ movl %edi, (%edx)
+ testb %bl, %bl
+ movl 108(%esp), %edi # 4-byte Reload
+ jne .LBB242_4
+# BB#3:
+ movl %eax, %edi
+.LBB242_4:
+ movl %edi, 4(%edx)
+ jne .LBB242_6
+# BB#5:
+ movl %ecx, 104(%esp) # 4-byte Spill
+.LBB242_6:
+ movl 104(%esp), %eax # 4-byte Reload
+ movl %eax, 8(%edx)
+ jne .LBB242_8
+# BB#7:
+ movl 12(%esp), %eax # 4-byte Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+.LBB242_8:
+ movl 112(%esp), %eax # 4-byte Reload
+ movl %eax, 12(%edx)
+ movl 100(%esp), %eax # 4-byte Reload
+ jne .LBB242_10
+# BB#9:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB242_10:
+ movl %eax, 16(%edx)
+ movl 88(%esp), %eax # 4-byte Reload
+ jne .LBB242_12
+# BB#11:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB242_12:
+ movl %eax, 20(%edx)
+ jne .LBB242_14
+# BB#13:
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+.LBB242_14:
+ movl 80(%esp), %eax # 4-byte Reload
+ movl %eax, 24(%edx)
+ movl 72(%esp), %eax # 4-byte Reload
+ jne .LBB242_16
+# BB#15:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB242_16:
+ movl %eax, 28(%edx)
+ movl 64(%esp), %eax # 4-byte Reload
+ jne .LBB242_18
+# BB#17:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB242_18:
+ movl %eax, 32(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ jne .LBB242_20
+# BB#19:
+ movl 36(%esp), %eax # 4-byte Reload
+.LBB242_20:
+ movl %eax, 36(%edx)
+ movl 68(%esp), %eax # 4-byte Reload
+ jne .LBB242_22
+# BB#21:
+ movl 40(%esp), %eax # 4-byte Reload
+.LBB242_22:
+ movl %eax, 40(%edx)
+ movl 76(%esp), %eax # 4-byte Reload
+ jne .LBB242_24
+# BB#23:
+ movl 44(%esp), %eax # 4-byte Reload
+.LBB242_24:
+ movl %eax, 44(%edx)
+ movl 84(%esp), %eax # 4-byte Reload
+ jne .LBB242_26
+# BB#25:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB242_26:
+ movl %eax, 48(%edx)
+ movl 92(%esp), %eax # 4-byte Reload
+ jne .LBB242_28
+# BB#27:
+ movl 52(%esp), %eax # 4-byte Reload
+.LBB242_28:
+ movl %eax, 52(%edx)
+ movl 96(%esp), %eax # 4-byte Reload
+ jne .LBB242_30
+# BB#29:
+ movl 56(%esp), %eax # 4-byte Reload
+.LBB242_30:
+ movl %eax, 56(%edx)
+ movl 116(%esp), %eax # 4-byte Reload
+ jne .LBB242_32
+# BB#31:
+ movl 120(%esp), %eax # 4-byte Reload
+.LBB242_32:
+ movl %eax, 60(%edx)
+ addl $2428, %esp # imm = 0x97C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end242:
+ .size mcl_fp_mont16Lbmi2, .Lfunc_end242-mcl_fp_mont16Lbmi2
+
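+# Note: the routine below is compiler-generated output. Judging only from the
+# mcl naming convention and the .LmulPv512x32 helper it calls, it appears to be
+# the "NF" Montgomery-multiplication variant for a 16-limb (16 x 32-bit = 512-bit)
+# operand size on 32-bit x86 with BMI2 available; this is an interpretation of
+# the name, not a statement from upstream documentation.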
+ .globl mcl_fp_montNF16Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF16Lbmi2,@function
+mcl_fp_montNF16Lbmi2: # @mcl_fp_montNF16Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $2412, %esp # imm = 0x96C
+ calll .L243$pb
+.L243$pb:
+ popl %ebx
+.Ltmp54:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp54-.L243$pb), %ebx
+ movl 2444(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 2344(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 2344(%esp), %edi
+ movl 2348(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull %esi, %eax
+ movl 2408(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 2404(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 2400(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 2396(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 2392(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 2388(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 2384(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 2380(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 2376(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 2372(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 2368(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 2364(%esp), %ebp
+ movl 2360(%esp), %esi
+ movl 2356(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 2352(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl %eax, (%esp)
+ leal 2272(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 2272(%esp), %edi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 2276(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2280(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2284(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 2288(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl 2292(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2296(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 2300(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 2304(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 2308(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 2312(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2316(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2320(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2324(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 2328(%esp), %esi
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2332(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2336(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 2200(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 2264(%esp), %edx
+ movl 108(%esp), %ecx # 4-byte Reload
+ addl 2200(%esp), %ecx
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2204(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2208(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2212(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 2216(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2220(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 2224(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 2228(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 2232(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 2236(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2240(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2244(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2248(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 2252(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2256(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 2260(%esp), %esi
+ adcl $0, %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 2128(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 2128(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2132(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2136(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2140(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2144(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2148(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 2152(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 2156(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 2160(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 2164(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2168(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2172(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2176(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2180(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2184(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 2188(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 2192(%esp), %esi
+ movl 2440(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 2056(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 2120(%esp), %eax
+ movl 84(%esp), %edx # 4-byte Reload
+ addl 2056(%esp), %edx
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 2060(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 2064(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 2068(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 2072(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 2076(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 2080(%esp), %edi
+ movl %edi, %ebp
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 2084(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 2088(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 2092(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 2096(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 2100(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 2104(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 2108(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 2112(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ adcl 2116(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1984(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 1984(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1988(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1992(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1996(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2000(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 2004(%esp), %edi
+ adcl 2008(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 2012(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 2016(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2020(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2024(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2028(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2032(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ebp # 4-byte Reload
+ adcl 2036(%esp), %ebp
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 2040(%esp), %esi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 2044(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2048(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1912(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 1976(%esp), %eax
+ movl 76(%esp), %edx # 4-byte Reload
+ addl 1912(%esp), %edx
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1916(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1920(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1924(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 1928(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1932(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1936(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1940(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1944(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ adcl 1948(%esp), %edi
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1952(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1956(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 1960(%esp), %ebp
+ movl %ebp, 104(%esp) # 4-byte Spill
+ adcl 1964(%esp), %esi
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1968(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1972(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ movl %ebp, %eax
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1840(%esp), %ecx
+ movl 2444(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv512x32
+ addl 1840(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1844(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1848(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1852(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1856(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1860(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1864(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1868(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1872(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1876(%esp), %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 1880(%esp), %edi
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 1884(%esp), %ebp
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1888(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 1892(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1896(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1900(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 1904(%esp), %esi
+ movl 2440(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1768(%esp), %ecx
+ movl 2436(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv512x32
+ movl 1832(%esp), %edx
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 1768(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1772(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1776(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1780(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1784(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1788(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1792(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1796(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1800(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 1804(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ adcl 1808(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1812(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1816(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1820(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1824(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1828(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1696(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 1696(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1700(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1704(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1708(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1712(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 1716(%esp), %ebp
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 1720(%esp), %edi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1724(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1728(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1732(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1736(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 1740(%esp), %esi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1744(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1748(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1752(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1756(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1760(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 1624(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 1688(%esp), %edx
+ movl 68(%esp), %ecx # 4-byte Reload
+ addl 1624(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1628(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1632(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1636(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 1640(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl 1644(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 1648(%esp), %ebp
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1652(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1656(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1660(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1664(%esp), %esi
+ movl %esi, %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1668(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1672(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1676(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1680(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1684(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1552(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 1552(%esp), %esi
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 1556(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1560(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1564(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1568(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1572(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1576(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1580(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1584(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1588(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1592(%esp), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1596(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %ebp # 4-byte Reload
+ adcl 1600(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1604(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1608(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1612(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 1616(%esp), %edi
+ movl 2440(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 1480(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 1544(%esp), %eax
+ addl 1480(%esp), %esi
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 1484(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 1488(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 1492(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 1496(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 1500(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 1504(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ adcl 1508(%esp), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 1512(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %edx # 4-byte Reload
+ adcl 1516(%esp), %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 1520(%esp), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ adcl 1524(%esp), %ebp
+ movl %ebp, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 1528(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 1532(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 1536(%esp), %ebp
+ adcl 1540(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl %eax, %edi
+ adcl $0, %edi
+ movl %esi, %eax
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1408(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 1408(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1412(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 1416(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1420(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1424(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1428(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1432(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1436(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1440(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1444(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1448(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1452(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1456(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1460(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1464(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 1468(%esp), %ebp
+ adcl 1472(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 1336(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 1400(%esp), %eax
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 1336(%esp), %ecx
+ adcl 1340(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 1344(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 1348(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 1352(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 1356(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ adcl 1360(%esp), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 1364(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %edx # 4-byte Reload
+ adcl 1368(%esp), %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 1372(%esp), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %edx # 4-byte Reload
+ adcl 1376(%esp), %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 1380(%esp), %edi
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 1384(%esp), %esi
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 1388(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ adcl 1392(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 1396(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1264(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 1264(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1268(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1272(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1276(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1280(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1284(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1288(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1292(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1296(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1300(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1304(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 1308(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ adcl 1312(%esp), %esi
+ movl %esi, %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1316(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1320(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1324(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1328(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 1192(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 1256(%esp), %eax
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 1192(%esp), %ecx
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 1196(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 1200(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 1204(%esp), %esi
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 1208(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 1212(%esp), %edi
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 1216(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %edx # 4-byte Reload
+ adcl 1220(%esp), %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 1224(%esp), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %edx # 4-byte Reload
+ adcl 1228(%esp), %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 1232(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ adcl 1236(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 1240(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 1244(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 1248(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 1252(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1120(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 1120(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1124(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1128(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1132(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 1140(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 1144(%esp), %ebp
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1148(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1152(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1156(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1168(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 1176(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1180(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1184(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 1048(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 1112(%esp), %edx
+ movl 52(%esp), %ecx # 4-byte Reload
+ addl 1048(%esp), %ecx
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 1052(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 1068(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1072(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 1076(%esp), %ebp
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1080(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1084(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1088(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1092(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1096(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 1100(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1104(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1108(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 976(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 976(%esp), %edi
+ adcl 980(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 984(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl 1000(%esp), %edi
+ adcl 1004(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 1008(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 1016(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1028(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1040(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 904(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 968(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 904(%esp), %eax
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 908(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 912(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ adcl 916(%esp), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 920(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ adcl 924(%esp), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 928(%esp), %edi
+ adcl 932(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 936(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ adcl 940(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 944(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 948(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 952(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 956(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 960(%esp), %ebp
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 964(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl %eax, %esi
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 832(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 832(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 856(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 872(%esp), %esi
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 876(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 888(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 892(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 760(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 824(%esp), %edx
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 760(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 780(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 796(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl 800(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 808(%esp), %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 816(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 688(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 688(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 696(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 716(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 732(%esp), %ebp
+ adcl 736(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 616(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 680(%esp), %edx
+ movl 88(%esp), %ecx # 4-byte Reload
+ addl 616(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 624(%esp), %edi
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 640(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 656(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 672(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 544(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 544(%esp), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 552(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 560(%esp), %edi
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 564(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 600(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 472(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 536(%esp), %edx
+ movl 96(%esp), %ecx # 4-byte Reload
+ addl 472(%esp), %ecx
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 484(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ adcl 488(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 496(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 400(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 400(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 412(%esp), %esi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 420(%esp), %edi
+ adcl 424(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 444(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 56(%eax), %eax
+ movl %eax, (%esp)
+ leal 328(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 392(%esp), %edx
+ movl 92(%esp), %ecx # 4-byte Reload
+ addl 328(%esp), %ecx
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 336(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 344(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 352(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 368(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 256(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 256(%esp), %ebp
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl 260(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %ebp # 4-byte Reload
+ adcl 268(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 280(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 284(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 60(%eax), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 248(%esp), %edx
+ movl %edi, %ecx
+ addl 184(%esp), %ecx
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 188(%esp), %edi
+ adcl 192(%esp), %ebp
+ movl %ebp, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 196(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 208(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 112(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 112(%esp), %esi
+ movl %edi, %eax
+ adcl 116(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %edi # 4-byte Reload
+ adcl 120(%esp), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ adcl 124(%esp), %ebp
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 128(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl %ecx, %ebx
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 132(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 136(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 140(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 144(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 148(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 152(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 156(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 160(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 164(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 168(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 172(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 176(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 2444(%esp), %esi
+ subl (%esi), %edx
+ sbbl 4(%esi), %edi
+ movl %ebp, %ecx
+ sbbl 8(%esi), %ecx
+ movl %ebx, %eax
+ sbbl 12(%esi), %eax
+ movl 80(%esp), %ebx # 4-byte Reload
+ sbbl 16(%esi), %ebx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 68(%esp), %ebx # 4-byte Reload
+ sbbl 20(%esi), %ebx
+ movl %ebx, 8(%esp) # 4-byte Spill
+ movl 64(%esp), %ebx # 4-byte Reload
+ sbbl 24(%esi), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 60(%esp), %ebx # 4-byte Reload
+ sbbl 28(%esi), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 48(%esp), %ebx # 4-byte Reload
+ sbbl 32(%esi), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 52(%esp), %ebx # 4-byte Reload
+ sbbl 36(%esi), %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %ebx # 4-byte Reload
+ sbbl 40(%esi), %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 72(%esp), %ebx # 4-byte Reload
+ sbbl 44(%esi), %ebx
+ movl %ebx, 32(%esp) # 4-byte Spill
+ movl 88(%esp), %ebx # 4-byte Reload
+ sbbl 48(%esi), %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 96(%esp), %ebx # 4-byte Reload
+ sbbl 52(%esi), %ebx
+ movl %ebx, 40(%esp) # 4-byte Spill
+ movl 92(%esp), %ebx # 4-byte Reload
+ sbbl 56(%esi), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 104(%esp), %ebx # 4-byte Reload
+ sbbl 60(%esi), %ebx
+ movl %ebx, 84(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ testl %ebx, %ebx
+ js .LBB243_2
+# BB#1:
+ movl %edx, %esi
+.LBB243_2:
+ movl 2432(%esp), %edx
+ movl %esi, (%edx)
+ movl 108(%esp), %esi # 4-byte Reload
+ js .LBB243_4
+# BB#3:
+ movl %edi, %esi
+.LBB243_4:
+ movl %esi, 4(%edx)
+ js .LBB243_6
+# BB#5:
+ movl %ecx, %ebp
+.LBB243_6:
+ movl %ebp, 8(%edx)
+ movl 76(%esp), %ecx # 4-byte Reload
+ js .LBB243_8
+# BB#7:
+ movl %eax, %ecx
+.LBB243_8:
+ movl %ecx, 12(%edx)
+ movl 80(%esp), %eax # 4-byte Reload
+ js .LBB243_10
+# BB#9:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB243_10:
+ movl %eax, 16(%edx)
+ movl 68(%esp), %eax # 4-byte Reload
+ js .LBB243_12
+# BB#11:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB243_12:
+ movl %eax, 20(%edx)
+ movl 64(%esp), %eax # 4-byte Reload
+ js .LBB243_14
+# BB#13:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB243_14:
+ movl %eax, 24(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB243_16
+# BB#15:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB243_16:
+ movl %eax, 28(%edx)
+ movl 48(%esp), %eax # 4-byte Reload
+ js .LBB243_18
+# BB#17:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB243_18:
+ movl %eax, 32(%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB243_20
+# BB#19:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB243_20:
+ movl %eax, 36(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB243_22
+# BB#21:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB243_22:
+ movl %eax, 40(%edx)
+ movl 72(%esp), %eax # 4-byte Reload
+ js .LBB243_24
+# BB#23:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB243_24:
+ movl %eax, 44(%edx)
+ movl 88(%esp), %eax # 4-byte Reload
+ js .LBB243_26
+# BB#25:
+ movl 36(%esp), %eax # 4-byte Reload
+.LBB243_26:
+ movl %eax, 48(%edx)
+ movl 96(%esp), %eax # 4-byte Reload
+ js .LBB243_28
+# BB#27:
+ movl 40(%esp), %eax # 4-byte Reload
+.LBB243_28:
+ movl %eax, 52(%edx)
+ movl 92(%esp), %eax # 4-byte Reload
+ js .LBB243_30
+# BB#29:
+ movl 44(%esp), %eax # 4-byte Reload
+.LBB243_30:
+ movl %eax, 56(%edx)
+ movl 104(%esp), %eax # 4-byte Reload
+ js .LBB243_32
+# BB#31:
+ movl 84(%esp), %eax # 4-byte Reload
+.LBB243_32:
+ movl %eax, 60(%edx)
+ addl $2412, %esp # imm = 0x96C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end243:
+ .size mcl_fp_montNF16Lbmi2, .Lfunc_end243-mcl_fp_montNF16Lbmi2
+
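+# mcl_fp_montRed16Lbmi2: appears to implement Montgomery reduction of a
+# 32-limb (1024-bit) value modulo a 16-limb (512-bit) modulus p. Each round
+# multiplies the running low limb by n' = -p^-1 mod 2^32 (read from -4(p)),
+# folds it in via .LmulPv512x32, and a final conditional subtraction of p
+# selects the fully reduced result.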
+ .globl mcl_fp_montRed16Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed16Lbmi2,@function
+mcl_fp_montRed16Lbmi2: # @mcl_fp_montRed16Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1356, %esp # imm = 0x54C
+ calll .L244$pb
+.L244$pb:
+ popl %eax
+.Ltmp55:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp55-.L244$pb), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 1384(%esp), %edx
+ movl -4(%edx), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1380(%esp), %ecx
+ movl (%ecx), %ebx
+ movl %ebx, 112(%esp) # 4-byte Spill
+ movl 4(%ecx), %edi
+ movl %edi, 120(%esp) # 4-byte Spill
+ imull %eax, %ebx
+ movl 124(%ecx), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%ecx), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%ecx), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 112(%ecx), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 108(%ecx), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 104(%ecx), %esi
+ movl %esi, 152(%esp) # 4-byte Spill
+ movl 100(%ecx), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 96(%ecx), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 92(%ecx), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 88(%ecx), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 84(%ecx), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 80(%ecx), %edi
+ movl %edi, 148(%esp) # 4-byte Spill
+ movl 76(%ecx), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 72(%ecx), %esi
+ movl %esi, 192(%esp) # 4-byte Spill
+ movl 68(%ecx), %edi
+ movl %edi, 204(%esp) # 4-byte Spill
+ movl 64(%ecx), %esi
+ movl %esi, 200(%esp) # 4-byte Spill
+ movl 60(%ecx), %edi
+ movl %edi, 180(%esp) # 4-byte Spill
+ movl 56(%ecx), %edi
+ movl %edi, 164(%esp) # 4-byte Spill
+ movl 52(%ecx), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 48(%ecx), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 44(%ecx), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 40(%ecx), %ebp
+ movl 36(%ecx), %edi
+ movl 32(%ecx), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 28(%ecx), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 24(%ecx), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 20(%ecx), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 16(%ecx), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 12(%ecx), %esi
+ movl 8(%ecx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl (%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 60(%edx), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 56(%edx), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 52(%edx), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 48(%edx), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 44(%edx), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 40(%edx), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 36(%edx), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 32(%edx), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 28(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 24(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 16(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 12(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 8(%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 4(%edx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl %ebx, (%esp)
+ leal 1288(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 1288(%esp), %eax
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 1292(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1296(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1300(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1304(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1308(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1312(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1316(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1320(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 1324(%esp), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ adcl 1328(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1332(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 1336(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 1340(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1344(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 1348(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1352(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ adcl $0, 204(%esp) # 4-byte Folded Spill
+ adcl $0, 192(%esp) # 4-byte Folded Spill
+ movl 196(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 184(%esp) # 4-byte Folded Spill
+ adcl $0, 188(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 176(%esp) # 4-byte Folded Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ movl 132(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ sbbl %eax, %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1216(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ movl 112(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 1216(%esp), %esi
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 1220(%esp), %edx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1224(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1228(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1232(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1236(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1240(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1244(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1248(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1252(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1256(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %esi # 4-byte Reload
+ adcl 1260(%esp), %esi
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 1264(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1268(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 1272(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1276(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 1280(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ adcl $0, 192(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 196(%esp) # 4-byte Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 184(%esp) # 4-byte Folded Spill
+ adcl $0, 188(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 176(%esp) # 4-byte Folded Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ movl 156(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 132(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ movl %ebp, %eax
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1144(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 1144(%esp), %ebp
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1148(%esp), %ecx
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1152(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1156(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1168(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1180(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ adcl 1184(%esp), %esi
+ movl %esi, 136(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 1188(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1192(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 1196(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1200(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 1204(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 1208(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ adcl $0, 196(%esp) # 4-byte Folded Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 184(%esp) # 4-byte Folded Spill
+ adcl $0, 188(%esp) # 4-byte Folded Spill
+ movl 168(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 176(%esp) # 4-byte Folded Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 156(%esp) # 4-byte Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ movl 128(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1072(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 1072(%esp), %esi
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1076(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1080(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1084(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1088(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1092(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1096(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1100(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1104(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 1108(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 1112(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1116(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 1120(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1124(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 1128(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 184(%esp) # 4-byte Folded Spill
+ adcl $0, 188(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 168(%esp) # 4-byte Spill
+ adcl $0, 176(%esp) # 4-byte Folded Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 128(%esp) # 4-byte Spill
+ movl 124(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1000(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 1000(%esp), %edi
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1004(%esp), %ecx
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1028(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1040(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 1044(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ adcl $0, 184(%esp) # 4-byte Folded Spill
+ movl 188(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 176(%esp) # 4-byte Folded Spill
+ movl 172(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 124(%esp) # 4-byte Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 928(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 928(%esp), %esi
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 932(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 936(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 940(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 944(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 948(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 952(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 980(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 984(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 188(%esp) # 4-byte Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 176(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 172(%esp) # 4-byte Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ movl 144(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ movl 100(%esp), %ebp # 4-byte Reload
+ imull %ebp, %eax
+ movl %eax, (%esp)
+ leal 856(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 856(%esp), %edi
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 860(%esp), %ecx
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 876(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ movl 176(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 144(%esp) # 4-byte Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull %ebp, %eax
+ movl %eax, (%esp)
+ leal 784(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 784(%esp), %esi
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 788(%esp), %ecx
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %ebp # 4-byte Reload
+ adcl 828(%esp), %ebp
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 176(%esp) # 4-byte Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ movl 156(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 712(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 712(%esp), %edi
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 716(%esp), %ecx
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ adcl 752(%esp), %ebp
+ movl %ebp, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %edi # 4-byte Reload
+ adcl 756(%esp), %edi
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 156(%esp) # 4-byte Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 640(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 640(%esp), %esi
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 644(%esp), %ecx
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %esi # 4-byte Reload
+ adcl 668(%esp), %esi
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ adcl 680(%esp), %edi
+ movl %edi, 196(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 696(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 152(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 1384(%esp), %eax
+ movl %eax, %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 568(%esp), %ebp
+ movl 140(%esp), %ecx # 4-byte Reload
+ adcl 572(%esp), %ecx
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %ebp # 4-byte Reload
+ adcl 588(%esp), %ebp
+ adcl 592(%esp), %esi
+ movl %esi, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %esi # 4-byte Reload
+ adcl 596(%esp), %esi
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ adcl 632(%esp), %edi
+ movl %edi, 152(%esp) # 4-byte Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 496(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 496(%esp), %edi
+ movl 136(%esp), %ecx # 4-byte Reload
+ adcl 500(%esp), %ecx
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %edi # 4-byte Reload
+ adcl 508(%esp), %edi
+ adcl 512(%esp), %ebp
+ movl %ebp, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ adcl 520(%esp), %esi
+ movl %esi, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 148(%esp), %ebp # 4-byte Reload
+ adcl 532(%esp), %ebp
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 424(%esp), %esi
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ adcl 432(%esp), %edi
+ movl %edi, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %ecx # 4-byte Reload
+ adcl 436(%esp), %ecx
+ movl %ecx, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %ecx # 4-byte Reload
+ adcl 440(%esp), %ecx
+ movl %ecx, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %ecx # 4-byte Reload
+ adcl 444(%esp), %ecx
+ movl %ecx, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %ecx # 4-byte Reload
+ adcl 448(%esp), %ecx
+ movl %ecx, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %ecx # 4-byte Reload
+ adcl 452(%esp), %ecx
+ movl %ecx, 196(%esp) # 4-byte Spill
+ adcl 456(%esp), %ebp
+ movl 184(%esp), %ecx # 4-byte Reload
+ adcl 460(%esp), %ecx
+ movl %ecx, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %ecx # 4-byte Reload
+ adcl 464(%esp), %ecx
+ movl %ecx, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %ecx # 4-byte Reload
+ adcl 468(%esp), %ecx
+ movl %ecx, 168(%esp) # 4-byte Spill
+ movl 176(%esp), %ecx # 4-byte Reload
+ adcl 472(%esp), %ecx
+ movl %ecx, 176(%esp) # 4-byte Spill
+ movl 172(%esp), %ecx # 4-byte Reload
+ adcl 476(%esp), %ecx
+ movl %ecx, 172(%esp) # 4-byte Spill
+ movl 152(%esp), %ecx # 4-byte Reload
+ adcl 480(%esp), %ecx
+ movl %ecx, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %ecx # 4-byte Reload
+ adcl 484(%esp), %ecx
+ movl %ecx, 156(%esp) # 4-byte Spill
+ movl 144(%esp), %ecx # 4-byte Reload
+ adcl 488(%esp), %ecx
+ movl %ecx, 144(%esp) # 4-byte Spill
+ movl 132(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl %eax, %esi
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 352(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 352(%esp), %esi
+ movl 164(%esp), %esi # 4-byte Reload
+ adcl 356(%esp), %esi
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ adcl 380(%esp), %ebp
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ adcl 416(%esp), %edi
+ movl %edi, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl %esi, %eax
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 280(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 280(%esp), %esi
+ movl 180(%esp), %ecx # 4-byte Reload
+ adcl 284(%esp), %ecx
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ adcl 304(%esp), %ebp
+ movl %ebp, 148(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %esi # 4-byte Reload
+ adcl 316(%esp), %esi
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl 344(%esp), %edi
+ movl %edi, 128(%esp) # 4-byte Spill
+ movl 124(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %ebp
+ movl %eax, (%esp)
+ leal 208(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 208(%esp), %ebp
+ movl 200(%esp), %edx # 4-byte Reload
+ adcl 212(%esp), %edx
+ movl %edx, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %ecx # 4-byte Reload
+ adcl 220(%esp), %ecx
+ movl %ecx, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 148(%esp), %ebp # 4-byte Reload
+ adcl 228(%esp), %ebp
+ movl %ebp, 148(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ adcl 240(%esp), %esi
+ movl %esi, 168(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ adcl 272(%esp), %edi
+ movl %edi, 124(%esp) # 4-byte Spill
+ movl 112(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl %edx, %eax
+ subl 24(%esp), %edx # 4-byte Folded Reload
+ movl 204(%esp), %esi # 4-byte Reload
+ sbbl 12(%esp), %esi # 4-byte Folded Reload
+ sbbl 16(%esp), %ecx # 4-byte Folded Reload
+ movl 196(%esp), %eax # 4-byte Reload
+ sbbl 20(%esp), %eax # 4-byte Folded Reload
+ sbbl 28(%esp), %ebp # 4-byte Folded Reload
+ sbbl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 100(%esp) # 4-byte Spill
+ movl 188(%esp), %ebx # 4-byte Reload
+ sbbl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 104(%esp) # 4-byte Spill
+ movl 168(%esp), %ebx # 4-byte Reload
+ sbbl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 108(%esp) # 4-byte Spill
+ movl 176(%esp), %ebx # 4-byte Reload
+ sbbl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 112(%esp) # 4-byte Spill
+ movl 172(%esp), %ebx # 4-byte Reload
+ sbbl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 116(%esp) # 4-byte Spill
+ movl 152(%esp), %ebx # 4-byte Reload
+ sbbl 52(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 120(%esp) # 4-byte Spill
+ movl 156(%esp), %ebx # 4-byte Reload
+ sbbl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 136(%esp) # 4-byte Spill
+ movl 144(%esp), %ebx # 4-byte Reload
+ sbbl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 140(%esp) # 4-byte Spill
+ movl 132(%esp), %ebx # 4-byte Reload
+ sbbl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 160(%esp) # 4-byte Spill
+ movl 128(%esp), %ebx # 4-byte Reload
+ sbbl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 164(%esp) # 4-byte Spill
+ movl 124(%esp), %ebx # 4-byte Reload
+ sbbl 72(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 180(%esp) # 4-byte Spill
+ sbbl $0, %edi
+ andl $1, %edi
+ movl %edi, %ebx
+ jne .LBB244_2
+# BB#1:
+ movl %edx, 200(%esp) # 4-byte Spill
+.LBB244_2:
+ movl 1376(%esp), %edx
+ movl 200(%esp), %edi # 4-byte Reload
+ movl %edi, (%edx)
+ testb %bl, %bl
+ jne .LBB244_4
+# BB#3:
+ movl %esi, 204(%esp) # 4-byte Spill
+.LBB244_4:
+ movl 204(%esp), %esi # 4-byte Reload
+ movl %esi, 4(%edx)
+ movl 192(%esp), %esi # 4-byte Reload
+ jne .LBB244_6
+# BB#5:
+ movl %ecx, %esi
+.LBB244_6:
+ movl %esi, 8(%edx)
+ movl 196(%esp), %ecx # 4-byte Reload
+ jne .LBB244_8
+# BB#7:
+ movl %eax, %ecx
+.LBB244_8:
+ movl %ecx, 12(%edx)
+ movl 128(%esp), %esi # 4-byte Reload
+ movl 148(%esp), %eax # 4-byte Reload
+ jne .LBB244_10
+# BB#9:
+ movl %ebp, %eax
+.LBB244_10:
+ movl %eax, 16(%edx)
+ movl 124(%esp), %ecx # 4-byte Reload
+ movl 176(%esp), %eax # 4-byte Reload
+ movl 184(%esp), %ebp # 4-byte Reload
+ jne .LBB244_12
+# BB#11:
+ movl 100(%esp), %ebp # 4-byte Reload
+.LBB244_12:
+ movl %ebp, 20(%edx)
+ movl 152(%esp), %ebp # 4-byte Reload
+ movl 188(%esp), %ebx # 4-byte Reload
+ jne .LBB244_14
+# BB#13:
+ movl 104(%esp), %ebx # 4-byte Reload
+.LBB244_14:
+ movl %ebx, 24(%edx)
+ movl 156(%esp), %ebx # 4-byte Reload
+ movl 168(%esp), %edi # 4-byte Reload
+ jne .LBB244_16
+# BB#15:
+ movl 108(%esp), %edi # 4-byte Reload
+.LBB244_16:
+ movl %edi, 28(%edx)
+ movl 144(%esp), %edi # 4-byte Reload
+ jne .LBB244_18
+# BB#17:
+ movl 112(%esp), %eax # 4-byte Reload
+.LBB244_18:
+ movl %eax, 32(%edx)
+ jne .LBB244_20
+# BB#19:
+ movl 116(%esp), %eax # 4-byte Reload
+ movl %eax, 172(%esp) # 4-byte Spill
+.LBB244_20:
+ movl 172(%esp), %eax # 4-byte Reload
+ movl %eax, 36(%edx)
+ jne .LBB244_22
+# BB#21:
+ movl 120(%esp), %ebp # 4-byte Reload
+.LBB244_22:
+ movl %ebp, 40(%edx)
+ movl 132(%esp), %eax # 4-byte Reload
+ jne .LBB244_24
+# BB#23:
+ movl 136(%esp), %ebx # 4-byte Reload
+.LBB244_24:
+ movl %ebx, 44(%edx)
+ jne .LBB244_26
+# BB#25:
+ movl 140(%esp), %edi # 4-byte Reload
+.LBB244_26:
+ movl %edi, 48(%edx)
+ jne .LBB244_28
+# BB#27:
+ movl 160(%esp), %eax # 4-byte Reload
+.LBB244_28:
+ movl %eax, 52(%edx)
+ jne .LBB244_30
+# BB#29:
+ movl 164(%esp), %esi # 4-byte Reload
+.LBB244_30:
+ movl %esi, 56(%edx)
+ jne .LBB244_32
+# BB#31:
+ movl 180(%esp), %ecx # 4-byte Reload
+.LBB244_32:
+ movl %ecx, 60(%edx)
+ addl $1356, %esp # imm = 0x54C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end244:
+ .size mcl_fp_montRed16Lbmi2, .Lfunc_end244-mcl_fp_montRed16Lbmi2
+
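+# mcl_fp_addPre16Lbmi2: adds two 16-limb (512-bit) integers with no modular
+# reduction; the final carry bit is returned in %eax (0 or 1).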
+ .globl mcl_fp_addPre16Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre16Lbmi2,@function
+mcl_fp_addPre16Lbmi2: # @mcl_fp_addPre16Lbmi2
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 20(%esp), %ecx
+ addl (%ecx), %edx
+ adcl 4(%ecx), %esi
+ movl 8(%eax), %ebx
+ adcl 8(%ecx), %ebx
+ movl 16(%esp), %edi
+ movl %edx, (%edi)
+ movl 12(%ecx), %edx
+ movl %esi, 4(%edi)
+ movl 16(%ecx), %esi
+ adcl 12(%eax), %edx
+ adcl 16(%eax), %esi
+ movl %ebx, 8(%edi)
+ movl 20(%eax), %ebx
+ movl %edx, 12(%edi)
+ movl 20(%ecx), %edx
+ adcl %ebx, %edx
+ movl 24(%eax), %ebx
+ movl %esi, 16(%edi)
+ movl 24(%ecx), %esi
+ adcl %ebx, %esi
+ movl 28(%eax), %ebx
+ movl %edx, 20(%edi)
+ movl 28(%ecx), %edx
+ adcl %ebx, %edx
+ movl 32(%eax), %ebx
+ movl %esi, 24(%edi)
+ movl 32(%ecx), %esi
+ adcl %ebx, %esi
+ movl 36(%eax), %ebx
+ movl %edx, 28(%edi)
+ movl 36(%ecx), %edx
+ adcl %ebx, %edx
+ movl 40(%eax), %ebx
+ movl %esi, 32(%edi)
+ movl 40(%ecx), %esi
+ adcl %ebx, %esi
+ movl 44(%eax), %ebx
+ movl %edx, 36(%edi)
+ movl 44(%ecx), %edx
+ adcl %ebx, %edx
+ movl 48(%eax), %ebx
+ movl %esi, 40(%edi)
+ movl 48(%ecx), %esi
+ adcl %ebx, %esi
+ movl 52(%eax), %ebx
+ movl %edx, 44(%edi)
+ movl 52(%ecx), %edx
+ adcl %ebx, %edx
+ movl 56(%eax), %ebx
+ movl %esi, 48(%edi)
+ movl 56(%ecx), %esi
+ adcl %ebx, %esi
+ movl %edx, 52(%edi)
+ movl %esi, 56(%edi)
+ movl 60(%eax), %eax
+ movl 60(%ecx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 60(%edi)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end245:
+ .size mcl_fp_addPre16Lbmi2, .Lfunc_end245-mcl_fp_addPre16Lbmi2
+
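+# mcl_fp_subPre16Lbmi2: subtracts one 16-limb (512-bit) integer from another
+# with no reduction; the final borrow bit is returned in %eax (0 or 1).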
+ .globl mcl_fp_subPre16Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre16Lbmi2,@function
+mcl_fp_subPre16Lbmi2: # @mcl_fp_subPre16Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %eax, %eax
+ movl 28(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %ebp
+ sbbl 8(%edx), %ebp
+ movl 20(%esp), %ebx
+ movl %esi, (%ebx)
+ movl 12(%ecx), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ebx)
+ movl 16(%ecx), %edi
+ sbbl 16(%edx), %edi
+ movl %ebp, 8(%ebx)
+ movl 20(%edx), %ebp
+ movl %esi, 12(%ebx)
+ movl 20(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 24(%edx), %ebp
+ movl %edi, 16(%ebx)
+ movl 24(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 28(%edx), %ebp
+ movl %esi, 20(%ebx)
+ movl 28(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 32(%edx), %ebp
+ movl %edi, 24(%ebx)
+ movl 32(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 36(%edx), %ebp
+ movl %esi, 28(%ebx)
+ movl 36(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 40(%edx), %ebp
+ movl %edi, 32(%ebx)
+ movl 40(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 44(%edx), %ebp
+ movl %esi, 36(%ebx)
+ movl 44(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 48(%edx), %ebp
+ movl %edi, 40(%ebx)
+ movl 48(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 52(%edx), %ebp
+ movl %esi, 44(%ebx)
+ movl 52(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 56(%edx), %ebp
+ movl %edi, 48(%ebx)
+ movl 56(%ecx), %edi
+ sbbl %ebp, %edi
+ movl %esi, 52(%ebx)
+ movl %edi, 56(%ebx)
+ movl 60(%edx), %edx
+ movl 60(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 60(%ebx)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end246:
+ .size mcl_fp_subPre16Lbmi2, .Lfunc_end246-mcl_fp_subPre16Lbmi2
+
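+# mcl_fp_shr1_16Lbmi2: shifts a 16-limb (512-bit) value right by one bit,
+# using shrdl so the low bit of each limb carries into the limb below.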
+ .globl mcl_fp_shr1_16Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_16Lbmi2,@function
+mcl_fp_shr1_16Lbmi2: # @mcl_fp_shr1_16Lbmi2
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl 8(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 8(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 4(%ecx)
+ movl 12(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 8(%ecx)
+ movl 16(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 12(%ecx)
+ movl 20(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 16(%ecx)
+ movl 24(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 20(%ecx)
+ movl 28(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 24(%ecx)
+ movl 32(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 28(%ecx)
+ movl 36(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 32(%ecx)
+ movl 40(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 36(%ecx)
+ movl 44(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 40(%ecx)
+ movl 48(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 44(%ecx)
+ movl 52(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 48(%ecx)
+ movl 56(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 52(%ecx)
+ movl 60(%eax), %eax
+ shrdl $1, %eax, %edx
+ movl %edx, 56(%ecx)
+ shrl %eax
+ movl %eax, 60(%ecx)
+ popl %esi
+ retl
+.Lfunc_end247:
+ .size mcl_fp_shr1_16Lbmi2, .Lfunc_end247-mcl_fp_shr1_16Lbmi2
+
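+# mcl_fp_add16Lbmi2: adds two 16-limb (512-bit) operands and stores the raw
+# sum, then subtracts the modulus p; when that subtraction does not borrow
+# (the "nocarry" path), the reduced difference overwrites the stored sum.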
+ .globl mcl_fp_add16Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add16Lbmi2,@function
+mcl_fp_add16Lbmi2: # @mcl_fp_add16Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $56, %esp
+ movl 84(%esp), %edx
+ movl (%edx), %esi
+ movl 4(%edx), %ebp
+ movl 80(%esp), %ecx
+ addl (%ecx), %esi
+ movl %esi, %ebx
+ adcl 4(%ecx), %ebp
+ movl 8(%edx), %eax
+ adcl 8(%ecx), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 12(%ecx), %esi
+ movl 16(%ecx), %edi
+ adcl 12(%edx), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 16(%edx), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 20(%ecx), %eax
+ adcl 20(%edx), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%ecx), %eax
+ adcl 24(%edx), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%ecx), %eax
+ adcl 28(%edx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%ecx), %eax
+ adcl 32(%edx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ adcl 36(%edx), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 40(%ecx), %eax
+ adcl 40(%edx), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 44(%ecx), %eax
+ adcl 44(%edx), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 48(%ecx), %eax
+ adcl 48(%edx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 52(%ecx), %eax
+ adcl 52(%edx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 56(%ecx), %esi
+ adcl 56(%edx), %esi
+ movl 60(%ecx), %ecx
+ adcl 60(%edx), %ecx
+ movl 76(%esp), %edx
+ movl %ebx, (%edx)
+ movl %ebx, %eax
+ movl %ebp, 4(%edx)
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl %ebx, 8(%edx)
+ movl 48(%esp), %ebx # 4-byte Reload
+ movl %ebx, 12(%edx)
+ movl %edi, 16(%edx)
+ movl 44(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%edx)
+ movl 40(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%edx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 28(%edx)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 32(%edx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 36(%edx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 40(%edx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 44(%edx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 48(%edx)
+ movl 8(%esp), %edi # 4-byte Reload
+ movl %edi, 52(%edx)
+ movl %esi, 56(%edx)
+ movl %ecx, 60(%edx)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 88(%esp), %edi
+ subl (%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ sbbl 4(%edi), %ebp
+ movl %ebp, (%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ sbbl 8(%edi), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ sbbl 12(%edi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ sbbl 16(%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ sbbl 20(%edi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ sbbl 24(%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ sbbl 28(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ sbbl 32(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ sbbl 36(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ sbbl 44(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ sbbl 48(%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 8(%esp), %eax # 4-byte Reload
+ sbbl 52(%edi), %eax
+ movl %eax, %ebp
+ sbbl 56(%edi), %esi
+ sbbl 60(%edi), %ecx
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB248_2
+# BB#1: # %nocarry
+ movl 4(%esp), %edi # 4-byte Reload
+ movl %edi, (%edx)
+ movl (%esp), %edi # 4-byte Reload
+ movl %edi, 4(%edx)
+ movl 52(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%edx)
+ movl 48(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%edx)
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%edx)
+ movl 44(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%edx)
+ movl 40(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%edx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 28(%edx)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 32(%edx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 36(%edx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 40(%edx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 44(%edx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 48(%edx)
+ movl %ebp, 52(%edx)
+ movl %esi, 56(%edx)
+ movl %ecx, 60(%edx)
+.LBB248_2: # %carry
+ addl $56, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end248:
+ .size mcl_fp_add16Lbmi2, .Lfunc_end248-mcl_fp_add16Lbmi2
+
+ .globl mcl_fp_addNF16Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF16Lbmi2,@function
+mcl_fp_addNF16Lbmi2: # @mcl_fp_addNF16Lbmi2
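+# note: addNF variant of the 16-limb addition: both x + y and (x + y) - p are
+# computed and the sign of the top borrow limb selects which value is stored;
+# the NF form appears to rely on the sum not carrying out of the top limb.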
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $124, %esp
+ movl 152(%esp), %edx
+ movl (%edx), %eax
+ movl 4(%edx), %ecx
+ movl 148(%esp), %esi
+ addl (%esi), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 4(%esi), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 60(%edx), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 56(%edx), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 52(%edx), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 48(%edx), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 44(%edx), %edi
+ movl 40(%edx), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 36(%edx), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 32(%edx), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 28(%edx), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 24(%edx), %eax
+ movl 20(%edx), %ebp
+ movl 16(%edx), %ebx
+ movl 12(%edx), %ecx
+ movl 8(%edx), %edx
+ adcl 8(%esi), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 12(%esi), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 16(%esi), %ebx
+ movl %ebx, 68(%esp) # 4-byte Spill
+ adcl 20(%esi), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ adcl 24(%esi), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 28(%esi), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 32(%esi), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 36(%esi), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 40(%esi), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 44(%esi), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 48(%esi), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 52(%esi), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 56(%esi), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 60(%esi), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 156(%esp), %edi
+ movl 80(%esp), %esi # 4-byte Reload
+ subl (%edi), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ sbbl 4(%edi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ sbbl 8(%edi), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ sbbl 12(%edi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 16(%edi), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ sbbl 20(%edi), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ sbbl 24(%edi), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ sbbl 28(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ sbbl 32(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ sbbl 36(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ movl %eax, %ecx
+ sbbl 44(%edi), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ sbbl 48(%edi), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ sbbl 52(%edi), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ movl %ecx, %ebx
+ sbbl 56(%edi), %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 112(%esp), %ebx # 4-byte Reload
+ sbbl 60(%edi), %ebx
+ movl 80(%esp), %edi # 4-byte Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+ testl %ebx, %ebx
+ js .LBB249_2
+# BB#1:
+ movl %esi, %edi
+.LBB249_2:
+ movl 144(%esp), %ebx
+ movl %edi, (%ebx)
+ movl 84(%esp), %edx # 4-byte Reload
+ js .LBB249_4
+# BB#3:
+ movl (%esp), %edx # 4-byte Reload
+.LBB249_4:
+ movl %edx, 4(%ebx)
+ movl 68(%esp), %edx # 4-byte Reload
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB249_6
+# BB#5:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB249_6:
+ movl %eax, 8(%ebx)
+ movl 100(%esp), %eax # 4-byte Reload
+ movl 88(%esp), %ecx # 4-byte Reload
+ movl 64(%esp), %esi # 4-byte Reload
+ js .LBB249_8
+# BB#7:
+ movl 8(%esp), %esi # 4-byte Reload
+.LBB249_8:
+ movl %esi, 12(%ebx)
+ movl 108(%esp), %esi # 4-byte Reload
+ js .LBB249_10
+# BB#9:
+ movl 12(%esp), %edx # 4-byte Reload
+.LBB249_10:
+ movl %edx, 16(%ebx)
+ movl 112(%esp), %edi # 4-byte Reload
+ movl 104(%esp), %ebp # 4-byte Reload
+ js .LBB249_12
+# BB#11:
+ movl 16(%esp), %edx # 4-byte Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+.LBB249_12:
+ movl 72(%esp), %edx # 4-byte Reload
+ movl %edx, 20(%ebx)
+ js .LBB249_14
+# BB#13:
+ movl 20(%esp), %ecx # 4-byte Reload
+.LBB249_14:
+ movl %ecx, 24(%ebx)
+ js .LBB249_16
+# BB#15:
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 116(%esp) # 4-byte Spill
+.LBB249_16:
+ movl 116(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%ebx)
+ js .LBB249_18
+# BB#17:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB249_18:
+ movl %eax, 32(%ebx)
+ movl 96(%esp), %ecx # 4-byte Reload
+ js .LBB249_20
+# BB#19:
+ movl 32(%esp), %eax # 4-byte Reload
+ movl %eax, 120(%esp) # 4-byte Spill
+.LBB249_20:
+ movl 120(%esp), %eax # 4-byte Reload
+ movl %eax, 36(%ebx)
+ js .LBB249_22
+# BB#21:
+ movl 36(%esp), %ebp # 4-byte Reload
+.LBB249_22:
+ movl %ebp, 40(%ebx)
+ movl 76(%esp), %eax # 4-byte Reload
+ js .LBB249_24
+# BB#23:
+ movl 40(%esp), %eax # 4-byte Reload
+.LBB249_24:
+ movl %eax, 44(%ebx)
+ movl 92(%esp), %eax # 4-byte Reload
+ js .LBB249_26
+# BB#25:
+ movl 44(%esp), %esi # 4-byte Reload
+.LBB249_26:
+ movl %esi, 48(%ebx)
+ js .LBB249_28
+# BB#27:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB249_28:
+ movl %eax, 52(%ebx)
+ js .LBB249_30
+# BB#29:
+ movl 52(%esp), %ecx # 4-byte Reload
+.LBB249_30:
+ movl %ecx, 56(%ebx)
+ js .LBB249_32
+# BB#31:
+ movl 56(%esp), %edi # 4-byte Reload
+.LBB249_32:
+ movl %edi, 60(%ebx)
+ addl $124, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end249:
+ .size mcl_fp_addNF16Lbmi2, .Lfunc_end249-mcl_fp_addNF16Lbmi2
+
+ .globl mcl_fp_sub16Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub16Lbmi2,@function
+mcl_fp_sub16Lbmi2: # @mcl_fp_sub16Lbmi2
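+# note: 16-limb modular subtraction: x - y is written out, and the %carry block
+# adds the modulus back in when the subtraction borrowed.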
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $60, %esp
+ movl 84(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+ xorl %ebx, %ebx
+ movl 88(%esp), %edi
+ subl (%edi), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ sbbl 4(%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ sbbl 12(%edi), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ sbbl 16(%edi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ sbbl 20(%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ sbbl 24(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 28(%esi), %eax
+ sbbl 28(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 32(%esi), %eax
+ sbbl 32(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 36(%esi), %eax
+ sbbl 36(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esi), %eax
+ sbbl 40(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 44(%esi), %edx
+ sbbl 44(%edi), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 48(%esi), %ecx
+ sbbl 48(%edi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 52(%esi), %eax
+ sbbl 52(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 56(%esi), %ebp
+ sbbl 56(%edi), %ebp
+ movl 60(%esi), %esi
+ sbbl 60(%edi), %esi
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 80(%esp), %ebx
+ movl 52(%esp), %edi # 4-byte Reload
+ movl %edi, (%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 44(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 56(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl 48(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%ebx)
+ movl 40(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%ebx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%ebx)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 28(%ebx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 32(%ebx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 36(%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 40(%ebx)
+ movl %edx, 44(%ebx)
+ movl %ecx, 48(%ebx)
+ movl %eax, 52(%ebx)
+ movl %ebp, 56(%ebx)
+ movl %esi, 60(%ebx)
+ je .LBB250_2
+# BB#1: # %carry
+ movl %esi, (%esp) # 4-byte Spill
+ movl 92(%esp), %esi
+ movl 52(%esp), %ecx # 4-byte Reload
+ addl (%esi), %ecx
+ movl %ecx, (%ebx)
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 8(%esi), %edi
+ movl 12(%esi), %eax
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %edi, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl 24(%esi), %ecx
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl 28(%esi), %eax
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%ebx)
+ movl 32(%esi), %ecx
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+ movl 36(%esi), %eax
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%ebx)
+ movl 40(%esi), %ecx
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%ebx)
+ movl 44(%esi), %eax
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 40(%ebx)
+ movl 48(%esi), %ecx
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 44(%ebx)
+ movl 52(%esi), %eax
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 48(%ebx)
+ movl %eax, 52(%ebx)
+ movl 56(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 56(%ebx)
+ movl 60(%esi), %eax
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%ebx)
+.LBB250_2: # %nocarry
+ addl $60, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end250:
+ .size mcl_fp_sub16Lbmi2, .Lfunc_end250-mcl_fp_sub16Lbmi2
+
+ .globl mcl_fp_subNF16Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF16Lbmi2,@function
+mcl_fp_subNF16Lbmi2: # @mcl_fp_subNF16Lbmi2
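+# note: branch-free 16-limb subtraction: the top limb of x - y is smeared into a
+# mask with sarl $31, which zeroes or keeps each modulus limb before it is added
+# back to the difference.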
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $104, %esp
+ movl 128(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edx
+ movl 132(%esp), %edi
+ subl (%edi), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ sbbl 4(%edi), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 60(%ecx), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 56(%ecx), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 52(%ecx), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 48(%ecx), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 44(%ecx), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 40(%ecx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 36(%ecx), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 32(%ecx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 28(%ecx), %ebp
+ movl 24(%ecx), %ebx
+ movl 20(%ecx), %esi
+ movl 16(%ecx), %edx
+ movl 12(%ecx), %eax
+ movl 8(%ecx), %ecx
+ sbbl 8(%edi), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ sbbl 12(%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ sbbl 16(%edi), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ sbbl 20(%edi), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ sbbl 24(%edi), %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ sbbl 28(%edi), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ sbbl 32(%edi), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ sbbl 36(%edi), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ sbbl 44(%edi), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ sbbl 48(%edi), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ sbbl 52(%edi), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ sbbl 56(%edi), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ sbbl 60(%edi), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ sarl $31, %eax
+ movl 136(%esp), %esi
+ movl 60(%esi), %ecx
+ andl %eax, %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 56(%esi), %ecx
+ andl %eax, %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 52(%esi), %ecx
+ andl %eax, %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 48(%esi), %ecx
+ andl %eax, %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 44(%esi), %ecx
+ andl %eax, %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 40(%esi), %ecx
+ andl %eax, %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 36(%esi), %ecx
+ andl %eax, %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 32(%esi), %ecx
+ andl %eax, %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 28(%esi), %ecx
+ andl %eax, %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 24(%esi), %ecx
+ andl %eax, %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 20(%esi), %ebp
+ andl %eax, %ebp
+ movl 16(%esi), %ebx
+ andl %eax, %ebx
+ movl 12(%esi), %edi
+ andl %eax, %edi
+ movl 8(%esi), %edx
+ andl %eax, %edx
+ movl 4(%esi), %ecx
+ andl %eax, %ecx
+ andl (%esi), %eax
+ addl 64(%esp), %eax # 4-byte Folded Reload
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ movl 124(%esp), %esi
+ movl %eax, (%esi)
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 4(%esi)
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edx, 8(%esi)
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %edi, 12(%esi)
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ movl %ebx, 16(%esi)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %ebp, 20(%esi)
+ movl 4(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 24(%esi)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 28(%esi)
+ movl 12(%esp), %ecx # 4-byte Reload
+ adcl 100(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 32(%esi)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 36(%esi)
+ movl 20(%esp), %ecx # 4-byte Reload
+ adcl 84(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 40(%esi)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 44(%esi)
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 92(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 48(%esi)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 52(%esi)
+ movl %eax, 56(%esi)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esi)
+ addl $104, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end251:
+ .size mcl_fp_subNF16Lbmi2, .Lfunc_end251-mcl_fp_subNF16Lbmi2
+
+ .globl mcl_fpDbl_add16Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add16Lbmi2,@function
+mcl_fpDbl_add16Lbmi2: # @mcl_fpDbl_add16Lbmi2
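+# note: double-width (32-limb) addition: the low half is stored directly and the
+# high half goes through the same conditional subtraction of the modulus as
+# mcl_fp_add16Lbmi2.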
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $116, %esp
+ movl 144(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edx
+ movl 140(%esp), %ebx
+ addl (%ebx), %esi
+ adcl 4(%ebx), %edx
+ movl 8(%ecx), %edi
+ adcl 8(%ebx), %edi
+ movl 12(%ebx), %ebp
+ movl 136(%esp), %eax
+ movl %esi, (%eax)
+ movl 16(%ebx), %esi
+ adcl 12(%ecx), %ebp
+ adcl 16(%ecx), %esi
+ movl %edx, 4(%eax)
+ movl 72(%ecx), %edx
+ movl %edx, 112(%esp) # 4-byte Spill
+ movl %edi, 8(%eax)
+ movl 20(%ecx), %edx
+ movl %ebp, 12(%eax)
+ movl 20(%ebx), %edi
+ adcl %edx, %edi
+ movl 24(%ecx), %edx
+ movl %esi, 16(%eax)
+ movl 24(%ebx), %esi
+ adcl %edx, %esi
+ movl 28(%ecx), %edx
+ movl %edi, 20(%eax)
+ movl 28(%ebx), %edi
+ adcl %edx, %edi
+ movl 32(%ecx), %edx
+ movl %esi, 24(%eax)
+ movl 32(%ebx), %esi
+ adcl %edx, %esi
+ movl 36(%ecx), %edx
+ movl %edi, 28(%eax)
+ movl 36(%ebx), %edi
+ adcl %edx, %edi
+ movl 40(%ecx), %edx
+ movl %esi, 32(%eax)
+ movl 40(%ebx), %esi
+ adcl %edx, %esi
+ movl 44(%ecx), %edx
+ movl %edi, 36(%eax)
+ movl 44(%ebx), %edi
+ adcl %edx, %edi
+ movl 48(%ecx), %edx
+ movl %esi, 40(%eax)
+ movl 48(%ebx), %esi
+ adcl %edx, %esi
+ movl 52(%ecx), %edx
+ movl %edi, 44(%eax)
+ movl 52(%ebx), %edi
+ adcl %edx, %edi
+ movl 56(%ecx), %edx
+ movl %esi, 48(%eax)
+ movl 56(%ebx), %esi
+ adcl %edx, %esi
+ movl 60(%ecx), %edx
+ movl %edi, 52(%eax)
+ movl 60(%ebx), %ebp
+ adcl %edx, %ebp
+ movl 64(%ecx), %edx
+ movl %esi, 56(%eax)
+ movl 64(%ebx), %esi
+ adcl %edx, %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 68(%ecx), %edx
+ movl %ebp, 60(%eax)
+ movl 68(%ebx), %eax
+ adcl %edx, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 72(%ebx), %eax
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%ecx), %ebp
+ movl 76(%ebx), %eax
+ adcl %ebp, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%ecx), %ebp
+ movl 80(%ebx), %eax
+ adcl %ebp, %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 84(%ecx), %ebp
+ movl 84(%ebx), %eax
+ adcl %ebp, %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 88(%ecx), %ebp
+ movl 88(%ebx), %eax
+ adcl %ebp, %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%ecx), %ebp
+ movl 92(%ebx), %eax
+ adcl %ebp, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%ecx), %ebp
+ movl 96(%ebx), %eax
+ adcl %ebp, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 100(%ecx), %ebp
+ movl 100(%ebx), %edx
+ adcl %ebp, %edx
+ movl %edx, 112(%esp) # 4-byte Spill
+ movl 104(%ecx), %ebp
+ movl 104(%ebx), %edx
+ adcl %ebp, %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 108(%ecx), %ebp
+ movl 108(%ebx), %edx
+ adcl %ebp, %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 112(%ecx), %edx
+ movl 112(%ebx), %ebp
+ adcl %edx, %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 116(%ecx), %edx
+ movl 116(%ebx), %esi
+ adcl %edx, %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 120(%ecx), %edx
+ movl 120(%ebx), %edi
+ adcl %edx, %edi
+ movl 124(%ecx), %ecx
+ movl 124(%ebx), %esi
+ adcl %ecx, %esi
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl 148(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ subl (%edx), %ebx
+ movl %ebx, (%esp) # 4-byte Spill
+ movl 76(%esp), %ebx # 4-byte Reload
+ sbbl 4(%edx), %ebx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 80(%esp), %ebx # 4-byte Reload
+ sbbl 8(%edx), %ebx
+ movl %ebx, 8(%esp) # 4-byte Spill
+ movl 84(%esp), %ebx # 4-byte Reload
+ sbbl 12(%edx), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 104(%esp), %ebx # 4-byte Reload
+ sbbl 16(%edx), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 88(%esp), %ebx # 4-byte Reload
+ sbbl 20(%edx), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 108(%esp), %ebx # 4-byte Reload
+ sbbl 24(%edx), %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 92(%esp), %ebx # 4-byte Reload
+ sbbl 28(%edx), %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ sbbl 32(%edx), %ebx
+ movl 112(%esp), %eax # 4-byte Reload
+ sbbl 36(%edx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ sbbl 40(%edx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ sbbl 44(%edx), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ sbbl 48(%edx), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ movl %eax, %ebp
+ sbbl 52(%edx), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl %edi, %ebp
+ sbbl 56(%edx), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl %esi, %ebp
+ sbbl 60(%edx), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ sbbl $0, %ecx
+ andl $1, %ecx
+ jne .LBB252_2
+# BB#1:
+ movl %ebx, 64(%esp) # 4-byte Spill
+.LBB252_2:
+ testb %cl, %cl
+ movl 72(%esp), %ecx # 4-byte Reload
+ jne .LBB252_4
+# BB#3:
+ movl (%esp), %ecx # 4-byte Reload
+.LBB252_4:
+ movl 136(%esp), %ebx
+ movl %ecx, 64(%ebx)
+ movl %esi, %ebp
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ movl 92(%esp), %ecx # 4-byte Reload
+ movl 88(%esp), %edx # 4-byte Reload
+ movl 76(%esp), %esi # 4-byte Reload
+ jne .LBB252_6
+# BB#5:
+ movl 4(%esp), %esi # 4-byte Reload
+.LBB252_6:
+ movl %esi, 68(%ebx)
+ movl 84(%esp), %esi # 4-byte Reload
+ movl 80(%esp), %eax # 4-byte Reload
+ jne .LBB252_8
+# BB#7:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB252_8:
+ movl %eax, 72(%ebx)
+ movl 60(%esp), %eax # 4-byte Reload
+ jne .LBB252_10
+# BB#9:
+ movl 12(%esp), %esi # 4-byte Reload
+.LBB252_10:
+ movl %esi, 76(%ebx)
+ jne .LBB252_12
+# BB#11:
+ movl 16(%esp), %esi # 4-byte Reload
+ movl %esi, 104(%esp) # 4-byte Spill
+.LBB252_12:
+ movl 104(%esp), %esi # 4-byte Reload
+ movl %esi, 80(%ebx)
+ jne .LBB252_14
+# BB#13:
+ movl 20(%esp), %edx # 4-byte Reload
+.LBB252_14:
+ movl %edx, 84(%ebx)
+ jne .LBB252_16
+# BB#15:
+ movl 24(%esp), %edx # 4-byte Reload
+ movl %edx, 108(%esp) # 4-byte Spill
+.LBB252_16:
+ movl 108(%esp), %edx # 4-byte Reload
+ movl %edx, 88(%ebx)
+ jne .LBB252_18
+# BB#17:
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB252_18:
+ movl %ecx, 92(%ebx)
+ movl 64(%esp), %ecx # 4-byte Reload
+ movl %ecx, 96(%ebx)
+ jne .LBB252_20
+# BB#19:
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 112(%esp) # 4-byte Spill
+.LBB252_20:
+ movl 112(%esp), %ecx # 4-byte Reload
+ movl %ecx, 100(%ebx)
+ jne .LBB252_22
+# BB#21:
+ movl 36(%esp), %edi # 4-byte Reload
+.LBB252_22:
+ movl %edi, 104(%ebx)
+ movl 100(%esp), %ecx # 4-byte Reload
+ jne .LBB252_24
+# BB#23:
+ movl 40(%esp), %ecx # 4-byte Reload
+.LBB252_24:
+ movl %ecx, 108(%ebx)
+ movl 72(%esp), %ecx # 4-byte Reload
+ jne .LBB252_26
+# BB#25:
+ movl 44(%esp), %eax # 4-byte Reload
+.LBB252_26:
+ movl %eax, 112(%ebx)
+ movl 68(%esp), %eax # 4-byte Reload
+ jne .LBB252_28
+# BB#27:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB252_28:
+ movl %eax, 116(%ebx)
+ jne .LBB252_30
+# BB#29:
+ movl 52(%esp), %ecx # 4-byte Reload
+.LBB252_30:
+ movl %ecx, 120(%ebx)
+ jne .LBB252_32
+# BB#31:
+ movl 56(%esp), %ebp # 4-byte Reload
+.LBB252_32:
+ movl %ebp, 124(%ebx)
+ addl $116, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end252:
+ .size mcl_fpDbl_add16Lbmi2, .Lfunc_end252-mcl_fpDbl_add16Lbmi2
+
+ .globl mcl_fpDbl_sub16Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub16Lbmi2,@function
+mcl_fpDbl_sub16Lbmi2: # @mcl_fpDbl_sub16Lbmi2
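+# note: double-width (32-limb) subtraction: after the full sbb chain, each
+# modulus limb is replaced by zero unless a borrow occurred, then added back
+# into the high 16 limbs.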
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $108, %esp
+ movl 132(%esp), %eax
+ movl (%eax), %esi
+ movl 4(%eax), %edi
+ movl 136(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%eax), %ebx
+ sbbl 8(%edx), %ebx
+ movl 128(%esp), %ecx
+ movl %esi, (%ecx)
+ movl 12(%eax), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ecx)
+ movl 16(%eax), %edi
+ sbbl 16(%edx), %edi
+ movl %ebx, 8(%ecx)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%ecx)
+ movl 20(%eax), %esi
+ sbbl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %edi, 16(%ecx)
+ movl 24(%eax), %edi
+ sbbl %ebx, %edi
+ movl 28(%edx), %ebx
+ movl %esi, 20(%ecx)
+ movl 28(%eax), %esi
+ sbbl %ebx, %esi
+ movl 32(%edx), %ebx
+ movl %edi, 24(%ecx)
+ movl 32(%eax), %edi
+ sbbl %ebx, %edi
+ movl 36(%edx), %ebx
+ movl %esi, 28(%ecx)
+ movl 36(%eax), %esi
+ sbbl %ebx, %esi
+ movl 40(%edx), %ebx
+ movl %edi, 32(%ecx)
+ movl 40(%eax), %edi
+ sbbl %ebx, %edi
+ movl 44(%edx), %ebx
+ movl %esi, 36(%ecx)
+ movl 44(%eax), %esi
+ sbbl %ebx, %esi
+ movl 48(%edx), %ebx
+ movl %edi, 40(%ecx)
+ movl 48(%eax), %edi
+ sbbl %ebx, %edi
+ movl 52(%edx), %ebx
+ movl %esi, 44(%ecx)
+ movl 52(%eax), %esi
+ sbbl %ebx, %esi
+ movl 56(%edx), %ebx
+ movl %edi, 48(%ecx)
+ movl 56(%eax), %edi
+ sbbl %ebx, %edi
+ movl 60(%edx), %ebx
+ movl %esi, 52(%ecx)
+ movl 60(%eax), %esi
+ sbbl %ebx, %esi
+ movl 64(%edx), %ebx
+ movl %edi, 56(%ecx)
+ movl 64(%eax), %edi
+ sbbl %ebx, %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 68(%edx), %edi
+ movl %esi, 60(%ecx)
+ movl 68(%eax), %esi
+ sbbl %edi, %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 72(%edx), %esi
+ movl 72(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 76(%edx), %esi
+ movl 76(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 80(%edx), %esi
+ movl 80(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 84(%edx), %esi
+ movl 84(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 88(%edx), %esi
+ movl 88(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 92(%edx), %esi
+ movl 92(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 96(%edx), %esi
+ movl 96(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 100(%edx), %esi
+ movl 100(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 104(%edx), %esi
+ movl 104(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 108(%edx), %esi
+ movl 108(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 112(%edx), %esi
+ movl 112(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 116(%edx), %esi
+ movl 116(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 120(%edx), %esi
+ movl 120(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 124(%edx), %edx
+ movl 124(%eax), %eax
+ sbbl %edx, %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 140(%esp), %ebx
+ jne .LBB253_1
+# BB#2:
+ movl $0, 68(%esp) # 4-byte Folded Spill
+ jmp .LBB253_3
+.LBB253_1:
+ movl 60(%ebx), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+.LBB253_3:
+ testb %al, %al
+ jne .LBB253_4
+# BB#5:
+ movl $0, 24(%esp) # 4-byte Folded Spill
+ movl $0, %ebp
+ jmp .LBB253_6
+.LBB253_4:
+ movl (%ebx), %ebp
+ movl 4(%ebx), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+.LBB253_6:
+ jne .LBB253_7
+# BB#8:
+ movl $0, 36(%esp) # 4-byte Folded Spill
+ jmp .LBB253_9
+.LBB253_7:
+ movl 56(%ebx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+.LBB253_9:
+ jne .LBB253_10
+# BB#11:
+ movl $0, 32(%esp) # 4-byte Folded Spill
+ jmp .LBB253_12
+.LBB253_10:
+ movl 52(%ebx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+.LBB253_12:
+ jne .LBB253_13
+# BB#14:
+ movl $0, 28(%esp) # 4-byte Folded Spill
+ jmp .LBB253_15
+.LBB253_13:
+ movl 48(%ebx), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+.LBB253_15:
+ jne .LBB253_16
+# BB#17:
+ movl $0, 20(%esp) # 4-byte Folded Spill
+ jmp .LBB253_18
+.LBB253_16:
+ movl 44(%ebx), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+.LBB253_18:
+ jne .LBB253_19
+# BB#20:
+ movl $0, 16(%esp) # 4-byte Folded Spill
+ jmp .LBB253_21
+.LBB253_19:
+ movl 40(%ebx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+.LBB253_21:
+ jne .LBB253_22
+# BB#23:
+ movl $0, 12(%esp) # 4-byte Folded Spill
+ jmp .LBB253_24
+.LBB253_22:
+ movl 36(%ebx), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+.LBB253_24:
+ jne .LBB253_25
+# BB#26:
+ movl $0, 8(%esp) # 4-byte Folded Spill
+ jmp .LBB253_27
+.LBB253_25:
+ movl 32(%ebx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+.LBB253_27:
+ jne .LBB253_28
+# BB#29:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB253_30
+.LBB253_28:
+ movl 28(%ebx), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+.LBB253_30:
+ jne .LBB253_31
+# BB#32:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB253_33
+.LBB253_31:
+ movl 24(%ebx), %eax
+ movl %eax, (%esp) # 4-byte Spill
+.LBB253_33:
+ jne .LBB253_34
+# BB#35:
+ movl $0, %esi
+ jmp .LBB253_36
+.LBB253_34:
+ movl 20(%ebx), %esi
+.LBB253_36:
+ jne .LBB253_37
+# BB#38:
+ movl $0, %edx
+ jmp .LBB253_39
+.LBB253_37:
+ movl 16(%ebx), %edx
+.LBB253_39:
+ jne .LBB253_40
+# BB#41:
+ movl $0, %edi
+ jmp .LBB253_42
+.LBB253_40:
+ movl 12(%ebx), %edi
+.LBB253_42:
+ jne .LBB253_43
+# BB#44:
+ xorl %ebx, %ebx
+ jmp .LBB253_45
+.LBB253_43:
+ movl 8(%ebx), %ebx
+.LBB253_45:
+ addl 48(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, %eax
+ movl 24(%esp), %ebp # 4-byte Reload
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 64(%ecx)
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, 68(%ecx)
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 72(%ecx)
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %edi, 76(%ecx)
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 80(%ecx)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %esi, 84(%ecx)
+ movl 4(%esp), %edx # 4-byte Reload
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 88(%ecx)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 92(%ecx)
+ movl 12(%esp), %edx # 4-byte Reload
+ adcl 80(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 96(%ecx)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 100(%ecx)
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 104(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 108(%ecx)
+ movl 32(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 112(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 116(%ecx)
+ movl %eax, 120(%ecx)
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 124(%ecx)
+ addl $108, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end253:
+ .size mcl_fpDbl_sub16Lbmi2, .Lfunc_end253-mcl_fpDbl_sub16Lbmi2
+
+ .align 16, 0x90
+ .type .LmulPv544x32,@function
+.LmulPv544x32: # @mulPv544x32
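+# note: multiplies a 17-limb (544-bit) operand by one 32-bit word with the BMI2
+# mulx instruction, producing an 18-limb result; it is the inner helper called
+# by the 17-limb routines below.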
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $60, %esp
+ movl %edx, %eax
+ movl 80(%esp), %esi
+ movl %esi, %edx
+ mulxl 4(%eax), %edi, %ebx
+ movl %esi, %edx
+ mulxl (%eax), %ebp, %edx
+ movl %ebp, 56(%esp) # 4-byte Spill
+ addl %edi, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %esi, %edx
+ mulxl 8(%eax), %edx, %edi
+ adcl %ebx, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %esi, %edx
+ mulxl 12(%eax), %edx, %ebx
+ adcl %edi, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %esi, %edx
+ mulxl 16(%eax), %edx, %edi
+ adcl %ebx, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %esi, %edx
+ mulxl 20(%eax), %edx, %ebx
+ adcl %edi, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %esi, %edx
+ mulxl 24(%eax), %edx, %edi
+ adcl %ebx, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %esi, %edx
+ mulxl 28(%eax), %edx, %ebx
+ adcl %edi, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %esi, %edx
+ mulxl 32(%eax), %edx, %edi
+ adcl %ebx, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %esi, %edx
+ mulxl 36(%eax), %edx, %ebx
+ adcl %edi, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %esi, %edx
+ mulxl 40(%eax), %edx, %edi
+ adcl %ebx, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %esi, %edx
+ mulxl 44(%eax), %edx, %ebx
+ adcl %edi, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %esi, %edx
+ mulxl 48(%eax), %edx, %edi
+ adcl %ebx, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %esi, %edx
+ mulxl 52(%eax), %ebx, %ebp
+ adcl %edi, %ebx
+ movl %esi, %edx
+ mulxl 56(%eax), %edi, %edx
+ movl %edx, (%esp) # 4-byte Spill
+ adcl %ebp, %edi
+ movl %esi, %edx
+ mulxl 60(%eax), %edx, %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ adcl (%esp), %edx # 4-byte Folded Reload
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, (%ecx)
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%ecx)
+ movl 48(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%ecx)
+ movl 44(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%ecx)
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%ecx)
+ movl 36(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%ecx)
+ movl 32(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%ecx)
+ movl 28(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%ecx)
+ movl 24(%esp), %ebp # 4-byte Reload
+ movl %ebp, 32(%ecx)
+ movl 20(%esp), %ebp # 4-byte Reload
+ movl %ebp, 36(%ecx)
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, 40(%ecx)
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl %ebp, 44(%ecx)
+ movl 8(%esp), %ebp # 4-byte Reload
+ movl %ebp, 48(%ecx)
+ movl %ebx, 52(%ecx)
+ movl %edi, 56(%ecx)
+ movl %edx, 60(%ecx)
+ movl %esi, %edx
+ mulxl 64(%eax), %eax, %edx
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%ecx)
+ adcl $0, %edx
+ movl %edx, 68(%ecx)
+ movl %ecx, %eax
+ addl $60, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end254:
+ .size .LmulPv544x32, .Lfunc_end254-.LmulPv544x32
+
+ .globl mcl_fp_mulUnitPre17Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre17Lbmi2,@function
+mcl_fp_mulUnitPre17Lbmi2: # @mcl_fp_mulUnitPre17Lbmi2
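+# note: thin wrapper over .LmulPv544x32: one 17-limb by 32-bit word product,
+# copied out as 18 limbs.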
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $140, %esp
+ calll .L255$pb
+.L255$pb:
+ popl %ebx
+.Ltmp56:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp56-.L255$pb), %ebx
+ movl 168(%esp), %eax
+ movl %eax, (%esp)
+ leal 64(%esp), %ecx
+ movl 164(%esp), %edx
+ calll .LmulPv544x32
+ movl 132(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 128(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 124(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 120(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 116(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 112(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 108(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 104(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 88(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp
+ movl 80(%esp), %ebx
+ movl 76(%esp), %edi
+ movl 72(%esp), %esi
+ movl 64(%esp), %edx
+ movl 68(%esp), %ecx
+ movl 160(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %edi, 12(%eax)
+ movl %ebx, 16(%eax)
+ movl %ebp, 20(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%eax)
+ movl 48(%esp), %ecx # 4-byte Reload
+ movl %ecx, 56(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ movl %ecx, 60(%eax)
+ movl 56(%esp), %ecx # 4-byte Reload
+ movl %ecx, 64(%eax)
+ movl 60(%esp), %ecx # 4-byte Reload
+ movl %ecx, 68(%eax)
+ addl $140, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end255:
+ .size mcl_fp_mulUnitPre17Lbmi2, .Lfunc_end255-mcl_fp_mulUnitPre17Lbmi2
+
+ .globl mcl_fpDbl_mulPre17Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre17Lbmi2,@function
+mcl_fpDbl_mulPre17Lbmi2: # @mcl_fpDbl_mulPre17Lbmi2
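+# note: 17x17-limb schoolbook multiplication: one .LmulPv544x32 call per word of
+# the second operand, with partial products accumulated through adcl chains.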
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1356, %esp # imm = 0x54C
+ calll .L256$pb
+.L256$pb:
+ popl %edi
+.Ltmp57:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp57-.L256$pb), %edi
+ movl %edi, 124(%esp) # 4-byte Spill
+ movl 1384(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1280(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl %edx, %esi
+ movl %edi, %ebx
+ calll .LmulPv544x32
+ movl 1348(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 1344(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 1340(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1336(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 1332(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 1328(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1324(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 1320(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1316(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1312(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1308(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 1304(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1300(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 1296(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 1292(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 1288(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 1280(%esp), %eax
+ movl 1284(%esp), %ebp
+ movl 1376(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 1384(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1208(%esp), %ecx
+ movl %esi, %edx
+ movl %edi, %ebx
+ calll .LmulPv544x32
+ addl 1208(%esp), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 1276(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1272(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 1268(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 1264(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 1260(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 1256(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 1252(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1248(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1244(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 1240(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1236(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 1232(%esp), %edi
+ movl 1228(%esp), %esi
+ movl 1224(%esp), %edx
+ movl 1220(%esp), %ecx
+ movl 1212(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 1216(%esp), %eax
+ movl 1376(%esp), %ebp
+ movl 8(%esp), %ebx # 4-byte Reload
+ movl %ebx, 4(%ebp)
+ movl 12(%esp), %ebp # 4-byte Reload
+ adcl %ebp, 120(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 8(%esp) # 4-byte Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 64(%esp) # 4-byte Folded Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1136(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 1136(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 1204(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 1200(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 1196(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1192(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 1188(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 1184(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1180(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 1176(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1172(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 1168(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1164(%esp), %ebx
+ movl 1160(%esp), %edi
+ movl 1156(%esp), %esi
+ movl 1152(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1148(%esp), %edx
+ movl 1140(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1144(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 80(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 72(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1064(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 1064(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 1132(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1128(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 1124(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 1120(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 1116(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 1112(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1108(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1100(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 1096(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1092(%esp), %ebx
+ movl 1088(%esp), %edi
+ movl 1084(%esp), %esi
+ movl 1080(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 1076(%esp), %edx
+ movl 1068(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 1072(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 992(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 992(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 1060(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1056(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1052(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 1044(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1040(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 1036(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 1032(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1028(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 1024(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1020(%esp), %ebx
+ movl 1016(%esp), %edi
+ movl 1012(%esp), %esi
+ movl 1008(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 1004(%esp), %edx
+ movl 996(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1000(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 24(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 920(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 920(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 988(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 984(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 980(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 976(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 972(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 968(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 964(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 960(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 956(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 952(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 948(%esp), %ebx
+ movl 944(%esp), %edi
+ movl 940(%esp), %esi
+ movl 936(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 932(%esp), %edx
+ movl 924(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 928(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 848(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 848(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 916(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 912(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 908(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 904(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 900(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 896(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 892(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 888(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 884(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 880(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 876(%esp), %ebx
+ movl 872(%esp), %edi
+ movl 868(%esp), %esi
+ movl 864(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 860(%esp), %edx
+ movl 852(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 856(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 776(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 776(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 844(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 840(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 836(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 832(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 828(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 820(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 816(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 812(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 808(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 804(%esp), %ebx
+ movl 800(%esp), %edi
+ movl 796(%esp), %esi
+ movl 792(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 788(%esp), %edx
+ movl 780(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 784(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 704(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 704(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 772(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 768(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 764(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 760(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 756(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 752(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 748(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 744(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 740(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 736(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 732(%esp), %ebx
+ movl 728(%esp), %edi
+ movl 724(%esp), %esi
+ movl 720(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 716(%esp), %edx
+ movl 708(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 712(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 32(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 632(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 700(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 696(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 692(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 688(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 684(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 680(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 676(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 672(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 668(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 664(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 660(%esp), %ebx
+ movl 656(%esp), %edi
+ movl 652(%esp), %esi
+ movl 648(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 644(%esp), %edx
+ movl 636(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 640(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 36(%eax)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 560(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 560(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 628(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 624(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 620(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 616(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 612(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 608(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 604(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 600(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 596(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 592(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 588(%esp), %ebx
+ movl 584(%esp), %edi
+ movl 580(%esp), %esi
+ movl 576(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 572(%esp), %edx
+ movl 564(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 568(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 40(%eax)
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 24(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 488(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 488(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 556(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 552(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 548(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 544(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 540(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 536(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 532(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 528(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 524(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 520(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 516(%esp), %ebx
+ movl 512(%esp), %edi
+ movl 508(%esp), %esi
+ movl 504(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 500(%esp), %edx
+ movl 492(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 496(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 48(%esp), %ebp # 4-byte Reload
+ movl %ebp, 44(%eax)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 416(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 416(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 484(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 480(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 476(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 472(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 468(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 464(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 460(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 456(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 452(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 448(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 444(%esp), %ebx
+ movl 440(%esp), %edi
+ movl 436(%esp), %esi
+ movl 432(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 428(%esp), %edx
+ movl 420(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 424(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 48(%eax)
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 344(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 344(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 412(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 408(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 404(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 400(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 396(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 392(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 388(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 384(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 380(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 376(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 372(%esp), %ebx
+ movl 368(%esp), %edi
+ movl 364(%esp), %esi
+ movl 360(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 356(%esp), %edx
+ movl 348(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 352(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 48(%esp), %ebp # 4-byte Reload
+ movl %ebp, 52(%eax)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 56(%eax), %eax
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 1380(%esp), %eax
+ movl %eax, %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 272(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 340(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 336(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 332(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 328(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 324(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 320(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 316(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 312(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 308(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 304(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 300(%esp), %ebx
+ movl 296(%esp), %edi
+ movl 292(%esp), %edx
+ movl 288(%esp), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 284(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 276(%esp), %eax
+ movl 280(%esp), %ecx
+ movl 120(%esp), %esi # 4-byte Reload
+ movl 1376(%esp), %ebp
+ movl %esi, 56(%ebp)
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ebp
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 24(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %ecx
+ movl %ecx, %eax
+ movl 60(%eax), %eax
+ movl %eax, (%esp)
+ leal 200(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 200(%esp), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 268(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 264(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 260(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 256(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 252(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 248(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 244(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 240(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 236(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 232(%esp), %edi
+ movl 228(%esp), %esi
+ movl 224(%esp), %edx
+ movl 220(%esp), %ecx
+ movl 216(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 212(%esp), %eax
+ movl 204(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 208(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl 1376(%esp), %ebx
+ movl %ebp, 60(%ebx)
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 76(%esp), %ebp # 4-byte Folded Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 56(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ adcl $0, 52(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 64(%eax), %eax
+ movl %eax, (%esp)
+ leal 128(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 128(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 132(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 136(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 196(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 192(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 188(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 184(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 176(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 168(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 164(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 156(%esp), %ebx
+ movl 152(%esp), %edi
+ movl 148(%esp), %esi
+ movl 144(%esp), %edx
+ movl 140(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 64(%eax)
+ movl 64(%esp), %ebp # 4-byte Reload
+ movl %ebp, 68(%eax)
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl 76(%esp), %ebp # 4-byte Reload
+ movl %ebp, 72(%eax)
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 76(%eax)
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 80(%eax)
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 84(%eax)
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %edi, 88(%eax)
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %ebx, 92(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 80(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 96(%eax)
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 100(%eax)
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 96(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 104(%eax)
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 108(%eax)
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 116(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 112(%eax)
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 116(%eax)
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 120(%eax)
+ movl %ecx, 124(%eax)
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 128(%eax)
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 132(%eax)
+ addl $1356, %esp # imm = 0x54C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end256:
+ .size mcl_fpDbl_mulPre17Lbmi2, .Lfunc_end256-mcl_fpDbl_mulPre17Lbmi2
+
+ .globl mcl_fpDbl_sqrPre17Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre17Lbmi2,@function
+mcl_fpDbl_sqrPre17Lbmi2: # @mcl_fpDbl_sqrPre17Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1356, %esp # imm = 0x54C
+ calll .L257$pb
+.L257$pb:
+ popl %ebx
+.Ltmp58:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp58-.L257$pb), %ebx
+ movl %ebx, 124(%esp) # 4-byte Spill
+ movl 1380(%esp), %edx
+ movl (%edx), %eax
+ movl %eax, (%esp)
+ leal 1280(%esp), %ecx
+ movl %edx, %edi
+ movl %ebx, %esi
+ calll .LmulPv544x32
+ movl 1348(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 1344(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 1340(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1336(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 1332(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 1328(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1324(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 1320(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1316(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1312(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1308(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 1304(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1300(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 1296(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 1292(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 1288(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 1280(%esp), %eax
+ movl 1284(%esp), %ebp
+ movl 1376(%esp), %ecx
+ movl %eax, (%ecx)
+ movl %edi, %edx
+ movl 4(%edx), %eax
+ movl %eax, (%esp)
+ leal 1208(%esp), %ecx
+ movl %esi, %ebx
+ calll .LmulPv544x32
+ addl 1208(%esp), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 1276(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1272(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 1268(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 1264(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 1260(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 1256(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 1252(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1248(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1244(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 1240(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1236(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 1232(%esp), %edi
+ movl 1228(%esp), %esi
+ movl 1224(%esp), %edx
+ movl 1220(%esp), %ecx
+ movl 1212(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 1216(%esp), %eax
+ movl 1376(%esp), %ebp
+ movl 8(%esp), %ebx # 4-byte Reload
+ movl %ebx, 4(%ebp)
+ movl 12(%esp), %ebp # 4-byte Reload
+ adcl %ebp, 120(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 8(%esp) # 4-byte Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 64(%esp) # 4-byte Folded Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 8(%edx), %eax
+ movl %eax, (%esp)
+ leal 1136(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 1136(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 1204(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 1200(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 1196(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1192(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 1188(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 1184(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1180(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 1176(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1172(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 1168(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1164(%esp), %ebx
+ movl 1160(%esp), %edi
+ movl 1156(%esp), %esi
+ movl 1152(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1148(%esp), %edx
+ movl 1140(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1144(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 80(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 72(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 12(%edx), %eax
+ movl %eax, (%esp)
+ leal 1064(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 1064(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 1132(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1128(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 1124(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 1120(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 1116(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 1112(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1108(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1100(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 1096(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1092(%esp), %ebx
+ movl 1088(%esp), %edi
+ movl 1084(%esp), %esi
+ movl 1080(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 1076(%esp), %edx
+ movl 1068(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 1072(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 16(%edx), %eax
+ movl %eax, (%esp)
+ leal 992(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 992(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 1060(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 1056(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 1052(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 1044(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 1040(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1036(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 1032(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1028(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 1024(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1020(%esp), %ebx
+ movl 1016(%esp), %edi
+ movl 1012(%esp), %esi
+ movl 1008(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1004(%esp), %edx
+ movl 996(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1000(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 20(%edx), %eax
+ movl %eax, (%esp)
+ leal 920(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 920(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 988(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 984(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 980(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 976(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 972(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 968(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 964(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 960(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 956(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 952(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 948(%esp), %ebx
+ movl 944(%esp), %edi
+ movl 940(%esp), %esi
+ movl 936(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 932(%esp), %edx
+ movl 924(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 928(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 24(%edx), %eax
+ movl %eax, (%esp)
+ leal 848(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 848(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 916(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 912(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 908(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 904(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 900(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 896(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 892(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 888(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 884(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 880(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 876(%esp), %ebx
+ movl 872(%esp), %edi
+ movl 868(%esp), %esi
+ movl 864(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 860(%esp), %edx
+ movl 852(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 856(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 28(%edx), %eax
+ movl %eax, (%esp)
+ leal 776(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 776(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 844(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 840(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 836(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 832(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 828(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 820(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 816(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 812(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 808(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 804(%esp), %ebx
+ movl 800(%esp), %edi
+ movl 796(%esp), %esi
+ movl 792(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 788(%esp), %edx
+ movl 780(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 784(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 32(%edx), %eax
+ movl %eax, (%esp)
+ leal 704(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 704(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 772(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 768(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 764(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 760(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 756(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 752(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 748(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 744(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 740(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 736(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 732(%esp), %ebx
+ movl 728(%esp), %edi
+ movl 724(%esp), %esi
+ movl 720(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 716(%esp), %edx
+ movl 708(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 712(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 32(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 36(%edx), %eax
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 632(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 700(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 696(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 692(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 688(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 684(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 680(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 676(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 672(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 668(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 664(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 660(%esp), %ebx
+ movl 656(%esp), %edi
+ movl 652(%esp), %esi
+ movl 648(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 644(%esp), %edx
+ movl 636(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 640(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 36(%eax)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 40(%edx), %eax
+ movl %eax, (%esp)
+ leal 560(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 560(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 628(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 624(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 620(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 616(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 612(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 608(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 604(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 600(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 596(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 592(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 588(%esp), %ebx
+ movl 584(%esp), %edi
+ movl 580(%esp), %esi
+ movl 576(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 572(%esp), %edx
+ movl 564(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 568(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 40(%eax)
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 24(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 44(%edx), %eax
+ movl %eax, (%esp)
+ leal 488(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 488(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 556(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 552(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 548(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 544(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 540(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 536(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 532(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 528(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 524(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 520(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 516(%esp), %ebx
+ movl 512(%esp), %edi
+ movl 508(%esp), %esi
+ movl 504(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 500(%esp), %edx
+ movl 492(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 496(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 48(%esp), %ebp # 4-byte Reload
+ movl %ebp, 44(%eax)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 48(%edx), %eax
+ movl %eax, (%esp)
+ leal 416(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 416(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 484(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 480(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 476(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 472(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 468(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 464(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 460(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 456(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 452(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 448(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 444(%esp), %ebx
+ movl 440(%esp), %edi
+ movl 436(%esp), %esi
+ movl 432(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 428(%esp), %edx
+ movl 420(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 424(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 48(%eax)
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 24(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 52(%edx), %eax
+ movl %eax, (%esp)
+ leal 344(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 344(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 412(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 408(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 404(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 400(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 396(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 392(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 388(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 384(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 380(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 376(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 372(%esp), %ebx
+ movl 368(%esp), %edi
+ movl 364(%esp), %esi
+ movl 360(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 356(%esp), %edx
+ movl 348(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 352(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 48(%esp), %ebp # 4-byte Reload
+ movl %ebp, 52(%eax)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 56(%edx), %eax
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 272(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 340(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 336(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 332(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 328(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 324(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 320(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 316(%esp), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 312(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 308(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 304(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 300(%esp), %ebx
+ movl 296(%esp), %edi
+ movl 292(%esp), %edx
+ movl 288(%esp), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 284(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 276(%esp), %eax
+ movl 280(%esp), %ecx
+ movl 120(%esp), %esi # 4-byte Reload
+ movl 1376(%esp), %ebp
+ movl %esi, 56(%ebp)
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ebp
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 24(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 116(%esp), %esi # 4-byte Reload
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 60(%edx), %eax
+ movl %eax, (%esp)
+ leal 200(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 200(%esp), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 268(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 264(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 260(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 256(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 252(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 248(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 244(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 240(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 236(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 232(%esp), %edi
+ movl 228(%esp), %esi
+ movl 224(%esp), %edx
+ movl 220(%esp), %ecx
+ movl 216(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 212(%esp), %eax
+ movl 204(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 208(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl 1376(%esp), %ebx
+ movl %ebp, 60(%ebx)
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 76(%esp), %ebp # 4-byte Folded Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 56(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ adcl $0, 52(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 64(%edx), %eax
+ movl %eax, (%esp)
+ leal 128(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 128(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 132(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 136(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 196(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 192(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 188(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 184(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 176(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 168(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 164(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 156(%esp), %ebx
+ movl 152(%esp), %edi
+ movl 148(%esp), %esi
+ movl 144(%esp), %edx
+ movl 140(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 64(%eax)
+ movl 68(%esp), %ebp # 4-byte Reload
+ movl %ebp, 68(%eax)
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl 76(%esp), %ebp # 4-byte Reload
+ movl %ebp, 72(%eax)
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 76(%eax)
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 80(%eax)
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 84(%eax)
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %edi, 88(%eax)
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %ebx, 92(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 80(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 96(%eax)
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 100(%eax)
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 96(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 104(%eax)
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 108(%eax)
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 112(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 112(%eax)
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 116(%eax)
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 120(%eax)
+ movl %ecx, 124(%eax)
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 128(%eax)
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 132(%eax)
+ addl $1356, %esp # imm = 0x54C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end257:
+ .size mcl_fpDbl_sqrPre17Lbmi2, .Lfunc_end257-mcl_fpDbl_sqrPre17Lbmi2
+
+ .globl mcl_fp_mont17Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont17Lbmi2,@function
+mcl_fp_mont17Lbmi2: # @mcl_fp_mont17Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $2588, %esp # imm = 0xA1C
+ calll .L258$pb
+.L258$pb:
+ popl %ebx
+.Ltmp59:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp59-.L258$pb), %ebx
+ movl 2620(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 2512(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 2512(%esp), %ebp
+ movl 2516(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 2580(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 2576(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 2572(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 2568(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 2564(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 2560(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 2556(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 2552(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 2548(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 2544(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 2540(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 2536(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 2532(%esp), %edi
+ movl 2528(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 2524(%esp), %esi
+ movl 2520(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl %eax, (%esp)
+ leal 2440(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ addl 2440(%esp), %ebp
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 2444(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2448(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 2452(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2456(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 2460(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2464(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2468(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2472(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2476(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2480(%esp), %eax
+ movl %eax, %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2484(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2488(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2492(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 2496(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 2500(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 2504(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 2508(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl 2616(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 2368(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ andl $1, %ebp
+ movl 120(%esp), %ecx # 4-byte Reload
+ addl 2368(%esp), %ecx
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2372(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 2376(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2380(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 2384(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2388(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 2392(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2396(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2400(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 2404(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 2408(%esp), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2412(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2416(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 2420(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 2424(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 2428(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 2432(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl 2436(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 2296(%esp), %ecx
+ movl 2620(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv544x32
+ movl 116(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 2296(%esp), %ebp
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 2300(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 2304(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 2308(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 2312(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 2316(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 2320(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 2324(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 2328(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 2332(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 2336(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 2340(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 2344(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 2348(%esp), %esi
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 2352(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 2356(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 2360(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl 2364(%esp), %ebp
+ adcl $0, %eax
+ movl %eax, %edi
+ movl 2616(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 2224(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 100(%esp), %ecx # 4-byte Reload
+ addl 2224(%esp), %ecx
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 2228(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2232(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2236(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2240(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2244(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2248(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2252(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2256(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2260(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2264(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2268(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 2272(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 2276(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 2280(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 2284(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl 2288(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ adcl 2292(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 2152(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 2152(%esp), %edi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 2156(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2160(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2164(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2168(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2172(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2176(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2180(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2184(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2188(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2192(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2196(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 2200(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %ebp # 4-byte Reload
+ adcl 2204(%esp), %ebp
+ movl 128(%esp), %edi # 4-byte Reload
+ adcl 2208(%esp), %edi
+ movl 132(%esp), %esi # 4-byte Reload
+ adcl 2212(%esp), %esi
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 2216(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 2220(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 2080(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 112(%esp), %ecx # 4-byte Reload
+ addl 2080(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2084(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2088(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2092(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2096(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2100(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2104(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2108(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2112(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2116(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2120(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 2124(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 2128(%esp), %ebp
+ movl %ebp, 124(%esp) # 4-byte Spill
+ adcl 2132(%esp), %edi
+ movl %edi, 128(%esp) # 4-byte Spill
+ adcl 2136(%esp), %esi
+ movl %esi, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 2140(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 2144(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 2148(%esp), %esi
+ sbbl %ebp, %ebp
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 2008(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ movl %ebp, %eax
+ andl $1, %eax
+ addl 2008(%esp), %edi
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 2012(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 2016(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 2020(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 2024(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 2028(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 2032(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 2036(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 2040(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 2044(%esp), %edi
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 2048(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 2052(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 2056(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 2060(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 2064(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 2068(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %ebp # 4-byte Reload
+ adcl 2072(%esp), %ebp
+ adcl 2076(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1936(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 88(%esp), %ecx # 4-byte Reload
+ addl 1936(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1940(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1944(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1948(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1952(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 1956(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1960(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1964(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1968(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1972(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1976(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1980(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 1984(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 1988(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1992(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 1996(%esp), %ebp
+ movl %ebp, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2000(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 2004(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1864(%esp), %ecx
+ movl 2620(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv544x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 1864(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1868(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1872(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1876(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 1880(%esp), %edi
+ adcl 1884(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1888(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1892(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %esi # 4-byte Reload
+ adcl 1896(%esp), %esi
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1900(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1904(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1908(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 1912(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ebp # 4-byte Reload
+ adcl 1916(%esp), %ebp
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1920(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1924(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1928(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1932(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 1792(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 1792(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1796(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1800(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1804(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1808(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1812(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1816(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1820(%esp), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1824(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1828(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1832(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 1836(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ adcl 1840(%esp), %ebp
+ movl %ebp, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1844(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl 1848(%esp), %edi
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 1852(%esp), %ebp
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1856(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1860(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1720(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ andl $1, %esi
+ movl %esi, %eax
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 1720(%esp), %ecx
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1724(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1728(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1732(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1736(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1740(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1744(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1748(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 1752(%esp), %esi
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1756(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 1760(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 1764(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 1768(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 1772(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ adcl 1776(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ adcl 1780(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1784(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 1788(%esp), %ebp
+ adcl $0, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 1648(%esp), %ecx
+ movl 2612(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv544x32
+ movl 92(%esp), %eax # 4-byte Reload
+ addl 1648(%esp), %eax
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 1652(%esp), %edi
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1656(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1660(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1664(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1668(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1672(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl 1676(%esp), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1680(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 1684(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 1688(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 1692(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 1696(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1700(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1704(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1708(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ adcl 1712(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1716(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %eax, %ebp
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1576(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 1576(%esp), %ebp
+ adcl 1580(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1584(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 1588(%esp), %ebp
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 1592(%esp), %edi
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 1596(%esp), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1600(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1604(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1608(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1612(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 1616(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 1620(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1624(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1628(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1632(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1636(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1640(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1644(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 1504(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 1504(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1508(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 1512(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ adcl 1516(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ adcl 1520(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1524(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1528(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1532(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1536(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %edi # 4-byte Reload
+ adcl 1540(%esp), %edi
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 1544(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1548(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1552(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1556(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1560(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1564(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 1568(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1572(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1432(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ movl 76(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1432(%esp), %ebp
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1436(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1440(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1444(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1448(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1452(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1456(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ebp # 4-byte Reload
+ adcl 1460(%esp), %ebp
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 1464(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ adcl 1468(%esp), %edi
+ movl %edi, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 1472(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %edi # 4-byte Reload
+ adcl 1476(%esp), %edi
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1480(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1484(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1488(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1492(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl 1496(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1500(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 1360(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 68(%esp), %ecx # 4-byte Reload
+ addl 1360(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1364(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1368(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1372(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1376(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1380(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 1384(%esp), %ebp
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1388(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 1392(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 1396(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl 1400(%esp), %edi
+ movl %edi, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1404(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 1408(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1412(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1416(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1420(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1424(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1428(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1288(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ movl 68(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1288(%esp), %edi
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1292(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1296(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1300(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1304(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1308(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ adcl 1312(%esp), %ebp
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 1316(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 1320(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 1324(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 1328(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1332(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ adcl 1336(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %edi # 4-byte Reload
+ adcl 1340(%esp), %edi
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 1344(%esp), %esi
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1348(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1352(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1356(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 1216(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 64(%esp), %ecx # 4-byte Reload
+ addl 1216(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1220(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1224(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1228(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1232(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 1236(%esp), %ebp
+ movl %ebp, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %ebp # 4-byte Reload
+ adcl 1240(%esp), %ebp
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 1244(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 1248(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1252(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1256(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1260(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 1264(%esp), %edi
+ movl %edi, 112(%esp) # 4-byte Spill
+ adcl 1268(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1272(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1276(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1280(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1284(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1144(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ movl %edi, %eax
+ andl $1, %eax
+ addl 1144(%esp), %esi
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1148(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1152(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1156(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl 1160(%esp), %edi
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1164(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ adcl 1168(%esp), %ebp
+ movl %ebp, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 1172(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 1176(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %esi # 4-byte Reload
+ adcl 1180(%esp), %esi
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1184(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1188(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1192(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1196(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1200(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1204(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1208(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1212(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 1072(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 1072(%esp), %ecx
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1076(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl 1080(%esp), %ebp
+ adcl 1084(%esp), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1088(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1092(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 1096(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 1100(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl 1104(%esp), %esi
+ movl %esi, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1108(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 1112(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1116(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1120(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1124(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1128(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1140(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1000(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ movl 72(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1000(%esp), %edi
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1004(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl 1008(%esp), %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1012(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1016(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 1020(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 1024(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ebp # 4-byte Reload
+ adcl 1028(%esp), %ebp
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 1032(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl 1036(%esp), %edi
+ adcl 1040(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1044(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1048(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1052(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 1056(%esp), %esi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1060(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1064(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1068(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 928(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 84(%esp), %ecx # 4-byte Reload
+ addl 928(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 932(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 936(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 940(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 944(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 948(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ adcl 952(%esp), %ebp
+ movl %ebp, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 960(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 980(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 984(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 856(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 856(%esp), %ebp
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 876(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %ebp # 4-byte Reload
+ adcl 888(%esp), %ebp
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 896(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 912(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 2616(%esp), %ecx
+ movl %ecx, %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 784(%esp), %ecx
+ movl 2612(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv544x32
+ movl 96(%esp), %ecx # 4-byte Reload
+ addl 784(%esp), %ecx
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 812(%esp), %ebp
+ movl %ebp, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 820(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 828(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 712(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 712(%esp), %ebp
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 716(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %esi # 4-byte Reload
+ adcl 728(%esp), %esi
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl 736(%esp), %ebp
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 756(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 780(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 640(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 104(%esp), %ecx # 4-byte Reload
+ addl 640(%esp), %ecx
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl 652(%esp), %esi
+ movl %esi, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl 660(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 680(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 696(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ movl 104(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 568(%esp), %esi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 600(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 608(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 616(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 624(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 56(%eax), %eax
+ movl %eax, (%esp)
+ leal 496(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 108(%esp), %ecx # 4-byte Reload
+ addl 496(%esp), %ecx
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 524(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 528(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 540(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 544(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ movl 108(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 424(%esp), %esi
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %esi # 4-byte Reload
+ adcl 440(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 456(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 472(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 480(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 60(%eax), %eax
+ movl %eax, (%esp)
+ leal 352(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 124(%esp), %ecx # 4-byte Reload
+ addl 352(%esp), %ecx
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl 364(%esp), %esi
+ movl %esi, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 372(%esp), %edi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 404(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 280(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 280(%esp), %ebp
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %esi # 4-byte Reload
+ adcl 288(%esp), %esi
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %ebp # 4-byte Reload
+ adcl 296(%esp), %ebp
+ adcl 300(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ adcl 308(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 64(%eax), %eax
+ movl %eax, (%esp)
+ leal 208(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 128(%esp), %ecx # 4-byte Reload
+ addl 208(%esp), %ecx
+ adcl 212(%esp), %esi
+ movl %esi, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 220(%esp), %ebp
+ movl %ebp, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 224(%esp), %ebp
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 232(%esp), %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 136(%esp), %ecx
+ movl 2620(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv544x32
+ andl $1, %edi
+ addl 136(%esp), %esi
+ movl 116(%esp), %edx # 4-byte Reload
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 140(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 144(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 148(%esp), %edx
+ movl %edx, 116(%esp) # 4-byte Spill
+ adcl 152(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 164(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 168(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 172(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 176(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 180(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 184(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 188(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 192(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 196(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 200(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 204(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl 132(%esp), %ecx # 4-byte Reload
+ movl 2620(%esp), %ebx
+ subl (%ebx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ sbbl 4(%ebx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ sbbl 8(%ebx), %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ sbbl 12(%ebx), %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %eax, %edx
+ sbbl 16(%ebx), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ sbbl 20(%ebx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ sbbl 24(%ebx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl 28(%ebx), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ sbbl 32(%ebx), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ sbbl 36(%ebx), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ sbbl 40(%ebx), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ sbbl 44(%ebx), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ sbbl 48(%ebx), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ sbbl 52(%ebx), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ sbbl 56(%ebx), %esi
+ movl %esi, 128(%esp) # 4-byte Spill
+ movl %ebx, %ebp
+ movl 108(%esp), %ebx # 4-byte Reload
+ sbbl 60(%ebp), %ebx
+ movl 124(%esp), %esi # 4-byte Reload
+ sbbl 64(%ebp), %esi
+ movl %esi, %ebp
+ sbbl $0, %edi
+ andl $1, %edi
+ jne .LBB258_2
+# BB#1:
+ movl %ebx, 108(%esp) # 4-byte Spill
+.LBB258_2:
+ movl %edi, %ebx
+ testb %bl, %bl
+ movl 132(%esp), %ebx # 4-byte Reload
+ jne .LBB258_4
+# BB#3:
+ movl 12(%esp), %ebx # 4-byte Reload
+.LBB258_4:
+ movl 2608(%esp), %eax
+ movl %ebx, (%eax)
+ movl 120(%esp), %ebx # 4-byte Reload
+ jne .LBB258_6
+# BB#5:
+ movl 16(%esp), %ebx # 4-byte Reload
+.LBB258_6:
+ movl %ebx, 4(%eax)
+ jne .LBB258_8
+# BB#7:
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 116(%esp) # 4-byte Spill
+.LBB258_8:
+ movl 116(%esp), %ecx # 4-byte Reload
+ movl %ecx, 8(%eax)
+ jne .LBB258_10
+# BB#9:
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 100(%esp) # 4-byte Spill
+.LBB258_10:
+ movl 100(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%eax)
+ movl 112(%esp), %esi # 4-byte Reload
+ jne .LBB258_12
+# BB#11:
+ movl 28(%esp), %esi # 4-byte Reload
+.LBB258_12:
+ movl %esi, 16(%eax)
+ movl 80(%esp), %ecx # 4-byte Reload
+ jne .LBB258_14
+# BB#13:
+ movl 32(%esp), %edx # 4-byte Reload
+.LBB258_14:
+ movl %edx, 20(%eax)
+ jne .LBB258_16
+# BB#15:
+ movl 36(%esp), %ecx # 4-byte Reload
+.LBB258_16:
+ movl %ecx, 24(%eax)
+ movl 92(%esp), %ecx # 4-byte Reload
+ jne .LBB258_18
+# BB#17:
+ movl 40(%esp), %ecx # 4-byte Reload
+.LBB258_18:
+ movl %ecx, 28(%eax)
+ movl 76(%esp), %ecx # 4-byte Reload
+ jne .LBB258_20
+# BB#19:
+ movl 44(%esp), %ecx # 4-byte Reload
+.LBB258_20:
+ movl %ecx, 32(%eax)
+ movl 68(%esp), %ecx # 4-byte Reload
+ jne .LBB258_22
+# BB#21:
+ movl 48(%esp), %ecx # 4-byte Reload
+.LBB258_22:
+ movl %ecx, 36(%eax)
+ movl 64(%esp), %ecx # 4-byte Reload
+ jne .LBB258_24
+# BB#23:
+ movl 52(%esp), %ecx # 4-byte Reload
+.LBB258_24:
+ movl %ecx, 40(%eax)
+ movl 72(%esp), %ecx # 4-byte Reload
+ jne .LBB258_26
+# BB#25:
+ movl 56(%esp), %ecx # 4-byte Reload
+.LBB258_26:
+ movl %ecx, 44(%eax)
+ movl 84(%esp), %ecx # 4-byte Reload
+ jne .LBB258_28
+# BB#27:
+ movl 60(%esp), %ecx # 4-byte Reload
+.LBB258_28:
+ movl %ecx, 48(%eax)
+ movl 96(%esp), %ecx # 4-byte Reload
+ jne .LBB258_30
+# BB#29:
+ movl 88(%esp), %ecx # 4-byte Reload
+.LBB258_30:
+ movl %ecx, 52(%eax)
+ movl 104(%esp), %ecx # 4-byte Reload
+ jne .LBB258_32
+# BB#31:
+ movl 128(%esp), %ecx # 4-byte Reload
+.LBB258_32:
+ movl %ecx, 56(%eax)
+ movl 108(%esp), %ecx # 4-byte Reload
+ movl %ecx, 60(%eax)
+ movl 124(%esp), %ecx # 4-byte Reload
+ jne .LBB258_34
+# BB#33:
+ movl %ebp, %ecx
+.LBB258_34:
+ movl %ecx, 64(%eax)
+ addl $2588, %esp # imm = 0xA1C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end258:
+ .size mcl_fp_mont17Lbmi2, .Lfunc_end258-mcl_fp_mont17Lbmi2
+
+ .globl mcl_fp_montNF17Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF17Lbmi2,@function
+mcl_fp_montNF17Lbmi2: # @mcl_fp_montNF17Lbmi2
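+# Note: montNF appears to be mcl's Montgomery multiplication variant for
+# 17 x 32-bit limbs (544-bit operands, hence the .LmulPv544x32 helper);
+# the "bmi2" suffix presumably marks code generated assuming BMI2 (mulx).
+# Each round multiplies the operand by one 32-bit word of the multiplier,
+# forms the Montgomery factor from the stored inverse (imull 48(%esp)),
+# and accumulates with adcl, spilling partial sums to the stack
+# ("4-byte Spill"/"4-byte Reload").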
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $2572, %esp # imm = 0xA0C
+ calll .L259$pb
+.L259$pb:
+ popl %ebx
+.Ltmp60:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp60-.L259$pb), %ebx
+ movl 2604(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 2496(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 2496(%esp), %edi
+ movl 2500(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull %esi, %eax
+ movl 2564(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 2560(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 2556(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 2552(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 2548(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 2544(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 2540(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 2536(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 2532(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 2528(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 2524(%esp), %ebp
+ movl 2520(%esp), %esi
+ movl 2516(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 2512(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 2508(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 2504(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl %eax, (%esp)
+ leal 2424(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 2424(%esp), %edi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 2428(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2432(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2436(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2440(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2444(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 2448(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl 2452(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 2456(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 2460(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2464(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2468(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 2472(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2476(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 2480(%esp), %edi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 2484(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2488(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 2492(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 2352(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 2420(%esp), %ecx
+ movl 112(%esp), %edx # 4-byte Reload
+ addl 2352(%esp), %edx
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2356(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2360(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2364(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2368(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2372(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 2376(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 2380(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 2384(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2388(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2392(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 2396(%esp), %esi
+ movl %esi, %ebp
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2400(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 2404(%esp), %edi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 2408(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2412(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 2416(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 2280(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 2280(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2284(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2288(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2292(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2296(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2300(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 2304(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 2308(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 2312(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 2316(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2320(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 2324(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2328(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 2332(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 2336(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 2340(%esp), %ebp
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl 2344(%esp), %edi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 2348(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 2208(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 2276(%esp), %eax
+ movl 92(%esp), %edx # 4-byte Reload
+ addl 2208(%esp), %edx
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 2212(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 2216(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 2220(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 2224(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 2228(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 2232(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 2236(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 2240(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 2244(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 2248(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 2252(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 2256(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 2260(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ adcl 2264(%esp), %ebp
+ adcl 2268(%esp), %edi
+ movl %edi, %esi
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 2272(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 2136(%esp), %ecx
+ movl 2604(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv544x32
+ addl 2136(%esp), %edi
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2140(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2144(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2148(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2152(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 2156(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 2160(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 2164(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2168(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2172(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2176(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2180(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2184(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %edi # 4-byte Reload
+ adcl 2188(%esp), %edi
+ adcl 2192(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ adcl 2196(%esp), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 2200(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2204(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 2064(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 2132(%esp), %eax
+ movl 104(%esp), %edx # 4-byte Reload
+ addl 2064(%esp), %edx
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 2068(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 2072(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 2076(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 2080(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 2084(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 2088(%esp), %ebp
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 2092(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 2096(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 2100(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 2104(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 2108(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl 2112(%esp), %edi
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 2116(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 2120(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ adcl 2124(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 2128(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1992(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 1992(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1996(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2000(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2004(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 2008(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 2012(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 2016(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2020(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2024(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2028(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2032(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2036(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 2040(%esp), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 2044(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 2048(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 2052(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 2056(%esp), %edi
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2060(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1920(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 1988(%esp), %eax
+ movl 76(%esp), %edx # 4-byte Reload
+ addl 1920(%esp), %edx
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1924(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1928(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1932(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 1936(%esp), %ebp
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1940(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1944(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1948(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1952(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1956(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1960(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1964(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ adcl 1968(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1972(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 1976(%esp), %esi
+ adcl 1980(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1984(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %edx, %eax
+ movl %edx, %edi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1848(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 1848(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1852(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1856(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1860(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1864(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1868(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1872(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1876(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1880(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1884(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1888(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1892(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1896(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1900(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 1904(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 1908(%esp), %ebp
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1912(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1916(%esp), %eax
+ movl %eax, %edi
+ movl 2600(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 1776(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 1844(%esp), %eax
+ movl 84(%esp), %edx # 4-byte Reload
+ addl 1776(%esp), %edx
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1780(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1784(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1788(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1792(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1796(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1800(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1804(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 1808(%esp), %esi
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1812(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1816(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1820(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1824(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1828(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ adcl 1832(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1836(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ adcl 1840(%esp), %edi
+ adcl $0, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ movl %ebp, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1704(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 1704(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1708(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1712(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1716(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1720(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1724(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1728(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1732(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1736(%esp), %esi
+ movl %esi, %ebp
+ movl 96(%esp), %esi # 4-byte Reload
+ adcl 1740(%esp), %esi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1744(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1748(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1752(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1756(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1760(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1764(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 1768(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1772(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 1632(%esp), %ecx
+ movl 2596(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv544x32
+ movl 1700(%esp), %eax
+ movl 80(%esp), %edx # 4-byte Reload
+ addl 1632(%esp), %edx
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1636(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1640(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1644(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1648(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1652(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1656(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 1660(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ adcl 1664(%esp), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1668(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 1672(%esp), %esi
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1676(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1680(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1684(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1688(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ adcl 1692(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1696(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1560(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 1560(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1564(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1568(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1572(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1576(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 1580(%esp), %edi
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 1584(%esp), %ebp
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1588(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1592(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1596(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 1600(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1604(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 1608(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1612(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1616(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1620(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1624(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1628(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 1488(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 1556(%esp), %eax
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 1488(%esp), %ecx
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 1492(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 1496(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 1500(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 1504(%esp), %edi
+ adcl 1508(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 1512(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ adcl 1516(%esp), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %edx # 4-byte Reload
+ adcl 1520(%esp), %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 1524(%esp), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %edx # 4-byte Reload
+ adcl 1528(%esp), %edx
+ movl %edx, 116(%esp) # 4-byte Spill
+ adcl 1532(%esp), %esi
+ movl %esi, %ebp
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 1536(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %edx # 4-byte Reload
+ adcl 1540(%esp), %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 1544(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 1548(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 1552(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1416(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 1416(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1420(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1424(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 1428(%esp), %esi
+ adcl 1432(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 1436(%esp), %edi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1440(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1444(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1448(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1452(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1456(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 1460(%esp), %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1464(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1468(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1472(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1476(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1480(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 1484(%esp), %ebp
+ movl 2600(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 1344(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 1412(%esp), %eax
+ movl 52(%esp), %edx # 4-byte Reload
+ addl 1344(%esp), %edx
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1348(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 1352(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1356(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 1360(%esp), %edi
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1364(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1368(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1372(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1376(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1380(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1384(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1388(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1392(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 1396(%esp), %esi
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1400(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1404(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 1408(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ movl %ebp, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1272(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 1272(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1276(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1280(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1284(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1288(%esp), %edi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1292(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1296(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1300(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 1304(%esp), %ebp
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1308(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1312(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1316(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1320(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 1324(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1328(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1332(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1336(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1340(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 1200(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 1268(%esp), %eax
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 1200(%esp), %ecx
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 1204(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 1208(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ adcl 1212(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 1216(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ adcl 1220(%esp), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 1224(%esp), %esi
+ adcl 1228(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl 1232(%esp), %edi
+ movl 112(%esp), %edx # 4-byte Reload
+ adcl 1236(%esp), %edx
+ movl %edx, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 1240(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %edx # 4-byte Reload
+ adcl 1244(%esp), %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 1248(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 1252(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 1256(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 1260(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 1264(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1128(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 1128(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1140(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1144(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1148(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 1152(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1156(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 1160(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1168(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ebp # 4-byte Reload
+ adcl 1172(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 1180(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1184(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 1188(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1192(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1196(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 1056(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 1124(%esp), %edx
+ movl 68(%esp), %eax # 4-byte Reload
+ addl 1056(%esp), %eax
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1060(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1064(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1068(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1072(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1076(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1080(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1084(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1088(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1092(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 1096(%esp), %ebp
+ movl %ebp, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1100(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl 1104(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1108(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 1112(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 1116(%esp), %edi
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1120(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, %esi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 984(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 984(%esp), %esi
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 988(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 996(%esp), %ebp
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1000(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1004(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1028(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1040(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1044(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 912(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 980(%esp), %eax
+ addl 912(%esp), %esi
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 916(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl 920(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 924(%esp), %edi
+ movl 108(%esp), %edx # 4-byte Reload
+ adcl 928(%esp), %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 932(%esp), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %edx # 4-byte Reload
+ adcl 936(%esp), %edx
+ movl %edx, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %edx # 4-byte Reload
+ adcl 940(%esp), %edx
+ movl %edx, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 944(%esp), %ebp
+ movl 104(%esp), %edx # 4-byte Reload
+ adcl 948(%esp), %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 952(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 956(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 960(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 964(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 968(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 972(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 976(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %esi, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 840(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 840(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 852(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 856(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl 864(%esp), %edi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ adcl 872(%esp), %esi
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 876(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 888(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 768(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 836(%esp), %edx
+ movl 64(%esp), %ecx # 4-byte Reload
+ addl 768(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 780(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 788(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %edi # 4-byte Reload
+ adcl 792(%esp), %edi
+ adcl 796(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 812(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 828(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 696(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 696(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %esi # 4-byte Reload
+ adcl 716(%esp), %esi
+ adcl 720(%esp), %edi
+ movl %edi, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 732(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 756(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 624(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 692(%esp), %edx
+ movl 88(%esp), %ecx # 4-byte Reload
+ addl 624(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 636(%esp), %ebp
+ adcl 640(%esp), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 656(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 660(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 552(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 552(%esp), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 564(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 576(%esp), %esi
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 588(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 600(%esp), %ebp
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 604(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 56(%eax), %eax
+ movl %eax, (%esp)
+ leal 480(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 548(%esp), %edx
+ movl 96(%esp), %ecx # 4-byte Reload
+ addl 480(%esp), %ecx
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 500(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 524(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl 528(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 408(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 408(%esp), %esi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %esi # 4-byte Reload
+ adcl 420(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ebp # 4-byte Reload
+ adcl 432(%esp), %ebp
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 436(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 60(%eax), %eax
+ movl %eax, (%esp)
+ leal 336(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 404(%esp), %edx
+ movl 108(%esp), %ecx # 4-byte Reload
+ addl 336(%esp), %ecx
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 344(%esp), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 356(%esp), %ebp
+ movl %ebp, 104(%esp) # 4-byte Spill
+ adcl 360(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 364(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 372(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 264(%esp), %esi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 280(%esp), %esi
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 292(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 300(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 308(%esp), %edi
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 312(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 64(%eax), %eax
+ movl %eax, (%esp)
+ leal 192(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 260(%esp), %edx
+ movl 100(%esp), %ecx # 4-byte Reload
+ addl 192(%esp), %ecx
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 204(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 232(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ adcl 236(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 120(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 120(%esp), %esi
+ movl 92(%esp), %esi # 4-byte Reload
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 124(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl 128(%esp), %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ adcl 132(%esp), %esi
+ movl 104(%esp), %edx # 4-byte Reload
+ adcl 136(%esp), %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 140(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 144(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 148(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 152(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 156(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 160(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 164(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 168(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 172(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 176(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ adcl 180(%esp), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %edx # 4-byte Reload
+ adcl 184(%esp), %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 188(%esp), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 2604(%esp), %edi
+ subl (%edi), %edx
+ sbbl 4(%edi), %ebp
+ movl %esi, %ebx
+ sbbl 8(%edi), %ebx
+ movl 104(%esp), %ecx # 4-byte Reload
+ sbbl 12(%edi), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ sbbl 16(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ sbbl 20(%edi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ sbbl 24(%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ sbbl 28(%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ sbbl 32(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ sbbl 36(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ sbbl 44(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ sbbl 48(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ sbbl 52(%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ sbbl 56(%edi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ sbbl 60(%edi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ sbbl 64(%edi), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ sarl $31, %eax
+ testl %eax, %eax
+ movl 116(%esp), %edi # 4-byte Reload
+ js .LBB259_2
+# BB#1:
+ movl %edx, %edi
+.LBB259_2:
+ movl 2592(%esp), %edx
+ movl %edi, (%edx)
+ movl 112(%esp), %edi # 4-byte Reload
+ js .LBB259_4
+# BB#3:
+ movl %ebp, %edi
+.LBB259_4:
+ movl %edi, 4(%edx)
+ js .LBB259_6
+# BB#5:
+ movl %ebx, %esi
+.LBB259_6:
+ movl %esi, 8(%edx)
+ movl 104(%esp), %esi # 4-byte Reload
+ js .LBB259_8
+# BB#7:
+ movl %ecx, %esi
+.LBB259_8:
+ movl %esi, 12(%edx)
+ movl 76(%esp), %ecx # 4-byte Reload
+ js .LBB259_10
+# BB#9:
+ movl 4(%esp), %ecx # 4-byte Reload
+.LBB259_10:
+ movl %ecx, 16(%edx)
+ movl 84(%esp), %eax # 4-byte Reload
+ js .LBB259_12
+# BB#11:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB259_12:
+ movl %eax, 20(%edx)
+ movl 80(%esp), %eax # 4-byte Reload
+ js .LBB259_14
+# BB#13:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB259_14:
+ movl %eax, 24(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB259_16
+# BB#15:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB259_16:
+ movl %eax, 28(%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB259_18
+# BB#17:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB259_18:
+ movl %eax, 32(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB259_20
+# BB#19:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB259_20:
+ movl %eax, 36(%edx)
+ movl 68(%esp), %eax # 4-byte Reload
+ js .LBB259_22
+# BB#21:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB259_22:
+ movl %eax, 40(%edx)
+ movl 72(%esp), %eax # 4-byte Reload
+ js .LBB259_24
+# BB#23:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB259_24:
+ movl %eax, 44(%edx)
+ movl 64(%esp), %eax # 4-byte Reload
+ js .LBB259_26
+# BB#25:
+ movl 36(%esp), %eax # 4-byte Reload
+.LBB259_26:
+ movl %eax, 48(%edx)
+ movl 88(%esp), %eax # 4-byte Reload
+ js .LBB259_28
+# BB#27:
+ movl 40(%esp), %eax # 4-byte Reload
+.LBB259_28:
+ movl %eax, 52(%edx)
+ movl 96(%esp), %eax # 4-byte Reload
+ js .LBB259_30
+# BB#29:
+ movl 44(%esp), %eax # 4-byte Reload
+.LBB259_30:
+ movl %eax, 56(%edx)
+ movl 108(%esp), %eax # 4-byte Reload
+ js .LBB259_32
+# BB#31:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB259_32:
+ movl %eax, 60(%edx)
+ movl 100(%esp), %eax # 4-byte Reload
+ js .LBB259_34
+# BB#33:
+ movl 92(%esp), %eax # 4-byte Reload
+.LBB259_34:
+ movl %eax, 64(%edx)
+ addl $2572, %esp # imm = 0xA0C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end259:
+ .size mcl_fp_montNF17Lbmi2, .Lfunc_end259-mcl_fp_montNF17Lbmi2
+
+ .globl mcl_fp_montRed17Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed17Lbmi2,@function
+mcl_fp_montRed17Lbmi2: # @mcl_fp_montRed17Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1436, %esp # imm = 0x59C
+ calll .L260$pb
+.L260$pb:
+ popl %eax
+.Ltmp61:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp61-.L260$pb), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 1464(%esp), %edx
+ movl -4(%edx), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 1460(%esp), %ecx
+ movl (%ecx), %ebx
+ movl %ebx, 76(%esp) # 4-byte Spill
+ movl 4(%ecx), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ imull %esi, %ebx
+ movl 132(%ecx), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 128(%ecx), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 124(%ecx), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%ecx), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%ecx), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 112(%ecx), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 108(%ecx), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 104(%ecx), %esi
+ movl %esi, 156(%esp) # 4-byte Spill
+ movl 100(%ecx), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 96(%ecx), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 92(%ecx), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 88(%ecx), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 84(%ecx), %esi
+ movl %esi, 180(%esp) # 4-byte Spill
+ movl 80(%ecx), %edi
+ movl %edi, 196(%esp) # 4-byte Spill
+ movl 76(%ecx), %esi
+ movl %esi, 192(%esp) # 4-byte Spill
+ movl 72(%ecx), %esi
+ movl %esi, 204(%esp) # 4-byte Spill
+ movl 68(%ecx), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 64(%ecx), %ebp
+ movl %ebp, 176(%esp) # 4-byte Spill
+ movl 60(%ecx), %ebp
+ movl %ebp, 164(%esp) # 4-byte Spill
+ movl 56(%ecx), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 52(%ecx), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 48(%ecx), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 44(%ecx), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 40(%ecx), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 32(%ecx), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 28(%ecx), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 24(%ecx), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 20(%ecx), %ebp
+ movl 16(%ecx), %esi
+ movl 12(%ecx), %edi
+ movl 8(%ecx), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl (%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 64(%edx), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%edx), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 56(%edx), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%edx), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 48(%edx), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 44(%edx), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 40(%edx), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 36(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 32(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 28(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 24(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 16(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 12(%edx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 8(%edx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 4(%edx), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl %ebx, (%esp)
+ leal 1360(%esp), %ecx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 76(%esp), %eax # 4-byte Reload
+ addl 1360(%esp), %eax
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1364(%esp), %ecx
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1368(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 1372(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ adcl 1376(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ adcl 1380(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1384(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1388(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1392(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1396(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1400(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1404(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1408(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 1412(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 1416(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1420(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 1424(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1428(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ adcl $0, 204(%esp) # 4-byte Folded Spill
+ adcl $0, 192(%esp) # 4-byte Folded Spill
+ adcl $0, 196(%esp) # 4-byte Folded Spill
+ adcl $0, 180(%esp) # 4-byte Folded Spill
+ adcl $0, 184(%esp) # 4-byte Folded Spill
+ adcl $0, 188(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ movl 128(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1288(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 1288(%esp), %esi
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 1292(%esp), %edx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1296(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1300(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1304(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1308(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1312(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1316(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1320(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1324(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1328(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1332(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %edi # 4-byte Reload
+ adcl 1336(%esp), %edi
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 1340(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1344(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 1348(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1352(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 1356(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ adcl $0, 192(%esp) # 4-byte Folded Spill
+ adcl $0, 196(%esp) # 4-byte Folded Spill
+ adcl $0, 180(%esp) # 4-byte Folded Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, %esi
+ adcl $0, 188(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 128(%esp) # 4-byte Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ movl %ebp, %eax
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1216(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 1216(%esp), %ebp
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1220(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1224(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1228(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1232(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1236(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1240(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1244(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1248(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1252(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1256(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ adcl 1260(%esp), %edi
+ movl %edi, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %edi # 4-byte Reload
+ adcl 1264(%esp), %edi
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1268(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 1272(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1276(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 1280(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 1284(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ adcl $0, 196(%esp) # 4-byte Folded Spill
+ adcl $0, 180(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 184(%esp) # 4-byte Spill
+ adcl $0, 188(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ movl 144(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1144(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 1144(%esp), %esi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1148(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1152(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1156(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1168(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1180(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 1184(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ adcl 1188(%esp), %edi
+ movl %edi, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1192(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 1196(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1200(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 1204(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 1208(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 1212(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ adcl $0, 180(%esp) # 4-byte Folded Spill
+ adcl $0, 184(%esp) # 4-byte Folded Spill
+ movl 188(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 144(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1072(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 1072(%esp), %esi
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 1076(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1080(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1084(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1088(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1092(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1096(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1100(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1104(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 1108(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 1112(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1116(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 1120(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1124(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 1128(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 1140(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ adcl $0, 184(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 188(%esp) # 4-byte Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ movl 172(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ movl 152(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %esi, %eax
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1000(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 1000(%esp), %esi
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1004(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1028(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1040(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 1044(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 1068(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ adcl $0, 188(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 172(%esp) # 4-byte Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 152(%esp) # 4-byte Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 928(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 928(%esp), %esi
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 932(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 936(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 940(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 944(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 948(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 952(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 980(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 984(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ movl 160(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %esi, %eax
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 856(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 856(%esp), %esi
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 860(%esp), %ecx
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 876(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ adcl 924(%esp), %ebp
+ movl %ebp, 168(%esp) # 4-byte Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 160(%esp) # 4-byte Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ movl 124(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ movl 96(%esp), %ebp # 4-byte Reload
+ imull %ebp, %eax
+ movl %eax, (%esp)
+ leal 784(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 784(%esp), %edi
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 788(%esp), %ecx
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 156(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 124(%esp) # 4-byte Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull %ebp, %eax
+ movl %eax, (%esp)
+ leal 712(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 712(%esp), %esi
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 716(%esp), %ecx
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %ebp # 4-byte Reload
+ adcl 760(%esp), %ebp
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ adcl 780(%esp), %edi
+ movl %edi, 156(%esp) # 4-byte Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 640(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 640(%esp), %esi
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 644(%esp), %ecx
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %edi # 4-byte Reload
+ adcl 672(%esp), %edi
+ movl 192(%esp), %esi # 4-byte Reload
+ adcl 676(%esp), %esi
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ adcl 684(%esp), %ebp
+ movl %ebp, 180(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 696(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 1464(%esp), %eax
+ movl %eax, %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 568(%esp), %ebp
+ movl 140(%esp), %ecx # 4-byte Reload
+ adcl 572(%esp), %ecx
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %ebp # 4-byte Reload
+ adcl 588(%esp), %ebp
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ adcl 596(%esp), %edi
+ movl %edi, 204(%esp) # 4-byte Spill
+ adcl 600(%esp), %esi
+ movl %esi, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %esi # 4-byte Reload
+ adcl 604(%esp), %esi
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 496(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 496(%esp), %edi
+ movl 136(%esp), %edi # 4-byte Reload
+ adcl 500(%esp), %edi
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ adcl 512(%esp), %ebp
+ movl %ebp, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %ebp # 4-byte Reload
+ adcl 516(%esp), %ebp
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ adcl 528(%esp), %esi
+ movl %esi, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ movl %edi, %eax
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 424(%esp), %edi
+ movl 148(%esp), %ecx # 4-byte Reload
+ adcl 428(%esp), %ecx
+ movl 164(%esp), %edi # 4-byte Reload
+ adcl 432(%esp), %edi
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ adcl 440(%esp), %ebp
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 352(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 352(%esp), %esi
+ movl %edi, %ecx
+ adcl 356(%esp), %ecx
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ adcl 364(%esp), %ebp
+ movl %ebp, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 184(%esp), %edi # 4-byte Reload
+ adcl 384(%esp), %edi
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 280(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 280(%esp), %ebp
+ movl 176(%esp), %ecx # 4-byte Reload
+ adcl 284(%esp), %ecx
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ adcl 308(%esp), %edi
+ movl %edi, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %ebp
+ movl %eax, (%esp)
+ leal 208(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 208(%esp), %ebp
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %edx # 4-byte Reload
+ adcl 216(%esp), %edx
+ movl %edx, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %ecx # 4-byte Reload
+ adcl 220(%esp), %ecx
+ movl %ecx, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 128(%esp), %ebx # 4-byte Reload
+ adcl 264(%esp), %ebx
+ movl %ebx, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl 276(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 100(%esp) # 4-byte Spill
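+# [annotation, not in the generated source] What follows appears to be the tail
+# of the Montgomery reduction: the modulus is subtracted from the accumulated
+# 17-limb result and either the reduced or the unreduced value is selected limb
+# by limb before being written to the destination.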
+ movl 200(%esp), %edi # 4-byte Reload
+ subl 16(%esp), %edi # 4-byte Folded Reload
+ sbbl 4(%esp), %edx # 4-byte Folded Reload
+ sbbl 8(%esp), %ecx # 4-byte Folded Reload
+ movl 196(%esp), %eax # 4-byte Reload
+ sbbl 12(%esp), %eax # 4-byte Folded Reload
+ sbbl 20(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 184(%esp), %esi # 4-byte Reload
+ sbbl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 188(%esp), %esi # 4-byte Reload
+ sbbl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 168(%esp), %esi # 4-byte Reload
+ sbbl 32(%esp), %esi # 4-byte Folded Reload
+ movl 172(%esp), %ebp # 4-byte Reload
+ sbbl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 104(%esp) # 4-byte Spill
+ movl 156(%esp), %ebp # 4-byte Reload
+ sbbl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 108(%esp) # 4-byte Spill
+ movl 160(%esp), %ebp # 4-byte Reload
+ sbbl 44(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 152(%esp), %ebp # 4-byte Reload
+ sbbl 48(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 144(%esp), %ebp # 4-byte Reload
+ sbbl 52(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 136(%esp) # 4-byte Spill
+ sbbl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 140(%esp) # 4-byte Spill
+ movl 132(%esp), %ebx # 4-byte Reload
+ sbbl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 148(%esp) # 4-byte Spill
+ movl 124(%esp), %ebx # 4-byte Reload
+ sbbl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 164(%esp) # 4-byte Spill
+ movl 116(%esp), %ebx # 4-byte Reload
+ sbbl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 176(%esp) # 4-byte Spill
+ movl 100(%esp), %ebx # 4-byte Reload
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB260_2
+# BB#1:
+ movl %esi, 168(%esp) # 4-byte Spill
+.LBB260_2:
+ testb %bl, %bl
+ movl 200(%esp), %esi # 4-byte Reload
+ jne .LBB260_4
+# BB#3:
+ movl %edi, %esi
+.LBB260_4:
+ movl 1456(%esp), %edi
+ movl %esi, (%edi)
+ movl 156(%esp), %esi # 4-byte Reload
+ movl 204(%esp), %ebx # 4-byte Reload
+ jne .LBB260_6
+# BB#5:
+ movl %edx, %ebx
+.LBB260_6:
+ movl %ebx, 4(%edi)
+ movl 144(%esp), %ebx # 4-byte Reload
+ movl 192(%esp), %edx # 4-byte Reload
+ jne .LBB260_8
+# BB#7:
+ movl %ecx, %edx
+.LBB260_8:
+ movl %edx, 8(%edi)
+ movl 132(%esp), %edx # 4-byte Reload
+ movl 196(%esp), %ecx # 4-byte Reload
+ jne .LBB260_10
+# BB#9:
+ movl %eax, %ecx
+.LBB260_10:
+ movl %ecx, 12(%edi)
+ movl 124(%esp), %ecx # 4-byte Reload
+ movl 180(%esp), %eax # 4-byte Reload
+ jne .LBB260_12
+# BB#11:
+ movl 88(%esp), %eax # 4-byte Reload
+.LBB260_12:
+ movl %eax, 16(%edi)
+ movl 188(%esp), %eax # 4-byte Reload
+ movl 184(%esp), %ebp # 4-byte Reload
+ jne .LBB260_14
+# BB#13:
+ movl 92(%esp), %ebp # 4-byte Reload
+.LBB260_14:
+ movl %ebp, 20(%edi)
+ movl 152(%esp), %ebp # 4-byte Reload
+ jne .LBB260_16
+# BB#15:
+ movl 96(%esp), %eax # 4-byte Reload
+.LBB260_16:
+ movl %eax, 24(%edi)
+ movl 168(%esp), %eax # 4-byte Reload
+ movl %eax, 28(%edi)
+ jne .LBB260_18
+# BB#17:
+ movl 104(%esp), %eax # 4-byte Reload
+ movl %eax, 172(%esp) # 4-byte Spill
+.LBB260_18:
+ movl 172(%esp), %eax # 4-byte Reload
+ movl %eax, 32(%edi)
+ jne .LBB260_20
+# BB#19:
+ movl 108(%esp), %esi # 4-byte Reload
+.LBB260_20:
+ movl %esi, 36(%edi)
+ jne .LBB260_22
+# BB#21:
+ movl 112(%esp), %eax # 4-byte Reload
+ movl %eax, 160(%esp) # 4-byte Spill
+.LBB260_22:
+ movl 160(%esp), %esi # 4-byte Reload
+ movl %esi, 40(%edi)
+ movl 128(%esp), %eax # 4-byte Reload
+ jne .LBB260_24
+# BB#23:
+ movl 120(%esp), %ebp # 4-byte Reload
+.LBB260_24:
+ movl %ebp, 44(%edi)
+ jne .LBB260_26
+# BB#25:
+ movl 136(%esp), %ebx # 4-byte Reload
+.LBB260_26:
+ movl %ebx, 48(%edi)
+ jne .LBB260_28
+# BB#27:
+ movl 140(%esp), %eax # 4-byte Reload
+.LBB260_28:
+ movl %eax, 52(%edi)
+ jne .LBB260_30
+# BB#29:
+ movl 148(%esp), %edx # 4-byte Reload
+.LBB260_30:
+ movl %edx, 56(%edi)
+ movl 116(%esp), %eax # 4-byte Reload
+ jne .LBB260_32
+# BB#31:
+ movl 164(%esp), %ecx # 4-byte Reload
+.LBB260_32:
+ movl %ecx, 60(%edi)
+ jne .LBB260_34
+# BB#33:
+ movl 176(%esp), %eax # 4-byte Reload
+.LBB260_34:
+ movl %eax, 64(%edi)
+ addl $1436, %esp # imm = 0x59C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end260:
+ .size mcl_fp_montRed17Lbmi2, .Lfunc_end260-mcl_fp_montRed17Lbmi2
+
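+# [annotation, not in the generated source] mcl_fp_addPre17Lbmi2 appears to add
+# two 17-limb (544-bit) operands with an add/adc chain, store the sum without
+# any modular reduction, and return the final carry in %eax.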
+ .globl mcl_fp_addPre17Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre17Lbmi2,@function
+mcl_fp_addPre17Lbmi2: # @mcl_fp_addPre17Lbmi2
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 20(%esp), %ecx
+ addl (%ecx), %edx
+ adcl 4(%ecx), %esi
+ movl 8(%eax), %ebx
+ adcl 8(%ecx), %ebx
+ movl 16(%esp), %edi
+ movl %edx, (%edi)
+ movl 12(%ecx), %edx
+ movl %esi, 4(%edi)
+ movl 16(%ecx), %esi
+ adcl 12(%eax), %edx
+ adcl 16(%eax), %esi
+ movl %ebx, 8(%edi)
+ movl 20(%eax), %ebx
+ movl %edx, 12(%edi)
+ movl 20(%ecx), %edx
+ adcl %ebx, %edx
+ movl 24(%eax), %ebx
+ movl %esi, 16(%edi)
+ movl 24(%ecx), %esi
+ adcl %ebx, %esi
+ movl 28(%eax), %ebx
+ movl %edx, 20(%edi)
+ movl 28(%ecx), %edx
+ adcl %ebx, %edx
+ movl 32(%eax), %ebx
+ movl %esi, 24(%edi)
+ movl 32(%ecx), %esi
+ adcl %ebx, %esi
+ movl 36(%eax), %ebx
+ movl %edx, 28(%edi)
+ movl 36(%ecx), %edx
+ adcl %ebx, %edx
+ movl 40(%eax), %ebx
+ movl %esi, 32(%edi)
+ movl 40(%ecx), %esi
+ adcl %ebx, %esi
+ movl 44(%eax), %ebx
+ movl %edx, 36(%edi)
+ movl 44(%ecx), %edx
+ adcl %ebx, %edx
+ movl 48(%eax), %ebx
+ movl %esi, 40(%edi)
+ movl 48(%ecx), %esi
+ adcl %ebx, %esi
+ movl 52(%eax), %ebx
+ movl %edx, 44(%edi)
+ movl 52(%ecx), %edx
+ adcl %ebx, %edx
+ movl 56(%eax), %ebx
+ movl %esi, 48(%edi)
+ movl 56(%ecx), %esi
+ adcl %ebx, %esi
+ movl 60(%eax), %ebx
+ movl %edx, 52(%edi)
+ movl 60(%ecx), %edx
+ adcl %ebx, %edx
+ movl %esi, 56(%edi)
+ movl %edx, 60(%edi)
+ movl 64(%eax), %eax
+ movl 64(%ecx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 64(%edi)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end261:
+ .size mcl_fp_addPre17Lbmi2, .Lfunc_end261-mcl_fp_addPre17Lbmi2
+
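+# [annotation, not in the generated source] mcl_fp_subPre17Lbmi2 appears to
+# subtract one 17-limb operand from another with a sub/sbb chain, store the
+# difference without reduction, and return the final borrow in %eax.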
+ .globl mcl_fp_subPre17Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre17Lbmi2,@function
+mcl_fp_subPre17Lbmi2: # @mcl_fp_subPre17Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %eax, %eax
+ movl 28(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %ebp
+ sbbl 8(%edx), %ebp
+ movl 20(%esp), %ebx
+ movl %esi, (%ebx)
+ movl 12(%ecx), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ebx)
+ movl 16(%ecx), %edi
+ sbbl 16(%edx), %edi
+ movl %ebp, 8(%ebx)
+ movl 20(%edx), %ebp
+ movl %esi, 12(%ebx)
+ movl 20(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 24(%edx), %ebp
+ movl %edi, 16(%ebx)
+ movl 24(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 28(%edx), %ebp
+ movl %esi, 20(%ebx)
+ movl 28(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 32(%edx), %ebp
+ movl %edi, 24(%ebx)
+ movl 32(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 36(%edx), %ebp
+ movl %esi, 28(%ebx)
+ movl 36(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 40(%edx), %ebp
+ movl %edi, 32(%ebx)
+ movl 40(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 44(%edx), %ebp
+ movl %esi, 36(%ebx)
+ movl 44(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 48(%edx), %ebp
+ movl %edi, 40(%ebx)
+ movl 48(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 52(%edx), %ebp
+ movl %esi, 44(%ebx)
+ movl 52(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 56(%edx), %ebp
+ movl %edi, 48(%ebx)
+ movl 56(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 60(%edx), %ebp
+ movl %esi, 52(%ebx)
+ movl 60(%ecx), %esi
+ sbbl %ebp, %esi
+ movl %edi, 56(%ebx)
+ movl %esi, 60(%ebx)
+ movl 64(%edx), %edx
+ movl 64(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 64(%ebx)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end262:
+ .size mcl_fp_subPre17Lbmi2, .Lfunc_end262-mcl_fp_subPre17Lbmi2
+
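+# [annotation, not in the generated source] mcl_fp_shr1_17Lbmi2 appears to shift
+# a 17-limb value right by one bit, funnel-shifting each limb with shrd and
+# plain-shifting the most significant limb.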
+ .globl mcl_fp_shr1_17Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_17Lbmi2,@function
+mcl_fp_shr1_17Lbmi2: # @mcl_fp_shr1_17Lbmi2
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl 8(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 8(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 4(%ecx)
+ movl 12(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 8(%ecx)
+ movl 16(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 12(%ecx)
+ movl 20(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 16(%ecx)
+ movl 24(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 20(%ecx)
+ movl 28(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 24(%ecx)
+ movl 32(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 28(%ecx)
+ movl 36(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 32(%ecx)
+ movl 40(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 36(%ecx)
+ movl 44(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 40(%ecx)
+ movl 48(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 44(%ecx)
+ movl 52(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 48(%ecx)
+ movl 56(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 52(%ecx)
+ movl 60(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 56(%ecx)
+ movl 64(%eax), %eax
+ shrdl $1, %eax, %esi
+ movl %esi, 60(%ecx)
+ shrl %eax
+ movl %eax, 64(%ecx)
+ popl %esi
+ retl
+.Lfunc_end263:
+ .size mcl_fp_shr1_17Lbmi2, .Lfunc_end263-mcl_fp_shr1_17Lbmi2
+
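+# [annotation, not in the generated source] mcl_fp_add17Lbmi2 appears to perform
+# a modular addition: the 17-limb sum is stored, the modulus is subtracted from
+# a copy, and the reduced copy overwrites the result when no borrow occurs.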
+ .globl mcl_fp_add17Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add17Lbmi2,@function
+mcl_fp_add17Lbmi2: # @mcl_fp_add17Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $60, %esp
+ movl 88(%esp), %esi
+ movl (%esi), %ecx
+ movl 4(%esi), %eax
+ movl 84(%esp), %edx
+ addl (%edx), %ecx
+ movl %ecx, %ebx
+ adcl 4(%edx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ adcl 8(%edx), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 12(%edx), %ecx
+ movl 16(%edx), %edi
+ adcl 12(%esi), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 16(%esi), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl 20(%edx), %eax
+ adcl 20(%esi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 24(%edx), %eax
+ adcl 24(%esi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 28(%edx), %eax
+ adcl 28(%esi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 32(%edx), %eax
+ adcl 32(%esi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 36(%edx), %eax
+ adcl 36(%esi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 40(%edx), %eax
+ adcl 40(%esi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%edx), %eax
+ adcl 44(%esi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 48(%edx), %eax
+ adcl 48(%esi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 52(%edx), %eax
+ adcl 52(%esi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 56(%edx), %eax
+ adcl 56(%esi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 60(%edx), %ebp
+ adcl 60(%esi), %ebp
+ movl 64(%edx), %edx
+ adcl 64(%esi), %edx
+ movl 80(%esp), %esi
+ movl %ebx, (%esi)
+ movl %ebx, %eax
+ movl 8(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%esi)
+ movl 56(%esp), %ebx # 4-byte Reload
+ movl %ebx, 8(%esi)
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl %ebx, 12(%esi)
+ movl %edi, 16(%esi)
+ movl 48(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%esi)
+ movl 44(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%esi)
+ movl 40(%esp), %edi # 4-byte Reload
+ movl %edi, 28(%esi)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 32(%esi)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 36(%esi)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 40(%esi)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 44(%esi)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 48(%esi)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 52(%esi)
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, 56(%esi)
+ movl %ebp, 60(%esi)
+ movl %edx, 64(%esi)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 92(%esp), %edi
+ subl (%edi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ sbbl 4(%edi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ sbbl 8(%edi), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ sbbl 12(%edi), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 4(%esp), %eax # 4-byte Reload
+ sbbl 16(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ sbbl 20(%edi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ sbbl 24(%edi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ sbbl 28(%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ sbbl 32(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ sbbl 36(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ sbbl 44(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ sbbl 48(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ sbbl 52(%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ sbbl 56(%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ sbbl 60(%edi), %ebp
+ sbbl 64(%edi), %edx
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB264_2
+# BB#1: # %nocarry
+ movl (%esp), %edi # 4-byte Reload
+ movl %edi, (%esi)
+ movl 8(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%esi)
+ movl 56(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%esi)
+ movl 52(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%esi)
+ movl 4(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%esi)
+ movl 48(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%esi)
+ movl 44(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%esi)
+ movl 40(%esp), %edi # 4-byte Reload
+ movl %edi, 28(%esi)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 32(%esi)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 36(%esi)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 40(%esi)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 44(%esi)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 48(%esi)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%esi)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 56(%esi)
+ movl %ebp, 60(%esi)
+ movl %edx, 64(%esi)
+.LBB264_2: # %carry
+ addl $60, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end264:
+ .size mcl_fp_add17Lbmi2, .Lfunc_end264-mcl_fp_add17Lbmi2
+
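+# [annotation, not in the generated source] mcl_fp_addNF17Lbmi2 appears to be an
+# addition variant that subtracts the modulus unconditionally and then selects,
+# per limb, either the raw sum or the subtracted value based on the sign of the
+# subtraction result rather than on a carry flag.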
+ .globl mcl_fp_addNF17Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF17Lbmi2,@function
+mcl_fp_addNF17Lbmi2: # @mcl_fp_addNF17Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $132, %esp
+ movl 160(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ movl 156(%esp), %esi
+ addl (%esi), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl 4(%esi), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 64(%eax), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 60(%eax), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 56(%eax), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 52(%eax), %ebp
+ movl 48(%eax), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 44(%eax), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 40(%eax), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 36(%eax), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 32(%eax), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 28(%eax), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 24(%eax), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 20(%eax), %ebx
+ movl 16(%eax), %edi
+ movl 12(%eax), %edx
+ movl 8(%eax), %ecx
+ adcl 8(%esi), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 12(%esi), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 16(%esi), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ adcl 20(%esi), %ebx
+ movl %ebx, 76(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 24(%esi), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 28(%esi), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 32(%esi), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 36(%esi), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 40(%esi), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 44(%esi), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 48(%esi), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 52(%esi), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 56(%esi), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl 60(%esi), %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 64(%esi), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 164(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ subl (%esi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ sbbl 4(%esi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ sbbl 8(%esi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 12(%esi), %edx
+ sbbl 16(%esi), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ sbbl 20(%esi), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 120(%esp), %ebx # 4-byte Reload
+ sbbl 24(%esi), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ sbbl 28(%esi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ sbbl 32(%esi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ sbbl 36(%esi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ sbbl 40(%esi), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ sbbl 44(%esi), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ sbbl 48(%esi), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ movl %eax, %ecx
+ sbbl 52(%esi), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ movl %eax, %ecx
+ movl %eax, %ebp
+ sbbl 56(%esi), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ movl %eax, %ecx
+ sbbl 60(%esi), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ movl %eax, %ebx
+ sbbl 64(%esi), %ebx
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl %ebx, %esi
+ sarl $31, %esi
+ testl %esi, %esi
+ movl 84(%esp), %esi # 4-byte Reload
+ js .LBB265_2
+# BB#1:
+ movl (%esp), %esi # 4-byte Reload
+.LBB265_2:
+ movl 152(%esp), %ebx
+ movl %esi, (%ebx)
+ movl 88(%esp), %eax # 4-byte Reload
+ js .LBB265_4
+# BB#3:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB265_4:
+ movl %eax, 4(%ebx)
+ movl 108(%esp), %eax # 4-byte Reload
+ movl 76(%esp), %esi # 4-byte Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ js .LBB265_6
+# BB#5:
+ movl 8(%esp), %edi # 4-byte Reload
+.LBB265_6:
+ movl %edi, 8(%ebx)
+ movl 116(%esp), %edi # 4-byte Reload
+ movl 68(%esp), %ecx # 4-byte Reload
+ js .LBB265_8
+# BB#7:
+ movl %edx, %ecx
+.LBB265_8:
+ movl %ecx, 12(%ebx)
+ movl 104(%esp), %ecx # 4-byte Reload
+ movl 72(%esp), %edx # 4-byte Reload
+ js .LBB265_10
+# BB#9:
+ movl 12(%esp), %edx # 4-byte Reload
+.LBB265_10:
+ movl %edx, 16(%ebx)
+ movl %ebp, %edx
+ js .LBB265_12
+# BB#11:
+ movl 16(%esp), %esi # 4-byte Reload
+.LBB265_12:
+ movl %esi, 20(%ebx)
+ movl 112(%esp), %ebp # 4-byte Reload
+ js .LBB265_14
+# BB#13:
+ movl 20(%esp), %esi # 4-byte Reload
+ movl %esi, 120(%esp) # 4-byte Spill
+.LBB265_14:
+ movl 120(%esp), %esi # 4-byte Reload
+ movl %esi, 24(%ebx)
+ js .LBB265_16
+# BB#15:
+ movl 24(%esp), %ecx # 4-byte Reload
+.LBB265_16:
+ movl %ecx, 28(%ebx)
+ js .LBB265_18
+# BB#17:
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 124(%esp) # 4-byte Spill
+.LBB265_18:
+ movl 124(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%ebx)
+ js .LBB265_20
+# BB#19:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB265_20:
+ movl %eax, 36(%ebx)
+ movl 100(%esp), %ecx # 4-byte Reload
+ js .LBB265_22
+# BB#21:
+ movl 36(%esp), %eax # 4-byte Reload
+ movl %eax, 128(%esp) # 4-byte Spill
+.LBB265_22:
+ movl 128(%esp), %eax # 4-byte Reload
+ movl %eax, 40(%ebx)
+ js .LBB265_24
+# BB#23:
+ movl 40(%esp), %ebp # 4-byte Reload
+.LBB265_24:
+ movl %ebp, 44(%ebx)
+ js .LBB265_26
+# BB#25:
+ movl 44(%esp), %edi # 4-byte Reload
+.LBB265_26:
+ movl %edi, 48(%ebx)
+ movl 80(%esp), %eax # 4-byte Reload
+ js .LBB265_28
+# BB#27:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB265_28:
+ movl %eax, 52(%ebx)
+ js .LBB265_30
+# BB#29:
+ movl 52(%esp), %edx # 4-byte Reload
+.LBB265_30:
+ movl %edx, 56(%ebx)
+ movl 96(%esp), %eax # 4-byte Reload
+ js .LBB265_32
+# BB#31:
+ movl 56(%esp), %eax # 4-byte Reload
+.LBB265_32:
+ movl %eax, 60(%ebx)
+ js .LBB265_34
+# BB#33:
+ movl 60(%esp), %ecx # 4-byte Reload
+.LBB265_34:
+ movl %ecx, 64(%ebx)
+ addl $132, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end265:
+ .size mcl_fp_addNF17Lbmi2, .Lfunc_end265-mcl_fp_addNF17Lbmi2
+
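+# [annotation, not in the generated source] mcl_fp_sub17Lbmi2 appears to perform
+# a modular subtraction: the 17-limb difference is stored and, when the
+# subtraction borrows, the modulus is added back in the %carry block.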
+ .globl mcl_fp_sub17Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub17Lbmi2,@function
+mcl_fp_sub17Lbmi2: # @mcl_fp_sub17Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $64, %esp
+ movl 88(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+ xorl %ebx, %ebx
+ movl 92(%esp), %edi
+ subl (%edi), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ sbbl 4(%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ sbbl 12(%edi), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ sbbl 16(%edi), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ sbbl 20(%edi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ sbbl 24(%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esi), %eax
+ sbbl 28(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esi), %eax
+ sbbl 32(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esi), %eax
+ sbbl 36(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 40(%esi), %eax
+ sbbl 40(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 44(%esi), %eax
+ sbbl 44(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 48(%esi), %edx
+ sbbl 48(%edi), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 52(%esi), %ecx
+ sbbl 52(%edi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 56(%esi), %eax
+ sbbl 56(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 60(%esi), %ebp
+ sbbl 60(%edi), %ebp
+ movl 64(%esi), %esi
+ sbbl 64(%edi), %esi
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 84(%esp), %ebx
+ movl 56(%esp), %edi # 4-byte Reload
+ movl %edi, (%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 48(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 60(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl 52(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%ebx)
+ movl 44(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%ebx)
+ movl 40(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%ebx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 28(%ebx)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 32(%ebx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 36(%ebx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 40(%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 44(%ebx)
+ movl %edx, 48(%ebx)
+ movl %ecx, 52(%ebx)
+ movl %eax, 56(%ebx)
+ movl %ebp, 60(%ebx)
+ movl %esi, 64(%ebx)
+ je .LBB266_2
+# BB#1: # %carry
+ movl %esi, (%esp) # 4-byte Spill
+ movl 96(%esp), %esi
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl (%esi), %ecx
+ movl %ecx, (%ebx)
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 8(%esi), %edi
+ movl 12(%esi), %eax
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %edi, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl 24(%esi), %ecx
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl 28(%esi), %eax
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%ebx)
+ movl 32(%esi), %ecx
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+ movl 36(%esi), %eax
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%ebx)
+ movl 40(%esi), %ecx
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%ebx)
+ movl 44(%esi), %eax
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 40(%ebx)
+ movl 48(%esi), %ecx
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 44(%ebx)
+ movl 52(%esi), %eax
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 48(%ebx)
+ movl 56(%esi), %ecx
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 52(%ebx)
+ movl %ecx, 56(%ebx)
+ movl 60(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 60(%ebx)
+ movl 64(%esi), %eax
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%ebx)
+.LBB266_2: # %nocarry
+ addl $64, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end266:
+ .size mcl_fp_sub17Lbmi2, .Lfunc_end266-mcl_fp_sub17Lbmi2
+
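+# [annotation, not in the generated source] mcl_fp_subNF17Lbmi2 appears to be a
+# branch-free subtraction variant: the borrow is spread into a mask (sar/rorx),
+# the modulus limbs are ANDed with that mask, and the masked modulus is added
+# back onto the difference.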
+ .globl mcl_fp_subNF17Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF17Lbmi2,@function
+mcl_fp_subNF17Lbmi2: # @mcl_fp_subNF17Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $116, %esp
+ movl 140(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edx
+ movl 144(%esp), %edi
+ subl (%edi), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ sbbl 4(%edi), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 64(%ecx), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 60(%ecx), %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl 56(%ecx), %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 52(%ecx), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 48(%ecx), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 44(%ecx), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 40(%ecx), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 36(%ecx), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 32(%ecx), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 28(%ecx), %ebp
+ movl 24(%ecx), %ebx
+ movl 20(%ecx), %esi
+ movl 16(%ecx), %edx
+ movl 12(%ecx), %eax
+ movl 8(%ecx), %ecx
+ sbbl 8(%edi), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ sbbl 12(%edi), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ sbbl 16(%edi), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ sbbl 20(%edi), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ sbbl 24(%edi), %ebx
+ movl %ebx, 64(%esp) # 4-byte Spill
+ sbbl 28(%edi), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ sbbl 32(%edi), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ sbbl 36(%edi), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ sbbl 40(%edi), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ sbbl 44(%edi), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ sbbl 48(%edi), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ sbbl 52(%edi), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ sbbl 56(%edi), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ sbbl 60(%edi), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ sbbl 64(%edi), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ sarl $31, %eax
+ movl %eax, %edx
+ shldl $1, %ecx, %edx
+ movl 148(%esp), %ebx
+ movl 28(%ebx), %ecx
+ andl %edx, %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 12(%ebx), %ecx
+ andl %edx, %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 4(%ebx), %ecx
+ andl %edx, %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ andl (%ebx), %edx
+ movl 64(%ebx), %edi
+ movl %eax, %ecx
+ andl %ecx, %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ rorxl $31, %ecx, %eax
+ andl 60(%ebx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 56(%ebx), %ecx
+ andl %eax, %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 52(%ebx), %ecx
+ andl %eax, %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 48(%ebx), %ecx
+ andl %eax, %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 44(%ebx), %ecx
+ andl %eax, %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 40(%ebx), %ecx
+ andl %eax, %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 36(%ebx), %ecx
+ andl %eax, %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 32(%ebx), %ecx
+ andl %eax, %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 24(%ebx), %ebp
+ andl %eax, %ebp
+ movl 20(%ebx), %edi
+ andl %eax, %edi
+ movl 16(%ebx), %esi
+ andl %eax, %esi
+ andl 8(%ebx), %eax
+ addl 72(%esp), %edx # 4-byte Folded Reload
+ movl 8(%esp), %ecx # 4-byte Reload
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl 136(%esp), %ebx
+ movl %edx, (%ebx)
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 4(%ebx)
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 8(%ebx)
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 12(%ebx)
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 16(%ebx)
+ adcl 64(%esp), %ebp # 4-byte Folded Reload
+ movl %edi, 20(%ebx)
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %ebp, 24(%ebx)
+ movl (%esp), %ecx # 4-byte Reload
+ adcl 80(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%ebx)
+ movl 12(%esp), %ecx # 4-byte Reload
+ adcl 112(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%ebx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 40(%ebx)
+ movl 24(%esp), %ecx # 4-byte Reload
+ adcl 96(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 44(%ebx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 48(%ebx)
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 104(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 52(%ebx)
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 56(%ebx)
+ movl %eax, 60(%ebx)
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%ebx)
+ addl $116, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end267:
+ .size mcl_fp_subNF17Lbmi2, .Lfunc_end267-mcl_fp_subNF17Lbmi2
+
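+# [annotation, not in the generated source] mcl_fpDbl_add17Lbmi2 appears to add
+# two double-width (34-limb) operands, store the low 17 limbs as they are, and
+# reduce the high 17 limbs by a conditional subtraction of the modulus.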
+ .globl mcl_fpDbl_add17Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add17Lbmi2,@function
+mcl_fpDbl_add17Lbmi2: # @mcl_fpDbl_add17Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $128, %esp
+ movl 156(%esp), %ecx
+ movl 152(%esp), %edx
+ movl 12(%edx), %edi
+ movl 16(%edx), %esi
+ movl 8(%ecx), %ebx
+ movl (%ecx), %ebp
+ addl (%edx), %ebp
+ movl 148(%esp), %eax
+ movl %ebp, (%eax)
+ movl 4(%ecx), %ebp
+ adcl 4(%edx), %ebp
+ adcl 8(%edx), %ebx
+ adcl 12(%ecx), %edi
+ adcl 16(%ecx), %esi
+ movl %ebp, 4(%eax)
+ movl 76(%ecx), %ebp
+ movl %ebx, 8(%eax)
+ movl 20(%ecx), %ebx
+ movl %edi, 12(%eax)
+ movl 20(%edx), %edi
+ adcl %ebx, %edi
+ movl 24(%ecx), %ebx
+ movl %esi, 16(%eax)
+ movl 24(%edx), %esi
+ adcl %ebx, %esi
+ movl 28(%ecx), %ebx
+ movl %edi, 20(%eax)
+ movl 28(%edx), %edi
+ adcl %ebx, %edi
+ movl 32(%ecx), %ebx
+ movl %esi, 24(%eax)
+ movl 32(%edx), %esi
+ adcl %ebx, %esi
+ movl 36(%ecx), %ebx
+ movl %edi, 28(%eax)
+ movl 36(%edx), %edi
+ adcl %ebx, %edi
+ movl 40(%ecx), %ebx
+ movl %esi, 32(%eax)
+ movl 40(%edx), %esi
+ adcl %ebx, %esi
+ movl 44(%ecx), %ebx
+ movl %edi, 36(%eax)
+ movl 44(%edx), %edi
+ adcl %ebx, %edi
+ movl 48(%ecx), %ebx
+ movl %esi, 40(%eax)
+ movl 48(%edx), %esi
+ adcl %ebx, %esi
+ movl 52(%ecx), %ebx
+ movl %edi, 44(%eax)
+ movl 52(%edx), %edi
+ adcl %ebx, %edi
+ movl 56(%ecx), %ebx
+ movl %esi, 48(%eax)
+ movl 56(%edx), %esi
+ adcl %ebx, %esi
+ movl 60(%ecx), %ebx
+ movl %edi, 52(%eax)
+ movl 60(%edx), %edi
+ adcl %ebx, %edi
+ movl 64(%ecx), %ebx
+ movl %esi, 56(%eax)
+ movl 64(%edx), %esi
+ adcl %ebx, %esi
+ movl 68(%ecx), %ebx
+ movl %edi, 60(%eax)
+ movl 68(%edx), %edi
+ adcl %ebx, %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 72(%ecx), %edi
+ movl %esi, 64(%eax)
+ movl 72(%edx), %eax
+ adcl %edi, %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 76(%edx), %eax
+ adcl %ebp, %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 80(%ecx), %esi
+ movl 80(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 84(%ecx), %esi
+ movl 84(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 88(%ecx), %esi
+ movl 88(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%ecx), %esi
+ movl 92(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 96(%ecx), %esi
+ movl 96(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 100(%ecx), %esi
+ movl 100(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 104(%ecx), %esi
+ movl 104(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 108(%ecx), %esi
+ movl 108(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 112(%ecx), %esi
+ movl 112(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 116(%ecx), %esi
+ movl 116(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 120(%ecx), %edi
+ movl 120(%edx), %esi
+ adcl %edi, %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 124(%ecx), %ebx
+ movl 124(%edx), %edi
+ adcl %ebx, %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 128(%ecx), %ebx
+ movl 128(%edx), %ebp
+ adcl %ebx, %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 132(%ecx), %ecx
+ movl 132(%edx), %edx
+ adcl %ecx, %edx
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl 160(%esp), %ebx
+ movl 92(%esp), %eax # 4-byte Reload
+ subl (%ebx), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ sbbl 4(%ebx), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ sbbl 8(%ebx), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ sbbl 12(%ebx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ sbbl 16(%ebx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ sbbl 20(%ebx), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ sbbl 24(%ebx), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ sbbl 28(%ebx), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ sbbl 32(%ebx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ sbbl 36(%ebx), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ sbbl 40(%ebx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ sbbl 44(%ebx), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ sbbl 48(%ebx), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ sbbl 52(%ebx), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ sbbl 56(%ebx), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ sbbl 60(%ebx), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ sbbl 64(%ebx), %ebp
+ sbbl $0, %ecx
+ andl $1, %ecx
+ jne .LBB268_2
+# BB#1:
+ movl %ebp, %edx
+.LBB268_2:
+ testb %cl, %cl
+ movl 92(%esp), %eax # 4-byte Reload
+ movl 88(%esp), %esi # 4-byte Reload
+ movl 84(%esp), %edi # 4-byte Reload
+ movl 80(%esp), %ebx # 4-byte Reload
+ movl 76(%esp), %ebp # 4-byte Reload
+ jne .LBB268_4
+# BB#3:
+ movl (%esp), %esi # 4-byte Reload
+ movl 4(%esp), %edi # 4-byte Reload
+ movl 8(%esp), %ebx # 4-byte Reload
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB268_4:
+ movl 148(%esp), %ecx
+ movl %eax, 68(%ecx)
+ movl %ecx, %eax
+ movl 96(%esp), %ecx # 4-byte Reload
+ movl %ecx, 72(%eax)
+ movl 100(%esp), %ecx # 4-byte Reload
+ movl %ecx, 76(%eax)
+ movl 104(%esp), %ecx # 4-byte Reload
+ movl %ecx, 80(%eax)
+ movl 108(%esp), %ecx # 4-byte Reload
+ movl %ecx, 84(%eax)
+ movl 112(%esp), %ecx # 4-byte Reload
+ movl %ecx, 88(%eax)
+ movl 116(%esp), %ecx # 4-byte Reload
+ movl %ecx, 92(%eax)
+ movl 120(%esp), %ecx # 4-byte Reload
+ movl %ecx, 96(%eax)
+ movl 124(%esp), %ecx # 4-byte Reload
+ movl %ecx, 100(%eax)
+ movl %ebp, 104(%eax)
+ movl %ebx, 108(%eax)
+ movl %edi, 112(%eax)
+ movl %esi, 116(%eax)
+ movl 72(%esp), %ecx # 4-byte Reload
+ movl 64(%esp), %esi # 4-byte Reload
+ jne .LBB268_6
+# BB#5:
+ movl 52(%esp), %esi # 4-byte Reload
+.LBB268_6:
+ movl %esi, 120(%eax)
+ movl 68(%esp), %esi # 4-byte Reload
+ jne .LBB268_8
+# BB#7:
+ movl 56(%esp), %esi # 4-byte Reload
+.LBB268_8:
+ movl %esi, 124(%eax)
+ jne .LBB268_10
+# BB#9:
+ movl 60(%esp), %ecx # 4-byte Reload
+.LBB268_10:
+ movl %ecx, 128(%eax)
+ movl %edx, 132(%eax)
+ addl $128, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end268:
+ .size mcl_fpDbl_add17Lbmi2, .Lfunc_end268-mcl_fpDbl_add17Lbmi2
+
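+# [annotation, not in the generated source] mcl_fpDbl_sub17Lbmi2 appears to
+# subtract two double-width (34-limb) operands, store the low 17 limbs as they
+# are, and, when the subtraction borrows, add the modulus back into the high
+# 17 limbs.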
+ .globl mcl_fpDbl_sub17Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub17Lbmi2,@function
+mcl_fpDbl_sub17Lbmi2: # @mcl_fpDbl_sub17Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $116, %esp
+ movl 140(%esp), %edx
+ movl (%edx), %eax
+ movl 4(%edx), %edi
+ movl 144(%esp), %esi
+ subl (%esi), %eax
+ sbbl 4(%esi), %edi
+ movl 8(%edx), %ebx
+ sbbl 8(%esi), %ebx
+ movl 136(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 12(%edx), %eax
+ sbbl 12(%esi), %eax
+ movl %edi, 4(%ecx)
+ movl 16(%edx), %edi
+ sbbl 16(%esi), %edi
+ movl %ebx, 8(%ecx)
+ movl 20(%esi), %ebx
+ movl %eax, 12(%ecx)
+ movl 20(%edx), %eax
+ sbbl %ebx, %eax
+ movl 24(%esi), %ebx
+ movl %edi, 16(%ecx)
+ movl 24(%edx), %edi
+ sbbl %ebx, %edi
+ movl 28(%esi), %ebx
+ movl %eax, 20(%ecx)
+ movl 28(%edx), %eax
+ sbbl %ebx, %eax
+ movl 32(%esi), %ebx
+ movl %edi, 24(%ecx)
+ movl 32(%edx), %edi
+ sbbl %ebx, %edi
+ movl 36(%esi), %ebx
+ movl %eax, 28(%ecx)
+ movl 36(%edx), %eax
+ sbbl %ebx, %eax
+ movl 40(%esi), %ebx
+ movl %edi, 32(%ecx)
+ movl 40(%edx), %edi
+ sbbl %ebx, %edi
+ movl 44(%esi), %ebx
+ movl %eax, 36(%ecx)
+ movl 44(%edx), %eax
+ sbbl %ebx, %eax
+ movl 48(%esi), %ebx
+ movl %edi, 40(%ecx)
+ movl 48(%edx), %edi
+ sbbl %ebx, %edi
+ movl 52(%esi), %ebx
+ movl %eax, 44(%ecx)
+ movl 52(%edx), %eax
+ sbbl %ebx, %eax
+ movl 56(%esi), %ebx
+ movl %edi, 48(%ecx)
+ movl 56(%edx), %edi
+ sbbl %ebx, %edi
+ movl 60(%esi), %ebx
+ movl %eax, 52(%ecx)
+ movl 60(%edx), %eax
+ sbbl %ebx, %eax
+ movl 64(%esi), %ebx
+ movl %edi, 56(%ecx)
+ movl 64(%edx), %edi
+ sbbl %ebx, %edi
+ movl 68(%esi), %ebx
+ movl %eax, 60(%ecx)
+ movl 68(%edx), %eax
+ sbbl %ebx, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 72(%esi), %eax
+ movl %edi, 64(%ecx)
+ movl 72(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 76(%esi), %eax
+ movl 76(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 80(%esi), %eax
+ movl 80(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 84(%esi), %eax
+ movl 84(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 88(%esi), %eax
+ movl 88(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 92(%esi), %eax
+ movl 92(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 96(%esi), %eax
+ movl 96(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 100(%esi), %eax
+ movl 100(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 104(%esi), %eax
+ movl 104(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 108(%esi), %eax
+ movl 108(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 112(%esi), %eax
+ movl 112(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 116(%esi), %eax
+ movl 116(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 120(%esi), %eax
+ movl 120(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 124(%esi), %eax
+ movl 124(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 128(%esi), %eax
+ movl 128(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ movl 132(%esi), %eax
+ movl 132(%edx), %edx
+ sbbl %eax, %edx
+ movl %edx, 112(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 148(%esp), %ebp
+ jne .LBB269_1
+# BB#2:
+ movl $0, 76(%esp) # 4-byte Folded Spill
+ jmp .LBB269_3
+.LBB269_1:
+ movl 64(%ebp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+.LBB269_3:
+ testb %al, %al
+ jne .LBB269_4
+# BB#5:
+ movl $0, 28(%esp) # 4-byte Folded Spill
+ movl $0, %esi
+ jmp .LBB269_6
+.LBB269_4:
+ movl (%ebp), %esi
+ movl 4(%ebp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+.LBB269_6:
+ jne .LBB269_7
+# BB#8:
+ movl $0, 40(%esp) # 4-byte Folded Spill
+ jmp .LBB269_9
+.LBB269_7:
+ movl 60(%ebp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+.LBB269_9:
+ jne .LBB269_10
+# BB#11:
+ movl $0, 36(%esp) # 4-byte Folded Spill
+ jmp .LBB269_12
+.LBB269_10:
+ movl 56(%ebp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+.LBB269_12:
+ jne .LBB269_13
+# BB#14:
+ movl $0, 32(%esp) # 4-byte Folded Spill
+ jmp .LBB269_15
+.LBB269_13:
+ movl 52(%ebp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+.LBB269_15:
+ jne .LBB269_16
+# BB#17:
+ movl $0, 24(%esp) # 4-byte Folded Spill
+ jmp .LBB269_18
+.LBB269_16:
+ movl 48(%ebp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+.LBB269_18:
+ jne .LBB269_19
+# BB#20:
+ movl $0, 20(%esp) # 4-byte Folded Spill
+ jmp .LBB269_21
+.LBB269_19:
+ movl 44(%ebp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+.LBB269_21:
+ jne .LBB269_22
+# BB#23:
+ movl $0, 16(%esp) # 4-byte Folded Spill
+ jmp .LBB269_24
+.LBB269_22:
+ movl 40(%ebp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+.LBB269_24:
+ jne .LBB269_25
+# BB#26:
+ movl $0, 12(%esp) # 4-byte Folded Spill
+ jmp .LBB269_27
+.LBB269_25:
+ movl 36(%ebp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+.LBB269_27:
+ jne .LBB269_28
+# BB#29:
+ movl $0, 8(%esp) # 4-byte Folded Spill
+ jmp .LBB269_30
+.LBB269_28:
+ movl 32(%ebp), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+.LBB269_30:
+ jne .LBB269_31
+# BB#32:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB269_33
+.LBB269_31:
+ movl 28(%ebp), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+.LBB269_33:
+ jne .LBB269_34
+# BB#35:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB269_36
+.LBB269_34:
+ movl 24(%ebp), %eax
+ movl %eax, (%esp) # 4-byte Spill
+.LBB269_36:
+ jne .LBB269_37
+# BB#38:
+ movl $0, %ebx
+ jmp .LBB269_39
+.LBB269_37:
+ movl 20(%ebp), %ebx
+.LBB269_39:
+ jne .LBB269_40
+# BB#41:
+ movl $0, %edi
+ jmp .LBB269_42
+.LBB269_40:
+ movl 16(%ebp), %edi
+.LBB269_42:
+ jne .LBB269_43
+# BB#44:
+ movl %ebp, %eax
+ movl $0, %ebp
+ jmp .LBB269_45
+.LBB269_43:
+ movl %ebp, %eax
+ movl 12(%eax), %ebp
+.LBB269_45:
+ jne .LBB269_46
+# BB#47:
+ xorl %eax, %eax
+ jmp .LBB269_48
+.LBB269_46:
+ movl 8(%eax), %eax
+.LBB269_48:
+ addl 52(%esp), %esi # 4-byte Folded Reload
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ movl %esi, 68(%ecx)
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 72(%ecx)
+ adcl 56(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 76(%ecx)
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %ebp, 80(%ecx)
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %edi, 84(%ecx)
+ movl (%esp), %edx # 4-byte Reload
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl %ebx, 88(%ecx)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 92(%ecx)
+ movl 8(%esp), %edx # 4-byte Reload
+ adcl 80(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 96(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 100(%ecx)
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 104(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 108(%ecx)
+ movl 24(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 112(%ecx)
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 116(%ecx)
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 120(%ecx)
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 124(%ecx)
+ movl %eax, 128(%ecx)
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 132(%ecx)
+ addl $116, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end269:
+ .size mcl_fpDbl_sub17Lbmi2, .Lfunc_end269-mcl_fpDbl_sub17Lbmi2
+
+
+ .section ".note.GNU-stack","",@progbits
diff --git a/vendor/github.com/tangerine-network/mcl/src/asm/x86.s b/vendor/github.com/tangerine-network/mcl/src/asm/x86.s
new file mode 100644
index 000000000..cdd988ad3
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/src/asm/x86.s
@@ -0,0 +1,73785 @@
+ .text
+ .file "<stdin>"
+ .globl makeNIST_P192L
+ .align 16, 0x90
+ .type makeNIST_P192L,@function
+makeNIST_P192L: # @makeNIST_P192L
+# BB#0:
+ movl 4(%esp), %eax
+ movl $-1, 20(%eax)
+ movl $-1, 16(%eax)
+ movl $-1, 12(%eax)
+ movl $-2, 8(%eax)
+ movl $-1, 4(%eax)
+ movl $-1, (%eax)
+ retl $4
+.Lfunc_end0:
+ .size makeNIST_P192L, .Lfunc_end0-makeNIST_P192L
+
+ .globl mcl_fpDbl_mod_NIST_P192L
+ .align 16, 0x90
+ .type mcl_fpDbl_mod_NIST_P192L,@function
+mcl_fpDbl_mod_NIST_P192L: # @mcl_fpDbl_mod_NIST_P192L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $32, %esp
+ movl 56(%esp), %eax
+ movl 32(%eax), %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ movl 24(%eax), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 28(%eax), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ xorl %edx, %edx
+ movl (%eax), %ebx
+ addl %ecx, %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 4(%eax), %ecx
+ adcl %edi, %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 8(%eax), %ebp
+ adcl %esi, %ebp
+ movl 36(%eax), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 12(%eax), %esi
+ adcl %ecx, %esi
+ movl 40(%eax), %ebx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 16(%eax), %ecx
+ adcl %ebx, %ecx
+ movl 44(%eax), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ movl 20(%eax), %eax
+ adcl %edi, %eax
+ adcl $0, %edx
+ sbbl %edi, %edi
+ andl $1, %edi
+ addl %ebx, 24(%esp) # 4-byte Folded Spill
+ movl (%esp), %ebx # 4-byte Reload
+ adcl %ebx, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ adcl $0, %edx
+ adcl $0, %edi
+ addl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl %ebx, %esi
+ adcl $0, %ecx
+ adcl $0, %eax
+ adcl $0, %edx
+ adcl $0, %edi
+ addl %edx, 24(%esp) # 4-byte Folded Spill
+ adcl %edi, 28(%esp) # 4-byte Folded Spill
+ adcl %ebp, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl %esi, %edi
+ adcl $0, %ecx
+ adcl $0, %eax
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 24(%esp), %esi # 4-byte Reload
+ addl $1, %esi
+ movl 28(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $1, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %edi, %edx
+ adcl $0, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ adcl $0, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, %edx
+ adcl $0, %edx
+ adcl $-1, %ebx
+ andl $1, %ebx
+ jne .LBB1_2
+# BB#1:
+ movl %edx, %eax
+.LBB1_2:
+ testb %bl, %bl
+ movl 24(%esp), %edx # 4-byte Reload
+ jne .LBB1_4
+# BB#3:
+ movl %esi, %edx
+.LBB1_4:
+ movl 52(%esp), %esi
+ movl %edx, (%esi)
+ movl 20(%esp), %edx # 4-byte Reload
+ movl 28(%esp), %ebx # 4-byte Reload
+ jne .LBB1_6
+# BB#5:
+ movl %ebp, %ebx
+.LBB1_6:
+ movl %ebx, 4(%esi)
+ jne .LBB1_8
+# BB#7:
+ movl 8(%esp), %edx # 4-byte Reload
+.LBB1_8:
+ movl %edx, 8(%esi)
+ jne .LBB1_10
+# BB#9:
+ movl 12(%esp), %edi # 4-byte Reload
+.LBB1_10:
+ movl %edi, 12(%esi)
+ jne .LBB1_12
+# BB#11:
+ movl 16(%esp), %ecx # 4-byte Reload
+.LBB1_12:
+ movl %ecx, 16(%esi)
+ movl %eax, 20(%esi)
+ addl $32, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end1:
+ .size mcl_fpDbl_mod_NIST_P192L, .Lfunc_end1-mcl_fpDbl_mod_NIST_P192L
+
+ .globl mcl_fp_sqr_NIST_P192L
+ .align 16, 0x90
+ .type mcl_fp_sqr_NIST_P192L,@function
+mcl_fp_sqr_NIST_P192L: # @mcl_fp_sqr_NIST_P192L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $92, %esp
+ calll .L2$pb
+.L2$pb:
+ popl %ebx
+.Ltmp0:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp0-.L2$pb), %ebx
+ movl 116(%esp), %eax
+ movl %eax, 4(%esp)
+ leal 44(%esp), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_sqrPre6L@PLT
+ xorl %edi, %edi
+ movl 76(%esp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 68(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 72(%esp), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %esi
+ addl %eax, %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax
+ adcl %edx, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp
+ adcl %ecx, %ebp
+ movl 80(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %esi
+ adcl %eax, %esi
+ movl 84(%esp), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx
+ adcl %ebx, %ecx
+ movl 88(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %edx
+ adcl %eax, %edx
+ adcl $0, %edi
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl %ebx, 36(%esp) # 4-byte Folded Spill
+ movl 12(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 40(%esp) # 4-byte Folded Spill
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %edi
+ adcl $0, %eax
+ addl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl %ebx, %esi
+ adcl $0, %ecx
+ adcl $0, %edx
+ adcl $0, %edi
+ adcl $0, %eax
+ addl %edi, 36(%esp) # 4-byte Folded Spill
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ adcl %ebp, %edi
+ adcl %esi, %eax
+ adcl $0, %ecx
+ adcl $0, %edx
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 36(%esp), %esi # 4-byte Reload
+ addl $1, %esi
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl %edi, %ebp
+ adcl $1, %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ adcl $0, %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ adcl $0, %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ adcl $0, %ebp
+ adcl $-1, %ebx
+ andl $1, %ebx
+ jne .LBB2_2
+# BB#1:
+ movl %ebp, %edx
+.LBB2_2:
+ testb %bl, %bl
+ movl 36(%esp), %ebx # 4-byte Reload
+ jne .LBB2_4
+# BB#3:
+ movl %esi, %ebx
+.LBB2_4:
+ movl 112(%esp), %esi
+ movl %ebx, (%esi)
+ movl 40(%esp), %ebx # 4-byte Reload
+ jne .LBB2_6
+# BB#5:
+ movl 20(%esp), %ebx # 4-byte Reload
+.LBB2_6:
+ movl %ebx, 4(%esi)
+ jne .LBB2_8
+# BB#7:
+ movl 24(%esp), %edi # 4-byte Reload
+.LBB2_8:
+ movl %edi, 8(%esi)
+ jne .LBB2_10
+# BB#9:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB2_10:
+ movl %eax, 12(%esi)
+ jne .LBB2_12
+# BB#11:
+ movl 32(%esp), %ecx # 4-byte Reload
+.LBB2_12:
+ movl %ecx, 16(%esi)
+ movl %edx, 20(%esi)
+ addl $92, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end2:
+ .size mcl_fp_sqr_NIST_P192L, .Lfunc_end2-mcl_fp_sqr_NIST_P192L
+
+ .globl mcl_fp_mulNIST_P192L
+ .align 16, 0x90
+ .type mcl_fp_mulNIST_P192L,@function
+mcl_fp_mulNIST_P192L: # @mcl_fp_mulNIST_P192L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $92, %esp
+ calll .L3$pb
+.L3$pb:
+ popl %ebx
+.Ltmp1:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp1-.L3$pb), %ebx
+ movl 120(%esp), %eax
+ movl %eax, 8(%esp)
+ movl 116(%esp), %eax
+ movl %eax, 4(%esp)
+ leal 44(%esp), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre6L@PLT
+ xorl %edi, %edi
+ movl 76(%esp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 68(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 72(%esp), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %esi
+ addl %eax, %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax
+ adcl %edx, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp
+ adcl %ecx, %ebp
+ movl 80(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %esi
+ adcl %eax, %esi
+ movl 84(%esp), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx
+ adcl %ebx, %ecx
+ movl 88(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %edx
+ adcl %eax, %edx
+ adcl $0, %edi
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl %ebx, 36(%esp) # 4-byte Folded Spill
+ movl 12(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 40(%esp) # 4-byte Folded Spill
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %edi
+ adcl $0, %eax
+ addl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl %ebx, %esi
+ adcl $0, %ecx
+ adcl $0, %edx
+ adcl $0, %edi
+ adcl $0, %eax
+ addl %edi, 36(%esp) # 4-byte Folded Spill
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ adcl %ebp, %edi
+ adcl %esi, %eax
+ adcl $0, %ecx
+ adcl $0, %edx
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 36(%esp), %esi # 4-byte Reload
+ addl $1, %esi
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl %edi, %ebp
+ adcl $1, %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ adcl $0, %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ adcl $0, %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ adcl $0, %ebp
+ adcl $-1, %ebx
+ andl $1, %ebx
+ jne .LBB3_2
+# BB#1:
+ movl %ebp, %edx
+.LBB3_2:
+ testb %bl, %bl
+ movl 36(%esp), %ebx # 4-byte Reload
+ jne .LBB3_4
+# BB#3:
+ movl %esi, %ebx
+.LBB3_4:
+ movl 112(%esp), %esi
+ movl %ebx, (%esi)
+ movl 40(%esp), %ebx # 4-byte Reload
+ jne .LBB3_6
+# BB#5:
+ movl 20(%esp), %ebx # 4-byte Reload
+.LBB3_6:
+ movl %ebx, 4(%esi)
+ jne .LBB3_8
+# BB#7:
+ movl 24(%esp), %edi # 4-byte Reload
+.LBB3_8:
+ movl %edi, 8(%esi)
+ jne .LBB3_10
+# BB#9:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB3_10:
+ movl %eax, 12(%esi)
+ jne .LBB3_12
+# BB#11:
+ movl 32(%esp), %ecx # 4-byte Reload
+.LBB3_12:
+ movl %ecx, 16(%esi)
+ movl %edx, 20(%esi)
+ addl $92, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end3:
+ .size mcl_fp_mulNIST_P192L, .Lfunc_end3-mcl_fp_mulNIST_P192L
+
+ .globl mcl_fpDbl_mod_NIST_P521L
+ .align 16, 0x90
+ .type mcl_fpDbl_mod_NIST_P521L,@function
+mcl_fpDbl_mod_NIST_P521L: # @mcl_fpDbl_mod_NIST_P521L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $60, %esp
+ movl 84(%esp), %ecx
+ movl 124(%ecx), %edx
+ movl 128(%ecx), %esi
+ movl %esi, %eax
+ shldl $23, %edx, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 120(%ecx), %eax
+ shldl $23, %eax, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 116(%ecx), %edx
+ shldl $23, %edx, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 112(%ecx), %eax
+ shldl $23, %eax, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 108(%ecx), %edx
+ shldl $23, %edx, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 104(%ecx), %eax
+ shldl $23, %eax, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 100(%ecx), %edx
+ shldl $23, %edx, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 96(%ecx), %eax
+ shldl $23, %eax, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 92(%ecx), %edx
+ shldl $23, %edx, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 88(%ecx), %eax
+ shldl $23, %eax, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 84(%ecx), %edi
+ shldl $23, %edi, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 80(%ecx), %edx
+ shldl $23, %edx, %edi
+ movl 76(%ecx), %eax
+ shldl $23, %eax, %edx
+ movl 72(%ecx), %ebx
+ shldl $23, %ebx, %eax
+ movl 68(%ecx), %ebp
+ shldl $23, %ebp, %ebx
+ shrl $9, %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ movl 64(%ecx), %esi
+ shldl $23, %esi, %ebp
+ andl $511, %esi # imm = 0x1FF
+ addl (%ecx), %ebp
+ adcl 4(%ecx), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ adcl 8(%ecx), %eax
+ adcl 12(%ecx), %edx
+ adcl 16(%ecx), %edi
+ movl 28(%esp), %ebx # 4-byte Reload
+ adcl 20(%ecx), %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %ebx # 4-byte Reload
+ adcl 24(%ecx), %ebx
+ movl %ebx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ebx # 4-byte Reload
+ adcl 28(%ecx), %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %ebx # 4-byte Reload
+ adcl 32(%ecx), %ebx
+ movl %ebx, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %ebx # 4-byte Reload
+ adcl 36(%ecx), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %ebx # 4-byte Reload
+ adcl 40(%ecx), %ebx
+ movl %ebx, 48(%esp) # 4-byte Spill
+ movl 24(%esp), %ebx # 4-byte Reload
+ adcl 44(%ecx), %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 52(%esp), %ebx # 4-byte Reload
+ adcl 48(%ecx), %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 20(%esp), %ebx # 4-byte Reload
+ adcl 52(%ecx), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %ebx # 4-byte Reload
+ adcl 56(%ecx), %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 12(%esp), %ebx # 4-byte Reload
+ adcl 60(%ecx), %ebx
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ movl %esi, %ecx
+ shrl $9, %ecx
+ andl $1, %ecx
+ addl %ebp, %ecx
+ adcl $0, 16(%esp) # 4-byte Folded Spill
+ adcl $0, %eax
+ movl %eax, (%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl %edi, %esi
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ adcl $0, 48(%esp) # 4-byte Folded Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ adcl $0, 52(%esp) # 4-byte Folded Spill
+ adcl $0, 20(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl %ebx, %ebp
+ adcl $0, %ebp
+ movl 12(%esp), %ebx # 4-byte Reload
+ adcl $0, %ebx
+ movl %ecx, %edi
+ andl %eax, %edi
+ andl %edx, %edi
+ andl %esi, %edi
+ andl 28(%esp), %edi # 4-byte Folded Reload
+ andl 32(%esp), %edi # 4-byte Folded Reload
+ andl 36(%esp), %edi # 4-byte Folded Reload
+ andl 40(%esp), %edi # 4-byte Folded Reload
+ andl 44(%esp), %edi # 4-byte Folded Reload
+ andl 48(%esp), %edi # 4-byte Folded Reload
+ andl 24(%esp), %edi # 4-byte Folded Reload
+ andl 52(%esp), %edi # 4-byte Folded Reload
+ movl 20(%esp), %esi # 4-byte Reload
+ andl %esi, %edi
+ andl 56(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, %edx
+ movl 16(%esp), %ebx # 4-byte Reload
+ andl %ebp, %edi
+ movl %ebp, %eax
+ movl %edx, %ebp
+ orl $-512, %ebp # imm = 0xFFFFFFFFFFFFFE00
+ andl %edi, %ebp
+ andl %ebx, %ebp
+ cmpl $-1, %ebp
+ movl 80(%esp), %edi
+ je .LBB4_1
+# BB#3: # %nonzero
+ movl %ecx, (%edi)
+ movl %ebx, 4(%edi)
+ movl (%esp), %ecx # 4-byte Reload
+ movl %ecx, 8(%edi)
+ movl 4(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%edi)
+ movl 8(%esp), %ecx # 4-byte Reload
+ movl %ecx, 16(%edi)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 20(%edi)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%edi)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%edi)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%edi)
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%edi)
+ movl 48(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%edi)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%edi)
+ movl 52(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%edi)
+ movl %esi, 52(%edi)
+ movl 56(%esp), %ecx # 4-byte Reload
+ movl %ecx, 56(%edi)
+ movl %eax, 60(%edi)
+ andl $511, %edx # imm = 0x1FF
+ movl %edx, 64(%edi)
+ jmp .LBB4_2
+.LBB4_1: # %zero
+ xorl %eax, %eax
+ movl $17, %ecx
+ rep;stosl
+.LBB4_2: # %zero
+ addl $60, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end4:
+ .size mcl_fpDbl_mod_NIST_P521L, .Lfunc_end4-mcl_fpDbl_mod_NIST_P521L
+
+ .globl mcl_fp_mulUnitPre1L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre1L,@function
+mcl_fp_mulUnitPre1L: # @mcl_fp_mulUnitPre1L
+# BB#0:
+ movl 8(%esp), %eax
+ movl (%eax), %eax
+ mull 12(%esp)
+ movl 4(%esp), %ecx
+ movl %eax, (%ecx)
+ movl %edx, 4(%ecx)
+ retl
+.Lfunc_end5:
+ .size mcl_fp_mulUnitPre1L, .Lfunc_end5-mcl_fp_mulUnitPre1L
+
+ .globl mcl_fpDbl_mulPre1L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre1L,@function
+mcl_fpDbl_mulPre1L: # @mcl_fpDbl_mulPre1L
+# BB#0:
+ movl 12(%esp), %eax
+ movl (%eax), %eax
+ movl 8(%esp), %ecx
+ mull (%ecx)
+ movl 4(%esp), %ecx
+ movl %eax, (%ecx)
+ movl %edx, 4(%ecx)
+ retl
+.Lfunc_end6:
+ .size mcl_fpDbl_mulPre1L, .Lfunc_end6-mcl_fpDbl_mulPre1L
+
+ .globl mcl_fpDbl_sqrPre1L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre1L,@function
+mcl_fpDbl_sqrPre1L: # @mcl_fpDbl_sqrPre1L
+# BB#0:
+ movl 8(%esp), %eax
+ movl (%eax), %eax
+ mull %eax
+ movl 4(%esp), %ecx
+ movl %eax, (%ecx)
+ movl %edx, 4(%ecx)
+ retl
+.Lfunc_end7:
+ .size mcl_fpDbl_sqrPre1L, .Lfunc_end7-mcl_fpDbl_sqrPre1L
+
+ .globl mcl_fp_mont1L
+ .align 16, 0x90
+ .type mcl_fp_mont1L,@function
+mcl_fp_mont1L: # @mcl_fp_mont1L
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %eax
+ movl (%eax), %eax
+ movl 20(%esp), %ecx
+ mull (%ecx)
+ movl %eax, %ecx
+ movl %edx, %esi
+ movl 24(%esp), %edx
+ movl -4(%edx), %eax
+ imull %ecx, %eax
+ movl (%edx), %edi
+ mull %edi
+ addl %ecx, %eax
+ adcl %esi, %edx
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl %edx, %eax
+ subl %edi, %eax
+ sbbl $0, %ecx
+ testb $1, %cl
+ jne .LBB8_2
+# BB#1:
+ movl %eax, %edx
+.LBB8_2:
+ movl 12(%esp), %eax
+ movl %edx, (%eax)
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end8:
+ .size mcl_fp_mont1L, .Lfunc_end8-mcl_fp_mont1L
+
+ .globl mcl_fp_montNF1L
+ .align 16, 0x90
+ .type mcl_fp_montNF1L,@function
+mcl_fp_montNF1L: # @mcl_fp_montNF1L
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %eax
+ movl (%eax), %eax
+ movl 20(%esp), %ecx
+ mull (%ecx)
+ movl %eax, %ecx
+ movl %edx, %esi
+ movl 24(%esp), %edx
+ movl -4(%edx), %eax
+ imull %ecx, %eax
+ movl (%edx), %edi
+ mull %edi
+ addl %ecx, %eax
+ adcl %esi, %edx
+ movl %edx, %eax
+ subl %edi, %eax
+ js .LBB9_2
+# BB#1:
+ movl %eax, %edx
+.LBB9_2:
+ movl 12(%esp), %eax
+ movl %edx, (%eax)
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end9:
+ .size mcl_fp_montNF1L, .Lfunc_end9-mcl_fp_montNF1L
+
+ .globl mcl_fp_montRed1L
+ .align 16, 0x90
+ .type mcl_fp_montRed1L,@function
+mcl_fp_montRed1L: # @mcl_fp_montRed1L
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %ecx
+ movl (%ecx), %esi
+ movl 20(%esp), %edx
+ movl -4(%edx), %eax
+ imull %esi, %eax
+ movl (%edx), %edi
+ mull %edi
+ addl %esi, %eax
+ adcl 4(%ecx), %edx
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl %edx, %eax
+ subl %edi, %eax
+ sbbl $0, %ecx
+ testb $1, %cl
+ jne .LBB10_2
+# BB#1:
+ movl %eax, %edx
+.LBB10_2:
+ movl 12(%esp), %eax
+ movl %edx, (%eax)
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end10:
+ .size mcl_fp_montRed1L, .Lfunc_end10-mcl_fp_montRed1L
+
+ .globl mcl_fp_addPre1L
+ .align 16, 0x90
+ .type mcl_fp_addPre1L,@function
+mcl_fp_addPre1L: # @mcl_fp_addPre1L
+# BB#0:
+ movl 12(%esp), %eax
+ movl (%eax), %eax
+ movl 4(%esp), %ecx
+ movl 8(%esp), %edx
+ addl (%edx), %eax
+ movl %eax, (%ecx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ retl
+.Lfunc_end11:
+ .size mcl_fp_addPre1L, .Lfunc_end11-mcl_fp_addPre1L
+
+ .globl mcl_fp_subPre1L
+ .align 16, 0x90
+ .type mcl_fp_subPre1L,@function
+mcl_fp_subPre1L: # @mcl_fp_subPre1L
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %ecx
+ xorl %eax, %eax
+ movl 8(%esp), %edx
+ movl 16(%esp), %esi
+ subl (%esi), %ecx
+ movl %ecx, (%edx)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ retl
+.Lfunc_end12:
+ .size mcl_fp_subPre1L, .Lfunc_end12-mcl_fp_subPre1L
+
+ .globl mcl_fp_shr1_1L
+ .align 16, 0x90
+ .type mcl_fp_shr1_1L,@function
+mcl_fp_shr1_1L: # @mcl_fp_shr1_1L
+# BB#0:
+ movl 8(%esp), %eax
+ movl (%eax), %eax
+ shrl %eax
+ movl 4(%esp), %ecx
+ movl %eax, (%ecx)
+ retl
+.Lfunc_end13:
+ .size mcl_fp_shr1_1L, .Lfunc_end13-mcl_fp_shr1_1L
+
+ .globl mcl_fp_add1L
+ .align 16, 0x90
+ .type mcl_fp_add1L,@function
+mcl_fp_add1L: # @mcl_fp_add1L
+# BB#0:
+ pushl %esi
+ movl 16(%esp), %eax
+ movl (%eax), %eax
+ movl 8(%esp), %ecx
+ movl 12(%esp), %edx
+ addl (%edx), %eax
+ movl %eax, (%ecx)
+ sbbl %edx, %edx
+ andl $1, %edx
+ movl 20(%esp), %esi
+ subl (%esi), %eax
+ sbbl $0, %edx
+ testb $1, %dl
+ jne .LBB14_2
+# BB#1: # %nocarry
+ movl %eax, (%ecx)
+.LBB14_2: # %carry
+ popl %esi
+ retl
+.Lfunc_end14:
+ .size mcl_fp_add1L, .Lfunc_end14-mcl_fp_add1L
+
+ .globl mcl_fp_addNF1L
+ .align 16, 0x90
+ .type mcl_fp_addNF1L,@function
+mcl_fp_addNF1L: # @mcl_fp_addNF1L
+# BB#0:
+ movl 12(%esp), %eax
+ movl (%eax), %eax
+ movl 8(%esp), %ecx
+ addl (%ecx), %eax
+ movl 16(%esp), %edx
+ movl %eax, %ecx
+ subl (%edx), %ecx
+ js .LBB15_2
+# BB#1:
+ movl %ecx, %eax
+.LBB15_2:
+ movl 4(%esp), %ecx
+ movl %eax, (%ecx)
+ retl
+.Lfunc_end15:
+ .size mcl_fp_addNF1L, .Lfunc_end15-mcl_fp_addNF1L
+
+ .globl mcl_fp_sub1L
+ .align 16, 0x90
+ .type mcl_fp_sub1L,@function
+mcl_fp_sub1L: # @mcl_fp_sub1L
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %eax
+ xorl %edx, %edx
+ movl 8(%esp), %ecx
+ movl 16(%esp), %esi
+ subl (%esi), %eax
+ movl %eax, (%ecx)
+ sbbl $0, %edx
+ testb $1, %dl
+ jne .LBB16_2
+# BB#1: # %nocarry
+ popl %esi
+ retl
+.LBB16_2: # %carry
+ movl 20(%esp), %edx
+ addl (%edx), %eax
+ movl %eax, (%ecx)
+ popl %esi
+ retl
+.Lfunc_end16:
+ .size mcl_fp_sub1L, .Lfunc_end16-mcl_fp_sub1L
+
+ .globl mcl_fp_subNF1L
+ .align 16, 0x90
+ .type mcl_fp_subNF1L,@function
+mcl_fp_subNF1L: # @mcl_fp_subNF1L
+# BB#0:
+ movl 8(%esp), %eax
+ movl (%eax), %eax
+ movl 12(%esp), %ecx
+ subl (%ecx), %eax
+ movl %eax, %ecx
+ sarl $31, %ecx
+ movl 16(%esp), %edx
+ andl (%edx), %ecx
+ addl %eax, %ecx
+ movl 4(%esp), %eax
+ movl %ecx, (%eax)
+ retl
+.Lfunc_end17:
+ .size mcl_fp_subNF1L, .Lfunc_end17-mcl_fp_subNF1L
+
+ .globl mcl_fpDbl_add1L
+ .align 16, 0x90
+ .type mcl_fpDbl_add1L,@function
+mcl_fpDbl_add1L: # @mcl_fpDbl_add1L
+# BB#0:
+ pushl %ebx
+ pushl %esi
+ movl 20(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %eax
+ movl 16(%esp), %esi
+ addl (%esi), %edx
+ movl 12(%esp), %ecx
+ adcl 4(%esi), %eax
+ movl %edx, (%ecx)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 24(%esp), %esi
+ movl %eax, %edx
+ subl (%esi), %edx
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB18_2
+# BB#1:
+ movl %edx, %eax
+.LBB18_2:
+ movl %eax, 4(%ecx)
+ popl %esi
+ popl %ebx
+ retl
+.Lfunc_end18:
+ .size mcl_fpDbl_add1L, .Lfunc_end18-mcl_fpDbl_add1L
+
+ .globl mcl_fpDbl_sub1L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub1L,@function
+mcl_fpDbl_sub1L: # @mcl_fpDbl_sub1L
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %esi
+ movl 4(%eax), %eax
+ xorl %ecx, %ecx
+ movl 16(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %eax
+ movl 8(%esp), %edx
+ movl %esi, (%edx)
+ sbbl $0, %ecx
+ andl $1, %ecx
+ je .LBB19_2
+# BB#1:
+ movl 20(%esp), %ecx
+ movl (%ecx), %ecx
+.LBB19_2:
+ addl %eax, %ecx
+ movl %ecx, 4(%edx)
+ popl %esi
+ retl
+.Lfunc_end19:
+ .size mcl_fpDbl_sub1L, .Lfunc_end19-mcl_fpDbl_sub1L
+
+ .globl mcl_fp_mulUnitPre2L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre2L,@function
+mcl_fp_mulUnitPre2L: # @mcl_fp_mulUnitPre2L
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl 20(%esp), %ebx
+ movl %ecx, %eax
+ mull 4(%ebx)
+ movl %edx, %esi
+ movl %eax, %edi
+ movl %ecx, %eax
+ mull (%ebx)
+ movl 16(%esp), %ecx
+ movl %eax, (%ecx)
+ addl %edi, %edx
+ movl %edx, 4(%ecx)
+ adcl $0, %esi
+ movl %esi, 8(%ecx)
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end20:
+ .size mcl_fp_mulUnitPre2L, .Lfunc_end20-mcl_fp_mulUnitPre2L
+
+ .globl mcl_fpDbl_mulPre2L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre2L,@function
+mcl_fpDbl_mulPre2L: # @mcl_fpDbl_mulPre2L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $8, %esp
+ movl 32(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edi
+ movl 36(%esp), %ebx
+ movl (%ebx), %esi
+ movl %ecx, %eax
+ mull %esi
+ movl %edx, %ebp
+ movl 28(%esp), %edx
+ movl %eax, (%edx)
+ movl 4(%ebx), %ebx
+ movl %edi, %eax
+ mull %ebx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebx
+ movl %edx, %ecx
+ movl %eax, %ebx
+ movl %edi, %eax
+ mull %esi
+ addl %ebp, %eax
+ adcl $0, %edx
+ addl %ebx, %eax
+ movl 28(%esp), %esi
+ movl %eax, 4(%esi)
+ adcl (%esp), %edx # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl %ecx, %edx
+ movl %edx, 8(%esi)
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 12(%esi)
+ addl $8, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end21:
+ .size mcl_fpDbl_mulPre2L, .Lfunc_end21-mcl_fpDbl_mulPre2L
+
+ .globl mcl_fpDbl_sqrPre2L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre2L,@function
+mcl_fpDbl_sqrPre2L: # @mcl_fpDbl_sqrPre2L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %esi
+ movl %esi, %eax
+ mull %esi
+ movl %edx, %edi
+ movl %eax, %ebx
+ movl %esi, %eax
+ mull %ecx
+ movl %edx, %esi
+ movl %eax, %ebp
+ movl %ecx, %eax
+ mull %ecx
+ movl 20(%esp), %ecx
+ movl %eax, (%ecx)
+ addl %ebp, %edx
+ movl %esi, %eax
+ adcl $0, %eax
+ addl %ebp, %edx
+ movl %edx, 4(%ecx)
+ adcl %ebx, %eax
+ sbbl %edx, %edx
+ andl $1, %edx
+ addl %esi, %eax
+ movl %eax, 8(%ecx)
+ adcl %edi, %edx
+ movl %edx, 12(%ecx)
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end22:
+ .size mcl_fpDbl_sqrPre2L, .Lfunc_end22-mcl_fpDbl_sqrPre2L
+
+ .globl mcl_fp_mont2L
+ .align 16, 0x90
+ .type mcl_fp_mont2L,@function
+mcl_fp_mont2L: # @mcl_fp_mont2L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $32, %esp
+ movl 56(%esp), %ecx
+ movl (%ecx), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 4(%ecx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx
+ movl (%ecx), %esi
+ mull %esi
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl 64(%esp), %edx
+ movl -4(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ imull %ecx, %ebp
+ movl (%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 4(%edx), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %edx
+ movl %edx, %ebx
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %ecx
+ movl %edx, %ebp
+ movl %eax, %edi
+ movl 16(%esp), %eax # 4-byte Reload
+ mull %esi
+ addl 4(%esp), %eax # 4-byte Folded Reload
+ adcl $0, %edx
+ addl (%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %ebx
+ addl 8(%esp), %edi # 4-byte Folded Reload
+ adcl %eax, %ebp
+ adcl %edx, %ebx
+ movl 60(%esp), %eax
+ movl 4(%eax), %ecx
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 16(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 12(%esp) # 4-byte Folded Reload
+ movl %eax, %ecx
+ movl %edx, %esi
+ addl 16(%esp), %esi # 4-byte Folded Reload
+ adcl $0, %edi
+ addl %ebp, %ecx
+ adcl %ebx, %esi
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ sbbl %ebx, %ebx
+ movl 28(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ andl $1, %ebx
+ mull 20(%esp) # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ mull 24(%esp) # 4-byte Folded Reload
+ addl 16(%esp), %eax # 4-byte Folded Reload
+ adcl $0, %edx
+ addl %ecx, %ebp
+ adcl %esi, %eax
+ adcl %edi, %edx
+ adcl $0, %ebx
+ movl %eax, %esi
+ subl 20(%esp), %esi # 4-byte Folded Reload
+ movl %edx, %ecx
+ sbbl 24(%esp), %ecx # 4-byte Folded Reload
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB23_2
+# BB#1:
+ movl %esi, %eax
+.LBB23_2:
+ movl 52(%esp), %esi
+ movl %eax, (%esi)
+ testb %bl, %bl
+ jne .LBB23_4
+# BB#3:
+ movl %ecx, %edx
+.LBB23_4:
+ movl %edx, 4(%esi)
+ addl $32, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end23:
+ .size mcl_fp_mont2L, .Lfunc_end23-mcl_fp_mont2L
+
+ .globl mcl_fp_montNF2L
+ .align 16, 0x90
+ .type mcl_fp_montNF2L,@function
+mcl_fp_montNF2L: # @mcl_fp_montNF2L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $36, %esp
+ movl 60(%esp), %ecx
+ movl (%ecx), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 4(%ecx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx
+ movl (%ecx), %ebp
+ mull %ebp
+ movl %eax, %ebx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 68(%esp), %eax
+ movl -4(%eax), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl %ebx, %edi
+ imull %ecx, %edi
+ movl (%eax), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 4(%eax), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, (%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %ecx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, %eax
+ mull %ebp
+ movl %edx, %edi
+ movl %eax, %ebp
+ addl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %edi
+ addl %ebx, %esi
+ adcl (%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %edi
+ addl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl 64(%esp), %eax
+ movl 4(%eax), %ebx
+ movl %ebx, %eax
+ mull %ecx
+ movl %edx, %esi
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 20(%esp) # 4-byte Folded Reload
+ movl %eax, %ebx
+ movl %edx, %ecx
+ addl 16(%esp), %ecx # 4-byte Folded Reload
+ adcl $0, %esi
+ addl %ebp, %ebx
+ adcl %edi, %ecx
+ adcl $0, %esi
+ movl 24(%esp), %eax # 4-byte Reload
+ imull %ebx, %eax
+ movl %eax, %edi
+ mull 32(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %edi, %eax
+ movl 28(%esp), %edi # 4-byte Reload
+ mull %edi
+ addl %ebx, %ebp
+ adcl %ecx, %eax
+ adcl $0, %esi
+ addl 24(%esp), %eax # 4-byte Folded Reload
+ adcl %edx, %esi
+ movl %eax, %edx
+ subl 32(%esp), %edx # 4-byte Folded Reload
+ movl %esi, %ecx
+ sbbl %edi, %ecx
+ testl %ecx, %ecx
+ js .LBB24_2
+# BB#1:
+ movl %edx, %eax
+.LBB24_2:
+ movl 56(%esp), %edx
+ movl %eax, (%edx)
+ js .LBB24_4
+# BB#3:
+ movl %ecx, %esi
+.LBB24_4:
+ movl %esi, 4(%edx)
+ addl $36, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end24:
+ .size mcl_fp_montNF2L, .Lfunc_end24-mcl_fp_montNF2L
+
+ .globl mcl_fp_montRed2L
+ .align 16, 0x90
+ .type mcl_fp_montRed2L,@function
+mcl_fp_montRed2L: # @mcl_fp_montRed2L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $16, %esp
+ movl 44(%esp), %eax
+ movl -4(%eax), %ecx
+ movl (%eax), %ebx
+ movl %ebx, 8(%esp) # 4-byte Spill
+ movl 40(%esp), %edx
+ movl (%edx), %ebp
+ movl %ebp, %edi
+ imull %ecx, %edi
+ movl 4(%eax), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %edx
+ movl %edx, %esi
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %ebx
+ movl %edx, %edi
+ addl 4(%esp), %edi # 4-byte Folded Reload
+ adcl $0, %esi
+ addl %ebp, %eax
+ movl 40(%esp), %edx
+ movl 12(%edx), %eax
+ adcl 4(%edx), %edi
+ adcl 8(%edx), %esi
+ adcl $0, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ sbbl %ebx, %ebx
+ imull %edi, %ecx
+ andl $1, %ebx
+ movl %ecx, %eax
+ mull 8(%esp) # 4-byte Folded Reload
+ movl %edx, (%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %ecx, %eax
+ mull 12(%esp) # 4-byte Folded Reload
+ addl (%esp), %eax # 4-byte Folded Reload
+ adcl $0, %edx
+ addl %edi, %ebp
+ adcl %esi, %eax
+ adcl 4(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %ebx
+ movl %eax, %esi
+ subl 8(%esp), %esi # 4-byte Folded Reload
+ movl %edx, %ecx
+ sbbl 12(%esp), %ecx # 4-byte Folded Reload
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB25_2
+# BB#1:
+ movl %esi, %eax
+.LBB25_2:
+ movl 36(%esp), %esi
+ movl %eax, (%esi)
+ testb %bl, %bl
+ jne .LBB25_4
+# BB#3:
+ movl %ecx, %edx
+.LBB25_4:
+ movl %edx, 4(%esi)
+ addl $16, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end25:
+ .size mcl_fp_montRed2L, .Lfunc_end25-mcl_fp_montRed2L
+
+ .globl mcl_fp_addPre2L
+ .align 16, 0x90
+ .type mcl_fp_addPre2L,@function
+mcl_fp_addPre2L: # @mcl_fp_addPre2L
+# BB#0:
+ pushl %esi
+ movl 16(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %eax
+ movl 12(%esp), %edx
+ addl (%edx), %ecx
+ movl 8(%esp), %esi
+ adcl 4(%edx), %eax
+ movl %ecx, (%esi)
+ movl %eax, 4(%esi)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ retl
+.Lfunc_end26:
+ .size mcl_fp_addPre2L, .Lfunc_end26-mcl_fp_addPre2L
+
+ .globl mcl_fp_subPre2L
+ .align 16, 0x90
+ .type mcl_fp_subPre2L,@function
+mcl_fp_subPre2L: # @mcl_fp_subPre2L
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ xorl %eax, %eax
+ movl 16(%esp), %esi
+ subl (%esi), %ecx
+ sbbl 4(%esi), %edx
+ movl 8(%esp), %esi
+ movl %ecx, (%esi)
+ movl %edx, 4(%esi)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ retl
+.Lfunc_end27:
+ .size mcl_fp_subPre2L, .Lfunc_end27-mcl_fp_subPre2L
+
+ .globl mcl_fp_shr1_2L
+ .align 16, 0x90
+ .type mcl_fp_shr1_2L,@function
+mcl_fp_shr1_2L: # @mcl_fp_shr1_2L
+# BB#0:
+ movl 8(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %eax
+ shrdl $1, %eax, %ecx
+ movl 4(%esp), %edx
+ movl %ecx, (%edx)
+ shrl %eax
+ movl %eax, 4(%edx)
+ retl
+.Lfunc_end28:
+ .size mcl_fp_shr1_2L, .Lfunc_end28-mcl_fp_shr1_2L
+
+ .globl mcl_fp_add2L
+ .align 16, 0x90
+ .type mcl_fp_add2L,@function
+mcl_fp_add2L: # @mcl_fp_add2L
+# BB#0:
+ pushl %ebx
+ pushl %esi
+ movl 20(%esp), %ecx
+ movl (%ecx), %eax
+ movl 4(%ecx), %ecx
+ movl 16(%esp), %esi
+ addl (%esi), %eax
+ movl 12(%esp), %edx
+ adcl 4(%esi), %ecx
+ movl %eax, (%edx)
+ movl %ecx, 4(%edx)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 24(%esp), %esi
+ subl (%esi), %eax
+ sbbl 4(%esi), %ecx
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB29_2
+# BB#1: # %nocarry
+ movl %eax, (%edx)
+ movl %ecx, 4(%edx)
+.LBB29_2: # %carry
+ popl %esi
+ popl %ebx
+ retl
+.Lfunc_end29:
+ .size mcl_fp_add2L, .Lfunc_end29-mcl_fp_add2L
+
+ .globl mcl_fp_addNF2L
+ .align 16, 0x90
+ .type mcl_fp_addNF2L,@function
+mcl_fp_addNF2L: # @mcl_fp_addNF2L
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 20(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %eax
+ movl 16(%esp), %edx
+ addl (%edx), %ecx
+ adcl 4(%edx), %eax
+ movl 24(%esp), %edi
+ movl %ecx, %esi
+ subl (%edi), %esi
+ movl %eax, %edx
+ sbbl 4(%edi), %edx
+ testl %edx, %edx
+ js .LBB30_2
+# BB#1:
+ movl %esi, %ecx
+.LBB30_2:
+ movl 12(%esp), %esi
+ movl %ecx, (%esi)
+ js .LBB30_4
+# BB#3:
+ movl %edx, %eax
+.LBB30_4:
+ movl %eax, 4(%esi)
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end30:
+ .size mcl_fp_addNF2L, .Lfunc_end30-mcl_fp_addNF2L
+
+ .globl mcl_fp_sub2L
+ .align 16, 0x90
+ .type mcl_fp_sub2L,@function
+mcl_fp_sub2L: # @mcl_fp_sub2L
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 20(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %eax
+ xorl %ebx, %ebx
+ movl 24(%esp), %edx
+ subl (%edx), %ecx
+ sbbl 4(%edx), %eax
+ movl 16(%esp), %edx
+ movl %ecx, (%edx)
+ movl %eax, 4(%edx)
+ sbbl $0, %ebx
+ testb $1, %bl
+ je .LBB31_2
+# BB#1: # %carry
+ movl 28(%esp), %esi
+ movl 4(%esi), %edi
+ addl (%esi), %ecx
+ movl %ecx, (%edx)
+ adcl %eax, %edi
+ movl %edi, 4(%edx)
+.LBB31_2: # %nocarry
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end31:
+ .size mcl_fp_sub2L, .Lfunc_end31-mcl_fp_sub2L
+
+ .globl mcl_fp_subNF2L
+ .align 16, 0x90
+ .type mcl_fp_subNF2L,@function
+mcl_fp_subNF2L: # @mcl_fp_subNF2L
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %eax
+ movl 20(%esp), %edx
+ subl (%edx), %ecx
+ sbbl 4(%edx), %eax
+ movl %eax, %edx
+ sarl $31, %edx
+ movl 24(%esp), %esi
+ movl 4(%esi), %edi
+ andl %edx, %edi
+ andl (%esi), %edx
+ addl %ecx, %edx
+ movl 12(%esp), %ecx
+ movl %edx, (%ecx)
+ adcl %eax, %edi
+ movl %edi, 4(%ecx)
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end32:
+ .size mcl_fp_subNF2L, .Lfunc_end32-mcl_fp_subNF2L
+
+ .globl mcl_fpDbl_add2L
+ .align 16, 0x90
+ .type mcl_fpDbl_add2L,@function
+mcl_fpDbl_add2L: # @mcl_fpDbl_add2L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 28(%esp), %edx
+ movl 12(%edx), %esi
+ movl 24(%esp), %edi
+ movl 12(%edi), %eax
+ movl 8(%edx), %ecx
+ movl (%edx), %ebx
+ movl 4(%edx), %ebp
+ addl (%edi), %ebx
+ adcl 4(%edi), %ebp
+ movl 20(%esp), %edx
+ adcl 8(%edi), %ecx
+ movl %ebx, (%edx)
+ movl %ebp, 4(%edx)
+ adcl %esi, %eax
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 32(%esp), %ebp
+ movl %ecx, %esi
+ subl (%ebp), %esi
+ movl %eax, %edi
+ sbbl 4(%ebp), %edi
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB33_2
+# BB#1:
+ movl %edi, %eax
+.LBB33_2:
+ testb %bl, %bl
+ jne .LBB33_4
+# BB#3:
+ movl %esi, %ecx
+.LBB33_4:
+ movl %ecx, 8(%edx)
+ movl %eax, 12(%edx)
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end33:
+ .size mcl_fpDbl_add2L, .Lfunc_end33-mcl_fpDbl_add2L
+
+ .globl mcl_fpDbl_sub2L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub2L,@function
+mcl_fpDbl_sub2L: # @mcl_fpDbl_sub2L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %ebx, %ebx
+ movl 28(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %eax
+ sbbl 8(%edx), %eax
+ movl 12(%edx), %ebp
+ movl 12(%ecx), %edx
+ movl 20(%esp), %ecx
+ movl %esi, (%ecx)
+ movl %edi, 4(%ecx)
+ sbbl %ebp, %edx
+ movl 32(%esp), %edi
+ movl (%edi), %esi
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB34_1
+# BB#2:
+ xorl %edi, %edi
+ jmp .LBB34_3
+.LBB34_1:
+ movl 4(%edi), %edi
+.LBB34_3:
+ testb %bl, %bl
+ jne .LBB34_5
+# BB#4:
+ xorl %esi, %esi
+.LBB34_5:
+ addl %eax, %esi
+ movl %esi, 8(%ecx)
+ adcl %edx, %edi
+ movl %edi, 12(%ecx)
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end34:
+ .size mcl_fpDbl_sub2L, .Lfunc_end34-mcl_fpDbl_sub2L
+
+ .globl mcl_fp_mulUnitPre3L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre3L,@function
+mcl_fp_mulUnitPre3L: # @mcl_fp_mulUnitPre3L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ pushl %eax
+ movl 32(%esp), %ecx
+ movl 28(%esp), %edi
+ movl %ecx, %eax
+ mull 8(%edi)
+ movl %edx, %esi
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 4(%edi)
+ movl %edx, %ebx
+ movl %eax, %ebp
+ movl %ecx, %eax
+ mull (%edi)
+ movl 24(%esp), %ecx
+ movl %eax, (%ecx)
+ addl %ebp, %edx
+ movl %edx, 4(%ecx)
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 8(%ecx)
+ adcl $0, %esi
+ movl %esi, 12(%ecx)
+ addl $4, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end35:
+ .size mcl_fp_mulUnitPre3L, .Lfunc_end35-mcl_fp_mulUnitPre3L
+
+ .globl mcl_fpDbl_mulPre3L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre3L,@function
+mcl_fpDbl_mulPre3L: # @mcl_fpDbl_mulPre3L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $28, %esp
+ movl 52(%esp), %ecx
+ movl (%ecx), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %edx
+ movl (%edx), %edi
+ mull %edi
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 48(%esp), %edx
+ movl %eax, (%edx)
+ movl 4(%ecx), %ebp
+ movl %ebp, (%esp) # 4-byte Spill
+ movl 8(%ecx), %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %edi
+ movl %edx, %ecx
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %edi
+ movl %edx, %edi
+ movl %eax, %ebx
+ addl 20(%esp), %ebx # 4-byte Folded Reload
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ adcl $0, %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 56(%esp), %eax
+ movl 4(%eax), %ecx
+ movl %esi, %eax
+ mull %ecx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ebp, %eax
+ mull %ecx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl 24(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 4(%esp) # 4-byte Spill
+ addl %ebx, %eax
+ movl 48(%esp), %ecx
+ movl %eax, 4(%ecx)
+ adcl %edi, %ebp
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl 56(%esp), %eax
+ movl 8(%eax), %edi
+ sbbl %ecx, %ecx
+ movl (%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, (%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl 8(%esp), %eax # 4-byte Reload
+ mull %edi
+ andl $1, %ecx
+ addl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ addl %ebx, %ebp
+ movl 48(%esp), %edi
+ movl %ebp, 8(%edi)
+ adcl (%esp), %esi # 4-byte Folded Reload
+ adcl %eax, %ecx
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl 24(%esp), %esi # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %esi, 12(%edi)
+ movl %ecx, 16(%edi)
+ adcl %edx, %eax
+ movl %eax, 20(%edi)
+ addl $28, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end36:
+ .size mcl_fpDbl_mulPre3L, .Lfunc_end36-mcl_fpDbl_mulPre3L
+
+ .globl mcl_fpDbl_sqrPre3L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre3L,@function
+mcl_fpDbl_sqrPre3L: # @mcl_fpDbl_sqrPre3L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $32, %esp
+ movl 56(%esp), %eax
+ movl 8(%eax), %ebp
+ movl (%eax), %ecx
+ movl 4(%eax), %esi
+ movl %ebp, %eax
+ mull %esi
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %ecx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %ebx, (%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %esi
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ecx
+ movl %edx, %esi
+ movl %eax, %edi
+ movl %ecx, %eax
+ mull %ecx
+ movl %edx, %ecx
+ movl 52(%esp), %edx
+ movl %eax, (%edx)
+ movl %ebp, %eax
+ mull %ebp
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %edx, 24(%esp) # 4-byte Spill
+ addl %edi, %ecx
+ movl %esi, %ebp
+ adcl %ebx, %ebp
+ movl 4(%esp), %ebx # 4-byte Reload
+ movl %ebx, %eax
+ adcl $0, %eax
+ addl %edi, %ecx
+ movl 52(%esp), %edx
+ movl %ecx, 4(%edx)
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl %edx, %eax
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ addl %esi, %ebp
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl 28(%esp), %edi # 4-byte Reload
+ adcl %edi, %ecx
+ addl (%esp), %ebp # 4-byte Folded Reload
+ movl 52(%esp), %esi
+ movl %ebp, 8(%esi)
+ adcl %edx, %eax
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ sbbl %esi, %esi
+ andl $1, %esi
+ addl %ebx, %eax
+ adcl %edi, %ecx
+ movl 52(%esp), %edx
+ movl %eax, 12(%edx)
+ movl %ecx, 16(%edx)
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%edx)
+ addl $32, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end37:
+ .size mcl_fpDbl_sqrPre3L, .Lfunc_end37-mcl_fpDbl_sqrPre3L
+
+ .globl mcl_fp_mont3L
+ .align 16, 0x90
+ .type mcl_fp_mont3L,@function
+mcl_fp_mont3L: # @mcl_fp_mont3L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $56, %esp
+ movl 80(%esp), %ecx
+ movl (%ecx), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 84(%esp), %edx
+ movl (%edx), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ mull %edx
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 88(%esp), %esi
+ movl -4(%esi), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ imull %edx, %ebp
+ movl (%esi), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 8(%esi), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 4(%esi), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 4(%ecx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 8(%ecx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %ebx
+ movl %edx, %ebx
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %edi
+ movl %edx, %ebp
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl 12(%esp), %ecx # 4-byte Reload
+ mull %ecx
+ movl %edx, %esi
+ movl %eax, %edi
+ movl 36(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %eax, %ecx
+ addl 16(%esp), %ecx # 4-byte Folded Reload
+ adcl %edi, %edx
+ adcl $0, %esi
+ addl (%esp), %ebp # 4-byte Folded Reload
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 4(%esp), %edi # 4-byte Reload
+ addl 32(%esp), %edi # 4-byte Folded Reload
+ adcl %ecx, %ebp
+ adcl %edx, %ebx
+ adcl %esi, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl 4(%eax), %ecx
+ movl %ecx, %eax
+ mull 20(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 36(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 24(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ addl 12(%esp), %edi # 4-byte Folded Reload
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %eax, %edx
+ addl %ebp, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl %ebx, %edi
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 32(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %edx, %esi
+ imull 52(%esp), %esi # 4-byte Folded Reload
+ andl $1, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 48(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 44(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 40(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ addl 4(%esp), %ecx # 4-byte Folded Reload
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ adcl $0, %ebp
+ addl 12(%esp), %eax # 4-byte Folded Reload
+ adcl %edi, %ecx
+ adcl 16(%esp), %ebx # 4-byte Folded Reload
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax
+ movl 8(%eax), %esi
+ movl %esi, %eax
+ mull 20(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 36(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 24(%esp) # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %edx, %esi
+ addl 16(%esp), %esi # 4-byte Folded Reload
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 36(%esp), %edx # 4-byte Reload
+ addl %ecx, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl %ebx, %esi
+ adcl %ebp, %edi
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ sbbl %ecx, %ecx
+ movl 52(%esp), %ebp # 4-byte Reload
+ imull %edx, %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ andl $1, %ecx
+ movl %ebp, %eax
+ mull 40(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull 48(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ mull 44(%esp) # 4-byte Folded Reload
+ addl 28(%esp), %eax # 4-byte Folded Reload
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %ebp
+ addl 36(%esp), %ebx # 4-byte Folded Reload
+ adcl %esi, %eax
+ adcl %edi, %edx
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %ecx
+ movl %eax, %ebx
+ subl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %edx, %edi
+ sbbl 44(%esp), %edi # 4-byte Folded Reload
+ movl %ebp, %esi
+ sbbl 48(%esp), %esi # 4-byte Folded Reload
+ sbbl $0, %ecx
+ andl $1, %ecx
+ jne .LBB38_2
+# BB#1:
+ movl %ebx, %eax
+.LBB38_2:
+ movl 76(%esp), %ebx
+ movl %eax, (%ebx)
+ testb %cl, %cl
+ jne .LBB38_4
+# BB#3:
+ movl %edi, %edx
+.LBB38_4:
+ movl %edx, 4(%ebx)
+ jne .LBB38_6
+# BB#5:
+ movl %esi, %ebp
+.LBB38_6:
+ movl %ebp, 8(%ebx)
+ addl $56, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end38:
+ .size mcl_fp_mont3L, .Lfunc_end38-mcl_fp_mont3L
+
+ .globl mcl_fp_montNF3L
+ .align 16, 0x90
+ .type mcl_fp_montNF3L,@function
+mcl_fp_montNF3L: # @mcl_fp_montNF3L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $60, %esp
+ movl 84(%esp), %ebp
+ movl (%ebp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx
+ movl (%ecx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 92(%esp), %esi
+ movl -4(%esi), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ imull %edx, %ecx
+ movl (%esi), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 8(%esi), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 4(%esi), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 4(%ebp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 8(%ebp), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %esi, %eax
+ movl 20(%esp), %ecx # 4-byte Reload
+ mull %ecx
+ movl %edx, %edi
+ movl %eax, %ebx
+ movl 40(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, %ecx
+ movl %eax, %esi
+ addl 36(%esp), %esi # 4-byte Folded Reload
+ adcl %ebx, %ecx
+ adcl $0, %edi
+ addl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl (%esp), %esi # 4-byte Folded Reload
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ adcl $0, %edi
+ addl 4(%esp), %esi # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl 88(%esp), %eax
+ movl 4(%eax), %ebx
+ movl %ebx, %eax
+ mull 24(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 40(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 28(%esp) # 4-byte Folded Reload
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %edx, %ebx
+ addl 16(%esp), %ebx # 4-byte Folded Reload
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ adcl $0, %ebp
+ movl 20(%esp), %edx # 4-byte Reload
+ addl %esi, %edx
+ adcl %ecx, %ebx
+ adcl %edi, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %edx, %ecx
+ movl %edx, %edi
+ imull 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 48(%esp) # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ecx, %eax
+ mull 44(%esp) # 4-byte Folded Reload
+ addl %edi, %eax
+ adcl %ebx, %esi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ adcl $0, %ebp
+ addl %edx, %esi
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ movl 88(%esp), %eax
+ movl 8(%eax), %edi
+ movl %edi, %eax
+ mull 24(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 40(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 28(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ addl 40(%esp), %edi # 4-byte Folded Reload
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ adcl $0, %ebx
+ addl %esi, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ adcl %ebp, %ecx
+ adcl $0, %ebx
+ movl 56(%esp), %esi # 4-byte Reload
+ imull %eax, %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 44(%esp) # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %esi, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl 56(%esp), %eax # 4-byte Reload
+ mull 48(%esp) # 4-byte Folded Reload
+ addl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl %edi, %eax
+ adcl %ecx, %esi
+ adcl $0, %ebx
+ addl 40(%esp), %eax # 4-byte Folded Reload
+ adcl %edx, %esi
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %eax, %edi
+ subl 44(%esp), %edi # 4-byte Folded Reload
+ movl %esi, %edx
+ sbbl 48(%esp), %edx # 4-byte Folded Reload
+ movl %ebx, %ecx
+ sbbl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ sarl $31, %ebp
+ testl %ebp, %ebp
+ js .LBB39_2
+# BB#1:
+ movl %edi, %eax
+.LBB39_2:
+ movl 80(%esp), %edi
+ movl %eax, (%edi)
+ js .LBB39_4
+# BB#3:
+ movl %edx, %esi
+.LBB39_4:
+ movl %esi, 4(%edi)
+ js .LBB39_6
+# BB#5:
+ movl %ecx, %ebx
+.LBB39_6:
+ movl %ebx, 8(%edi)
+ addl $60, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end39:
+ .size mcl_fp_montNF3L, .Lfunc_end39-mcl_fp_montNF3L
+
+ .globl mcl_fp_montRed3L
+ .align 16, 0x90
+ .type mcl_fp_montRed3L,@function
+mcl_fp_montRed3L: # @mcl_fp_montRed3L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $40, %esp
+ movl 68(%esp), %eax
+ movl -4(%eax), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl (%eax), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 64(%esp), %ebx
+ movl (%ebx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ imull %edx, %ecx
+ movl 8(%eax), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 4(%eax), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %esi
+ movl %edx, %esi
+ movl %eax, %ebp
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, %ecx
+ addl %ebp, %ecx
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 16(%esp), %eax # 4-byte Folded Reload
+ adcl 4(%ebx), %ecx
+ adcl 8(%ebx), %esi
+ adcl 12(%ebx), %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 20(%ebx), %eax
+ movl 16(%ebx), %edx
+ adcl $0, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl %ecx, %edi
+ imull 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %eax
+ mull 32(%esp) # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 28(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, (%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 24(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ addl (%esp), %edi # 4-byte Folded Reload
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl %ecx, %eax
+ adcl %esi, %edi
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ adcl 8(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl $0, 12(%esp) # 4-byte Folded Spill
+ adcl $0, %ebx
+ movl 36(%esp), %ecx # 4-byte Reload
+ imull %edi, %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 32(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 24(%esp) # 4-byte Folded Reload
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl 36(%esp), %eax # 4-byte Reload
+ mull 28(%esp) # 4-byte Folded Reload
+ addl 8(%esp), %eax # 4-byte Folded Reload
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %esi
+ addl %edi, %ecx
+ adcl %ebp, %eax
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ adcl $0, %ebx
+ movl %eax, %ebp
+ subl 24(%esp), %ebp # 4-byte Folded Reload
+ movl %edx, %edi
+ sbbl 28(%esp), %edi # 4-byte Folded Reload
+ movl %esi, %ecx
+ sbbl 32(%esp), %ecx # 4-byte Folded Reload
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB40_2
+# BB#1:
+ movl %ebp, %eax
+.LBB40_2:
+ movl 60(%esp), %ebp
+ movl %eax, (%ebp)
+ testb %bl, %bl
+ jne .LBB40_4
+# BB#3:
+ movl %edi, %edx
+.LBB40_4:
+ movl %edx, 4(%ebp)
+ jne .LBB40_6
+# BB#5:
+ movl %ecx, %esi
+.LBB40_6:
+ movl %esi, 8(%ebp)
+ addl $40, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end40:
+ .size mcl_fp_montRed3L, .Lfunc_end40-mcl_fp_montRed3L
+
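+# mcl_fp_addPre3L: 3-word addition without modular reduction; the carry-out is returned in %eax.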
+ .globl mcl_fp_addPre3L
+ .align 16, 0x90
+ .type mcl_fp_addPre3L,@function
+mcl_fp_addPre3L: # @mcl_fp_addPre3L
+# BB#0:
+ pushl %esi
+ movl 16(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ movl 12(%esp), %esi
+ addl (%esi), %ecx
+ adcl 4(%esi), %edx
+ movl 8(%eax), %eax
+ adcl 8(%esi), %eax
+ movl 8(%esp), %esi
+ movl %ecx, (%esi)
+ movl %edx, 4(%esi)
+ movl %eax, 8(%esi)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ retl
+.Lfunc_end41:
+ .size mcl_fp_addPre3L, .Lfunc_end41-mcl_fp_addPre3L
+
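+# mcl_fp_subPre3L: 3-word subtraction without modular reduction; the borrow is returned in %eax.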
+ .globl mcl_fp_subPre3L
+ .align 16, 0x90
+ .type mcl_fp_subPre3L,@function
+mcl_fp_subPre3L: # @mcl_fp_subPre3L
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %ecx
+ movl (%ecx), %edx
+ movl 4(%ecx), %esi
+ xorl %eax, %eax
+ movl 20(%esp), %edi
+ subl (%edi), %edx
+ sbbl 4(%edi), %esi
+ movl 8(%ecx), %ecx
+ sbbl 8(%edi), %ecx
+ movl 12(%esp), %edi
+ movl %edx, (%edi)
+ movl %esi, 4(%edi)
+ movl %ecx, 8(%edi)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end42:
+ .size mcl_fp_subPre3L, .Lfunc_end42-mcl_fp_subPre3L
+
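+# mcl_fp_shr1_3L: shift a 3-word value right by one bit.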
+ .globl mcl_fp_shr1_3L
+ .align 16, 0x90
+ .type mcl_fp_shr1_3L,@function
+mcl_fp_shr1_3L: # @mcl_fp_shr1_3L
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl 8(%eax), %ecx
+ movl (%eax), %edx
+ movl 4(%eax), %eax
+ shrdl $1, %eax, %edx
+ movl 8(%esp), %esi
+ movl %edx, (%esi)
+ shrdl $1, %ecx, %eax
+ movl %eax, 4(%esi)
+ shrl %ecx
+ movl %ecx, 8(%esi)
+ popl %esi
+ retl
+.Lfunc_end43:
+ .size mcl_fp_shr1_3L, .Lfunc_end43-mcl_fp_shr1_3L
+
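+# mcl_fp_add3L: 3-word modular addition; the raw sum is stored, then overwritten by sum - modulus when no borrow occurs.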
+ .globl mcl_fp_add3L
+ .align 16, 0x90
+ .type mcl_fp_add3L,@function
+mcl_fp_add3L: # @mcl_fp_add3L
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %edx
+ movl (%edx), %eax
+ movl 4(%edx), %ecx
+ movl 20(%esp), %esi
+ addl (%esi), %eax
+ adcl 4(%esi), %ecx
+ movl 8(%edx), %edx
+ adcl 8(%esi), %edx
+ movl 16(%esp), %esi
+ movl %eax, (%esi)
+ movl %ecx, 4(%esi)
+ movl %edx, 8(%esi)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 28(%esp), %edi
+ subl (%edi), %eax
+ sbbl 4(%edi), %ecx
+ sbbl 8(%edi), %edx
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB44_2
+# BB#1: # %nocarry
+ movl %eax, (%esi)
+ movl %ecx, 4(%esi)
+ movl %edx, 8(%esi)
+.LBB44_2: # %carry
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end44:
+ .size mcl_fp_add3L, .Lfunc_end44-mcl_fp_add3L
+
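+# mcl_fp_addNF3L: 3-word modular addition; the sign of the trial subtraction of the modulus selects which result is stored.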
+ .globl mcl_fp_addNF3L
+ .align 16, 0x90
+ .type mcl_fp_addNF3L,@function
+mcl_fp_addNF3L: # @mcl_fp_addNF3L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 28(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %ecx
+ movl 24(%esp), %esi
+ addl (%esi), %edx
+ adcl 4(%esi), %ecx
+ movl 8(%eax), %eax
+ adcl 8(%esi), %eax
+ movl 32(%esp), %ebp
+ movl %edx, %ebx
+ subl (%ebp), %ebx
+ movl %ecx, %edi
+ sbbl 4(%ebp), %edi
+ movl %eax, %esi
+ sbbl 8(%ebp), %esi
+ movl %esi, %ebp
+ sarl $31, %ebp
+ testl %ebp, %ebp
+ js .LBB45_2
+# BB#1:
+ movl %ebx, %edx
+.LBB45_2:
+ movl 20(%esp), %ebx
+ movl %edx, (%ebx)
+ js .LBB45_4
+# BB#3:
+ movl %edi, %ecx
+.LBB45_4:
+ movl %ecx, 4(%ebx)
+ js .LBB45_6
+# BB#5:
+ movl %esi, %eax
+.LBB45_6:
+ movl %eax, 8(%ebx)
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end45:
+ .size mcl_fp_addNF3L, .Lfunc_end45-mcl_fp_addNF3L
+
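+# mcl_fp_sub3L: 3-word modular subtraction; the modulus is added back when the subtraction borrows.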
+ .globl mcl_fp_sub3L
+ .align 16, 0x90
+ .type mcl_fp_sub3L,@function
+mcl_fp_sub3L: # @mcl_fp_sub3L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %edx
+ movl (%edx), %ecx
+ movl 4(%edx), %eax
+ xorl %ebx, %ebx
+ movl 28(%esp), %esi
+ subl (%esi), %ecx
+ sbbl 4(%esi), %eax
+ movl 8(%edx), %edx
+ sbbl 8(%esi), %edx
+ movl 20(%esp), %esi
+ movl %ecx, (%esi)
+ movl %eax, 4(%esi)
+ movl %edx, 8(%esi)
+ sbbl $0, %ebx
+ testb $1, %bl
+ je .LBB46_2
+# BB#1: # %carry
+ movl 32(%esp), %edi
+ movl 4(%edi), %ebx
+ movl 8(%edi), %ebp
+ addl (%edi), %ecx
+ movl %ecx, (%esi)
+ adcl %eax, %ebx
+ movl %ebx, 4(%esi)
+ adcl %edx, %ebp
+ movl %ebp, 8(%esi)
+.LBB46_2: # %nocarry
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end46:
+ .size mcl_fp_sub3L, .Lfunc_end46-mcl_fp_sub3L
+
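+# mcl_fp_subNF3L: 3-word modular subtraction; the modulus, masked by the sign of the difference, is added back without a branch.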
+ .globl mcl_fp_subNF3L
+ .align 16, 0x90
+ .type mcl_fp_subNF3L,@function
+mcl_fp_subNF3L: # @mcl_fp_subNF3L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ movl 28(%esp), %esi
+ subl (%esi), %ecx
+ sbbl 4(%esi), %edx
+ movl 8(%eax), %eax
+ sbbl 8(%esi), %eax
+ movl %eax, %esi
+ sarl $31, %esi
+ movl %esi, %edi
+ shldl $1, %eax, %edi
+ movl 32(%esp), %ebx
+ andl (%ebx), %edi
+ movl 8(%ebx), %ebp
+ andl %esi, %ebp
+ andl 4(%ebx), %esi
+ addl %ecx, %edi
+ adcl %edx, %esi
+ movl 20(%esp), %ecx
+ movl %edi, (%ecx)
+ movl %esi, 4(%ecx)
+ adcl %eax, %ebp
+ movl %ebp, 8(%ecx)
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end47:
+ .size mcl_fp_subNF3L, .Lfunc_end47-mcl_fp_subNF3L
+
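+# mcl_fpDbl_add3L: 6-word addition; the upper three words are conditionally reduced modulo the 3-word modulus.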
+ .globl mcl_fpDbl_add3L
+ .align 16, 0x90
+ .type mcl_fpDbl_add3L,@function
+mcl_fpDbl_add3L: # @mcl_fpDbl_add3L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ pushl %eax
+ movl 32(%esp), %esi
+ movl 20(%esi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 16(%esi), %edi
+ movl 12(%esi), %ebx
+ movl (%esi), %edx
+ movl 28(%esp), %eax
+ addl (%eax), %edx
+ movl 24(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 8(%esi), %edx
+ movl 4(%esi), %esi
+ adcl 4(%eax), %esi
+ adcl 8(%eax), %edx
+ movl %esi, 4(%ecx)
+ movl 20(%eax), %ebp
+ movl %edx, 8(%ecx)
+ movl 12(%eax), %esi
+ movl 16(%eax), %edx
+ adcl %ebx, %esi
+ adcl %edi, %edx
+ adcl (%esp), %ebp # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl 36(%esp), %ecx
+ movl %esi, %ebx
+ subl (%ecx), %ebx
+ movl %edx, %edi
+ sbbl 4(%ecx), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ movl %ebp, %ecx
+ movl 36(%esp), %edi
+ sbbl 8(%edi), %ecx
+ sbbl $0, %eax
+ andl $1, %eax
+ jne .LBB48_2
+# BB#1:
+ movl %ecx, %ebp
+.LBB48_2:
+ testb %al, %al
+ jne .LBB48_4
+# BB#3:
+ movl %ebx, %esi
+.LBB48_4:
+ movl 24(%esp), %eax
+ movl %esi, 12(%eax)
+ jne .LBB48_6
+# BB#5:
+ movl (%esp), %edx # 4-byte Reload
+.LBB48_6:
+ movl %edx, 16(%eax)
+ movl %ebp, 20(%eax)
+ addl $4, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end48:
+ .size mcl_fpDbl_add3L, .Lfunc_end48-mcl_fpDbl_add3L
+
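+# mcl_fpDbl_sub3L: 6-word subtraction; the modulus is added to the upper three words when the subtraction borrows.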
+ .globl mcl_fpDbl_sub3L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub3L,@function
+mcl_fpDbl_sub3L: # @mcl_fpDbl_sub3L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %edx
+ movl 4(%ecx), %esi
+ movl 28(%esp), %ebx
+ subl (%ebx), %edx
+ sbbl 4(%ebx), %esi
+ movl 8(%ecx), %ebp
+ sbbl 8(%ebx), %ebp
+ movl 20(%esp), %eax
+ movl %edx, (%eax)
+ movl 12(%ecx), %edi
+ sbbl 12(%ebx), %edi
+ movl %esi, 4(%eax)
+ movl 16(%ecx), %esi
+ sbbl 16(%ebx), %esi
+ movl 20(%ebx), %ebx
+ movl 20(%ecx), %edx
+ movl %ebp, 8(%eax)
+ sbbl %ebx, %edx
+ movl $0, %ecx
+ sbbl $0, %ecx
+ andl $1, %ecx
+ movl 32(%esp), %ebp
+ jne .LBB49_1
+# BB#2:
+ xorl %ebx, %ebx
+ jmp .LBB49_3
+.LBB49_1:
+ movl 8(%ebp), %ebx
+.LBB49_3:
+ testb %cl, %cl
+ movl $0, %eax
+ jne .LBB49_4
+# BB#5:
+ xorl %ecx, %ecx
+ jmp .LBB49_6
+.LBB49_4:
+ movl (%ebp), %ecx
+ movl 4(%ebp), %eax
+.LBB49_6:
+ addl %edi, %ecx
+ adcl %esi, %eax
+ movl 20(%esp), %esi
+ movl %ecx, 12(%esi)
+ movl %eax, 16(%esi)
+ adcl %edx, %ebx
+ movl %ebx, 20(%esi)
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end49:
+ .size mcl_fpDbl_sub3L, .Lfunc_end49-mcl_fpDbl_sub3L
+
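+# mcl_fp_mulUnitPre4L: multiply a 4-word value by a single 32-bit word, producing a 5-word result.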
+ .globl mcl_fp_mulUnitPre4L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre4L,@function
+mcl_fp_mulUnitPre4L: # @mcl_fp_mulUnitPre4L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $12, %esp
+ movl 40(%esp), %ecx
+ movl 36(%esp), %ebp
+ movl %ecx, %eax
+ mull 12(%ebp)
+ movl %edx, %esi
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 8(%ebp)
+ movl %edx, %ebx
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 4(%ebp)
+ movl %edx, %edi
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull (%ebp)
+ movl 32(%esp), %ecx
+ movl %eax, (%ecx)
+ addl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%ecx)
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%ecx)
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%ecx)
+ adcl $0, %esi
+ movl %esi, 16(%ecx)
+ addl $12, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end50:
+ .size mcl_fp_mulUnitPre4L, .Lfunc_end50-mcl_fp_mulUnitPre4L
+
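+# mcl_fpDbl_mulPre4L: schoolbook multiplication of two 4-word operands into an 8-word product.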
+ .globl mcl_fpDbl_mulPre4L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre4L,@function
+mcl_fpDbl_mulPre4L: # @mcl_fpDbl_mulPre4L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $56, %esp
+ movl 80(%esp), %edi
+ movl (%edi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx
+ movl (%ecx), %esi
+ movl %ecx, %ebp
+ mull %esi
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 4(%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 8(%edi), %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 12(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 4(%ebp), %ecx
+ movl %eax, %ebp
+ mull %ecx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %ecx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ movl %edi, %eax
+ mull %ecx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %esi
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %ebx, %eax
+ mull %esi
+ movl %edx, %ecx
+ movl %eax, %ebx
+ movl %edi, %eax
+ mull %esi
+ movl %edx, %edi
+ addl 12(%esp), %eax # 4-byte Folded Reload
+ adcl %ebx, %edi
+ adcl %ebp, %ecx
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ addl (%esp), %eax # 4-byte Folded Reload
+ movl 76(%esp), %edx
+ movl %eax, 4(%edx)
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl 8(%esp), %edi # 4-byte Folded Reload
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl 8(%eax), %esi
+ movl 20(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl 40(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, 28(%esp) # 4-byte Spill
+ addl %edi, %eax
+ movl 76(%esp), %edx
+ movl %eax, 8(%edx)
+ adcl %ecx, %ebp
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax
+ movl 12(%eax), %esi
+ sbbl %ecx, %ecx
+ movl %esi, %eax
+ movl 80(%esp), %edi
+ mull 12(%edi)
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 8(%edi)
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 4(%edi)
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %esi, %eax
+ movl 80(%esp), %edx
+ mull (%edx)
+ movl %eax, %esi
+ andl $1, %ecx
+ addl 28(%esp), %ebp # 4-byte Folded Reload
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ addl %esi, %ebp
+ movl 76(%esp), %esi
+ movl %ebp, 12(%esi)
+ adcl %edi, %ebx
+ movl %eax, %edi
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl %edx, %ebx
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %esi, %edx
+ movl %ebx, 16(%edx)
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %edi, 20(%edx)
+ movl %ecx, 24(%edx)
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%edx)
+ addl $56, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end51:
+ .size mcl_fpDbl_mulPre4L, .Lfunc_end51-mcl_fpDbl_mulPre4L
+
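+# mcl_fpDbl_sqrPre4L: squaring of a 4-word operand into an 8-word result.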
+ .globl mcl_fpDbl_sqrPre4L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre4L,@function
+mcl_fpDbl_sqrPre4L: # @mcl_fpDbl_sqrPre4L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $40, %esp
+ movl 64(%esp), %ecx
+ movl 12(%ecx), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl (%ecx), %ebx
+ movl 4(%ecx), %esi
+ movl %ebp, %eax
+ mull %esi
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 8(%ecx), %edi
+ movl %edi, %eax
+ mull %esi
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %ebx
+ movl %edx, %ebp
+ movl %eax, %ecx
+ movl %edi, %eax
+ mull %ebx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %esi
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ebx
+ movl %edx, (%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ebx, %eax
+ mull %ebx
+ movl 60(%esp), %ebx
+ movl %eax, (%ebx)
+ addl %esi, %edx
+ movl (%esp), %eax # 4-byte Reload
+ movl %eax, %ebx
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ adcl $0, %ebp
+ addl %esi, %edx
+ movl 60(%esp), %esi
+ movl %edx, 4(%esi)
+ adcl 4(%esp), %ebx # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ sbbl %esi, %esi
+ andl $1, %esi
+ addl %eax, %ebx
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %edi
+ movl %eax, %edi
+ addl 20(%esp), %ebx # 4-byte Folded Reload
+ movl 60(%esp), %eax
+ movl %ebx, 8(%eax)
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ adcl %ebp, %edi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl %esi, %eax
+ sbbl %esi, %esi
+ andl $1, %esi
+ addl 28(%esp), %ecx # 4-byte Folded Reload
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl %edx, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl 64(%esp), %esi
+ movl 12(%esi), %ebp
+ movl %ebp, %eax
+ mull 8(%esi)
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull 4(%esi)
+ movl %esi, %edi
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ebp, %eax
+ mull (%edi)
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ebp, %eax
+ mull %ebp
+ addl %ecx, %edi
+ movl 60(%esp), %ebp
+ movl %edi, 12(%ebp)
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ addl 16(%esp), %esi # 4-byte Folded Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, %edi
+ movl %esi, 16(%edi)
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %ebx, 20(%edi)
+ movl %eax, 24(%edi)
+ adcl %edx, %ecx
+ movl %ecx, 28(%edi)
+ addl $40, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end52:
+ .size mcl_fpDbl_sqrPre4L, .Lfunc_end52-mcl_fpDbl_sqrPre4L
+
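+# mcl_fp_mont4L: 4-word Montgomery multiplication, interleaving multiply and reduce word by word and finishing with a conditional subtraction of the modulus.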
+ .globl mcl_fp_mont4L
+ .align 16, 0x90
+ .type mcl_fp_mont4L,@function
+mcl_fp_mont4L: # @mcl_fp_mont4L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $88, %esp
+ movl 112(%esp), %ecx
+ movl (%ecx), %eax
+ movl %ecx, %ebp
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 116(%esp), %edx
+ movl (%edx), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ mull %edx
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 120(%esp), %edi
+ movl -4(%edi), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ imull %edx, %ebx
+ movl (%edi), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 12(%edi), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 8(%edi), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 4(%edi), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl %ebp, %edi
+ movl 4(%edi), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 12(%edi), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 8(%edi), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %esi
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %ecx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ movl 28(%esp), %esi # 4-byte Reload
+ mull %esi
+ movl %edx, %ecx
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %esi
+ movl %edx, %ebp
+ movl %eax, (%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, %ebx
+ movl %eax, %edi
+ addl 32(%esp), %edi # 4-byte Folded Reload
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 28(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ addl 8(%esp), %esi # 4-byte Folded Reload
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl 20(%esp), %ecx # 4-byte Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 12(%esp), %ebp # 4-byte Reload
+ addl 48(%esp), %ebp # 4-byte Folded Reload
+ adcl %edi, %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ adcl %ebx, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebx
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ andl $1, %ebp
+ movl 116(%esp), %eax
+ movl 4(%eax), %esi
+ movl %esi, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 56(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ addl 24(%esp), %esi # 4-byte Folded Reload
+ movl %ecx, %edx
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ adcl $0, %edi
+ addl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%esp) # 4-byte Spill
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl %ebx, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl %ebp, %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %eax, %ecx
+ imull 80(%esp), %ecx # 4-byte Folded Reload
+ andl $1, %esi
+ movl %ecx, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 68(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ addl 16(%esp), %edi # 4-byte Folded Reload
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 28(%esp), %eax # 4-byte Folded Reload
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 116(%esp), %eax
+ movl 8(%eax), %esi
+ movl %esi, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 56(%esp) # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ addl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, %eax
+ adcl $0, %eax
+ movl 40(%esp), %ecx # 4-byte Reload
+ addl %edi, %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl %ebp, 32(%esp) # 4-byte Folded Spill
+ adcl %ebx, %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %ecx, %esi
+ imull 80(%esp), %esi # 4-byte Folded Reload
+ andl $1, %eax
+ movl %eax, %ecx
+ movl %esi, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 68(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, %edi
+ movl %esi, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ addl %edi, %esi
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 40(%esp), %eax # 4-byte Folded Reload
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 44(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 116(%esp), %eax
+ movl 12(%eax), %ebp
+ movl %ebp, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 56(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ addl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edi, %eax
+ adcl $0, %eax
+ movl 64(%esp), %edi # 4-byte Reload
+ addl %esi, %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl %ebx, %ebp
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ sbbl %ebx, %ebx
+ movl 80(%esp), %esi # 4-byte Reload
+ imull %edi, %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ andl $1, %ebx
+ movl %esi, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl %esi, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ mull 68(%esp) # 4-byte Folded Reload
+ addl 44(%esp), %eax # 4-byte Folded Reload
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ adcl $0, %edi
+ addl 64(%esp), %ecx # 4-byte Folded Reload
+ adcl %ebp, %eax
+ adcl 60(%esp), %edx # 4-byte Folded Reload
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ adcl $0, %ebx
+ movl %eax, %ebp
+ subl 84(%esp), %ebp # 4-byte Folded Reload
+ movl %edx, %ecx
+ sbbl 68(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl %esi, %ecx
+ sbbl 72(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl %edi, %ecx
+ sbbl 76(%esp), %ecx # 4-byte Folded Reload
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB53_2
+# BB#1:
+ movl %ebp, %eax
+.LBB53_2:
+ movl 108(%esp), %ebp
+ movl %eax, (%ebp)
+ testb %bl, %bl
+ jne .LBB53_4
+# BB#3:
+ movl 80(%esp), %edx # 4-byte Reload
+.LBB53_4:
+ movl %edx, 4(%ebp)
+ jne .LBB53_6
+# BB#5:
+ movl 84(%esp), %esi # 4-byte Reload
+.LBB53_6:
+ movl %esi, 8(%ebp)
+ jne .LBB53_8
+# BB#7:
+ movl %ecx, %edi
+.LBB53_8:
+ movl %edi, 12(%ebp)
+ addl $88, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end53:
+ .size mcl_fp_mont4L, .Lfunc_end53-mcl_fp_mont4L
+
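+# mcl_fp_montNF4L: 4-word Montgomery multiplication variant whose final result is selected by the sign of the trial subtraction.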
+ .globl mcl_fp_montNF4L
+ .align 16, 0x90
+ .type mcl_fp_montNF4L,@function
+mcl_fp_montNF4L: # @mcl_fp_montNF4L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $84, %esp
+ movl 108(%esp), %ecx
+ movl (%ecx), %eax
+ movl %ecx, %ebp
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx
+ movl (%ecx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 116(%esp), %esi
+ movl -4(%esi), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ imull %edx, %ecx
+ movl (%esi), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 12(%esi), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 8(%esi), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 4(%esi), %ebx
+ movl %ebx, 64(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ movl 4(%eax), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 12(%eax), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 8(%eax), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ebp, %eax
+ movl 36(%esp), %ebx # 4-byte Reload
+ mull %ebx
+ movl %edx, %ecx
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ebx
+ movl %ebx, %esi
+ movl %edx, %ebx
+ movl %eax, (%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, %esi
+ movl %eax, %ebp
+ addl 76(%esp), %ebp # 4-byte Folded Reload
+ adcl (%esp), %esi # 4-byte Folded Reload
+ adcl 4(%esp), %ebx # 4-byte Folded Reload
+ adcl $0, %ecx
+ addl 40(%esp), %edi # 4-byte Folded Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ adcl $0, %ecx
+ addl 12(%esp), %ebp # 4-byte Folded Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 112(%esp), %eax
+ movl 4(%eax), %edi
+ movl %edi, %eax
+ mull 44(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 48(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edx, %edi
+ addl 20(%esp), %edi # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %ebp, 32(%esp) # 4-byte Folded Spill
+ adcl %esi, %edi
+ adcl %ebx, %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ movl %esi, %ecx
+ imull 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 68(%esp) # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %ecx, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %ecx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ addl %esi, %eax
+ adcl %edi, %ebx
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %edx, %ebx
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 28(%esp) # 4-byte Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 112(%esp), %eax
+ movl 8(%eax), %ecx
+ movl %ecx, %eax
+ mull 44(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 48(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, %edi
+ movl %ecx, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edx, %ecx
+ addl %edi, %ecx
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ adcl $0, %esi
+ movl 32(%esp), %edx # 4-byte Reload
+ addl %ebx, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %edx, %ebx
+ imull 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 68(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ebx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ addl 32(%esp), %eax # 4-byte Folded Reload
+ adcl %ecx, %edi
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl %ebp, %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ adcl $0, %esi
+ addl %edx, %edi
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl 112(%esp), %eax
+ movl 12(%eax), %ecx
+ movl %ecx, %eax
+ mull 44(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 48(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %edx, %ecx
+ addl 32(%esp), %ecx # 4-byte Folded Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ adcl $0, %ebx
+ movl 60(%esp), %edx # 4-byte Reload
+ addl %edi, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ adcl 76(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 40(%esp) # 4-byte Spill
+ adcl %esi, %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, %ebx
+ movl 56(%esp), %edi # 4-byte Reload
+ imull %edx, %edi
+ movl %edi, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %edi, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %edi, %eax
+ movl %edi, %ebp
+ mull 68(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ebp, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ movl 44(%esp), %ebp # 4-byte Reload
+ addl 60(%esp), %ebp # 4-byte Folded Reload
+ adcl %ecx, %eax
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ adcl $0, %ebx
+ addl 56(%esp), %eax # 4-byte Folded Reload
+ adcl %edx, %edi
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ adcl 76(%esp), %ebx # 4-byte Folded Reload
+ movl %eax, %edx
+ subl 80(%esp), %edx # 4-byte Folded Reload
+ movl %edi, %ebp
+ sbbl 64(%esp), %ebp # 4-byte Folded Reload
+ movl %esi, %ecx
+ sbbl 68(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl %ebx, %ecx
+ sbbl 72(%esp), %ecx # 4-byte Folded Reload
+ testl %ecx, %ecx
+ js .LBB54_2
+# BB#1:
+ movl %edx, %eax
+.LBB54_2:
+ movl 104(%esp), %edx
+ movl %eax, (%edx)
+ js .LBB54_4
+# BB#3:
+ movl %ebp, %edi
+.LBB54_4:
+ movl %edi, 4(%edx)
+ js .LBB54_6
+# BB#5:
+ movl 80(%esp), %esi # 4-byte Reload
+.LBB54_6:
+ movl %esi, 8(%edx)
+ js .LBB54_8
+# BB#7:
+ movl %ecx, %ebx
+.LBB54_8:
+ movl %ebx, 12(%edx)
+ addl $84, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end54:
+ .size mcl_fp_montNF4L, .Lfunc_end54-mcl_fp_montNF4L
+
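+# mcl_fp_montRed4L: Montgomery reduction of an 8-word input modulo the 4-word modulus.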
+ .globl mcl_fp_montRed4L
+ .align 16, 0x90
+ .type mcl_fp_montRed4L,@function
+mcl_fp_montRed4L: # @mcl_fp_montRed4L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $64, %esp
+ movl 92(%esp), %eax
+ movl -4(%eax), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl (%eax), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx
+ movl (%ecx), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ imull %edx, %esi
+ movl 12(%eax), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 8(%eax), %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 4(%eax), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ebx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ebp
+ movl %edx, %ebp
+ movl %eax, %ebx
+ movl %esi, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ addl %ebx, %edi
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 40(%esp), %eax # 4-byte Folded Reload
+ adcl 4(%ecx), %edi
+ adcl 8(%ecx), %ebp
+ adcl 12(%ecx), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 16(%ecx), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 28(%ecx), %eax
+ movl 24(%ecx), %edx
+ movl 20(%ecx), %ecx
+ adcl $0, %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %edi, %ebx
+ imull 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, %eax
+ mull 44(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 48(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ addl (%esp), %esi # 4-byte Folded Reload
+ movl %ecx, %ebx
+ adcl 4(%esp), %ebx # 4-byte Folded Reload
+ movl 24(%esp), %ecx # 4-byte Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl %edi, %eax
+ adcl %ebp, %esi
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 8(%esp), %edi # 4-byte Reload
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl $0, 20(%esp) # 4-byte Folded Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %esi, %ebp
+ imull 56(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, %eax
+ mull 44(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull 48(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ addl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl %ebx, %ecx
+ movl %ecx, %ebx
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl %esi, %eax
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 32(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl 56(%esp), %esi # 4-byte Reload
+ imull %ebp, %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 44(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl 56(%esp), %eax # 4-byte Reload
+ mull 48(%esp) # 4-byte Folded Reload
+ addl 16(%esp), %eax # 4-byte Folded Reload
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ adcl $0, %ecx
+ addl %ebp, %esi
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ adcl $0, %edi
+ movl %eax, %ebp
+ subl 60(%esp), %ebp # 4-byte Folded Reload
+ movl %edx, %esi
+ sbbl 48(%esp), %esi # 4-byte Folded Reload
+ sbbl 52(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl %ecx, %ebx
+ sbbl 44(%esp), %ebx # 4-byte Folded Reload
+ sbbl $0, %edi
+ andl $1, %edi
+ jne .LBB55_2
+# BB#1:
+ movl %ebp, %eax
+.LBB55_2:
+ movl 84(%esp), %ebp
+ movl %eax, (%ebp)
+ movl %edi, %eax
+ testb %al, %al
+ jne .LBB55_4
+# BB#3:
+ movl %esi, %edx
+.LBB55_4:
+ movl %edx, 4(%ebp)
+ movl 56(%esp), %eax # 4-byte Reload
+ jne .LBB55_6
+# BB#5:
+ movl 60(%esp), %eax # 4-byte Reload
+.LBB55_6:
+ movl %eax, 8(%ebp)
+ jne .LBB55_8
+# BB#7:
+ movl %ebx, %ecx
+.LBB55_8:
+ movl %ecx, 12(%ebp)
+ addl $64, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end55:
+ .size mcl_fp_montRed4L, .Lfunc_end55-mcl_fp_montRed4L
+
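+# mcl_fp_addPre4L: 4-word addition without modular reduction; the carry-out is returned in %eax.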
+ .globl mcl_fp_addPre4L
+ .align 16, 0x90
+ .type mcl_fp_addPre4L,@function
+mcl_fp_addPre4L: # @mcl_fp_addPre4L
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ movl 20(%esp), %esi
+ addl (%esi), %ecx
+ adcl 4(%esi), %edx
+ movl 12(%eax), %edi
+ movl 8(%eax), %eax
+ adcl 8(%esi), %eax
+ movl 12(%esi), %esi
+ movl 16(%esp), %ebx
+ movl %ecx, (%ebx)
+ movl %edx, 4(%ebx)
+ movl %eax, 8(%ebx)
+ adcl %edi, %esi
+ movl %esi, 12(%ebx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end56:
+ .size mcl_fp_addPre4L, .Lfunc_end56-mcl_fp_addPre4L
+
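+# mcl_fp_subPre4L: 4-word subtraction without modular reduction; the borrow is returned in %eax.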
+ .globl mcl_fp_subPre4L
+ .align 16, 0x90
+ .type mcl_fp_subPre4L,@function
+mcl_fp_subPre4L: # @mcl_fp_subPre4L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %edx
+ movl 4(%ecx), %esi
+ xorl %eax, %eax
+ movl 28(%esp), %edi
+ subl (%edi), %edx
+ sbbl 4(%edi), %esi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edi), %ebx
+ movl 12(%edi), %edi
+ movl 12(%ecx), %ecx
+ movl 20(%esp), %ebp
+ movl %edx, (%ebp)
+ movl %esi, 4(%ebp)
+ movl %ebx, 8(%ebp)
+ sbbl %edi, %ecx
+ movl %ecx, 12(%ebp)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end57:
+ .size mcl_fp_subPre4L, .Lfunc_end57-mcl_fp_subPre4L
+
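+# mcl_fp_shr1_4L: shift a 4-word value right by one bit.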
+ .globl mcl_fp_shr1_4L
+ .align 16, 0x90
+ .type mcl_fp_shr1_4L,@function
+mcl_fp_shr1_4L: # @mcl_fp_shr1_4L
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %eax
+ movl 12(%eax), %ecx
+ movl 8(%eax), %edx
+ movl (%eax), %esi
+ movl 4(%eax), %eax
+ shrdl $1, %eax, %esi
+ movl 12(%esp), %edi
+ movl %esi, (%edi)
+ shrdl $1, %edx, %eax
+ movl %eax, 4(%edi)
+ shrdl $1, %ecx, %edx
+ movl %edx, 8(%edi)
+ shrl %ecx
+ movl %ecx, 12(%edi)
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end58:
+ .size mcl_fp_shr1_4L, .Lfunc_end58-mcl_fp_shr1_4L
+
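+# mcl_fp_add4L: 4-word modular addition; the raw sum is stored, then overwritten by sum - modulus when no borrow occurs.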
+ .globl mcl_fp_add4L
+ .align 16, 0x90
+ .type mcl_fp_add4L,@function
+mcl_fp_add4L: # @mcl_fp_add4L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 28(%esp), %edi
+ movl (%edi), %eax
+ movl 4(%edi), %ecx
+ movl 24(%esp), %esi
+ addl (%esi), %eax
+ adcl 4(%esi), %ecx
+ movl 8(%edi), %edx
+ adcl 8(%esi), %edx
+ movl 12(%esi), %esi
+ adcl 12(%edi), %esi
+ movl 20(%esp), %edi
+ movl %eax, (%edi)
+ movl %ecx, 4(%edi)
+ movl %edx, 8(%edi)
+ movl %esi, 12(%edi)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 32(%esp), %ebp
+ subl (%ebp), %eax
+ sbbl 4(%ebp), %ecx
+ sbbl 8(%ebp), %edx
+ sbbl 12(%ebp), %esi
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB59_2
+# BB#1: # %nocarry
+ movl %eax, (%edi)
+ movl %ecx, 4(%edi)
+ movl %edx, 8(%edi)
+ movl %esi, 12(%edi)
+.LBB59_2: # %carry
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end59:
+ .size mcl_fp_add4L, .Lfunc_end59-mcl_fp_add4L
+
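+# mcl_fp_addNF4L: 4-word modular addition; the sign of the trial subtraction of the modulus selects the stored result.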
+ .globl mcl_fp_addNF4L
+ .align 16, 0x90
+ .type mcl_fp_addNF4L,@function
+mcl_fp_addNF4L: # @mcl_fp_addNF4L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $8, %esp
+ movl 36(%esp), %edx
+ movl (%edx), %esi
+ movl 4(%edx), %ecx
+ movl 32(%esp), %edi
+ addl (%edi), %esi
+ adcl 4(%edi), %ecx
+ movl 12(%edx), %ebp
+ movl 8(%edx), %edx
+ adcl 8(%edi), %edx
+ adcl 12(%edi), %ebp
+ movl 40(%esp), %eax
+ movl %esi, %ebx
+ subl (%eax), %ebx
+ movl %ecx, %edi
+ sbbl 4(%eax), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl 40(%esp), %eax
+ sbbl 8(%eax), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl %ebp, %edi
+ movl 40(%esp), %eax
+ sbbl 12(%eax), %edi
+ testl %edi, %edi
+ js .LBB60_2
+# BB#1:
+ movl %ebx, %esi
+.LBB60_2:
+ movl 28(%esp), %ebx
+ movl %esi, (%ebx)
+ js .LBB60_4
+# BB#3:
+ movl (%esp), %ecx # 4-byte Reload
+.LBB60_4:
+ movl %ecx, 4(%ebx)
+ js .LBB60_6
+# BB#5:
+ movl 4(%esp), %edx # 4-byte Reload
+.LBB60_6:
+ movl %edx, 8(%ebx)
+ js .LBB60_8
+# BB#7:
+ movl %edi, %ebp
+.LBB60_8:
+ movl %ebp, 12(%ebx)
+ addl $8, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end60:
+ .size mcl_fp_addNF4L, .Lfunc_end60-mcl_fp_addNF4L
+
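+# mcl_fp_sub4L: 4-word modular subtraction; the modulus is added back when the subtraction borrows.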
+ .globl mcl_fp_sub4L
+ .align 16, 0x90
+ .type mcl_fp_sub4L,@function
+mcl_fp_sub4L: # @mcl_fp_sub4L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+ xorl %ebx, %ebx
+ movl 28(%esp), %edi
+ subl (%edi), %eax
+ sbbl 4(%edi), %ecx
+ movl 8(%esi), %edx
+ sbbl 8(%edi), %edx
+ movl 12(%esi), %esi
+ sbbl 12(%edi), %esi
+ movl 20(%esp), %edi
+ movl %eax, (%edi)
+ movl %ecx, 4(%edi)
+ movl %edx, 8(%edi)
+ movl %esi, 12(%edi)
+ sbbl $0, %ebx
+ testb $1, %bl
+ je .LBB61_2
+# BB#1: # %carry
+ movl 32(%esp), %ebx
+ addl (%ebx), %eax
+ movl 8(%ebx), %ebp
+ adcl 4(%ebx), %ecx
+ movl 12(%ebx), %ebx
+ movl %eax, (%edi)
+ movl %ecx, 4(%edi)
+ adcl %edx, %ebp
+ movl %ebp, 8(%edi)
+ adcl %esi, %ebx
+ movl %ebx, 12(%edi)
+.LBB61_2: # %nocarry
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end61:
+ .size mcl_fp_sub4L, .Lfunc_end61-mcl_fp_sub4L
+
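+# mcl_fp_subNF4L: 4-word modular subtraction; the modulus, masked by the sign of the difference, is added back without a branch.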
+ .globl mcl_fp_subNF4L
+ .align 16, 0x90
+ .type mcl_fp_subNF4L,@function
+mcl_fp_subNF4L: # @mcl_fp_subNF4L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $8, %esp
+ movl 32(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %ecx
+ movl 36(%esp), %esi
+ subl (%esi), %edx
+ movl %edx, (%esp) # 4-byte Spill
+ sbbl 4(%esi), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 12(%eax), %edi
+ movl 8(%eax), %edx
+ sbbl 8(%esi), %edx
+ sbbl 12(%esi), %edi
+ movl %edi, %esi
+ sarl $31, %esi
+ movl 40(%esp), %eax
+ movl 12(%eax), %ebp
+ andl %esi, %ebp
+ movl 8(%eax), %ecx
+ andl %esi, %ecx
+ movl 40(%esp), %eax
+ movl 4(%eax), %eax
+ andl %esi, %eax
+ movl 40(%esp), %ebx
+ andl (%ebx), %esi
+ addl (%esp), %esi # 4-byte Folded Reload
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl 28(%esp), %ebx
+ movl %esi, (%ebx)
+ adcl %edx, %ecx
+ movl %eax, 4(%ebx)
+ movl %ecx, 8(%ebx)
+ adcl %edi, %ebp
+ movl %ebp, 12(%ebx)
+ addl $8, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end62:
+ .size mcl_fp_subNF4L, .Lfunc_end62-mcl_fp_subNF4L
+
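+# mcl_fpDbl_add4L: 8-word addition; the upper four words are conditionally reduced modulo the 4-word modulus.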
+ .globl mcl_fpDbl_add4L
+ .align 16, 0x90
+ .type mcl_fpDbl_add4L,@function
+mcl_fpDbl_add4L: # @mcl_fpDbl_add4L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $12, %esp
+ movl 40(%esp), %eax
+ movl (%eax), %edi
+ movl 4(%eax), %edx
+ movl 36(%esp), %esi
+ addl (%esi), %edi
+ adcl 4(%esi), %edx
+ movl 8(%eax), %ebx
+ adcl 8(%esi), %ebx
+ movl 12(%esi), %ebp
+ movl 32(%esp), %ecx
+ movl %edi, (%ecx)
+ movl 16(%esi), %edi
+ adcl 12(%eax), %ebp
+ adcl 16(%eax), %edi
+ movl %edx, 4(%ecx)
+ movl 28(%eax), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %ebx, 8(%ecx)
+ movl 24(%eax), %ebx
+ movl 20(%eax), %eax
+ movl %ebp, 12(%ecx)
+ movl 20(%esi), %edx
+ adcl %eax, %edx
+ movl 28(%esi), %ecx
+ movl 24(%esi), %ebp
+ adcl %ebx, %ebp
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 44(%esp), %eax
+ movl %edi, %esi
+ subl (%eax), %esi
+ movl %esi, (%esp) # 4-byte Spill
+ movl %edx, %esi
+ sbbl 4(%eax), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ sbbl 8(%eax), %esi
+ sbbl 12(%eax), %ecx
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB63_2
+# BB#1:
+ movl %esi, %ebp
+.LBB63_2:
+ testb %bl, %bl
+ jne .LBB63_4
+# BB#3:
+ movl (%esp), %edi # 4-byte Reload
+.LBB63_4:
+ movl 32(%esp), %eax
+ movl %edi, 16(%eax)
+ jne .LBB63_6
+# BB#5:
+ movl 4(%esp), %edx # 4-byte Reload
+.LBB63_6:
+ movl %edx, 20(%eax)
+ movl %ebp, 24(%eax)
+ movl 8(%esp), %edx # 4-byte Reload
+ jne .LBB63_8
+# BB#7:
+ movl %ecx, %edx
+.LBB63_8:
+ movl %edx, 28(%eax)
+ addl $12, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end63:
+ .size mcl_fpDbl_add4L, .Lfunc_end63-mcl_fpDbl_add4L
+
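+# mcl_fpDbl_sub4L: 8-word subtraction; the modulus is added to the upper four words when the subtraction borrows.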
+ .globl mcl_fpDbl_sub4L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub4L,@function
+mcl_fpDbl_sub4L: # @mcl_fpDbl_sub4L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ pushl %eax
+ movl 28(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 32(%esp), %ebp
+ subl (%ebp), %edx
+ sbbl 4(%ebp), %esi
+ movl 8(%eax), %ebx
+ sbbl 8(%ebp), %ebx
+ movl 24(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 12(%eax), %edx
+ sbbl 12(%ebp), %edx
+ movl %esi, 4(%ecx)
+ movl 16(%eax), %edi
+ sbbl 16(%ebp), %edi
+ movl %ebx, 8(%ecx)
+ movl 20(%ebp), %esi
+ movl %edx, 12(%ecx)
+ movl 20(%eax), %ebx
+ sbbl %esi, %ebx
+ movl 24(%ebp), %edx
+ movl 24(%eax), %esi
+ sbbl %edx, %esi
+ movl 28(%ebp), %edx
+ movl 28(%eax), %eax
+ sbbl %edx, %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl $0, %edx
+ sbbl $0, %edx
+ andl $1, %edx
+ movl 36(%esp), %ecx
+ movl (%ecx), %eax
+ jne .LBB64_1
+# BB#2:
+ xorl %ebp, %ebp
+ jmp .LBB64_3
+.LBB64_1:
+ movl 4(%ecx), %ebp
+.LBB64_3:
+ testb %dl, %dl
+ jne .LBB64_5
+# BB#4:
+ movl $0, %eax
+.LBB64_5:
+ jne .LBB64_6
+# BB#7:
+ movl $0, %edx
+ jmp .LBB64_8
+.LBB64_6:
+ movl 12(%ecx), %edx
+.LBB64_8:
+ jne .LBB64_9
+# BB#10:
+ xorl %ecx, %ecx
+ jmp .LBB64_11
+.LBB64_9:
+ movl 8(%ecx), %ecx
+.LBB64_11:
+ addl %edi, %eax
+ adcl %ebx, %ebp
+ movl 24(%esp), %edi
+ movl %eax, 16(%edi)
+ adcl %esi, %ecx
+ movl %ebp, 20(%edi)
+ movl %ecx, 24(%edi)
+ adcl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%edi)
+ addl $4, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end64:
+ .size mcl_fpDbl_sub4L, .Lfunc_end64-mcl_fpDbl_sub4L
+
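+# mcl_fp_mulUnitPre5L: multiply a 5-word value by a single 32-bit word, producing a 6-word result.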
+ .globl mcl_fp_mulUnitPre5L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre5L,@function
+mcl_fp_mulUnitPre5L: # @mcl_fp_mulUnitPre5L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $20, %esp
+ movl 48(%esp), %esi
+ movl 44(%esp), %ecx
+ movl %esi, %eax
+ mull 16(%ecx)
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 12(%ecx)
+ movl %edx, %ebx
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 8(%ecx)
+ movl %edx, %edi
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 4(%ecx)
+ movl %edx, %ebp
+ movl %eax, (%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull (%ecx)
+ movl 40(%esp), %ecx
+ movl %eax, (%ecx)
+ addl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%ecx)
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%ecx)
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%ecx)
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%ecx)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 20(%ecx)
+ addl $20, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end65:
+ .size mcl_fp_mulUnitPre5L, .Lfunc_end65-mcl_fp_mulUnitPre5L
+
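+# mcl_fpDbl_mulPre5L: schoolbook multiplication of two 5-word operands into a 10-word product.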
+ .globl mcl_fpDbl_mulPre5L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre5L,@function
+mcl_fpDbl_mulPre5L: # @mcl_fpDbl_mulPre5L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $64, %esp
+ movl 88(%esp), %esi
+ movl (%esi), %ebp
+ movl 92(%esp), %eax
+ movl (%eax), %ebx
+ movl %eax, %edi
+ movl %ebp, %eax
+ mull %ebx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 84(%esp), %edx
+ movl %eax, (%edx)
+ movl %esi, %eax
+ movl 4(%eax), %esi
+ movl 8(%eax), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 12(%eax), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 16(%eax), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 4(%edi), %edi
+ movl %esi, %eax
+ mull %edi
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %edi
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ebx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl (%esp), %esi # 4-byte Reload
+ movl %esi, %eax
+ mull %edi
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 8(%esp), %ecx # 4-byte Reload
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ mull %ebx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ebx
+ movl %edx, %esi
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebx
+ movl %eax, %edi
+ movl %edx, %ebx
+ addl 36(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 24(%esp), %ebp # 4-byte Folded Reload
+ movl 84(%esp), %eax
+ movl %ebp, 4(%eax)
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ adcl 16(%esp), %ebx # 4-byte Folded Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ sbbl %ebp, %ebp
+ andl $1, %ebp
+ addl 52(%esp), %edi # 4-byte Folded Reload
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ movl 88(%esp), %eax
+ movl %eax, %esi
+ movl 16(%esi), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl 8(%eax), %ecx
+ movl %edx, %eax
+ mull %ecx
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %esi, %edx
+ movl 8(%edx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %esi, %eax
+ movl (%eax), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 4(%eax), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ mull %ecx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ecx
+ addl %edi, %eax
+ movl 84(%esp), %ecx
+ movl %eax, 8(%ecx)
+ movl 4(%esp), %ecx # 4-byte Reload
+ adcl %ebx, %ecx
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl %ebp, %eax
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ addl %edx, %ecx
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl 12(%eax), %ebp
+ movl 44(%esp), %eax # 4-byte Reload
+ mull %ebp
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ mull %ebp
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl 36(%esp), %eax # 4-byte Reload
+ mull %ebp
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl 24(%esp), %eax # 4-byte Reload
+ mull %ebp
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl 28(%esp), %eax # 4-byte Reload
+ mull %ebp
+ movl %edx, 24(%esp) # 4-byte Spill
+ addl %ecx, %eax
+ movl 84(%esp), %edx
+ movl %eax, 12(%edx)
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 60(%esp) # 4-byte Folded Spill
+ movl 92(%esp), %eax
+ movl 16(%eax), %ebp
+ sbbl %ecx, %ecx
+ movl %ebp, %eax
+ movl 88(%esp), %esi
+ mull 16(%esi)
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 12(%esi)
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 8(%esi)
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 4(%esi)
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull (%esi)
+ movl %eax, %ebp
+ andl $1, %ecx
+ addl 24(%esp), %edi # 4-byte Folded Reload
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl 4(%esp), %esi # 4-byte Reload
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ addl %ebp, %edi
+ movl 84(%esp), %ebp
+ movl %edi, 16(%ebp)
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %eax, %edi
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl %edx, %ebx
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %ebp, %edx
+ movl %ebx, 20(%edx)
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 24(%edx)
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %edi, 28(%edx)
+ movl %ecx, 32(%edx)
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%edx)
+ addl $64, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end66:
+ .size mcl_fpDbl_mulPre5L, .Lfunc_end66-mcl_fpDbl_mulPre5L
+
+ .globl mcl_fpDbl_sqrPre5L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre5L,@function
+mcl_fpDbl_sqrPre5L: # @mcl_fpDbl_sqrPre5L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $60, %esp
+ movl 84(%esp), %ebx
+ movl 16(%ebx), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl (%ebx), %edi
+ movl 4(%ebx), %ecx
+ mull %ecx
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 12(%ebx), %esi
+ movl %esi, %eax
+ mull %ecx
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 8(%ebx), %ebx
+ movl %ebx, %eax
+ mull %ecx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %edi
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %edi
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %ecx, %eax
+ mull %ecx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, %esi
+ movl %esi, (%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl %edi, %eax
+ mull %edi
+ movl 80(%esp), %edi
+ movl %eax, (%edi)
+ addl %ecx, %edx
+ adcl %esi, %ebp
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %ecx, %edx
+ movl 80(%esp), %ecx
+ movl %edx, 4(%ecx)
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %esi, %ecx
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ sbbl %esi, %esi
+ andl $1, %esi
+ addl (%esp), %ebp # 4-byte Folded Reload
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl 44(%esp), %eax # 4-byte Reload
+ mull %ebx
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx
+ movl 12(%ecx), %edi
+ movl %edi, %eax
+ mull %ebx
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl (%eax), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 4(%eax), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ mull %ebx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl %ebx, %eax
+ mull %ebx
+ movl %eax, 44(%esp) # 4-byte Spill
+ addl %ebp, %ecx
+ movl 80(%esp), %eax
+ movl %ecx, 8(%eax)
+ movl 32(%esp), %ebx # 4-byte Reload
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl %esi, %eax
+ sbbl %esi, %esi
+ andl $1, %esi
+ addl 8(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 32(%esp) # 4-byte Spill
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 44(%esp) # 4-byte Spill
+ adcl %edx, %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl 8(%eax), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 16(%eax), %ebx
+ movl %ebx, %eax
+ mull %edi
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl 28(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl 24(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl %edi, %eax
+ mull %edi
+ movl %eax, %edi
+ movl %edx, 24(%esp) # 4-byte Spill
+ addl 32(%esp), %ecx # 4-byte Folded Reload
+ movl 80(%esp), %eax
+ movl %ecx, 12(%eax)
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ sbbl %ecx, %ecx
+ movl %ebx, %eax
+ movl 84(%esp), %edx
+ mull 12(%edx)
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ movl 84(%esp), %edx
+ mull 4(%edx)
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ movl 84(%esp), %edx
+ mull (%edx)
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %ebx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ mull %ebx
+ movl %eax, 20(%esp) # 4-byte Spill
+ andl $1, %ecx
+ addl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ addl (%esp), %ebp # 4-byte Folded Reload
+ movl 80(%esp), %ebx
+ movl %ebp, 16(%ebx)
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %eax, %ebp
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl 12(%esp), %esi # 4-byte Folded Reload
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 20(%ebx)
+ adcl %edx, %ebp
+ movl %edi, 24(%ebx)
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ebp, 28(%ebx)
+ movl %ecx, 32(%ebx)
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%ebx)
+ addl $60, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end67:
+ .size mcl_fpDbl_sqrPre5L, .Lfunc_end67-mcl_fpDbl_sqrPre5L
+
+ .globl mcl_fp_mont5L
+ .align 16, 0x90
+ .type mcl_fp_mont5L,@function
+mcl_fp_mont5L: # @mcl_fp_mont5L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $112, %esp
+ movl 136(%esp), %ebx
+ movl (%ebx), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 140(%esp), %ecx
+ movl (%ecx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 144(%esp), %esi
+ movl -4(%esi), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ imull %edx, %ecx
+ movl (%esi), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 16(%esi), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 4(%esi), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 4(%ebx), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 16(%ebx), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 12(%ebx), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 8(%ebx), %ebx
+ movl %ebx, 76(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %esi
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ movl 32(%esp), %ecx # 4-byte Reload
+ mull %ecx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %ecx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %ecx
+ movl %edx, %ebp
+ movl %eax, (%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, %ebx
+ movl %eax, %edi
+ addl 40(%esp), %edi # 4-byte Folded Reload
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 60(%esp) # 4-byte Folded Spill
+ adcl $0, 64(%esp) # 4-byte Folded Spill
+ addl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 16(%esp), %ebp # 4-byte Reload
+ addl 36(%esp), %ebp # 4-byte Folded Reload
+ adcl %edi, 40(%esp) # 4-byte Folded Spill
+ adcl %ebx, %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 140(%esp), %eax
+ movl 4(%eax), %edi
+ movl %edi, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 68(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ebp
+ movl %edi, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %eax, %ebx
+ addl %ebp, %edx
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 36(%esp) # 4-byte Spill
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %ebx, %ebp
+ imull 96(%esp), %ebp # 4-byte Folded Reload
+ andl $1, %eax
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ addl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %ecx, %edx
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 36(%esp), %eax # 4-byte Folded Reload
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 44(%esp) # 4-byte Spill
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl $0, %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 140(%esp), %eax
+ movl 8(%eax), %ebx
+ movl %ebx, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 68(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %eax, %esi
+ addl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl 64(%esp), %ebx # 4-byte Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %ebp, %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl %edi, 48(%esp) # 4-byte Folded Spill
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %esi, %ebp
+ imull 96(%esp), %ebp # 4-byte Folded Reload
+ andl $1, %eax
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ addl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %edi, %edx
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ addl 40(%esp), %eax # 4-byte Folded Reload
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 32(%esp) # 4-byte Spill
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl $0, %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 140(%esp), %eax
+ movl 12(%eax), %edi
+ movl %edi, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 68(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %eax, %ebx
+ addl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %ebp, %ebx
+ movl %ebx, 40(%esp) # 4-byte Spill
+ adcl %esi, 48(%esp) # 4-byte Folded Spill
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %ebx, %ebp
+ imull 96(%esp), %ebp # 4-byte Folded Reload
+ andl $1, %eax
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ addl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %ecx, %edx
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 40(%esp), %eax # 4-byte Folded Reload
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 60(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl $0, %ebx
+ movl %ebx, 64(%esp) # 4-byte Spill
+ movl 140(%esp), %eax
+ movl 16(%eax), %ebx
+ movl %ebx, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 68(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ addl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl 76(%esp), %ebx # 4-byte Reload
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %esi, %eax
+ adcl $0, %eax
+ movl 84(%esp), %esi # 4-byte Reload
+ addl %ebp, %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ adcl %edi, 80(%esp) # 4-byte Folded Spill
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 76(%esp) # 4-byte Spill
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ sbbl %ebx, %ebx
+ movl 96(%esp), %ecx # 4-byte Reload
+ imull %esi, %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ andl $1, %ebx
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ mull 88(%esp) # 4-byte Folded Reload
+ addl 48(%esp), %eax # 4-byte Folded Reload
+ adcl %ecx, %edx
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ adcl 60(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %edi
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 84(%esp), %ecx # 4-byte Folded Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ adcl 76(%esp), %esi # 4-byte Folded Reload
+ adcl 72(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 96(%esp) # 4-byte Spill
+ adcl 64(%esp), %edi # 4-byte Folded Reload
+ adcl $0, %ebx
+ movl %eax, %ecx
+ subl 100(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl %edx, %ecx
+ sbbl 88(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl %esi, %ecx
+ sbbl 104(%esp), %ecx # 4-byte Folded Reload
+ sbbl 108(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 108(%esp) # 4-byte Spill
+ movl %edi, %ebp
+ sbbl 92(%esp), %ebp # 4-byte Folded Reload
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB68_2
+# BB#1:
+ movl 88(%esp), %edx # 4-byte Reload
+.LBB68_2:
+ testb %bl, %bl
+ jne .LBB68_4
+# BB#3:
+ movl 100(%esp), %eax # 4-byte Reload
+.LBB68_4:
+ movl 132(%esp), %ebx
+ movl %eax, (%ebx)
+ movl %edx, 4(%ebx)
+ jne .LBB68_6
+# BB#5:
+ movl %ecx, %esi
+.LBB68_6:
+ movl %esi, 8(%ebx)
+ movl 96(%esp), %eax # 4-byte Reload
+ jne .LBB68_8
+# BB#7:
+ movl 108(%esp), %eax # 4-byte Reload
+.LBB68_8:
+ movl %eax, 12(%ebx)
+ jne .LBB68_10
+# BB#9:
+ movl %ebp, %edi
+.LBB68_10:
+ movl %edi, 16(%ebx)
+ addl $112, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end68:
+ .size mcl_fp_mont5L, .Lfunc_end68-mcl_fp_mont5L
+
+ .globl mcl_fp_montNF5L
+ .align 16, 0x90
+ .type mcl_fp_montNF5L,@function
+mcl_fp_montNF5L: # @mcl_fp_montNF5L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $104, %esp
+ movl 128(%esp), %ebx
+ movl (%ebx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx
+ movl (%ecx), %ecx
+ mull %ecx
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 136(%esp), %esi
+ movl -4(%esi), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, %edi
+ imull %edx, %edi
+ movl (%esi), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 16(%esi), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 4(%esi), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 4(%ebx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 16(%ebx), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 12(%ebx), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 8(%ebx), %ebx
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %ecx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ecx
+ movl %edx, %edi
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %ecx
+ movl %edx, %ebp
+ movl %eax, %ebx
+ movl 76(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, %ecx
+ movl %eax, %esi
+ addl 68(%esp), %esi # 4-byte Folded Reload
+ adcl %ebx, %ecx
+ adcl (%esp), %ebp # 4-byte Folded Reload
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 8(%esp), %edx # 4-byte Reload
+ addl 48(%esp), %edx # 4-byte Folded Reload
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ adcl $0, %eax
+ addl 16(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 132(%esp), %eax
+ movl 4(%eax), %ebx
+ movl %ebx, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 56(%esp) # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ movl %eax, 28(%esp) # 4-byte Spill
+ addl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 36(%esp), %ebx # 4-byte Reload
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %esi, 28(%esp) # 4-byte Folded Spill
+ adcl %ecx, 32(%esp) # 4-byte Folded Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl %ebp, %ecx
+ adcl %edi, %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 28(%esp), %esi # 4-byte Reload
+ movl %esi, %edi
+ imull 84(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %edi, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %edi, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ addl %esi, %eax
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ adcl %ecx, %ebp
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %edx, %ebx
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 40(%esp) # 4-byte Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 132(%esp), %eax
+ movl 8(%eax), %ecx
+ movl %ecx, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 56(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ addl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 32(%esp), %edx # 4-byte Reload
+ addl %ebx, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %edx, %ebx
+ imull 84(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl %ebx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ addl 32(%esp), %eax # 4-byte Folded Reload
+ adcl %ebp, %ecx
+ movl 40(%esp), %ebx # 4-byte Reload
+ adcl %edi, %ebx
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl %esi, %edi
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %edx, %ecx
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 40(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 44(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 132(%esp), %eax
+ movl 12(%eax), %edi
+ movl %edi, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 56(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ addl 20(%esp), %ebp # 4-byte Folded Reload
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ addl %ecx, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ imull 84(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ addl 20(%esp), %eax # 4-byte Folded Reload
+ movl %edi, %edx
+ adcl %ebp, %edx
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl %ebx, %edi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl %esi, %ecx
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 44(%esp) # 4-byte Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 72(%esp) # 4-byte Spill
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 132(%esp), %eax
+ movl 16(%eax), %ecx
+ movl %ecx, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 56(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ addl 76(%esp), %edx # 4-byte Folded Reload
+ adcl 60(%esp), %ebp # 4-byte Folded Reload
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ adcl $0, %edi
+ addl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 60(%esp) # 4-byte Spill
+ adcl 72(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ adcl 68(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 68(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl 84(%esp), %ecx # 4-byte Reload
+ imull %eax, %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %ecx, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl 84(%esp), %eax # 4-byte Reload
+ mull 88(%esp) # 4-byte Folded Reload
+ addl 40(%esp), %ebx # 4-byte Folded Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ adcl 68(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %edi
+ addl 52(%esp), %eax # 4-byte Folded Reload
+ adcl %edx, %ecx
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 84(%esp) # 4-byte Spill
+ adcl 72(%esp), %ebp # 4-byte Folded Reload
+ adcl 76(%esp), %edi # 4-byte Folded Reload
+ movl %eax, %ebx
+ subl 100(%esp), %ebx # 4-byte Folded Reload
+ movl %ecx, %edx
+ sbbl 88(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ sbbl 92(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl %ebp, %edx
+ sbbl 96(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl %edi, %edx
+ movl %edi, %esi
+ sbbl 80(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %edx
+ sarl $31, %edx
+ testl %edx, %edx
+ js .LBB69_2
+# BB#1:
+ movl %ebx, %eax
+.LBB69_2:
+ movl 124(%esp), %edx
+ movl %eax, (%edx)
+ js .LBB69_4
+# BB#3:
+ movl 88(%esp), %ecx # 4-byte Reload
+.LBB69_4:
+ movl %ecx, 4(%edx)
+ movl 84(%esp), %eax # 4-byte Reload
+ js .LBB69_6
+# BB#5:
+ movl 92(%esp), %eax # 4-byte Reload
+.LBB69_6:
+ movl %eax, 8(%edx)
+ js .LBB69_8
+# BB#7:
+ movl 100(%esp), %ebp # 4-byte Reload
+.LBB69_8:
+ movl %ebp, 12(%edx)
+ js .LBB69_10
+# BB#9:
+ movl %edi, %esi
+.LBB69_10:
+ movl %esi, 16(%edx)
+ addl $104, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end69:
+ .size mcl_fp_montNF5L, .Lfunc_end69-mcl_fp_montNF5L
+
+ .globl mcl_fp_montRed5L
+ .align 16, 0x90
+ .type mcl_fp_montRed5L,@function
+mcl_fp_montRed5L: # @mcl_fp_montRed5L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $88, %esp
+ movl 116(%esp), %eax
+ movl -4(%eax), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl (%eax), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 112(%esp), %esi
+ movl (%esi), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ imull %edx, %ecx
+ movl 16(%eax), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 12(%eax), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 8(%eax), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 4(%eax), %ebx
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebp
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebx
+ movl %edx, %ebp
+ movl %eax, %edi
+ movl %ecx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ addl %edi, %ebx
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 80(%esp), %eax # 4-byte Folded Reload
+ adcl 4(%esi), %ebx
+ adcl 8(%esi), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ adcl 12(%esi), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ adcl 16(%esi), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 20(%esi), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 36(%esi), %eax
+ movl 32(%esi), %ecx
+ movl 28(%esi), %edx
+ movl 24(%esi), %esi
+ adcl $0, %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ebx, %ecx
+ imull 76(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 68(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ecx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %eax, %edi
+ addl %esi, %edx
+ movl %edx, %ebp
+ movl 24(%esp), %ecx # 4-byte Reload
+ adcl (%esp), %ecx # 4-byte Folded Reload
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl %ebx, %edi
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 16(%esp) # 4-byte Spill
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%esp) # 4-byte Spill
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 12(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl $0, 20(%esp) # 4-byte Folded Spill
+ adcl $0, 52(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl %ebp, %esi
+ imull 76(%esp), %esi # 4-byte Folded Reload
+ movl %esi, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 68(%esp) # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, %ebx
+ movl %esi, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ addl %ebx, %ebp
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 16(%esp), %eax # 4-byte Folded Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 44(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl $0, 52(%esp) # 4-byte Folded Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl %ebp, %edi
+ imull 76(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 68(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %edx, %ebx
+ addl 8(%esp), %ebx # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl %ebp, 16(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ imull %ebx, %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 68(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ mull 60(%esp) # 4-byte Folded Reload
+ addl 24(%esp), %eax # 4-byte Folded Reload
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ adcl $0, %ebp
+ addl %ebx, %esi
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 76(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 80(%esp), %ebx # 4-byte Reload
+ adcl $0, %ebx
+ movl %eax, %esi
+ subl 84(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl %edx, %esi
+ sbbl 60(%esp), %esi # 4-byte Folded Reload
+ sbbl 64(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ sbbl 68(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ movl %edi, %ecx
+ sbbl 72(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 84(%esp) # 4-byte Spill
+ sbbl $0, %ebx
+ andl $1, %ebx
+ movl %ebx, 80(%esp) # 4-byte Spill
+ jne .LBB70_2
+# BB#1:
+ movl %esi, %edx
+.LBB70_2:
+ movl 80(%esp), %ebx # 4-byte Reload
+ testb %bl, %bl
+ jne .LBB70_4
+# BB#3:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB70_4:
+ movl 108(%esp), %ecx
+ movl %eax, (%ecx)
+ movl %edx, 4(%ecx)
+ movl 52(%esp), %eax # 4-byte Reload
+ jne .LBB70_6
+# BB#5:
+ movl %ebp, %eax
+.LBB70_6:
+ movl %eax, 8(%ecx)
+ movl 76(%esp), %eax # 4-byte Reload
+ jne .LBB70_8
+# BB#7:
+ movl 68(%esp), %eax # 4-byte Reload
+.LBB70_8:
+ movl %eax, 12(%ecx)
+ jne .LBB70_10
+# BB#9:
+ movl 84(%esp), %edi # 4-byte Reload
+.LBB70_10:
+ movl %edi, 16(%ecx)
+ addl $88, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end70:
+ .size mcl_fp_montRed5L, .Lfunc_end70-mcl_fp_montRed5L
+
+ .globl mcl_fp_addPre5L
+ .align 16, 0x90
+ .type mcl_fp_addPre5L,@function
+mcl_fp_addPre5L: # @mcl_fp_addPre5L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 28(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ movl 24(%esp), %esi
+ addl (%esi), %ecx
+ adcl 4(%esi), %edx
+ movl 8(%eax), %edi
+ adcl 8(%esi), %edi
+ movl 12(%esi), %ebx
+ movl 16(%esi), %esi
+ adcl 12(%eax), %ebx
+ movl 16(%eax), %eax
+ movl 20(%esp), %ebp
+ movl %ecx, (%ebp)
+ movl %edx, 4(%ebp)
+ movl %edi, 8(%ebp)
+ movl %ebx, 12(%ebp)
+ adcl %esi, %eax
+ movl %eax, 16(%ebp)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end71:
+ .size mcl_fp_addPre5L, .Lfunc_end71-mcl_fp_addPre5L
+
+ .globl mcl_fp_subPre5L
+ .align 16, 0x90
+ .type mcl_fp_subPre5L,@function
+mcl_fp_subPre5L: # @mcl_fp_subPre5L
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %ecx
+ movl (%ecx), %edx
+ xorl %eax, %eax
+ movl 20(%esp), %esi
+ subl (%esi), %edx
+ movl 12(%esp), %edi
+ movl %edx, (%edi)
+ movl 4(%ecx), %edx
+ sbbl 4(%esi), %edx
+ movl %edx, 4(%edi)
+ movl 8(%ecx), %edx
+ sbbl 8(%esi), %edx
+ movl %edx, 8(%edi)
+ movl 12(%ecx), %edx
+ sbbl 12(%esi), %edx
+ movl %edx, 12(%edi)
+ movl 16(%esi), %edx
+ movl 16(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 16(%edi)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end72:
+ .size mcl_fp_subPre5L, .Lfunc_end72-mcl_fp_subPre5L
+
+ .globl mcl_fp_shr1_5L
+ .align 16, 0x90
+ .type mcl_fp_shr1_5L,@function
+mcl_fp_shr1_5L: # @mcl_fp_shr1_5L
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 20(%esp), %eax
+ movl 16(%eax), %ecx
+ movl 12(%eax), %edx
+ movl 8(%eax), %esi
+ movl (%eax), %edi
+ movl 4(%eax), %eax
+ shrdl $1, %eax, %edi
+ movl 16(%esp), %ebx
+ movl %edi, (%ebx)
+ shrdl $1, %esi, %eax
+ movl %eax, 4(%ebx)
+ shrdl $1, %edx, %esi
+ movl %esi, 8(%ebx)
+ shrdl $1, %ecx, %edx
+ movl %edx, 12(%ebx)
+ shrl %ecx
+ movl %ecx, 16(%ebx)
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end73:
+ .size mcl_fp_shr1_5L, .Lfunc_end73-mcl_fp_shr1_5L
+
+ .globl mcl_fp_add5L
+ .align 16, 0x90
+ .type mcl_fp_add5L,@function
+mcl_fp_add5L: # @mcl_fp_add5L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 28(%esp), %ebx
+ movl (%ebx), %eax
+ movl 4(%ebx), %ecx
+ movl 24(%esp), %edi
+ addl (%edi), %eax
+ adcl 4(%edi), %ecx
+ movl 8(%ebx), %edx
+ adcl 8(%edi), %edx
+ movl 12(%edi), %esi
+ movl 16(%edi), %edi
+ adcl 12(%ebx), %esi
+ adcl 16(%ebx), %edi
+ movl 20(%esp), %ebx
+ movl %eax, (%ebx)
+ movl %ecx, 4(%ebx)
+ movl %edx, 8(%ebx)
+ movl %esi, 12(%ebx)
+ movl %edi, 16(%ebx)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 32(%esp), %ebp
+ subl (%ebp), %eax
+ sbbl 4(%ebp), %ecx
+ sbbl 8(%ebp), %edx
+ sbbl 12(%ebp), %esi
+ sbbl 16(%ebp), %edi
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB74_2
+# BB#1: # %nocarry
+ movl 20(%esp), %ebx
+ movl %eax, (%ebx)
+ movl %ecx, 4(%ebx)
+ movl %edx, 8(%ebx)
+ movl %esi, 12(%ebx)
+ movl %edi, 16(%ebx)
+.LBB74_2: # %carry
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end74:
+ .size mcl_fp_add5L, .Lfunc_end74-mcl_fp_add5L
+
+ .globl mcl_fp_addNF5L
+ .align 16, 0x90
+ .type mcl_fp_addNF5L,@function
+mcl_fp_addNF5L: # @mcl_fp_addNF5L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $20, %esp
+ movl 48(%esp), %esi
+ movl (%esi), %ebx
+ movl 4(%esi), %eax
+ movl 44(%esp), %edi
+ addl (%edi), %ebx
+ adcl 4(%edi), %eax
+ movl 16(%esi), %ecx
+ movl 12(%esi), %edx
+ movl 8(%esi), %ebp
+ adcl 8(%edi), %ebp
+ adcl 12(%edi), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 16(%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 52(%esp), %edi
+ movl %ebx, %esi
+ subl (%edi), %esi
+ movl %esi, (%esp) # 4-byte Spill
+ movl %eax, %esi
+ sbbl 4(%edi), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ sbbl 8(%edi), %esi
+ sbbl 12(%edi), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ sbbl 16(%edi), %edx
+ movl %edx, %edi
+ sarl $31, %edi
+ testl %edi, %edi
+ js .LBB75_2
+# BB#1:
+ movl (%esp), %ebx # 4-byte Reload
+.LBB75_2:
+ movl 40(%esp), %edi
+ movl %ebx, (%edi)
+ js .LBB75_4
+# BB#3:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB75_4:
+ movl %eax, 4(%edi)
+ movl 12(%esp), %ecx # 4-byte Reload
+ js .LBB75_6
+# BB#5:
+ movl %esi, %ebp
+.LBB75_6:
+ movl %ebp, 8(%edi)
+ movl 16(%esp), %eax # 4-byte Reload
+ js .LBB75_8
+# BB#7:
+ movl 8(%esp), %ecx # 4-byte Reload
+.LBB75_8:
+ movl %ecx, 12(%edi)
+ js .LBB75_10
+# BB#9:
+ movl %edx, %eax
+.LBB75_10:
+ movl %eax, 16(%edi)
+ addl $20, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end75:
+ .size mcl_fp_addNF5L, .Lfunc_end75-mcl_fp_addNF5L
+
+ .globl mcl_fp_sub5L
+ .align 16, 0x90
+ .type mcl_fp_sub5L,@function
+mcl_fp_sub5L: # @mcl_fp_sub5L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %edi
+ movl (%edi), %eax
+ movl 4(%edi), %ecx
+ xorl %ebx, %ebx
+ movl 28(%esp), %ebp
+ subl (%ebp), %eax
+ sbbl 4(%ebp), %ecx
+ movl 8(%edi), %edx
+ sbbl 8(%ebp), %edx
+ movl 12(%edi), %esi
+ sbbl 12(%ebp), %esi
+ movl 16(%edi), %edi
+ sbbl 16(%ebp), %edi
+ movl 20(%esp), %ebp
+ movl %eax, (%ebp)
+ movl %ecx, 4(%ebp)
+ movl %edx, 8(%ebp)
+ movl %esi, 12(%ebp)
+ movl %edi, 16(%ebp)
+ sbbl $0, %ebx
+ testb $1, %bl
+ je .LBB76_2
+# BB#1: # %carry
+ movl 32(%esp), %ebx
+ addl (%ebx), %eax
+ movl %eax, (%ebp)
+ adcl 4(%ebx), %ecx
+ movl %ecx, 4(%ebp)
+ adcl 8(%ebx), %edx
+ movl %edx, 8(%ebp)
+ movl 12(%ebx), %eax
+ adcl %esi, %eax
+ movl %eax, 12(%ebp)
+ movl 16(%ebx), %eax
+ adcl %edi, %eax
+ movl %eax, 16(%ebp)
+.LBB76_2: # %nocarry
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end76:
+ .size mcl_fp_sub5L, .Lfunc_end76-mcl_fp_sub5L
+
+ .globl mcl_fp_subNF5L
+ .align 16, 0x90
+ .type mcl_fp_subNF5L,@function
+mcl_fp_subNF5L: # @mcl_fp_subNF5L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $16, %esp
+ movl 40(%esp), %edi
+ movl (%edi), %ecx
+ movl 4(%edi), %eax
+ movl 44(%esp), %ebx
+ subl (%ebx), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ sbbl 4(%ebx), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 16(%edi), %esi
+ movl 12(%edi), %eax
+ movl 8(%edi), %ecx
+ sbbl 8(%ebx), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ sbbl 12(%ebx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ sbbl 16(%ebx), %esi
+ movl %esi, %ebx
+ sarl $31, %ebx
+ movl %ebx, %ebp
+ shldl $1, %esi, %ebp
+ movl 48(%esp), %edi
+ movl 4(%edi), %ecx
+ andl %ebp, %ecx
+ andl (%edi), %ebp
+ movl 16(%edi), %edx
+ andl %ebx, %edx
+ movl 12(%edi), %eax
+ andl %ebx, %eax
+ roll %ebx
+ andl 8(%edi), %ebx
+ addl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl 36(%esp), %edi
+ movl %ebp, (%edi)
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ movl %ecx, 4(%edi)
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %ebx, 8(%edi)
+ movl %eax, 12(%edi)
+ adcl %esi, %edx
+ movl %edx, 16(%edi)
+ addl $16, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end77:
+ .size mcl_fp_subNF5L, .Lfunc_end77-mcl_fp_subNF5L
+
+ .globl mcl_fpDbl_add5L
+ .align 16, 0x90
+ .type mcl_fpDbl_add5L,@function
+mcl_fpDbl_add5L: # @mcl_fpDbl_add5L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $28, %esp
+ movl 56(%esp), %edx
+ movl 52(%esp), %ecx
+ movl 12(%ecx), %ebx
+ movl 16(%ecx), %ebp
+ movl 8(%edx), %esi
+ movl (%edx), %edi
+ addl (%ecx), %edi
+ movl 48(%esp), %eax
+ movl %edi, (%eax)
+ movl 4(%edx), %edi
+ adcl 4(%ecx), %edi
+ adcl 8(%ecx), %esi
+ adcl 12(%edx), %ebx
+ adcl 16(%edx), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl %edi, 4(%eax)
+ movl 28(%edx), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl %esi, 8(%eax)
+ movl 20(%edx), %esi
+ movl %ebx, 12(%eax)
+ movl 20(%ecx), %ebp
+ adcl %esi, %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 24(%edx), %esi
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%eax)
+ movl 24(%ecx), %ebx
+ adcl %esi, %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 28(%ecx), %edi
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 32(%edx), %eax
+ movl 32(%ecx), %esi
+ adcl %eax, %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 36(%edx), %eax
+ movl 36(%ecx), %edx
+ adcl %eax, %edx
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %ebp, %ecx
+ movl 60(%esp), %ebp
+ subl (%ebp), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ sbbl 4(%ebp), %ebx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ sbbl 8(%ebp), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl %esi, %ebx
+ movl %edx, %esi
+ sbbl 12(%ebp), %ebx
+ sbbl 16(%ebp), %edx
+ sbbl $0, %eax
+ andl $1, %eax
+ jne .LBB78_2
+# BB#1:
+ movl %edx, %esi
+.LBB78_2:
+ testb %al, %al
+ movl 12(%esp), %ebp # 4-byte Reload
+ jne .LBB78_4
+# BB#3:
+ movl (%esp), %ebp # 4-byte Reload
+.LBB78_4:
+ movl 48(%esp), %eax
+ movl %ebp, 20(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl 20(%esp), %edx # 4-byte Reload
+ movl 16(%esp), %edi # 4-byte Reload
+ jne .LBB78_6
+# BB#5:
+ movl 4(%esp), %edi # 4-byte Reload
+.LBB78_6:
+ movl %edi, 24(%eax)
+ jne .LBB78_8
+# BB#7:
+ movl 8(%esp), %edx # 4-byte Reload
+.LBB78_8:
+ movl %edx, 28(%eax)
+ jne .LBB78_10
+# BB#9:
+ movl %ebx, %ecx
+.LBB78_10:
+ movl %ecx, 32(%eax)
+ movl %esi, 36(%eax)
+ addl $28, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end78:
+ .size mcl_fpDbl_add5L, .Lfunc_end78-mcl_fpDbl_add5L
+
+ .globl mcl_fpDbl_sub5L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub5L,@function
+mcl_fpDbl_sub5L: # @mcl_fpDbl_sub5L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $16, %esp
+ movl 40(%esp), %eax
+ movl (%eax), %esi
+ movl 4(%eax), %edi
+ movl 44(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%eax), %ebx
+ sbbl 8(%edx), %ebx
+ movl 36(%esp), %ecx
+ movl %esi, (%ecx)
+ movl 12(%eax), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ecx)
+ movl 16(%eax), %edi
+ sbbl 16(%edx), %edi
+ movl %ebx, 8(%ecx)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%ecx)
+ movl 20(%eax), %esi
+ sbbl %ebx, %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl 24(%edx), %esi
+ movl %edi, 16(%ecx)
+ movl 24(%eax), %ebp
+ sbbl %esi, %ebp
+ movl 28(%edx), %esi
+ movl 28(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, (%esp) # 4-byte Spill
+ movl 32(%edx), %esi
+ movl 32(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 36(%edx), %edx
+ movl 36(%eax), %eax
+ sbbl %edx, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl $0, %edx
+ sbbl $0, %edx
+ andl $1, %edx
+ movl 48(%esp), %ebx
+ jne .LBB79_1
+# BB#2:
+ xorl %eax, %eax
+ jmp .LBB79_3
+.LBB79_1:
+ movl 16(%ebx), %eax
+.LBB79_3:
+ testb %dl, %dl
+ jne .LBB79_4
+# BB#5:
+ movl $0, %edx
+ movl $0, %esi
+ jmp .LBB79_6
+.LBB79_4:
+ movl (%ebx), %esi
+ movl 4(%ebx), %edx
+.LBB79_6:
+ jne .LBB79_7
+# BB#8:
+ movl $0, %edi
+ jmp .LBB79_9
+.LBB79_7:
+ movl 12(%ebx), %edi
+.LBB79_9:
+ jne .LBB79_10
+# BB#11:
+ xorl %ebx, %ebx
+ jmp .LBB79_12
+.LBB79_10:
+ movl 8(%ebx), %ebx
+.LBB79_12:
+ addl 4(%esp), %esi # 4-byte Folded Reload
+ adcl %ebp, %edx
+ movl %esi, 20(%ecx)
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ movl %edx, 24(%ecx)
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 28(%ecx)
+ movl %edi, 32(%ecx)
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%ecx)
+ addl $16, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end79:
+ .size mcl_fpDbl_sub5L, .Lfunc_end79-mcl_fpDbl_sub5L
+
+ .globl mcl_fp_mulUnitPre6L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre6L,@function
+mcl_fp_mulUnitPre6L: # @mcl_fp_mulUnitPre6L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $28, %esp
+ movl 56(%esp), %ebx
+ movl 52(%esp), %edi
+ movl %ebx, %eax
+ mull 20(%edi)
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 16(%edi)
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 12(%edi)
+ movl %edx, %esi
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 8(%edi)
+ movl %edx, %ebp
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 4(%edi)
+ movl %edx, %ecx
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull (%edi)
+ movl 48(%esp), %edi
+ movl %eax, (%edi)
+ addl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%edi)
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%edi)
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%edi)
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%edi)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%edi)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 24(%edi)
+ addl $28, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end80:
+ .size mcl_fp_mulUnitPre6L, .Lfunc_end80-mcl_fp_mulUnitPre6L
+
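+# mcl_fpDbl_mulPre6L: schoolbook 6x6-limb multiplication producing the
+# full 12-word product with no modular reduction; one partial product
+# is formed per word of the second operand and accumulated through the
+# spill slots.  Roles inferred from the code: result at 104(%esp),
+# operands at 108(%esp) and 112(%esp).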
+ .globl mcl_fpDbl_mulPre6L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre6L,@function
+mcl_fpDbl_mulPre6L: # @mcl_fpDbl_mulPre6L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $84, %esp
+ movl 108(%esp), %esi
+ movl (%esi), %ebp
+ movl 112(%esp), %eax
+ movl (%eax), %edi
+ movl %ebp, %eax
+ mull %edi
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 104(%esp), %edx
+ movl %eax, (%edx)
+ movl 4(%esi), %ebx
+ movl 8(%esi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 20(%esi), %ecx
+ movl 112(%esp), %eax
+ movl 4(%eax), %esi
+ movl %ebx, %eax
+ mull %esi
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %esi
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %edi
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %ecx, %eax
+ mull %esi
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 12(%esp), %ebx # 4-byte Reload
+ movl %ebx, %eax
+ mull %esi
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, %ecx
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, %esi
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %edi
+ movl %eax, %ebx
+ movl %edx, %edi
+ addl 40(%esp), %ebp # 4-byte Folded Reload
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ adcl 80(%esp), %edi # 4-byte Folded Reload
+ adcl 76(%esp), %esi # 4-byte Folded Reload
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 32(%esp), %ebp # 4-byte Folded Reload
+ movl 104(%esp), %eax
+ movl %ebp, 4(%eax)
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %ecx, %eax
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %edx, %ecx
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ sbbl %edx, %edx
+ andl $1, %edx
+ addl 60(%esp), %ebx # 4-byte Folded Reload
+ adcl 72(%esp), %edi # 4-byte Folded Reload
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 108(%esp), %ebp
+ movl 20(%ebp), %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 112(%esp), %eax
+ movl 8(%eax), %ecx
+ movl %edx, %eax
+ mull %ecx
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 16(%ebp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 12(%ebp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 8(%ebp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl (%ebp), %esi
+ movl 4(%ebp), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ mull %ecx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %esi, %eax
+ mull %ecx
+ movl %edx, 24(%esp) # 4-byte Spill
+ addl %ebx, %eax
+ movl 104(%esp), %ecx
+ movl %eax, 8(%ecx)
+ adcl %edi, %ebp
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 72(%esp) # 4-byte Folded Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 76(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %ebx # 4-byte Reload
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl %eax, 80(%esp) # 4-byte Folded Spill
+ sbbl %edi, %edi
+ movl 112(%esp), %eax
+ movl 12(%eax), %ecx
+ movl 20(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 8(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ecx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %eax, (%esp) # 4-byte Spill
+ movl %edx, 32(%esp) # 4-byte Spill
+ andl $1, %edi
+ addl 24(%esp), %ebp # 4-byte Folded Reload
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %ebx, %ecx
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ addl 4(%esp), %ebp # 4-byte Folded Reload
+ movl 104(%esp), %ebx
+ movl %ebp, 12(%ebx)
+ movl %esi, %ebx
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ movl %edx, %esi
+ adcl (%esp), %esi # 4-byte Folded Reload
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ sbbl %edx, %edx
+ andl $1, %edx
+ addl 16(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 72(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 76(%esp) # 4-byte Spill
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 44(%esp) # 4-byte Spill
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 108(%esp), %eax
+ movl %eax, %ecx
+ movl 20(%ecx), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 16(%ecx), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 12(%ecx), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl 8(%ecx), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl (%ecx), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 4(%ecx), %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ movl 112(%esp), %esi
+ movl 16(%esi), %ecx
+ mull %ecx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl 56(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %ecx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ebx, %eax
+ mull %ecx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull %ecx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 24(%esp) # 4-byte Spill
+ addl 72(%esp), %eax # 4-byte Folded Reload
+ movl 104(%esp), %ecx
+ movl %eax, 16(%ecx)
+ adcl 76(%esp), %ebp # 4-byte Folded Reload
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ adcl 80(%esp), %edi # 4-byte Folded Reload
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 64(%esp) # 4-byte Folded Spill
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 8(%esp) # 4-byte Spill
+ movl 112(%esp), %eax
+ movl 20(%eax), %ecx
+ sbbl %esi, %esi
+ movl 20(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 4(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %edx, 56(%esp) # 4-byte Spill
+ andl $1, %esi
+ addl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl 8(%esp), %ecx # 4-byte Reload
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ addl 4(%esp), %ebp # 4-byte Folded Reload
+ movl 104(%esp), %edx
+ movl %ebp, 20(%edx)
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %edx
+ movl %ecx, %ebp
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl 60(%esp), %ebx # 4-byte Folded Reload
+ adcl 72(%esp), %edi # 4-byte Folded Reload
+ movl 104(%esp), %ecx
+ movl %ebx, 24(%ecx)
+ movl %edx, %ebx
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %edi, 28(%ecx)
+ movl %ebp, %edx
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl %ebx, 32(%ecx)
+ adcl 76(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 36(%ecx)
+ movl %esi, 40(%ecx)
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%ecx)
+ addl $84, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end81:
+ .size mcl_fpDbl_mulPre6L, .Lfunc_end81-mcl_fpDbl_mulPre6L
+
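+# mcl_fpDbl_sqrPre6L: squaring counterpart of mcl_fpDbl_mulPre6L;
+# computes the 12-word square of a 6-limb operand, again without
+# reduction.  Roles inferred from the code: result at 120(%esp),
+# operand at 124(%esp).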
+ .globl mcl_fpDbl_sqrPre6L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre6L,@function
+mcl_fpDbl_sqrPre6L: # @mcl_fpDbl_sqrPre6L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $100, %esp
+ movl 124(%esp), %esi
+ movl 20(%esi), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl (%esi), %ebp
+ movl 4(%esi), %ebx
+ mull %ebx
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 16(%esi), %ecx
+ movl %ecx, %eax
+ mull %ebx
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 12(%esi), %edi
+ movl %edi, %eax
+ mull %ebx
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 124(%esp), %eax
+ movl 8(%eax), %esi
+ movl %esi, %eax
+ mull %ebx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ mull %ebp
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebp
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %ebp
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ebp
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl %ebx, %eax
+ mull %ebx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %ebp
+ movl %edx, %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ebp, %eax
+ mull %ebp
+ movl 120(%esp), %ebx
+ movl %eax, (%ebx)
+ addl %edi, %edx
+ adcl %esi, %ecx
+ movl %ecx, %ebx
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ addl %edi, %edx
+ movl 120(%esp), %edi
+ movl %edx, 4(%edi)
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %esi, %edx
+ adcl 60(%esp), %edx # 4-byte Folded Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %edi
+ adcl 72(%esp), %ebp # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %ebx, %esi
+ addl 32(%esp), %esi # 4-byte Folded Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 92(%esp) # 4-byte Spill
+ adcl 76(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 96(%esp) # 4-byte Spill
+ adcl 80(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, %edi
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 124(%esp), %ebx
+ movl 20(%ebx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 8(%ebx), %ebp
+ mull %ebp
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 16(%ebx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ mull %ebp
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 12(%ebx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ mull %ebp
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl (%ebx), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 4(%ebx), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ mull %ebp
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebp
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull %ebp
+ movl %eax, %ebp
+ addl %esi, %ebx
+ movl 120(%esp), %eax
+ movl %ebx, 8(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 88(%esp), %ecx # 4-byte Folded Reload
+ adcl 92(%esp), %ebp # 4-byte Folded Reload
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl 80(%esp), %ebx # 4-byte Reload
+ adcl %edi, %ebx
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ sbbl %edi, %edi
+ andl $1, %edi
+ addl 32(%esp), %ecx # 4-byte Folded Reload
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 96(%esp) # 4-byte Spill
+ adcl %edx, %eax
+ movl %eax, %ebp
+ adcl 76(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 80(%esp) # 4-byte Spill
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 84(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ movl 36(%esp), %edi # 4-byte Reload
+ mull %edi
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl 56(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %edi, %eax
+ mull %edi
+ movl %eax, %edi
+ movl %edx, 36(%esp) # 4-byte Spill
+ addl %ecx, %esi
+ movl 120(%esp), %eax
+ movl %esi, 12(%eax)
+ adcl 96(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 72(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 96(%esp) # 4-byte Spill
+ adcl 80(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 88(%esp) # 4-byte Folded Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl %eax, 92(%esp) # 4-byte Folded Spill
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx
+ movl (%ecx), %ebx
+ movl 4(%ecx), %edi
+ movl 20(%ecx), %ebp
+ movl %edi, %eax
+ mull %ebp
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %ebp
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 16(%ecx), %esi
+ movl %edi, %eax
+ mull %esi
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %esi
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ addl %eax, 72(%esp) # 4-byte Folded Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 96(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl %eax, 88(%esp) # 4-byte Folded Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl %eax, 92(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 124(%esp), %eax
+ movl 12(%eax), %edi
+ movl 8(%eax), %ebx
+ movl %edi, %eax
+ mull %ebp
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %esi
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %ebp
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %esi
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ebp, %eax
+ mull %esi
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %ebp
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %esi
+ movl %eax, %ebx
+ movl %edx, (%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ addl 12(%esp), %eax # 4-byte Folded Reload
+ movl 120(%esp), %ebp
+ movl %eax, 16(%ebp)
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ adcl %ecx, %edi
+ movl 4(%esp), %ecx # 4-byte Reload
+ adcl 88(%esp), %ecx # 4-byte Folded Reload
+ adcl 92(%esp), %ebx # 4-byte Folded Reload
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 76(%esp), %edx # 4-byte Folded Reload
+ sbbl %esi, %esi
+ andl $1, %esi
+ addl 28(%esp), %eax # 4-byte Folded Reload
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ adcl 16(%esp), %ebx # 4-byte Folded Reload
+ adcl (%esp), %edx # 4-byte Folded Reload
+ adcl 80(%esp), %esi # 4-byte Folded Reload
+ addl 48(%esp), %eax # 4-byte Folded Reload
+ movl 120(%esp), %ebp
+ movl %eax, 20(%ebp)
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %edx, %eax
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ sbbl %edx, %edx
+ andl $1, %edx
+ addl 64(%esp), %edi # 4-byte Folded Reload
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ movl 120(%esp), %ebp
+ movl %edi, 24(%ebp)
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ecx, 28(%ebp)
+ movl %eax, %edi
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 32(%ebp)
+ adcl 80(%esp), %esi # 4-byte Folded Reload
+ movl %edi, 36(%ebp)
+ movl %esi, 40(%ebp)
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%ebp)
+ addl $100, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end82:
+ .size mcl_fpDbl_sqrPre6L, .Lfunc_end82-mcl_fpDbl_sqrPre6L
+
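+# mcl_fp_mont6L: word-serial Montgomery multiplication for 6-limb
+# operands.  Each round multiplies by one word of y, folds in a
+# reduction term derived from the constant loaded at -4 off the modulus
+# pointer (conventionally -p^{-1} mod 2^32 in mcl), and the result gets
+# a final conditional subtraction of p.  Roles inferred from the code:
+# z/x/y/p at 152/156/160/164(%esp).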
+ .globl mcl_fp_mont6L
+ .align 16, 0x90
+ .type mcl_fp_mont6L,@function
+mcl_fp_mont6L: # @mcl_fp_mont6L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $132, %esp
+ movl 156(%esp), %edi
+ movl (%edi), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 160(%esp), %ecx
+ movl (%ecx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 164(%esp), %edx
+ movl -4(%edx), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ imull %ecx, %ebp
+ movl (%edx), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 20(%edx), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 16(%edx), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 12(%edx), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 8(%edx), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 4(%edx), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 4(%edi), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl %edi, %eax
+ movl 20(%eax), %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 16(%eax), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 12(%eax), %ebx
+ movl %ebx, 80(%esp) # 4-byte Spill
+ movl 8(%eax), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %esi
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %edi, %eax
+ movl 40(%esp), %ebp # 4-byte Reload
+ mull %ebp
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebp
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %ebp
+ movl %edx, %ebx
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ mull %ebp
+ movl %ebp, %ecx
+ movl %edx, %ebp
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %eax, %edi
+ addl 64(%esp), %edi # 4-byte Folded Reload
+ adcl 4(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%esp) # 4-byte Spill
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 72(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ addl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl 20(%esp), %ebx # 4-byte Reload
+ addl 68(%esp), %ebx # 4-byte Folded Reload
+ adcl %edi, 40(%esp) # 4-byte Folded Spill
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 44(%esp) # 4-byte Spill
+ adcl 8(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 76(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 60(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl 4(%eax), %ecx
+ movl %ecx, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ecx, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, %esi
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ addl %esi, %edx
+ movl %edx, %esi
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl %edi, %ebx
+ movl %ebx, %edi
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 64(%esp), %ebx # 4-byte Reload
+ addl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 44(%esp) # 4-byte Spill
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 40(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %ebx, %esi
+ imull 112(%esp), %esi # 4-byte Folded Reload
+ andl $1, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, %ecx
+ movl %esi, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %eax, %edi
+ movl %edx, %ebx
+ addl %ecx, %ebx
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 64(%esp), %edi # 4-byte Folded Reload
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 76(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 160(%esp), %eax
+ movl 8(%eax), %ecx
+ movl %ecx, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ addl 44(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ adcl $0, %edi
+ addl %ebx, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl %ebp, 44(%esp) # 4-byte Folded Spill
+ movl 48(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 40(%esp) # 4-byte Folded Spill
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 76(%esp) # 4-byte Spill
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 52(%esp) # 4-byte Spill
+ sbbl %ecx, %ecx
+ movl %eax, %esi
+ imull 112(%esp), %esi # 4-byte Folded Reload
+ andl $1, %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, %edi
+ movl %esi, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %eax, %esi
+ movl %edx, %ebx
+ addl %edi, %ebx
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ movl %ecx, %eax
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 32(%esp), %esi # 4-byte Folded Reload
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 72(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 56(%esp) # 4-byte Spill
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 160(%esp), %eax
+ movl 12(%eax), %edi
+ movl %edi, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ addl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %esi
+ addl %ebx, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl %ebp, 52(%esp) # 4-byte Folded Spill
+ movl 44(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 48(%esp) # 4-byte Folded Spill
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 72(%esp) # 4-byte Spill
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 44(%esp) # 4-byte Spill
+ sbbl %ecx, %ecx
+ movl %eax, %esi
+ imull 112(%esp), %esi # 4-byte Folded Reload
+ andl $1, %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, %edi
+ movl %esi, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %eax, %esi
+ movl %edx, %ebx
+ addl %edi, %ebx
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ movl %ecx, %eax
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 32(%esp), %esi # 4-byte Folded Reload
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 72(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 56(%esp) # 4-byte Spill
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 160(%esp), %eax
+ movl 16(%eax), %edi
+ movl %edi, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ addl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %esi
+ addl %ebx, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl %ebp, 48(%esp) # 4-byte Folded Spill
+ movl 52(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 44(%esp) # 4-byte Folded Spill
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 72(%esp) # 4-byte Spill
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 36(%esp) # 4-byte Spill
+ sbbl %ecx, %ecx
+ movl %eax, %esi
+ imull 112(%esp), %esi # 4-byte Folded Reload
+ andl $1, %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, %edi
+ movl %esi, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %eax, %ecx
+ movl %edx, %ebx
+ addl %edi, %ebx
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 32(%esp), %ecx # 4-byte Folded Reload
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 72(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 56(%esp) # 4-byte Spill
+ adcl 68(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 60(%esp) # 4-byte Spill
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 160(%esp), %eax
+ movl 20(%eax), %edi
+ movl %edi, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ addl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 92(%esp) # 4-byte Spill
+ adcl 80(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ movl 88(%esp), %edi # 4-byte Reload
+ adcl 72(%esp), %edi # 4-byte Folded Reload
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 76(%esp), %edx # 4-byte Folded Reload
+ movl %esi, %eax
+ adcl $0, %eax
+ movl 100(%esp), %esi # 4-byte Reload
+ addl %ebx, %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ adcl %ebp, 92(%esp) # 4-byte Folded Spill
+ movl 52(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 80(%esp) # 4-byte Folded Spill
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 88(%esp) # 4-byte Spill
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl 112(%esp), %ecx # 4-byte Reload
+ imull %esi, %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ andl $1, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ecx, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ mull 104(%esp) # 4-byte Folded Reload
+ addl 56(%esp), %eax # 4-byte Folded Reload
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ adcl 68(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %edi
+ addl 100(%esp), %esi # 4-byte Folded Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ adcl 80(%esp), %edx # 4-byte Folded Reload
+ adcl 96(%esp), %ecx # 4-byte Folded Reload
+ adcl 88(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 112(%esp) # 4-byte Spill
+ adcl 84(%esp), %ebp # 4-byte Folded Reload
+ adcl 76(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl %eax, %esi
+ subl 108(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl %edx, %esi
+ sbbl 104(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ sbbl 116(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl %ebx, %esi
+ movl %edi, %ebx
+ sbbl 120(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 120(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ movl %ebp, %edi
+ sbbl 124(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 124(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ movl %ebp, %esi
+ sbbl 128(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 128(%esp) # 4-byte Spill
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB83_2
+# BB#1:
+ movl 104(%esp), %edx # 4-byte Reload
+.LBB83_2:
+ testb %bl, %bl
+ jne .LBB83_4
+# BB#3:
+ movl 108(%esp), %eax # 4-byte Reload
+.LBB83_4:
+ movl 152(%esp), %ebx
+ movl %eax, (%ebx)
+ movl %edx, 4(%ebx)
+ jne .LBB83_6
+# BB#5:
+ movl 116(%esp), %ecx # 4-byte Reload
+.LBB83_6:
+ movl %ecx, 8(%ebx)
+ movl 112(%esp), %eax # 4-byte Reload
+ jne .LBB83_8
+# BB#7:
+ movl 120(%esp), %eax # 4-byte Reload
+.LBB83_8:
+ movl %eax, 12(%ebx)
+ jne .LBB83_10
+# BB#9:
+ movl 124(%esp), %edi # 4-byte Reload
+.LBB83_10:
+ movl %edi, 16(%ebx)
+ jne .LBB83_12
+# BB#11:
+ movl 128(%esp), %ebp # 4-byte Reload
+.LBB83_12:
+ movl %ebp, 20(%ebx)
+ addl $132, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end83:
+ .size mcl_fp_mont6L, .Lfunc_end83-mcl_fp_mont6L
+
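+# mcl_fp_montNF6L: alternative Montgomery multiplication routine.  Per
+# mcl's naming this appears to be the not-full-bit ("NF") variant, used
+# when the modulus leaves the top bit of its highest limb clear; note
+# the absence of the per-round sbbl/andl $1 carry tracking seen in
+# mcl_fp_mont6L above.  This reading is an assumption from the code and
+# mcl's conventions.  z/x/y/p at 152/156/160/164(%esp).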
+ .globl mcl_fp_montNF6L
+ .align 16, 0x90
+ .type mcl_fp_montNF6L,@function
+mcl_fp_montNF6L: # @mcl_fp_montNF6L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $132, %esp
+ movl 156(%esp), %ebx
+ movl (%ebx), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 160(%esp), %ecx
+ movl (%ecx), %edi
+ mull %edi
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 164(%esp), %esi
+ movl -4(%esi), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ imull %edx, %ecx
+ movl (%esi), %edx
+ movl %edx, 128(%esp) # 4-byte Spill
+ movl 20(%esi), %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 4(%esi), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 4(%ebx), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 20(%ebx), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 16(%ebx), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 12(%ebx), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 8(%ebx), %ebx
+ movl %ebx, 88(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %edi
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %edi
+ movl %edx, %ecx
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %edi
+ movl %edx, %ebx
+ movl %eax, (%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, %ebp
+ movl %eax, %esi
+ addl 64(%esp), %esi # 4-byte Folded Reload
+ adcl (%esp), %ebp # 4-byte Folded Reload
+ adcl 4(%esp), %ebx # 4-byte Folded Reload
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 12(%esp), %edx # 4-byte Folded Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 16(%esp), %edi # 4-byte Reload
+ addl 72(%esp), %edi # 4-byte Folded Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %eax
+ addl 24(%esp), %esi # 4-byte Folded Reload
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl 4(%eax), %edi
+ movl %edi, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ addl 44(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %esi, 40(%esp) # 4-byte Folded Spill
+ adcl %ebp, 44(%esp) # 4-byte Folded Spill
+ adcl %ebx, 48(%esp) # 4-byte Folded Spill
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 56(%esp) # 4-byte Spill
+ adcl 76(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, %ebp
+ imull 96(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ebp, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ebp, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ addl %ecx, %eax
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ movl %esi, %ebp
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %edx, %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 44(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 60(%esp) # 4-byte Spill
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 68(%esp) # 4-byte Spill
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl 8(%eax), %ecx
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ecx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, %esi
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ addl %esi, %ebp
+ adcl %edi, %ebx
+ movl %ebx, %edi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 24(%esp), %ebx # 4-byte Reload
+ addl 40(%esp), %ebx # 4-byte Folded Reload
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 44(%esp) # 4-byte Spill
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 68(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ebx, %edi
+ movl %ebx, %ecx
+ imull 96(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %edi, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %edi, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ addl %ecx, %eax
+ adcl %ebp, %esi
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl 12(%eax), %ecx
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ecx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, %ebx
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ addl %ebx, %ebp
+ adcl %esi, %edi
+ movl %edi, %esi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl 52(%esp), %ebx # 4-byte Reload
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 24(%esp), %edi # 4-byte Reload
+ addl 40(%esp), %edi # 4-byte Folded Reload
+ adcl 60(%esp), %ebp # 4-byte Folded Reload
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 44(%esp) # 4-byte Spill
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 52(%esp) # 4-byte Spill
+ adcl 76(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %edi, %esi
+ movl %edi, %ecx
+ imull 96(%esp), %esi # 4-byte Folded Reload
+ movl %esi, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %esi, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %esi, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ addl %ecx, %eax
+ adcl %ebp, %ebx
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 40(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 44(%esp) # 4-byte Spill
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 64(%esp) # 4-byte Spill
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl 16(%eax), %ecx
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ecx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ebp
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edx, %ebx
+ addl %ebp, %ebx
+ adcl %edi, %esi
+ movl %esi, %edi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 24(%esp), %esi # 4-byte Reload
+ addl 40(%esp), %esi # 4-byte Folded Reload
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 40(%esp) # 4-byte Spill
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %esi, %ebx
+ movl %esi, %ecx
+ imull 96(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ebx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %ebx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ addl %ecx, %eax
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl 64(%esp), %ebx # 4-byte Reload
+ adcl %edi, %ebx
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %edx, %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 68(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 56(%esp) # 4-byte Spill
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl 20(%eax), %ecx
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %edi
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %eax, %ebp
+ movl %edx, %ebx
+ addl %edi, %ebx
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, %edi
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 80(%esp), %ecx # 4-byte Folded Reload
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ addl 52(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 80(%esp) # 4-byte Spill
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ adcl 72(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 96(%esp), %ebx # 4-byte Reload
+ imull %ebp, %ebx
+ movl %ebx, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %ebx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ebx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ebx, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl %ebx, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ addl 52(%esp), %edi # 4-byte Folded Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ adcl 80(%esp), %ecx # 4-byte Folded Reload
+ adcl 88(%esp), %esi # 4-byte Folded Reload
+ adcl 84(%esp), %ebp # 4-byte Folded Reload
+ movl 104(%esp), %ebx # 4-byte Reload
+ adcl 100(%esp), %ebx # 4-byte Folded Reload
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ addl 72(%esp), %eax # 4-byte Folded Reload
+ adcl %edx, %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl 68(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 100(%esp) # 4-byte Spill
+ adcl 76(%esp), %ebp # 4-byte Folded Reload
+ adcl 92(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 104(%esp) # 4-byte Spill
+ adcl 96(%esp), %edi # 4-byte Folded Reload
+ movl %eax, %edx
+ subl 128(%esp), %edx # 4-byte Folded Reload
+ sbbl 112(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebx
+ sbbl 116(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl %ebp, %ecx
+ sbbl 120(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ sbbl 124(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 128(%esp) # 4-byte Spill
+ movl %edi, %ecx
+ movl %edi, %esi
+ movl %ecx, %edi
+ sbbl 108(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %ecx
+ sarl $31, %ecx
+ testl %ecx, %ecx
+ js .LBB84_2
+# BB#1:
+ movl %edx, %eax
+.LBB84_2:
+ movl 152(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 88(%esp), %eax # 4-byte Reload
+ js .LBB84_4
+# BB#3:
+ movl %ebx, %eax
+.LBB84_4:
+ movl %eax, 4(%ecx)
+ movl %ecx, %ebx
+ movl %esi, %eax
+ movl 104(%esp), %ecx # 4-byte Reload
+ movl 100(%esp), %edx # 4-byte Reload
+ js .LBB84_6
+# BB#5:
+ movl 116(%esp), %edx # 4-byte Reload
+.LBB84_6:
+ movl %edx, 8(%ebx)
+ movl %ebx, %edx
+ js .LBB84_8
+# BB#7:
+ movl 120(%esp), %ebp # 4-byte Reload
+.LBB84_8:
+ movl %ebp, 12(%edx)
+ js .LBB84_10
+# BB#9:
+ movl 128(%esp), %ecx # 4-byte Reload
+.LBB84_10:
+ movl %ecx, 16(%edx)
+ js .LBB84_12
+# BB#11:
+ movl %edi, %eax
+.LBB84_12:
+ movl %eax, 20(%edx)
+ addl $132, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end84:
+ .size mcl_fp_montNF6L, .Lfunc_end84-mcl_fp_montNF6L
+
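+# mcl_fp_montRed6L(z, xy, p): appears to be the Montgomery reduction step for a
+# 6-limb modulus: each round multiplies by -p^{-1} mod 2^32 (loaded from -4(p)),
+# folds in that multiple of p, and a final conditional subtract keeps the result < p.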
+ .globl mcl_fp_montRed6L
+ .align 16, 0x90
+ .type mcl_fp_montRed6L,@function
+mcl_fp_montRed6L: # @mcl_fp_montRed6L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $104, %esp
+ movl 132(%esp), %eax
+ movl -4(%eax), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl (%eax), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 128(%esp), %ebp
+ movl (%ebp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ imull %edx, %ecx
+ movl 20(%eax), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 16(%eax), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 12(%eax), %ebx
+ movl %ebx, 88(%esp) # 4-byte Spill
+ movl 8(%eax), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 4(%eax), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %esi
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, %esi
+ movl %eax, %edi
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %eax, %ebx
+ addl %edi, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, %eax
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 64(%esp), %edi # 4-byte Folded Reload
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 68(%esp), %esi # 4-byte Folded Reload
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 72(%esp), %ebx # 4-byte Folded Reload
+ movl 36(%esp), %ebx # 4-byte Reload
+ adcl 4(%ebp), %ebx
+ adcl 8(%ebp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ adcl 12(%ebp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 16(%ebp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 20(%ebp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 24(%ebp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 44(%ebp), %eax
+ movl 40(%ebp), %edx
+ movl 36(%ebp), %esi
+ movl 32(%ebp), %edi
+ movl 28(%ebp), %ecx
+ adcl $0, %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ebx, %esi
+ imull 96(%esp), %esi # 4-byte Folded Reload
+ movl %esi, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, (%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %esi, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, %edi
+ movl %esi, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %eax, %esi
+ addl %edi, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl %ebp, %ecx
+ movl %ecx, %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl 32(%esp), %edi # 4-byte Reload
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 8(%esp), %edx # 4-byte Folded Reload
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl %ebx, %esi
+ movl 24(%esp), %esi # 4-byte Reload
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 44(%esp) # 4-byte Spill
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl $0, 20(%esp) # 4-byte Folded Spill
+ adcl $0, 60(%esp) # 4-byte Folded Spill
+ adcl $0, 64(%esp) # 4-byte Folded Spill
+ adcl $0, 72(%esp) # 4-byte Folded Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl %esi, %ebx
+ imull 96(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, %ecx
+ movl %ebx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ addl %ecx, %ebp
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl 48(%esp), %ebx # 4-byte Reload
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 24(%esp), %eax # 4-byte Folded Reload
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 48(%esp) # 4-byte Spill
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl $0, 60(%esp) # 4-byte Folded Spill
+ adcl $0, 64(%esp) # 4-byte Folded Spill
+ adcl $0, 72(%esp) # 4-byte Folded Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl %ebp, %ecx
+ imull 96(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %ecx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %eax, %edi
+ addl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl %ebx, %esi
+ movl %esi, %ebx
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ movl 40(%esp), %esi # 4-byte Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl %ebp, %edi
+ movl 28(%esp), %edi # 4-byte Reload
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 32(%esp) # 4-byte Spill
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl 60(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl $0, 64(%esp) # 4-byte Folded Spill
+ adcl $0, 72(%esp) # 4-byte Folded Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl %edi, %esi
+ imull 96(%esp), %esi # 4-byte Folded Reload
+ movl %esi, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl %esi, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %eax, %ebx
+ addl %ecx, %edx
+ movl %edx, %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 28(%esp), %ebx # 4-byte Folded Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 56(%esp) # 4-byte Spill
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl $0, 72(%esp) # 4-byte Folded Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl 96(%esp), %ebx # 4-byte Reload
+ imull %ebp, %ebx
+ movl %ebx, 96(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ movl %ebx, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ mull 80(%esp) # 4-byte Folded Reload
+ addl %ebx, %eax
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ movl 64(%esp), %ebx # 4-byte Reload
+ adcl $0, %ebx
+ addl %esi, 28(%esp) # 4-byte Folded Spill
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 96(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 60(%esp) # 4-byte Spill
+ adcl 72(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ebx # 4-byte Reload
+ adcl $0, %ebx
+ movl %eax, %esi
+ subl 92(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl %edx, %esi
+ sbbl 80(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 80(%esp) # 4-byte Spill
+ sbbl 84(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 84(%esp) # 4-byte Spill
+ sbbl 88(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 88(%esp) # 4-byte Spill
+ sbbl 100(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ movl %edi, %esi
+ sbbl 76(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 100(%esp) # 4-byte Spill
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB85_2
+# BB#1:
+ movl 80(%esp), %edx # 4-byte Reload
+.LBB85_2:
+ testb %bl, %bl
+ jne .LBB85_4
+# BB#3:
+ movl 72(%esp), %eax # 4-byte Reload
+.LBB85_4:
+ movl 124(%esp), %ebx
+ movl %eax, (%ebx)
+ movl %edx, 4(%ebx)
+ movl 52(%esp), %ecx # 4-byte Reload
+ jne .LBB85_6
+# BB#5:
+ movl 84(%esp), %ecx # 4-byte Reload
+.LBB85_6:
+ movl %ecx, 8(%ebx)
+ movl %edi, %ecx
+ movl 60(%esp), %edi # 4-byte Reload
+ movl 96(%esp), %esi # 4-byte Reload
+ jne .LBB85_8
+# BB#7:
+ movl 88(%esp), %esi # 4-byte Reload
+.LBB85_8:
+ movl %esi, 12(%ebx)
+ jne .LBB85_10
+# BB#9:
+ movl 92(%esp), %edi # 4-byte Reload
+.LBB85_10:
+ movl %edi, 16(%ebx)
+ jne .LBB85_12
+# BB#11:
+ movl 100(%esp), %ecx # 4-byte Reload
+.LBB85_12:
+ movl %ecx, 20(%ebx)
+ addl $104, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end85:
+ .size mcl_fp_montRed6L, .Lfunc_end85-mcl_fp_montRed6L
+
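+# mcl_fp_addPre6L(z, x, y): plain 6-limb addition with no modular reduction;
+# the carry-out is returned in %eax via the trailing sbbl/andl pair.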
+ .globl mcl_fp_addPre6L
+ .align 16, 0x90
+ .type mcl_fp_addPre6L,@function
+mcl_fp_addPre6L: # @mcl_fp_addPre6L
+# BB#0:
+ pushl %esi
+ movl 16(%esp), %eax
+ movl (%eax), %ecx
+ movl 12(%esp), %edx
+ addl (%edx), %ecx
+ movl 8(%esp), %esi
+ movl %ecx, (%esi)
+ movl 4(%eax), %ecx
+ adcl 4(%edx), %ecx
+ movl %ecx, 4(%esi)
+ movl 8(%eax), %ecx
+ adcl 8(%edx), %ecx
+ movl %ecx, 8(%esi)
+ movl 12(%edx), %ecx
+ adcl 12(%eax), %ecx
+ movl %ecx, 12(%esi)
+ movl 16(%edx), %ecx
+ adcl 16(%eax), %ecx
+ movl %ecx, 16(%esi)
+ movl 20(%eax), %eax
+ movl 20(%edx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 20(%esi)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ retl
+.Lfunc_end86:
+ .size mcl_fp_addPre6L, .Lfunc_end86-mcl_fp_addPre6L
+
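+# mcl_fp_subPre6L(z, x, y): plain 6-limb subtraction; the borrow is returned
+# in %eax via the trailing sbbl $0 / andl $1.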
+ .globl mcl_fp_subPre6L
+ .align 16, 0x90
+ .type mcl_fp_subPre6L,@function
+mcl_fp_subPre6L: # @mcl_fp_subPre6L
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %ecx
+ movl (%ecx), %edx
+ xorl %eax, %eax
+ movl 20(%esp), %esi
+ subl (%esi), %edx
+ movl 12(%esp), %edi
+ movl %edx, (%edi)
+ movl 4(%ecx), %edx
+ sbbl 4(%esi), %edx
+ movl %edx, 4(%edi)
+ movl 8(%ecx), %edx
+ sbbl 8(%esi), %edx
+ movl %edx, 8(%edi)
+ movl 12(%ecx), %edx
+ sbbl 12(%esi), %edx
+ movl %edx, 12(%edi)
+ movl 16(%ecx), %edx
+ sbbl 16(%esi), %edx
+ movl %edx, 16(%edi)
+ movl 20(%esi), %edx
+ movl 20(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 20(%edi)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end87:
+ .size mcl_fp_subPre6L, .Lfunc_end87-mcl_fp_subPre6L
+
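+# mcl_fp_shr1_6L(z, x): logical right shift of a 6-limb value by one bit,
+# implemented with a shrdl chain so each limb pulls its low bit from the next.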
+ .globl mcl_fp_shr1_6L
+ .align 16, 0x90
+ .type mcl_fp_shr1_6L,@function
+mcl_fp_shr1_6L: # @mcl_fp_shr1_6L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl 20(%eax), %ecx
+ movl 16(%eax), %edx
+ movl 12(%eax), %esi
+ movl 8(%eax), %edi
+ movl (%eax), %ebx
+ movl 4(%eax), %eax
+ shrdl $1, %eax, %ebx
+ movl 20(%esp), %ebp
+ movl %ebx, (%ebp)
+ shrdl $1, %edi, %eax
+ movl %eax, 4(%ebp)
+ shrdl $1, %esi, %edi
+ movl %edi, 8(%ebp)
+ shrdl $1, %edx, %esi
+ movl %esi, 12(%ebp)
+ shrdl $1, %ecx, %edx
+ movl %edx, 16(%ebp)
+ shrl %ecx
+ movl %ecx, 20(%ebp)
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end88:
+ .size mcl_fp_shr1_6L, .Lfunc_end88-mcl_fp_shr1_6L
+
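+# mcl_fp_add6L(z, x, y, p): modular addition; the raw sum is stored first, then
+# p is subtracted and, when that subtraction does not borrow, the reduced value
+# overwrites the result (the %nocarry block).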
+ .globl mcl_fp_add6L
+ .align 16, 0x90
+ .type mcl_fp_add6L,@function
+mcl_fp_add6L: # @mcl_fp_add6L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $12, %esp
+ movl 40(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %ebp
+ movl 36(%esp), %ebx
+ addl (%ebx), %edx
+ adcl 4(%ebx), %ebp
+ movl 8(%eax), %ecx
+ adcl 8(%ebx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ movl 12(%ebx), %ecx
+ movl 16(%ebx), %edi
+ adcl 12(%eax), %ecx
+ adcl 16(%eax), %edi
+ movl 20(%ebx), %ebx
+ adcl 20(%eax), %ebx
+ movl 32(%esp), %eax
+ movl %edx, (%eax)
+ movl %ebp, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %ecx, 12(%eax)
+ movl %edi, 16(%eax)
+ movl %ebx, 20(%eax)
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl 44(%esp), %esi
+ subl (%esi), %edx
+ movl %edx, (%esp) # 4-byte Spill
+ movl 8(%esp), %edx # 4-byte Reload
+ movl 44(%esp), %esi
+ sbbl 4(%esi), %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ sbbl 8(%esi), %edx
+ sbbl 12(%esi), %ebp
+ sbbl 16(%esi), %edi
+ sbbl 20(%esi), %ebx
+ sbbl $0, %eax
+ testb $1, %al
+ jne .LBB89_2
+# BB#1: # %nocarry
+ movl (%esp), %eax # 4-byte Reload
+ movl 32(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 4(%esp), %eax # 4-byte Reload
+ movl %eax, 4(%ecx)
+ movl %edx, 8(%ecx)
+ movl %ebp, 12(%ecx)
+ movl %edi, 16(%ecx)
+ movl %ebx, 20(%ecx)
+.LBB89_2: # %carry
+ addl $12, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end89:
+ .size mcl_fp_add6L, .Lfunc_end89-mcl_fp_add6L
+
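+# mcl_fp_addNF6L(z, x, y, p): addition variant that, per mcl's NF naming, assumes
+# the modulus leaves a spare top bit; the sign of the final sbbl selects between
+# the raw and the reduced sum instead of a carry check.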
+ .globl mcl_fp_addNF6L
+ .align 16, 0x90
+ .type mcl_fp_addNF6L,@function
+mcl_fp_addNF6L: # @mcl_fp_addNF6L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $40, %esp
+ movl 68(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %ecx
+ movl 64(%esp), %ebp
+ addl (%ebp), %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %edx, %ebx
+ adcl 4(%ebp), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 20(%eax), %edx
+ movl 16(%eax), %esi
+ movl 12(%eax), %edi
+ movl 8(%eax), %eax
+ adcl 8(%ebp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ adcl 12(%ebp), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 16(%ebp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 20(%ebp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %ebx, %ebp
+ movl 72(%esp), %ebx
+ subl (%ebx), %ebp
+ movl %ebp, (%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ movl 72(%esp), %ecx
+ sbbl 4(%ecx), %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ sbbl 8(%ecx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ sbbl 12(%ecx), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl %esi, %edi
+ sbbl 16(%ecx), %edi
+ movl %edx, %esi
+ sbbl 20(%ecx), %esi
+ movl %esi, %ebx
+ sarl $31, %ebx
+ testl %ebx, %ebx
+ js .LBB90_2
+# BB#1:
+ movl (%esp), %eax # 4-byte Reload
+.LBB90_2:
+ movl 60(%esp), %ebx
+ movl %eax, (%ebx)
+ movl 20(%esp), %ecx # 4-byte Reload
+ js .LBB90_4
+# BB#3:
+ movl 4(%esp), %ecx # 4-byte Reload
+.LBB90_4:
+ movl %ecx, 4(%ebx)
+ movl 36(%esp), %eax # 4-byte Reload
+ movl 28(%esp), %edx # 4-byte Reload
+ movl 24(%esp), %ecx # 4-byte Reload
+ js .LBB90_6
+# BB#5:
+ movl 8(%esp), %ecx # 4-byte Reload
+.LBB90_6:
+ movl %ecx, 8(%ebx)
+ movl 32(%esp), %ecx # 4-byte Reload
+ js .LBB90_8
+# BB#7:
+ movl 12(%esp), %edx # 4-byte Reload
+.LBB90_8:
+ movl %edx, 12(%ebx)
+ js .LBB90_10
+# BB#9:
+ movl %edi, %ecx
+.LBB90_10:
+ movl %ecx, 16(%ebx)
+ js .LBB90_12
+# BB#11:
+ movl %esi, %eax
+.LBB90_12:
+ movl %eax, 20(%ebx)
+ addl $40, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end90:
+ .size mcl_fp_addNF6L, .Lfunc_end90-mcl_fp_addNF6L
+
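+# mcl_fp_sub6L(z, x, y, p): modular subtraction; the raw difference is stored and,
+# if the subtraction borrowed, p is added back in the %carry block.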
+ .globl mcl_fp_sub6L
+ .align 16, 0x90
+ .type mcl_fp_sub6L,@function
+mcl_fp_sub6L: # @mcl_fp_sub6L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $16, %esp
+ movl 40(%esp), %ebx
+ movl (%ebx), %esi
+ movl 4(%ebx), %edi
+ movl 44(%esp), %ecx
+ subl (%ecx), %esi
+ sbbl 4(%ecx), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ movl 8(%ebx), %eax
+ sbbl 8(%ecx), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 12(%ebx), %eax
+ sbbl 12(%ecx), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 16(%ebx), %ebp
+ sbbl 16(%ecx), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 20(%ebx), %edx
+ sbbl 20(%ecx), %edx
+ movl $0, %ecx
+ sbbl $0, %ecx
+ testb $1, %cl
+ movl 36(%esp), %ebx
+ movl %esi, (%ebx)
+ movl %edi, 4(%ebx)
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl %eax, 12(%ebx)
+ movl %ebp, 16(%ebx)
+ movl %edx, 20(%ebx)
+ je .LBB91_2
+# BB#1: # %carry
+ movl 48(%esp), %ecx
+ addl (%ecx), %esi
+ movl %esi, (%ebx)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 4(%ecx), %eax
+ adcl 8(%ecx), %edi
+ movl %eax, 4(%ebx)
+ movl 12(%ecx), %eax
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %edi, 8(%ebx)
+ movl %eax, 12(%ebx)
+ movl 16(%ecx), %eax
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%ebx)
+ movl 20(%ecx), %eax
+ adcl %edx, %eax
+ movl %eax, 20(%ebx)
+.LBB91_2: # %nocarry
+ addl $16, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end91:
+ .size mcl_fp_sub6L, .Lfunc_end91-mcl_fp_sub6L
+
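+# mcl_fp_subNF6L(z, x, y, p): branchless modular subtraction; the sign of the top
+# limb (sarl $31) builds a mask that selects either 0 or p to add back.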
+ .globl mcl_fp_subNF6L
+ .align 16, 0x90
+ .type mcl_fp_subNF6L,@function
+mcl_fp_subNF6L: # @mcl_fp_subNF6L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $24, %esp
+ movl 48(%esp), %ebx
+ movl 20(%ebx), %esi
+ movl (%ebx), %ecx
+ movl 4(%ebx), %eax
+ movl 52(%esp), %ebp
+ subl (%ebp), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 4(%ebp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 16(%ebx), %eax
+ movl 12(%ebx), %ecx
+ movl 8(%ebx), %edx
+ sbbl 8(%ebp), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ sbbl 12(%ebp), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ sbbl 16(%ebp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %esi, %edx
+ sbbl 20(%ebp), %edx
+ movl %edx, (%esp) # 4-byte Spill
+ movl %edx, %ebp
+ sarl $31, %ebp
+ movl %ebp, %ecx
+ addl %ecx, %ecx
+ movl %ebp, %eax
+ adcl %eax, %eax
+ shrl $31, %edx
+ orl %ecx, %edx
+ movl 56(%esp), %ebx
+ andl 4(%ebx), %eax
+ andl (%ebx), %edx
+ movl 20(%ebx), %edi
+ andl %ebp, %edi
+ movl 16(%ebx), %esi
+ andl %ebp, %esi
+ movl 12(%ebx), %ecx
+ andl %ebp, %ecx
+ andl 8(%ebx), %ebp
+ addl 8(%esp), %edx # 4-byte Folded Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl 44(%esp), %ebx
+ movl %edx, (%ebx)
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 4(%ebx)
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ebp, 8(%ebx)
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %ecx, 12(%ebx)
+ movl %esi, 16(%ebx)
+ adcl (%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%ebx)
+ addl $24, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end92:
+ .size mcl_fp_subNF6L, .Lfunc_end92-mcl_fp_subNF6L
+
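+# mcl_fpDbl_add6L(z, x, y, p): adds two 12-limb (double-width) values; the low six
+# limbs are stored directly and the high half appears to be conditionally reduced mod p.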
+ .globl mcl_fpDbl_add6L
+ .align 16, 0x90
+ .type mcl_fpDbl_add6L,@function
+mcl_fpDbl_add6L: # @mcl_fpDbl_add6L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $36, %esp
+ movl 64(%esp), %edx
+ movl 60(%esp), %ecx
+ movl 12(%ecx), %esi
+ movl 16(%ecx), %eax
+ movl 8(%edx), %edi
+ movl (%edx), %ebx
+ addl (%ecx), %ebx
+ movl 56(%esp), %ebp
+ movl %ebx, (%ebp)
+ movl 4(%edx), %ebx
+ adcl 4(%ecx), %ebx
+ adcl 8(%ecx), %edi
+ adcl 12(%edx), %esi
+ adcl 16(%edx), %eax
+ movl %ebx, 4(%ebp)
+ movl %edx, %ebx
+ movl 32(%ebx), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %edi, 8(%ebp)
+ movl 20(%ebx), %edi
+ movl %esi, 12(%ebp)
+ movl 20(%ecx), %esi
+ adcl %edi, %esi
+ movl 24(%ebx), %edi
+ movl %eax, 16(%ebp)
+ movl 24(%ecx), %edx
+ adcl %edi, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 28(%ebx), %edi
+ movl %esi, 20(%ebp)
+ movl 28(%ecx), %eax
+ adcl %edi, %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 32(%ecx), %ebp
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 36(%ebx), %esi
+ movl %ebx, %edi
+ movl 36(%ecx), %ebx
+ adcl %esi, %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 40(%edi), %esi
+ movl 40(%ecx), %edi
+ adcl %esi, %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 64(%esp), %esi
+ movl 44(%esi), %esi
+ movl 44(%ecx), %ecx
+ adcl %esi, %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl 68(%esp), %esi
+ subl (%esi), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl 68(%esp), %edx
+ sbbl 4(%edx), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ sbbl 8(%edx), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl %ebx, %ebp
+ sbbl 12(%edx), %ebp
+ movl %edi, %ebx
+ movl 12(%esp), %edi # 4-byte Reload
+ sbbl 16(%edx), %ebx
+ movl %edi, %eax
+ sbbl 20(%edx), %eax
+ sbbl $0, %ecx
+ andl $1, %ecx
+ jne .LBB93_2
+# BB#1:
+ movl %eax, %edi
+.LBB93_2:
+ testb %cl, %cl
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl 16(%esp), %edx # 4-byte Reload
+ jne .LBB93_4
+# BB#3:
+ movl (%esp), %edx # 4-byte Reload
+ movl 4(%esp), %ecx # 4-byte Reload
+.LBB93_4:
+ movl 56(%esp), %eax
+ movl %ecx, 24(%eax)
+ movl %edx, 28(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl 24(%esp), %edx # 4-byte Reload
+ jne .LBB93_6
+# BB#5:
+ movl 8(%esp), %edx # 4-byte Reload
+.LBB93_6:
+ movl %edx, 32(%eax)
+ movl 28(%esp), %edx # 4-byte Reload
+ jne .LBB93_8
+# BB#7:
+ movl %ebp, %edx
+.LBB93_8:
+ movl %edx, 36(%eax)
+ jne .LBB93_10
+# BB#9:
+ movl %ebx, %ecx
+.LBB93_10:
+ movl %ecx, 40(%eax)
+ movl %edi, 44(%eax)
+ addl $36, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end93:
+ .size mcl_fpDbl_add6L, .Lfunc_end93-mcl_fpDbl_add6L
+
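+# mcl_fpDbl_sub6L(z, x, y, p): subtracts two 12-limb values; on borrow the high
+# half appears to have p added back via the masked limb selection below.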
+ .globl mcl_fpDbl_sub6L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub6L,@function
+mcl_fpDbl_sub6L: # @mcl_fpDbl_sub6L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $24, %esp
+ movl 48(%esp), %edx
+ movl (%edx), %eax
+ movl 4(%edx), %edi
+ movl 52(%esp), %esi
+ subl (%esi), %eax
+ sbbl 4(%esi), %edi
+ movl 8(%edx), %ebx
+ sbbl 8(%esi), %ebx
+ movl 44(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 12(%edx), %eax
+ sbbl 12(%esi), %eax
+ movl %edi, 4(%ecx)
+ movl 16(%edx), %edi
+ sbbl 16(%esi), %edi
+ movl %ebx, 8(%ecx)
+ movl 20(%esi), %ebx
+ movl %eax, 12(%ecx)
+ movl 20(%edx), %eax
+ sbbl %ebx, %eax
+ movl 24(%esi), %ebx
+ movl %edi, 16(%ecx)
+ movl 24(%edx), %edi
+ sbbl %ebx, %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 28(%esi), %edi
+ movl %eax, 20(%ecx)
+ movl 28(%edx), %eax
+ sbbl %edi, %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 32(%esi), %edi
+ movl 32(%edx), %eax
+ sbbl %edi, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 36(%esi), %edi
+ movl 36(%edx), %eax
+ sbbl %edi, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 40(%esi), %edi
+ movl 40(%edx), %eax
+ sbbl %edi, %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 44(%esi), %esi
+ movl 44(%edx), %eax
+ sbbl %esi, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl $0, %ebx
+ sbbl $0, %ebx
+ andl $1, %ebx
+ movl 56(%esp), %eax
+ jne .LBB94_1
+# BB#2:
+ xorl %edx, %edx
+ jmp .LBB94_3
+.LBB94_1:
+ movl 20(%eax), %edx
+.LBB94_3:
+ testb %bl, %bl
+ jne .LBB94_4
+# BB#5:
+ movl $0, %esi
+ movl $0, %edi
+ jmp .LBB94_6
+.LBB94_4:
+ movl (%eax), %edi
+ movl 4(%eax), %esi
+.LBB94_6:
+ jne .LBB94_7
+# BB#8:
+ movl $0, %ebx
+ jmp .LBB94_9
+.LBB94_7:
+ movl 16(%eax), %ebx
+.LBB94_9:
+ jne .LBB94_10
+# BB#11:
+ movl $0, %ebp
+ jmp .LBB94_12
+.LBB94_10:
+ movl 12(%eax), %ebp
+.LBB94_12:
+ jne .LBB94_13
+# BB#14:
+ xorl %eax, %eax
+ jmp .LBB94_15
+.LBB94_13:
+ movl 8(%eax), %eax
+.LBB94_15:
+ addl 8(%esp), %edi # 4-byte Folded Reload
+ adcl (%esp), %esi # 4-byte Folded Reload
+ movl %edi, 24(%ecx)
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %esi, 28(%ecx)
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 32(%ecx)
+ adcl 16(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, 36(%ecx)
+ movl %ebx, 40(%ecx)
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%ecx)
+ addl $24, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end94:
+ .size mcl_fpDbl_sub6L, .Lfunc_end94-mcl_fpDbl_sub6L
+
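+# mcl_fp_mulUnitPre7L(z, x, y): multiplies a 7-limb operand by the single limb y
+# and stores the 8-limb product (offsets 0..28 of z).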
+ .globl mcl_fp_mulUnitPre7L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre7L,@function
+mcl_fp_mulUnitPre7L: # @mcl_fp_mulUnitPre7L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $36, %esp
+ movl 64(%esp), %esi
+ movl 60(%esp), %ebx
+ movl %esi, %eax
+ mull 24(%ebx)
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 20(%ebx)
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 16(%ebx)
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 12(%ebx)
+ movl %edx, %ebp
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 8(%ebx)
+ movl %edx, %ecx
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 4(%ebx)
+ movl %edx, %edi
+ movl %eax, (%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull (%ebx)
+ movl 56(%esp), %esi
+ movl %eax, (%esi)
+ addl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%esi)
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%esi)
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esi)
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 16(%esi)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%esi)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%esi)
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 28(%esi)
+ addl $36, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end95:
+ .size mcl_fp_mulUnitPre7L, .Lfunc_end95-mcl_fp_mulUnitPre7L
+
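+# mcl_fpDbl_mulPre7L(z, x, y): schoolbook 7x7-limb multiplication producing the
+# full 14-limb product, one column of mull/adcl per limb of y.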
+ .globl mcl_fpDbl_mulPre7L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre7L,@function
+mcl_fpDbl_mulPre7L: # @mcl_fpDbl_mulPre7L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $100, %esp
+ movl 124(%esp), %ebx
+ movl (%ebx), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx
+ movl (%ecx), %edi
+ movl %ecx, %ebp
+ mull %edi
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 4(%ebx), %ecx
+ movl 8(%ebx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 12(%ebx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 16(%ebx), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 20(%ebx), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 24(%ebx), %ebx
+ movl %ebx, 80(%esp) # 4-byte Spill
+ movl 4(%ebp), %ebp
+ movl %ecx, %eax
+ mull %ebp
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ mull %ebp
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %ebp
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ebp
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 4(%esp), %ebx # 4-byte Reload
+ movl %ebx, %eax
+ mull %ebp
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 8(%esp), %esi # 4-byte Reload
+ movl %esi, %eax
+ mull %ebp
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, %eax
+ mull %ebp
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %edi
+ movl %edx, %ebx
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %edi
+ movl %edx, %ebp
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, %ecx
+ movl 24(%esp), %esi # 4-byte Reload
+ addl 96(%esp), %esi # 4-byte Folded Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl %ebx, %edi
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl 80(%esp), %ebx # 4-byte Reload
+ adcl $0, %ebx
+ addl 52(%esp), %esi # 4-byte Folded Reload
+ movl 120(%esp), %eax
+ movl %esi, 4(%eax)
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %ebp, %ecx
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %esi
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ sbbl %edx, %edx
+ andl $1, %edx
+ movl 96(%esp), %ebp # 4-byte Reload
+ addl 84(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 96(%esp) # 4-byte Spill
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 68(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 76(%esp) # 4-byte Spill
+ adcl 72(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 80(%esp) # 4-byte Spill
+ adcl 88(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 124(%esp), %esi
+ movl 24(%esi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 128(%esp), %eax
+ movl 8(%eax), %edi
+ movl %ecx, %eax
+ mull %edi
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ mull %edi
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ mull %edi
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ mull %edi
+ movl %eax, %ebp
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ mull %edi
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl (%esi), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 4(%esi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ mull %edi
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, 28(%esp) # 4-byte Spill
+ addl 96(%esp), %eax # 4-byte Folded Reload
+ movl 120(%esp), %edx
+ movl %eax, 8(%edx)
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 88(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 92(%esp) # 4-byte Folded Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 84(%esp) # 4-byte Folded Spill
+ sbbl %edi, %edi
+ movl 128(%esp), %eax
+ movl 12(%eax), %ecx
+ movl 16(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 8(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl (%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 4(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %eax, (%esp) # 4-byte Spill
+ movl %edx, 36(%esp) # 4-byte Spill
+ andl $1, %edi
+ addl 28(%esp), %ebx # 4-byte Folded Reload
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ addl 4(%esp), %ebx # 4-byte Folded Reload
+ movl 120(%esp), %ebp
+ movl %ebx, 12(%ebp)
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ adcl (%esp), %esi # 4-byte Folded Reload
+ movl %esi, %ebx
+ adcl 8(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %esi
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %edx
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl 20(%esp), %ebp # 4-byte Folded Reload
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 88(%esp) # 4-byte Spill
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 92(%esp) # 4-byte Spill
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl 80(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 124(%esp), %ebx
+ movl 24(%ebx), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 128(%esp), %eax
+ movl 16(%eax), %ecx
+ movl %edx, %eax
+ mull %ecx
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 20(%ebx), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 16(%ebx), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 12(%ebx), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 8(%ebx), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl (%ebx), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 4(%ebx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ mull %ecx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %edi, %eax
+ mull %ecx
+ movl %edx, (%esp) # 4-byte Spill
+ addl %ebp, %eax
+ movl 120(%esp), %ecx
+ movl %eax, 16(%ecx)
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, %edi
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 88(%esp), %esi # 4-byte Folded Reload
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 92(%esp), %edx # 4-byte Folded Reload
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 84(%esp), %ecx # 4-byte Folded Reload
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ addl (%esp), %edi # 4-byte Folded Reload
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 64(%esp) # 4-byte Spill
+ adcl 8(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 76(%esp) # 4-byte Spill
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 92(%esp) # 4-byte Spill
+ movl 128(%esp), %eax
+ movl 20(%eax), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl 40(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, 40(%esp) # 4-byte Spill
+ addl %edi, %eax
+ movl 120(%esp), %edx
+ movl %eax, 20(%edx)
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ adcl 68(%esp), %ebp # 4-byte Folded Reload
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 84(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 88(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 96(%esp) # 4-byte Folded Spill
+ adcl 92(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 128(%esp), %eax
+ movl 24(%eax), %ecx
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl 124(%esp), %edi
+ mull 24(%edi)
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 20(%edi)
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 16(%edi)
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 12(%edi)
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 8(%edi)
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 4(%edi)
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull (%edi)
+ movl %eax, (%esp) # 4-byte Spill
+ movl %edx, 12(%esp) # 4-byte Spill
+ andl $1, %esi
+ addl 40(%esp), %ebx # 4-byte Folded Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ addl (%esp), %ebx # 4-byte Folded Reload
+ movl 120(%esp), %ecx
+ movl %ebx, 24(%ecx)
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl 8(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %ebx
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %edx
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl 12(%esp), %ebp # 4-byte Folded Reload
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, 28(%ecx)
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %ebx, 32(%ecx)
+ movl 96(%esp), %ebx # 4-byte Reload
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %edx, 36(%ecx)
+ movl %edi, %edx
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %ebx, 40(%ecx)
+ adcl 76(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 44(%ecx)
+ movl %esi, 48(%ecx)
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%ecx)
+ addl $100, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end96:
+ .size mcl_fpDbl_mulPre7L, .Lfunc_end96-mcl_fpDbl_mulPre7L
+
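+# mcl_fpDbl_sqrPre7L(z, x): full squaring of a 7-limb operand into a 14-limb result.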
+ .globl mcl_fpDbl_sqrPre7L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre7L,@function
+mcl_fpDbl_sqrPre7L: # @mcl_fpDbl_sqrPre7L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $100, %esp
+ movl 124(%esp), %esi
+ movl 24(%esi), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl (%esi), %ebx
+ movl 4(%esi), %edi
+ mull %edi
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ mull %edi
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 16(%esi), %ecx
+ movl %ecx, %eax
+ mull %edi
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 12(%esi), %esi
+ movl %esi, %eax
+ mull %edi
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 124(%esp), %eax
+ movl 8(%eax), %ebp
+ movl %ebp, %eax
+ mull %edi
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ mull %ebx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ mull %ebx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ebx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %ebx
+ movl %edx, %ebp
+ movl %eax, %ecx
+ movl %edi, %eax
+ mull %edi
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %ebx
+ movl %edx, %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ebx, %eax
+ mull %ebx
+ movl 120(%esp), %ebx
+ movl %eax, (%ebx)
+ addl %edi, %edx
+ adcl %esi, %ecx
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, %esi
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl %eax, 92(%esp) # 4-byte Folded Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl %eax, 96(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %edi, %edx
+ movl %edx, 4(%ebx)
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebx
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ movl %esi, %edi
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ adcl 60(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, %esi
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ sbbl %ebp, %ebp
+ andl $1, %ebp
+ addl 20(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 28(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 92(%esp) # 4-byte Spill
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl 72(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 84(%esp) # 4-byte Spill
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 80(%esp), %ebp # 4-byte Folded Reload
+ movl 124(%esp), %edi
+ movl 24(%edi), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 8(%edi), %esi
+ mull %esi
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 20(%edi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ mull %esi
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 16(%edi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ mull %esi
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 12(%edi), %ebx
+ movl %ebx, %eax
+ mull %esi
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl (%edi), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 4(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ mull %esi
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %esi
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %esi, %eax
+ mull %esi
+ movl %eax, %ecx
+ movl %edx, 4(%esp) # 4-byte Spill
+ addl 28(%esp), %edi # 4-byte Folded Reload
+ movl 120(%esp), %eax
+ movl %edi, 8(%eax)
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ adcl 92(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 84(%esp), %esi # 4-byte Folded Reload
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 88(%esp), %edi # 4-byte Folded Reload
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %ebp, %eax
+ sbbl %ebp, %ebp
+ andl $1, %ebp
+ addl 8(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 12(%esp), %edx # 4-byte Reload
+ adcl %edx, 56(%esp) # 4-byte Folded Spill
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 72(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 76(%esp) # 4-byte Spill
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl 52(%esp), %eax # 4-byte Reload
+ mull %ebx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ mull %ebx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ mull %ebx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl 24(%esp), %eax # 4-byte Reload
+ mull %ebx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl 20(%esp), %eax # 4-byte Reload
+ mull %ebx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ebx, %eax
+ mull %ebx
+ movl %eax, %ebx
+ movl %edx, 16(%esp) # 4-byte Spill
+ addl 68(%esp), %edi # 4-byte Folded Reload
+ movl 120(%esp), %eax
+ movl %edi, 12(%eax)
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ adcl 72(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 96(%esp) # 4-byte Spill
+ adcl 76(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 88(%esp) # 4-byte Folded Spill
+ adcl %ebp, 92(%esp) # 4-byte Folded Spill
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 124(%esp), %eax
+ movl (%eax), %esi
+ movl 4(%eax), %edi
+ movl 20(%eax), %ebx
+ movl %edi, %eax
+ mull %ebx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ebx
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 124(%esp), %eax
+ movl 16(%eax), %ebp
+ movl %edi, %eax
+ mull %ebp
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ebp
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ addl %eax, 56(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 60(%esp) # 4-byte Folded Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl %eax, 88(%esp) # 4-byte Folded Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl %eax, 92(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 124(%esp), %esi
+ movl 24(%esi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ mull %ebp
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %ebp
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ mull %ebp
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ mull %ebp
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ebp, %eax
+ mull %ebp
+ movl %eax, %esi
+ movl %edx, (%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ addl 4(%esp), %ebp # 4-byte Folded Reload
+ movl 120(%esp), %eax
+ movl %ebp, 16(%eax)
+ movl %ecx, %eax
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %edi, %ebp
+ adcl 96(%esp), %ebp # 4-byte Folded Reload
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ adcl 88(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 84(%esp), %edx # 4-byte Folded Reload
+ sbbl %edi, %edi
+ andl $1, %edi
+ addl 12(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 80(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 80(%esp) # 4-byte Spill
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ adcl (%esp), %esi # 4-byte Folded Reload
+ movl %esi, 92(%esp) # 4-byte Spill
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl 32(%esp), %eax # 4-byte Reload
+ mull %ebx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ mull %ebx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ mull %ebx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl %ebx, %eax
+ mull %ebx
+ movl %eax, %esi
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ addl 40(%esp), %eax # 4-byte Folded Reload
+ movl 120(%esp), %edx
+ movl %eax, 20(%edx)
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, %edx
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl 84(%esp), %ebx # 4-byte Reload
+ adcl %ebp, %ebx
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ adcl 76(%esp), %esi # 4-byte Folded Reload
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl %edi, %ebp
+ sbbl %edi, %edi
+ andl $1, %edi
+ addl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 84(%esp) # 4-byte Spill
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 68(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 64(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 88(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 124(%esp), %esi
+ movl 24(%esi), %ecx
+ movl %ecx, %eax
+ mull 20(%esi)
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 16(%esi)
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 12(%esi)
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %ecx, %eax
+ mull 8(%esi)
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %ecx, %eax
+ mull 4(%esi)
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ecx, %eax
+ mull (%esi)
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ecx, %eax
+ mull %ecx
+ movl %edx, 52(%esp) # 4-byte Spill
+ addl 80(%esp), %esi # 4-byte Folded Reload
+ movl 120(%esp), %edx
+ movl %esi, 24(%edx)
+ movl %edx, %esi
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ adcl 84(%esp), %ebp # 4-byte Folded Reload
+ adcl 92(%esp), %ebx # 4-byte Folded Reload
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl %ecx, 96(%esp) # 4-byte Folded Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx # 4-byte Folded Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ addl 36(%esp), %edi # 4-byte Folded Reload
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %edi, 28(%esi)
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, 32(%esi)
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 36(%esi)
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %edi, 40(%esi)
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 44(%esi)
+ movl %eax, 48(%esi)
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esi)
+ addl $100, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end97:
+ .size mcl_fpDbl_sqrPre7L, .Lfunc_end97-mcl_fpDbl_sqrPre7L
+
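+# Descriptive note (not in the generated source): mcl_fp_mont7L(z, x, y, p) appears to be
+# compiler-generated 7-limb (224-bit) Montgomery multiplication for 32-bit x86,
+# computing z = x*y*R^-1 mod p with R = 2^224; the word stored just below p (-4(p))
+# supplies the per-word reduction constant, presumably -p^-1 mod 2^32.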
+ .globl mcl_fp_mont7L
+ .align 16, 0x90
+ .type mcl_fp_mont7L,@function
+mcl_fp_mont7L: # @mcl_fp_mont7L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $152, %esp
+ movl 176(%esp), %esi
+ movl (%esi), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 180(%esp), %edx
+ movl (%edx), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 184(%esp), %ecx
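+	# the constant at -4(p) (presumably -p^-1 mod 2^32) is cached at 132(%esp)
+	# and multiplied into the low accumulator word to form each reduction factor m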
+ movl -4(%ecx), %edx
+ movl %edx, 132(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ imull %edx, %ebx
+ movl (%ecx), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 24(%ecx), %edx
+ movl %edx, 120(%esp) # 4-byte Spill
+ movl 20(%ecx), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 16(%ecx), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 12(%ecx), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 8(%ecx), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 4(%ecx), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 4(%esi), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl %esi, %eax
+ movl 24(%eax), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 20(%eax), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 16(%eax), %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ movl 12(%eax), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 8(%eax), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl 72(%esp), %ecx # 4-byte Reload
+ mull %ecx
+ movl %edx, %esi
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %ecx
+ movl %edx, %ebp
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %ecx
+ movl %edx, %edi
+ movl %eax, (%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, %ebx
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ mull %ecx
+ addl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 8(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 8(%esp) # 4-byte Spill
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, (%esp) # 4-byte Spill
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 4(%esp) # 4-byte Spill
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 84(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ addl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 44(%esp), %ebx # 4-byte Reload
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ movl 28(%esp), %ebp # 4-byte Reload
+ addl 80(%esp), %ebp # 4-byte Folded Reload
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl %ebp, 88(%esp) # 4-byte Folded Spill
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ adcl (%esp), %edi # 4-byte Folded Reload
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 4(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 72(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 64(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
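+	# main loop, fully unrolled: for each remaining word y[i] (i = 1..6), multiply x by y[i],
+	# add into the running total, then fold in one Montgomery reduction step using
+	# m = t[0] * 132(%esp); the six blocks below are the unrolled iterations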
+ movl 180(%esp), %eax
+ movl 4(%eax), %ecx
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %edi
+ movl %ecx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ addl %edi, %ebx
+ adcl 84(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 84(%esp) # 4-byte Spill
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ addl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 52(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 48(%esp) # 4-byte Spill
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl 60(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 68(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl 88(%esp), %ecx # 4-byte Reload
+ imull 132(%esp), %ecx # 4-byte Folded Reload
+ andl $1, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ebx
+ movl %ecx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %eax, %ebp
+ addl %ebx, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %eax
+ movl 52(%esp), %ebx # 4-byte Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ addl 88(%esp), %ebp # 4-byte Folded Reload
+ movl 20(%esp), %ebp # 4-byte Reload
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ adcl 84(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 36(%esp) # 4-byte Spill
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 80(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 52(%esp) # 4-byte Spill
+ adcl 76(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 72(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 180(%esp), %eax
+ movl 8(%eax), %ebx
+ movl %ebx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, %edi
+ movl %ebx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ addl %edi, %edx
+ movl %edx, %edi
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl 84(%esp), %ebx # 4-byte Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 72(%esp), %esi # 4-byte Reload
+ addl %ebp, %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %edi # 4-byte Reload
+ adcl %edi, 44(%esp) # 4-byte Folded Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl %edi, 48(%esp) # 4-byte Folded Spill
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 84(%esp) # 4-byte Spill
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %esi, %ecx
+ imull 132(%esp), %ecx # 4-byte Folded Reload
+ andl $1, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ebx
+ movl %ecx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %eax, %ebp
+ addl %ebx, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %eax
+ movl 56(%esp), %ebx # 4-byte Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 72(%esp), %ebp # 4-byte Folded Reload
+ movl 20(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 36(%esp) # 4-byte Spill
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 88(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+ adcl 84(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 80(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 180(%esp), %eax
+ movl 12(%eax), %ebx
+ movl %ebx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, %edi
+ movl %ebx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ addl %edi, %edx
+ movl %edx, %edi
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl 84(%esp), %ebx # 4-byte Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 72(%esp), %esi # 4-byte Reload
+ addl %ebp, %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl %edi, 48(%esp) # 4-byte Folded Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl %edi, 52(%esp) # 4-byte Folded Spill
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 84(%esp) # 4-byte Spill
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %esi, %ecx
+ imull 132(%esp), %ecx # 4-byte Folded Reload
+ andl $1, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ebx
+ movl %ecx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %eax, %ebp
+ addl %ebx, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %eax
+ movl 56(%esp), %ebx # 4-byte Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 72(%esp), %ebp # 4-byte Folded Reload
+ movl 20(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 88(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+ adcl 84(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 80(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 180(%esp), %eax
+ movl 16(%eax), %ebx
+ movl %ebx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, %edi
+ movl %ebx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ addl %edi, %edx
+ movl %edx, %edi
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl 84(%esp), %ebx # 4-byte Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 72(%esp), %esi # 4-byte Reload
+ addl %ebp, %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl %edi, 48(%esp) # 4-byte Folded Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl %edi, 52(%esp) # 4-byte Folded Spill
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 84(%esp) # 4-byte Spill
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %esi, %ecx
+ imull 132(%esp), %ecx # 4-byte Folded Reload
+ andl $1, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ebx
+ movl %ecx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %eax, %ebp
+ addl %ebx, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %eax
+ movl 56(%esp), %ebx # 4-byte Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 72(%esp), %ebp # 4-byte Folded Reload
+ movl 20(%esp), %ebp # 4-byte Reload
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 88(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+ adcl 84(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 80(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 180(%esp), %eax
+ movl 20(%eax), %ebx
+ movl %ebx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, %edi
+ movl %ebx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ addl %edi, %edx
+ movl %edx, %edi
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl 84(%esp), %ebx # 4-byte Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 72(%esp), %esi # 4-byte Reload
+ addl %ebp, %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl %edi, 48(%esp) # 4-byte Folded Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl %edi, 52(%esp) # 4-byte Folded Spill
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 84(%esp) # 4-byte Spill
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %esi, %ecx
+ imull 132(%esp), %ecx # 4-byte Folded Reload
+ andl $1, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ebp
+ movl %ecx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %eax, %ebx
+ addl %ebp, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 72(%esp), %ebx # 4-byte Folded Reload
+ movl 20(%esp), %ebx # 4-byte Reload
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 88(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 56(%esp) # 4-byte Spill
+ adcl 84(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 80(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 180(%esp), %eax
+ movl 24(%eax), %ebp
+ movl %ebp, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, %edi
+ movl %ebp, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ addl %edi, %edx
+ movl %edx, %edi
+ adcl 112(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 112(%esp) # 4-byte Spill
+ adcl 92(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 96(%esp), %ecx # 4-byte Folded Reload
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 76(%esp), %ebp # 4-byte Folded Reload
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 80(%esp), %edx # 4-byte Folded Reload
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 116(%esp), %esi # 4-byte Reload
+ addl %ebx, %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl %edi, 112(%esp) # 4-byte Folded Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl %edi, 104(%esp) # 4-byte Folded Spill
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 108(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 100(%esp) # 4-byte Spill
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl 132(%esp), %ecx # 4-byte Reload
+ imull %esi, %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ andl $1, %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, %edi
+ movl 132(%esp), %eax # 4-byte Reload
+ mull 124(%esp) # 4-byte Folded Reload
+ addl 56(%esp), %eax # 4-byte Folded Reload
+ adcl %edi, %edx
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ adcl 72(%esp), %ebp # 4-byte Folded Reload
+ adcl 76(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 132(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl 64(%esp), %esi # 4-byte Reload
+ addl 116(%esp), %esi # 4-byte Folded Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ adcl 112(%esp), %edx # 4-byte Folded Reload
+ adcl 104(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 104(%esp) # 4-byte Spill
+ adcl 108(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 116(%esp) # 4-byte Spill
+ adcl 100(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %esi # 4-byte Reload
+ adcl 88(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 132(%esp) # 4-byte Spill
+ adcl 84(%esp), %edi # 4-byte Folded Reload
+ adcl $0, 96(%esp) # 4-byte Folded Spill
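+	# final conditional subtraction: subtract p limb-by-limb; if no borrow results,
+	# the reduced (subtracted) limbs are selected below, otherwise the unsubtracted sum is kept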
+ movl %eax, %esi
+ subl 128(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl %edx, %esi
+ sbbl 124(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 124(%esp) # 4-byte Spill
+ sbbl 136(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 128(%esp) # 4-byte Spill
+ sbbl 140(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 136(%esp) # 4-byte Spill
+ sbbl 144(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 140(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ movl %ecx, %ebx
+ movl %ecx, %ebp
+ sbbl 148(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 144(%esp) # 4-byte Spill
+ movl %edi, %ebx
+ sbbl 120(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 148(%esp) # 4-byte Spill
+ movl 96(%esp), %ebx # 4-byte Reload
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB98_2
+# BB#1:
+ movl 108(%esp), %eax # 4-byte Reload
+.LBB98_2:
+ movl 172(%esp), %esi
+ movl %eax, (%esi)
+ testb %bl, %bl
+ jne .LBB98_4
+# BB#3:
+ movl 124(%esp), %edx # 4-byte Reload
+.LBB98_4:
+ movl %edx, 4(%esi)
+ movl 104(%esp), %ecx # 4-byte Reload
+ jne .LBB98_6
+# BB#5:
+ movl 128(%esp), %ecx # 4-byte Reload
+.LBB98_6:
+ movl %ecx, 8(%esi)
+ movl 112(%esp), %ecx # 4-byte Reload
+ movl 116(%esp), %eax # 4-byte Reload
+ jne .LBB98_8
+# BB#7:
+ movl 136(%esp), %eax # 4-byte Reload
+.LBB98_8:
+ movl %eax, 12(%esi)
+ jne .LBB98_10
+# BB#9:
+ movl 140(%esp), %ecx # 4-byte Reload
+.LBB98_10:
+ movl %ecx, 16(%esi)
+ jne .LBB98_12
+# BB#11:
+ movl 144(%esp), %ebp # 4-byte Reload
+.LBB98_12:
+ movl %ebp, 20(%esi)
+ jne .LBB98_14
+# BB#13:
+ movl 148(%esp), %edi # 4-byte Reload
+.LBB98_14:
+ movl %edi, 24(%esi)
+ addl $152, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end98:
+ .size mcl_fp_mont7L, .Lfunc_end98-mcl_fp_mont7L
+
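+# Descriptive note (not in the generated source): mcl_fp_montNF7L is the 7-limb Montgomery
+# multiplication in its "NF" form, apparently selected when the modulus leaves the top bit
+# of the highest word free, which allows simpler carry and final-reduction handling.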
+ .globl mcl_fp_montNF7L
+ .align 16, 0x90
+ .type mcl_fp_montNF7L,@function
+mcl_fp_montNF7L: # @mcl_fp_montNF7L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $152, %esp
+ movl 176(%esp), %ebp
+ movl (%ebp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 180(%esp), %ecx
+ movl (%ecx), %ecx
+ mull %ecx
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %edx, 84(%esp) # 4-byte Spill
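+	# as in mcl_fp_mont7L, the constant at -4(p) (cached here at 108(%esp))
+	# drives each Montgomery reduction step via imull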
+ movl 184(%esp), %esi
+ movl -4(%esi), %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl %eax, %edi
+ imull %edx, %edi
+ movl (%esi), %edx
+ movl %edx, 148(%esp) # 4-byte Spill
+ movl 24(%esi), %edx
+ movl %edx, 124(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 4(%esi), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 4(%ebp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 24(%ebp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 20(%ebp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 16(%ebp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 12(%ebp), %ebx
+ movl %ebx, 96(%esp) # 4-byte Spill
+ movl 8(%ebp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 132(%esp) # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ecx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %ecx
+ movl %edx, %esi
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull %ecx
+ movl %edx, %ebp
+ movl %eax, (%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %eax, %edi
+ addl 84(%esp), %edi # 4-byte Folded Reload
+ adcl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ adcl %ebx, %ebp
+ movl %esi, %edx
+ adcl 4(%esp), %edx # 4-byte Folded Reload
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 16(%esp), %ebx # 4-byte Reload
+ addl 88(%esp), %ebx # 4-byte Folded Reload
+ movl %edi, %ebx
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ adcl $0, %eax
+ addl 28(%esp), %ebx # 4-byte Folded Reload
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 40(%esp) # 4-byte Spill
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 72(%esp) # 4-byte Spill
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl 4(%eax), %ecx
+ movl %ecx, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ addl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ addl %ebx, 48(%esp) # 4-byte Folded Spill
+ adcl %edi, 52(%esp) # 4-byte Folded Spill
+ movl %ebp, %edi
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl 44(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 56(%esp) # 4-byte Folded Spill
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 76(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl 80(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ movl %ebp, %ecx
+ imull 108(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 132(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ecx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %ecx, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ addl %ebp, %eax
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ adcl %edi, %esi
+ movl %esi, %edi
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 64(%esp), %ebp # 4-byte Folded Reload
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 88(%esp), %ecx # 4-byte Folded Reload
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 40(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 72(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 76(%esp) # 4-byte Spill
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl 8(%eax), %ebp
+ movl %ebp, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ebp, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ addl %ebx, %ebp
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebx
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl %edi, %esi
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 44(%esp), %edi # 4-byte Reload
+ addl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 44(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebp # 4-byte Folded Reload
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 72(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 40(%esp) # 4-byte Spill
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 80(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 56(%esp) # 4-byte Spill
+ adcl 84(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ movl %edi, %ebx
+ imull 108(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 132(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl %ebx, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ addl %edi, %eax
+ adcl %ebp, %ecx
+ movl %ecx, %esi
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl 72(%esp), %ebx # 4-byte Reload
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 68(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 72(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 76(%esp) # 4-byte Spill
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl 12(%eax), %edi
+ movl %edi, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ebp
+ movl %edi, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %edx, %ebx
+ addl %ebp, %ebx
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, %edi
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 44(%esp), %esi # 4-byte Reload
+ addl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 44(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 72(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 80(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 56(%esp) # 4-byte Spill
+ adcl 88(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ movl %edi, %ebp
+ imull 108(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 132(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ebp, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ addl %edi, %eax
+ adcl %ebx, %esi
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ movl 72(%esp), %ebx # 4-byte Reload
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 68(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 72(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 76(%esp) # 4-byte Spill
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl 16(%eax), %ebp
+ movl %ebp, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, %edi
+ movl %ebp, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ addl %ebx, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl %edi, %esi
+ movl %esi, %edi
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ movl 56(%esp), %ebx # 4-byte Reload
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 24(%esp), %ecx # 4-byte Reload
+ addl 40(%esp), %ecx # 4-byte Folded Reload
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 44(%esp) # 4-byte Spill
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 72(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 76(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl 80(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+ adcl 84(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ imull 108(%esp), %esi # 4-byte Folded Reload
+ movl %esi, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %esi, %eax
+ mull 132(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %esi, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ addl %ecx, %eax
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl %edi, %edx
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl 24(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 48(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 72(%esp) # 4-byte Spill
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 76(%esp) # 4-byte Spill
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl 20(%eax), %ebp
+ movl %ebp, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ebp, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ addl %ebx, %ebp
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, %ebx
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl %edi, %edx
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 52(%esp), %edi # 4-byte Reload
+ addl 44(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebp # 4-byte Folded Reload
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 56(%esp) # 4-byte Spill
+ adcl 72(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 40(%esp) # 4-byte Spill
+ adcl 76(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 60(%esp) # 4-byte Spill
+ adcl 80(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %esi
+ adcl 88(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ movl %edi, %ebx
+ imull 108(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 132(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl %ebx, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ addl %edi, %eax
+ adcl %ebp, %ecx
+ movl %ecx, %edx
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl 76(%esp), %ebx # 4-byte Reload
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl %esi, %edi
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 72(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 76(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 80(%esp) # 4-byte Spill
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 88(%esp) # 4-byte Spill
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl 24(%eax), %edi
+ movl %edi, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, 112(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %edi, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, %ebp
+ movl %edi, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %eax, %ebx
+ addl %ebp, %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl %ecx, %edi
+ adcl %esi, %edi
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 116(%esp), %edx # 4-byte Folded Reload
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ addl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 48(%esp) # 4-byte Spill
+ movl 68(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 104(%esp) # 4-byte Folded Spill
+ adcl 72(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 72(%esp) # 4-byte Spill
+ adcl 76(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 100(%esp) # 4-byte Spill
+ adcl 80(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl 88(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 92(%esp) # 4-byte Spill
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 108(%esp), %edi # 4-byte Reload
+ movl 48(%esp), %ecx # 4-byte Reload
+ imull %ecx, %edi
+ movl %edi, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %edi, %eax
+ mull 132(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %edi, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ addl %ecx, %ebp
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %edx
+ adcl 72(%esp), %esi # 4-byte Folded Reload
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 100(%esp), %ecx # 4-byte Folded Reload
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl 108(%esp), %ebx # 4-byte Reload
+ adcl 92(%esp), %ebx # 4-byte Folded Reload
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 64(%esp), %ebp # 4-byte Folded Reload
+ movl 112(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ addl 60(%esp), %edx # 4-byte Folded Reload
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 104(%esp) # 4-byte Spill
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 116(%esp) # 4-byte Spill
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 80(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 108(%esp) # 4-byte Spill
+ adcl 84(%esp), %ebp # 4-byte Folded Reload
+ adcl 88(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 112(%esp) # 4-byte Spill
+ movl %edx, %eax
+ subl 148(%esp), %eax # 4-byte Folded Reload
+ sbbl 128(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 128(%esp) # 4-byte Spill
+ sbbl 132(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ movl %edx, %esi
+ sbbl 136(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 136(%esp) # 4-byte Spill
+ sbbl 140(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 148(%esp) # 4-byte Spill
+ movl %ebp, %ebx
+ movl %ebp, %ecx
+ movl %ebx, %ebp
+ sbbl 144(%esp), %ebp # 4-byte Folded Reload
+ movl %edi, %ebx
+ sbbl 124(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, %edi
+ sarl $31, %edi
+ testl %edi, %edi
+ js .LBB99_2
+# BB#1:
+ movl %eax, %esi
+.LBB99_2:
+ movl 172(%esp), %edx
+ movl %esi, (%edx)
+ movl 104(%esp), %eax # 4-byte Reload
+ js .LBB99_4
+# BB#3:
+ movl 128(%esp), %eax # 4-byte Reload
+.LBB99_4:
+ movl %eax, 4(%edx)
+ movl %ecx, %eax
+ movl 116(%esp), %ecx # 4-byte Reload
+ js .LBB99_6
+# BB#5:
+ movl 132(%esp), %ecx # 4-byte Reload
+.LBB99_6:
+ movl %ecx, 8(%edx)
+ movl 108(%esp), %esi # 4-byte Reload
+ movl 120(%esp), %ecx # 4-byte Reload
+ js .LBB99_8
+# BB#7:
+ movl 136(%esp), %ecx # 4-byte Reload
+.LBB99_8:
+ movl %ecx, 12(%edx)
+ js .LBB99_10
+# BB#9:
+ movl 148(%esp), %esi # 4-byte Reload
+.LBB99_10:
+ movl %esi, 16(%edx)
+ js .LBB99_12
+# BB#11:
+ movl %ebp, %eax
+.LBB99_12:
+ movl %eax, 20(%edx)
+ js .LBB99_14
+# BB#13:
+ movl %ebx, 112(%esp) # 4-byte Spill
+.LBB99_14:
+ movl 112(%esp), %eax # 4-byte Reload
+ movl %eax, 24(%edx)
+ addl $152, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end99:
+ .size mcl_fp_montNF7L, .Lfunc_end99-mcl_fp_montNF7L
+
+ .globl mcl_fp_montRed7L
+ .align 16, 0x90
+ .type mcl_fp_montRed7L,@function
+mcl_fp_montRed7L: # @mcl_fp_montRed7L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $124, %esp
+ movl 152(%esp), %eax
+ movl -4(%eax), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl (%eax), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 148(%esp), %ecx
+ movl (%ecx), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ imull %edx, %ecx
+ movl 24(%eax), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 20(%eax), %edx
+ movl %edx, 120(%esp) # 4-byte Spill
+ movl 16(%eax), %ebp
+ movl %ebp, 104(%esp) # 4-byte Spill
+ movl 12(%eax), %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl 8(%eax), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 4(%eax), %ebx
+ movl %ebx, 96(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ecx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebp
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %esi
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebx
+ movl %edx, %esi
+ movl %eax, %ebx
+ movl %ecx, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ addl %ebx, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ adcl 68(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 76(%esp), %edx # 4-byte Folded Reload
+ adcl 80(%esp), %ebp # 4-byte Folded Reload
+ movl 56(%esp), %ebx # 4-byte Reload
+ adcl 84(%esp), %ebx # 4-byte Folded Reload
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl %edi, %esi
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ addl 116(%esp), %eax # 4-byte Folded Reload
+ movl 148(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 4(%ecx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 8(%ecx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 12(%ecx), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl 16(%ecx), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ adcl 20(%ecx), %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ adcl 24(%ecx), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ adcl 28(%ecx), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 52(%ecx), %esi
+ movl 48(%ecx), %edi
+ movl 44(%ecx), %edx
+ movl 40(%ecx), %ebx
+ movl 36(%ecx), %ebp
+ movl 32(%ecx), %eax
+ adcl $0, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ adcl $0, %ebx
+ movl %ebx, 68(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ imull 88(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ebp
+ movl %ecx, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, %ebx
+ movl %ecx, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %eax, %ecx
+ addl %ebx, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl %ebp, %edi
+ movl %esi, %eax
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl 44(%esp), %ebx # 4-byte Reload
+ adcl 16(%esp), %ebx # 4-byte Folded Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 72(%esp), %ecx # 4-byte Folded Reload
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 36(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 40(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ adcl 12(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ imull 88(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, %ebx
+ movl %edi, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, %esi
+ movl %edi, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %eax, %edi
+ addl %esi, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl %ebx, %ebp
+ movl %ebp, %eax
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl 60(%esp), %ebx # 4-byte Reload
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 28(%esp), %edi # 4-byte Folded Reload
+ movl 32(%esp), %edi # 4-byte Reload
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%esp) # 4-byte Spill
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 56(%esp) # 4-byte Spill
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 60(%esp) # 4-byte Spill
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 64(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ movl %edi, %esi
+ imull 88(%esp), %esi # 4-byte Folded Reload
+ movl %esi, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl %esi, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, %ebp
+ movl %esi, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %eax, %edi
+ addl %ebp, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl %ecx, %ebx
+ movl %ebx, %esi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl 40(%esp), %ebx # 4-byte Reload
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 32(%esp), %edi # 4-byte Folded Reload
+ movl 28(%esp), %edi # 4-byte Reload
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 40(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 44(%esp) # 4-byte Spill
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ movl %edi, %ebp
+ imull 88(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ecx
+ movl %ebp, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %eax, %ebp
+ addl %ebx, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl %ecx, %edi
+ movl %edi, %ecx
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl 64(%esp), %ebx # 4-byte Reload
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 28(%esp), %ebp # 4-byte Folded Reload
+ movl 32(%esp), %ebp # 4-byte Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 32(%esp) # 4-byte Spill
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 68(%esp) # 4-byte Spill
+ adcl 76(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ movl %ebp, %edi
+ imull 88(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ebx
+ movl %edi, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %eax, %ebp
+ addl %ebx, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, %eax
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl 52(%esp), %ebx # 4-byte Reload
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ addl 32(%esp), %ebp # 4-byte Folded Reload
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 44(%esp) # 4-byte Spill
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 52(%esp) # 4-byte Spill
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 80(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 76(%esp) # 4-byte Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ imull %ebp, %ecx
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ addl %edi, %eax
+ movl %eax, %edi
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ adcl 80(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 80(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl 28(%esp), %ebx # 4-byte Reload
+ addl 44(%esp), %ebx # 4-byte Folded Reload
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %ebx # 4-byte Reload
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 80(%esp) # 4-byte Spill
+ adcl 52(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 68(%esp) # 4-byte Spill
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 76(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %eax
+ adcl 84(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 116(%esp), %ebx # 4-byte Reload
+ adcl $0, %ebx
+ movl %edi, %edx
+ movl %edx, %ecx
+ subl 112(%esp), %ecx # 4-byte Folded Reload
+ sbbl 96(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ sbbl 100(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 96(%esp) # 4-byte Spill
+ sbbl 108(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ sbbl 104(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %esi, %ebp
+ sbbl 120(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ movl %esi, %eax
+ sbbl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 120(%esp) # 4-byte Spill
+ sbbl $0, %ebx
+ andl $1, %ebx
+ movl %ebx, 116(%esp) # 4-byte Spill
+ jne .LBB100_2
+# BB#1:
+ movl %ecx, %edx
+.LBB100_2:
+ movl 144(%esp), %edi
+ movl %edx, (%edi)
+ movl 116(%esp), %eax # 4-byte Reload
+ testb %al, %al
+ movl 64(%esp), %eax # 4-byte Reload
+ jne .LBB100_4
+# BB#3:
+ movl 84(%esp), %eax # 4-byte Reload
+.LBB100_4:
+ movl %eax, 4(%edi)
+ movl 80(%esp), %eax # 4-byte Reload
+ jne .LBB100_6
+# BB#5:
+ movl 96(%esp), %eax # 4-byte Reload
+.LBB100_6:
+ movl %eax, 8(%edi)
+ movl 88(%esp), %eax # 4-byte Reload
+ movl 68(%esp), %ecx # 4-byte Reload
+ jne .LBB100_8
+# BB#7:
+ movl 100(%esp), %ecx # 4-byte Reload
+.LBB100_8:
+ movl %ecx, 12(%edi)
+ jne .LBB100_10
+# BB#9:
+ movl 108(%esp), %eax # 4-byte Reload
+.LBB100_10:
+ movl %eax, 16(%edi)
+ jne .LBB100_12
+# BB#11:
+ movl 112(%esp), %ebp # 4-byte Reload
+.LBB100_12:
+ movl %ebp, 20(%edi)
+ jne .LBB100_14
+# BB#13:
+ movl 120(%esp), %esi # 4-byte Reload
+.LBB100_14:
+ movl %esi, 24(%edi)
+ addl $124, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end100:
+ .size mcl_fp_montRed7L, .Lfunc_end100-mcl_fp_montRed7L
+
+ .globl mcl_fp_addPre7L
+ .align 16, 0x90
+ .type mcl_fp_addPre7L,@function
+mcl_fp_addPre7L: # @mcl_fp_addPre7L
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ movl 20(%esp), %esi
+ addl (%esi), %ecx
+ adcl 4(%esi), %edx
+ movl 8(%eax), %edi
+ adcl 8(%esi), %edi
+ movl 16(%esp), %ebx
+ movl %ecx, (%ebx)
+ movl 12(%esi), %ecx
+ movl %edx, 4(%ebx)
+ movl 16(%esi), %edx
+ adcl 12(%eax), %ecx
+ adcl 16(%eax), %edx
+ movl %edi, 8(%ebx)
+ movl 20(%eax), %edi
+ movl %ecx, 12(%ebx)
+ movl 20(%esi), %ecx
+ adcl %edi, %ecx
+ movl %edx, 16(%ebx)
+ movl %ecx, 20(%ebx)
+ movl 24(%eax), %eax
+ movl 24(%esi), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 24(%ebx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end101:
+ .size mcl_fp_addPre7L, .Lfunc_end101-mcl_fp_addPre7L
+
+ .globl mcl_fp_subPre7L
+ .align 16, 0x90
+ .type mcl_fp_subPre7L,@function
+mcl_fp_subPre7L: # @mcl_fp_subPre7L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %edx
+ movl 4(%ecx), %esi
+ xorl %eax, %eax
+ movl 28(%esp), %edi
+ subl (%edi), %edx
+ sbbl 4(%edi), %esi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edi), %ebx
+ movl 20(%esp), %ebp
+ movl %edx, (%ebp)
+ movl 12(%ecx), %edx
+ sbbl 12(%edi), %edx
+ movl %esi, 4(%ebp)
+ movl 16(%ecx), %esi
+ sbbl 16(%edi), %esi
+ movl %ebx, 8(%ebp)
+ movl 20(%edi), %ebx
+ movl %edx, 12(%ebp)
+ movl 20(%ecx), %edx
+ sbbl %ebx, %edx
+ movl %esi, 16(%ebp)
+ movl %edx, 20(%ebp)
+ movl 24(%edi), %edx
+ movl 24(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 24(%ebp)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end102:
+ .size mcl_fp_subPre7L, .Lfunc_end102-mcl_fp_subPre7L
+
+ .globl mcl_fp_shr1_7L
+ .align 16, 0x90
+ .type mcl_fp_shr1_7L,@function
+mcl_fp_shr1_7L: # @mcl_fp_shr1_7L
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl 8(%esp), %esi
+ movl %ecx, (%esi)
+ movl 8(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 4(%esi)
+ movl 12(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 8(%esi)
+ movl 16(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 12(%esi)
+ movl 20(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 16(%esi)
+ movl 24(%eax), %eax
+ shrdl $1, %eax, %edx
+ movl %edx, 20(%esi)
+ shrl %eax
+ movl %eax, 24(%esi)
+ popl %esi
+ retl
+.Lfunc_end103:
+ .size mcl_fp_shr1_7L, .Lfunc_end103-mcl_fp_shr1_7L
+
+ .globl mcl_fp_add7L
+ .align 16, 0x90
+ .type mcl_fp_add7L,@function
+mcl_fp_add7L: # @mcl_fp_add7L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $20, %esp
+ movl 48(%esp), %ebp
+ movl (%ebp), %eax
+ movl 4(%ebp), %edi
+ movl 44(%esp), %ecx
+ addl (%ecx), %eax
+ adcl 4(%ecx), %edi
+ movl 8(%ebp), %esi
+ adcl 8(%ecx), %esi
+ movl 12(%ecx), %edx
+ movl 16(%ecx), %ebx
+ adcl 12(%ebp), %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 16(%ebp), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl %ebp, %ebx
+ movl 20(%ecx), %ebp
+ adcl 20(%ebx), %ebp
+ movl 24(%ecx), %edx
+ adcl 24(%ebx), %edx
+ movl 40(%esp), %ecx
+ movl %eax, (%ecx)
+ movl %edi, 4(%ecx)
+ movl %esi, 8(%ecx)
+ movl 16(%esp), %ebx # 4-byte Reload
+ movl %ebx, 12(%ecx)
+ movl 12(%esp), %ebx # 4-byte Reload
+ movl %ebx, 16(%ecx)
+ movl %ebp, 20(%ecx)
+ movl %edx, 24(%ecx)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 52(%esp), %ecx
+ subl (%ecx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl 52(%esp), %eax
+ sbbl 4(%eax), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl %eax, %edi
+ sbbl 8(%edi), %esi
+ movl %esi, (%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ sbbl 12(%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ sbbl 16(%edi), %ecx
+ movl %ecx, %esi
+ sbbl 20(%edi), %ebp
+ sbbl 24(%edi), %edx
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB104_2
+# BB#1: # %nocarry
+ movl 8(%esp), %ecx # 4-byte Reload
+ movl 40(%esp), %eax
+ movl %eax, %ebx
+ movl %ecx, (%ebx)
+ movl 4(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%ebx)
+ movl (%esp), %eax # 4-byte Reload
+ movl %eax, 8(%ebx)
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 12(%ebx)
+ movl %esi, 16(%ebx)
+ movl %ebp, 20(%ebx)
+ movl %edx, 24(%ebx)
+.LBB104_2: # %carry
+ addl $20, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end104:
+ .size mcl_fp_add7L, .Lfunc_end104-mcl_fp_add7L
+
+ .globl mcl_fp_addNF7L
+ .align 16, 0x90
+ .type mcl_fp_addNF7L,@function
+mcl_fp_addNF7L: # @mcl_fp_addNF7L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $52, %esp
+ movl 80(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ movl 76(%esp), %esi
+ addl (%esi), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 4(%esi), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 24(%eax), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 20(%eax), %ebx
+ movl 16(%eax), %edi
+ movl 12(%eax), %ebp
+ movl 8(%eax), %ecx
+ adcl 8(%esi), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 12(%esi), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ adcl 16(%esi), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 20(%esi), %ebx
+ movl %ebx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 24(%esi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl 44(%esp), %esi # 4-byte Reload
+ subl (%eax), %esi
+ movl %esi, (%esp) # 4-byte Spill
+ sbbl 4(%eax), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ sbbl 8(%eax), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 12(%eax), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ sbbl 16(%eax), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ sbbl 20(%eax), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ sbbl 24(%eax), %edi
+ movl %edi, %ecx
+ sarl $31, %ecx
+ testl %ecx, %ecx
+ js .LBB105_2
+# BB#1:
+ movl (%esp), %esi # 4-byte Reload
+.LBB105_2:
+ movl 72(%esp), %ecx
+ movl %esi, (%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ js .LBB105_4
+# BB#3:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB105_4:
+ movl %eax, 4(%ecx)
+ movl 48(%esp), %ebp # 4-byte Reload
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl 36(%esp), %edx # 4-byte Reload
+ movl 32(%esp), %esi # 4-byte Reload
+ movl 24(%esp), %ebx # 4-byte Reload
+ js .LBB105_6
+# BB#5:
+ movl 8(%esp), %ebx # 4-byte Reload
+.LBB105_6:
+ movl 72(%esp), %eax
+ movl %ebx, 8(%eax)
+ movl %eax, %ebx
+ js .LBB105_8
+# BB#7:
+ movl 16(%esp), %esi # 4-byte Reload
+.LBB105_8:
+ movl %esi, 12(%ebx)
+ js .LBB105_10
+# BB#9:
+ movl 20(%esp), %edx # 4-byte Reload
+.LBB105_10:
+ movl %edx, 16(%ebx)
+ js .LBB105_12
+# BB#11:
+ movl 12(%esp), %ecx # 4-byte Reload
+.LBB105_12:
+ movl %ecx, 20(%ebx)
+ js .LBB105_14
+# BB#13:
+ movl %edi, %ebp
+.LBB105_14:
+ movl %ebp, 24(%ebx)
+ addl $52, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end105:
+ .size mcl_fp_addNF7L, .Lfunc_end105-mcl_fp_addNF7L
+
+ .globl mcl_fp_sub7L
+ .align 16, 0x90
+ .type mcl_fp_sub7L,@function
+mcl_fp_sub7L: # @mcl_fp_sub7L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $24, %esp
+ movl 48(%esp), %edi
+ movl (%edi), %eax
+ movl 4(%edi), %ecx
+ xorl %ebx, %ebx
+ movl 52(%esp), %esi
+ subl (%esi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ sbbl 4(%esi), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 8(%edi), %edx
+ sbbl 8(%esi), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl 12(%edi), %ecx
+ sbbl 12(%esi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 16(%edi), %eax
+ sbbl 16(%esi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 20(%edi), %ebp
+ sbbl 20(%esi), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 24(%edi), %edi
+ sbbl 24(%esi), %edi
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 44(%esp), %ebx
+ movl 16(%esp), %esi # 4-byte Reload
+ movl %esi, (%ebx)
+ movl 20(%esp), %esi # 4-byte Reload
+ movl %esi, 4(%ebx)
+ movl %edx, 8(%ebx)
+ movl %ecx, 12(%ebx)
+ movl %eax, 16(%ebx)
+ movl %ebp, 20(%ebx)
+ movl %edi, 24(%ebx)
+ je .LBB106_2
+# BB#1: # %carry
+ movl 56(%esp), %ebp
+ movl 16(%esp), %ecx # 4-byte Reload
+ addl (%ebp), %ecx
+ movl %ecx, (%ebx)
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl 4(%ebp), %edx
+ movl %edx, 4(%ebx)
+ movl 4(%esp), %ecx # 4-byte Reload
+ adcl 8(%ebp), %ecx
+ movl 12(%ebp), %eax
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 8(%ebx)
+ movl 16(%ebp), %ecx
+ adcl (%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl %ecx, 16(%ebx)
+ movl 20(%ebp), %eax
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl 24(%ebp), %eax
+ adcl %edi, %eax
+ movl %eax, 24(%ebx)
+.LBB106_2: # %nocarry
+ addl $24, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end106:
+ .size mcl_fp_sub7L, .Lfunc_end106-mcl_fp_sub7L
+
+ .globl mcl_fp_subNF7L
+ .align 16, 0x90
+ .type mcl_fp_subNF7L,@function
+mcl_fp_subNF7L: # @mcl_fp_subNF7L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $32, %esp
+ movl 56(%esp), %eax
+ movl (%eax), %esi
+ movl 4(%eax), %edx
+ movl 60(%esp), %ecx
+ subl (%ecx), %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ sbbl 4(%ecx), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 24(%eax), %edx
+ movl 20(%eax), %esi
+ movl 16(%eax), %edi
+ movl 12(%eax), %ebx
+ movl 8(%eax), %eax
+ sbbl 8(%ecx), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ sbbl 12(%ecx), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ sbbl 16(%ecx), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ sbbl 20(%ecx), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ sbbl 24(%ecx), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %edx, %ecx
+ sarl $31, %ecx
+ movl %ecx, %eax
+ shldl $1, %edx, %eax
+ movl 64(%esp), %edx
+ andl (%edx), %eax
+ movl 24(%edx), %esi
+ andl %ecx, %esi
+ movl %esi, (%esp) # 4-byte Spill
+ movl 20(%edx), %ebx
+ andl %ecx, %ebx
+ movl 16(%edx), %edi
+ andl %ecx, %edi
+ movl 12(%edx), %esi
+ andl %ecx, %esi
+ movl 64(%esp), %edx
+ movl 8(%edx), %edx
+ andl %ecx, %edx
+ movl 64(%esp), %ebp
+ andl 4(%ebp), %ecx
+ addl 20(%esp), %eax # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl 52(%esp), %ebp
+ movl %eax, (%ebp)
+ adcl 4(%esp), %edx # 4-byte Folded Reload
+ movl %ebp, %eax
+ movl %ecx, 4(%eax)
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 8(%eax)
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 12(%eax)
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %edi, 16(%eax)
+ movl %ebx, 20(%eax)
+ movl (%esp), %ecx # 4-byte Reload
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%eax)
+ addl $32, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end107:
+ .size mcl_fp_subNF7L, .Lfunc_end107-mcl_fp_subNF7L
+
+ .globl mcl_fpDbl_add7L
+ .align 16, 0x90
+ .type mcl_fpDbl_add7L,@function
+mcl_fpDbl_add7L: # @mcl_fpDbl_add7L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $44, %esp
+ movl 72(%esp), %esi
+ movl 68(%esp), %edx
+ movl 12(%edx), %edi
+ movl 16(%edx), %ecx
+ movl 8(%esi), %eax
+ movl (%esi), %ebx
+ addl (%edx), %ebx
+ movl 64(%esp), %ebp
+ movl %ebx, (%ebp)
+ movl 4(%esi), %ebx
+ adcl 4(%edx), %ebx
+ adcl 8(%edx), %eax
+ adcl 12(%esi), %edi
+ adcl 16(%esi), %ecx
+ movl %ebx, 4(%ebp)
+ movl %esi, %ebx
+ movl 36(%ebx), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl %eax, 8(%ebp)
+ movl 20(%ebx), %eax
+ movl %edi, 12(%ebp)
+ movl 20(%edx), %edi
+ adcl %eax, %edi
+ movl 24(%ebx), %eax
+ movl %ecx, 16(%ebp)
+ movl 24(%edx), %ecx
+ adcl %eax, %ecx
+ movl 28(%ebx), %eax
+ movl %edi, 20(%ebp)
+ movl 28(%edx), %edi
+ adcl %eax, %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 32(%ebx), %eax
+ movl %ecx, 24(%ebp)
+ movl 32(%edx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%edx), %esi
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl 40(%ebx), %ecx
+ movl 40(%edx), %eax
+ adcl %ecx, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%ebx), %ebp
+ movl 44(%edx), %ecx
+ adcl %ebp, %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 48(%ebx), %ebp
+ movl %ebx, %eax
+ movl 48(%edx), %ebx
+ adcl %ebp, %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 52(%eax), %eax
+ movl 52(%edx), %ebp
+ adcl %eax, %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ sbbl %edx, %edx
+ andl $1, %edx
+ movl 76(%esp), %eax
+ subl (%eax), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ sbbl 4(%eax), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ movl 76(%esp), %edi
+ sbbl 8(%edi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ sbbl 12(%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ sbbl 16(%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ sbbl 20(%edi), %ebx
+ sbbl 24(%edi), %ebp
+ sbbl $0, %edx
+ andl $1, %edx
+ jne .LBB108_2
+# BB#1:
+ movl %ebp, 32(%esp) # 4-byte Spill
+.LBB108_2:
+ testb %dl, %dl
+ movl 20(%esp), %ecx # 4-byte Reload
+ jne .LBB108_4
+# BB#3:
+ movl (%esp), %esi # 4-byte Reload
+ movl 4(%esp), %eax # 4-byte Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 8(%esp), %ecx # 4-byte Reload
+.LBB108_4:
+ movl 64(%esp), %eax
+ movl %ecx, 28(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl %esi, 36(%eax)
+ movl 24(%esp), %edx # 4-byte Reload
+ movl 36(%esp), %ecx # 4-byte Reload
+ jne .LBB108_6
+# BB#5:
+ movl 12(%esp), %ecx # 4-byte Reload
+.LBB108_6:
+ movl %ecx, 40(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ jne .LBB108_8
+# BB#7:
+ movl 16(%esp), %edx # 4-byte Reload
+.LBB108_8:
+ movl %edx, 44(%eax)
+ jne .LBB108_10
+# BB#9:
+ movl %ebx, %ecx
+.LBB108_10:
+ movl %ecx, 48(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%eax)
+ addl $44, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end108:
+ .size mcl_fpDbl_add7L, .Lfunc_end108-mcl_fpDbl_add7L
+
+ .globl mcl_fpDbl_sub7L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub7L,@function
+mcl_fpDbl_sub7L: # @mcl_fpDbl_sub7L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $32, %esp
+ movl 56(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %edx
+ movl 60(%esp), %edi
+ subl (%edi), %eax
+ sbbl 4(%edi), %edx
+ movl 8(%esi), %ebx
+ sbbl 8(%edi), %ebx
+ movl 52(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 12(%esi), %eax
+ sbbl 12(%edi), %eax
+ movl %edx, 4(%ecx)
+ movl 16(%esi), %edx
+ sbbl 16(%edi), %edx
+ movl %ebx, 8(%ecx)
+ movl 20(%edi), %ebx
+ movl %eax, 12(%ecx)
+ movl 20(%esi), %eax
+ sbbl %ebx, %eax
+ movl 24(%edi), %ebx
+ movl %edx, 16(%ecx)
+ movl 24(%esi), %edx
+ sbbl %ebx, %edx
+ movl 28(%edi), %ebx
+ movl %eax, 20(%ecx)
+ movl 28(%esi), %eax
+ sbbl %ebx, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 32(%edi), %eax
+ movl %edx, 24(%ecx)
+ movl 32(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl 36(%edi), %eax
+ movl 36(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 40(%edi), %eax
+ movl 40(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 44(%edi), %eax
+ movl 44(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 48(%edi), %eax
+ movl 48(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 52(%edi), %eax
+ movl 52(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 64(%esp), %esi
+ jne .LBB109_1
+# BB#2:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB109_3
+.LBB109_1:
+ movl 24(%esi), %edx
+ movl %edx, (%esp) # 4-byte Spill
+.LBB109_3:
+ testb %al, %al
+ jne .LBB109_4
+# BB#5:
+ movl $0, %edi
+ movl $0, %eax
+ jmp .LBB109_6
+.LBB109_4:
+ movl (%esi), %eax
+ movl 4(%esi), %edi
+.LBB109_6:
+ jne .LBB109_7
+# BB#8:
+ movl $0, %ebx
+ jmp .LBB109_9
+.LBB109_7:
+ movl 20(%esi), %ebx
+.LBB109_9:
+ jne .LBB109_10
+# BB#11:
+ movl $0, %ebp
+ jmp .LBB109_12
+.LBB109_10:
+ movl 16(%esi), %ebp
+.LBB109_12:
+ jne .LBB109_13
+# BB#14:
+ movl $0, %edx
+ jmp .LBB109_15
+.LBB109_13:
+ movl 12(%esi), %edx
+.LBB109_15:
+ jne .LBB109_16
+# BB#17:
+ xorl %esi, %esi
+ jmp .LBB109_18
+.LBB109_16:
+ movl 8(%esi), %esi
+.LBB109_18:
+ addl 12(%esp), %eax # 4-byte Folded Reload
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %eax, 28(%ecx)
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %edi, 32(%ecx)
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %esi, 36(%ecx)
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ movl %edx, 40(%ecx)
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, 44(%ecx)
+ movl %ebx, 48(%ecx)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%ecx)
+ addl $32, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end109:
+ .size mcl_fpDbl_sub7L, .Lfunc_end109-mcl_fpDbl_sub7L
+
+ .align 16, 0x90
+ .type .LmulPv256x32,@function
+.LmulPv256x32: # @mulPv256x32
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $48, %esp
+ movl %edx, %esi
+ movl 68(%esp), %ebx
+ movl %ebx, %eax
+ mull 28(%esi)
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 24(%esi)
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 20(%esi)
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 16(%esi)
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 12(%esi)
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 8(%esi)
+ movl %edx, %ebp
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 4(%esi)
+ movl %edx, %edi
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull (%esi)
+ movl %eax, (%ecx)
+ addl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%ecx)
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%ecx)
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%ecx)
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 32(%ecx)
+ movl %ecx, %eax
+ addl $48, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end110:
+ .size .LmulPv256x32, .Lfunc_end110-.LmulPv256x32
+
+ .globl mcl_fp_mulUnitPre8L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre8L,@function
+mcl_fp_mulUnitPre8L: # @mcl_fp_mulUnitPre8L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $60, %esp
+ calll .L111$pb
+.L111$pb:
+ popl %ebx
+.Ltmp2:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp2-.L111$pb), %ebx
+ movl 88(%esp), %eax
+ movl %eax, (%esp)
+ leal 24(%esp), %ecx
+ movl 84(%esp), %edx
+ calll .LmulPv256x32
+ movl 56(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 52(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 48(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %esi
+ movl 40(%esp), %edi
+ movl 36(%esp), %ebx
+ movl 32(%esp), %ebp
+ movl 24(%esp), %edx
+ movl 28(%esp), %ecx
+ movl 80(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %ebp, 8(%eax)
+ movl %ebx, 12(%eax)
+ movl %edi, 16(%eax)
+ movl %esi, 20(%eax)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ addl $60, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end111:
+ .size mcl_fp_mulUnitPre8L, .Lfunc_end111-mcl_fp_mulUnitPre8L
+
+ .globl mcl_fpDbl_mulPre8L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre8L,@function
+mcl_fpDbl_mulPre8L: # @mcl_fpDbl_mulPre8L
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $156, %esp
+ calll .L112$pb
+.L112$pb:
+ popl %ebx
+.Ltmp3:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp3-.L112$pb), %ebx
+ movl %ebx, -96(%ebp) # 4-byte Spill
+ movl 16(%ebp), %esi
+ movl %esi, 8(%esp)
+ movl 12(%ebp), %edi
+ movl %edi, 4(%esp)
+ movl 8(%ebp), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre4L@PLT
+ leal 16(%esi), %eax
+ movl %eax, 8(%esp)
+ leal 16(%edi), %eax
+ movl %eax, 4(%esp)
+ movl 8(%ebp), %eax
+ leal 32(%eax), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre4L@PLT
+ movl 24(%edi), %esi
+ movl (%edi), %ebx
+ movl 4(%edi), %eax
+ addl 16(%edi), %ebx
+ movl %ebx, -120(%ebp) # 4-byte Spill
+ adcl 20(%edi), %eax
+ movl %eax, -100(%ebp) # 4-byte Spill
+ adcl 8(%edi), %esi
+ movl %esi, -108(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -80(%ebp) # 4-byte Spill
+ movl 16(%ebp), %edi
+ movl (%edi), %eax
+ movl 4(%edi), %ecx
+ addl 16(%edi), %eax
+ adcl 20(%edi), %ecx
+ movl %ecx, -124(%ebp) # 4-byte Spill
+ movl 24(%edi), %edx
+ adcl 8(%edi), %edx
+ movl 28(%edi), %ecx
+ adcl 12(%edi), %ecx
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ movl %edi, -128(%ebp) # 4-byte Spill
+ jb .LBB112_2
+# BB#1:
+ xorl %esi, %esi
+ xorl %ebx, %ebx
+.LBB112_2:
+ movl %ebx, -112(%ebp) # 4-byte Spill
+ movl %esi, -104(%ebp) # 4-byte Spill
+ movl 12(%ebp), %esi
+ movl 28(%esi), %edi
+ movl -80(%ebp), %ebx # 4-byte Reload
+ pushl %eax
+ movl %ebx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ adcl 12(%esi), %edi
+ movl %edi, -116(%ebp) # 4-byte Spill
+ movl %ecx, -84(%ebp) # 4-byte Spill
+ movl %edx, %edi
+ movl -124(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -80(%ebp) # 4-byte Spill
+ movl %eax, -92(%ebp) # 4-byte Spill
+ jb .LBB112_4
+# BB#3:
+ movl $0, -84(%ebp) # 4-byte Folded Spill
+ movl $0, %edi
+ movl $0, -80(%ebp) # 4-byte Folded Spill
+ movl $0, -92(%ebp) # 4-byte Folded Spill
+.LBB112_4:
+ movl %edi, -88(%ebp) # 4-byte Spill
+ movl -120(%ebp), %esi # 4-byte Reload
+ movl %esi, -60(%ebp)
+ movl -100(%ebp), %edi # 4-byte Reload
+ movl %edi, -56(%ebp)
+ movl -108(%ebp), %esi # 4-byte Reload
+ movl %esi, -52(%ebp)
+ movl %eax, -76(%ebp)
+ movl %ebx, -72(%ebp)
+ movl %edx, -68(%ebp)
+ movl %ecx, -64(%ebp)
+ sbbl %edx, %edx
+ movl -116(%ebp), %esi # 4-byte Reload
+ movl %esi, -48(%ebp)
+ movl -128(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB112_6
+# BB#5:
+ movl $0, %esi
+ movl $0, %edi
+.LBB112_6:
+ sbbl %eax, %eax
+ leal -76(%ebp), %ecx
+ movl %ecx, 8(%esp)
+ leal -60(%ebp), %ecx
+ movl %ecx, 4(%esp)
+ leal -44(%ebp), %ecx
+ movl %ecx, (%esp)
+ andl %eax, %edx
+ movl %edi, %eax
+ movl -92(%ebp), %edi # 4-byte Reload
+ addl -112(%ebp), %edi # 4-byte Folded Reload
+ adcl %eax, -80(%ebp) # 4-byte Folded Spill
+ movl -104(%ebp), %eax # 4-byte Reload
+ adcl %eax, -88(%ebp) # 4-byte Folded Spill
+ adcl %esi, -84(%ebp) # 4-byte Folded Spill
+ sbbl %esi, %esi
+ andl $1, %esi
+ andl $1, %edx
+ movl %edx, -92(%ebp) # 4-byte Spill
+ movl -96(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre4L@PLT
+ addl -28(%ebp), %edi
+ movl -80(%ebp), %eax # 4-byte Reload
+ adcl -24(%ebp), %eax
+ movl %eax, -80(%ebp) # 4-byte Spill
+ movl -88(%ebp), %eax # 4-byte Reload
+ adcl -20(%ebp), %eax
+ movl %eax, -88(%ebp) # 4-byte Spill
+ movl -84(%ebp), %eax # 4-byte Reload
+ adcl -16(%ebp), %eax
+ movl %eax, -84(%ebp) # 4-byte Spill
+ adcl %esi, -92(%ebp) # 4-byte Folded Spill
+ movl -44(%ebp), %eax
+ movl 8(%ebp), %esi
+ subl (%esi), %eax
+ movl %eax, -116(%ebp) # 4-byte Spill
+ movl -40(%ebp), %ebx
+ sbbl 4(%esi), %ebx
+ movl -36(%ebp), %eax
+ sbbl 8(%esi), %eax
+ movl %eax, -96(%ebp) # 4-byte Spill
+ movl -32(%ebp), %edx
+ sbbl 12(%esi), %edx
+ movl 16(%esi), %eax
+ movl %eax, -100(%ebp) # 4-byte Spill
+ sbbl %eax, %edi
+ movl 20(%esi), %eax
+ movl %eax, -112(%ebp) # 4-byte Spill
+ sbbl %eax, -80(%ebp) # 4-byte Folded Spill
+ movl 24(%esi), %eax
+ movl %eax, -104(%ebp) # 4-byte Spill
+ sbbl %eax, -88(%ebp) # 4-byte Folded Spill
+ movl 28(%esi), %eax
+ movl %eax, -108(%ebp) # 4-byte Spill
+ sbbl %eax, -84(%ebp) # 4-byte Folded Spill
+ sbbl $0, -92(%ebp) # 4-byte Folded Spill
+ movl 32(%esi), %ecx
+ movl %ecx, -132(%ebp) # 4-byte Spill
+ movl -116(%ebp), %eax # 4-byte Reload
+ subl %ecx, %eax
+ movl 36(%esi), %ecx
+ movl %ecx, -136(%ebp) # 4-byte Spill
+ sbbl %ecx, %ebx
+ movl 40(%esi), %ecx
+ movl %ecx, -128(%ebp) # 4-byte Spill
+ sbbl %ecx, -96(%ebp) # 4-byte Folded Spill
+ movl 44(%esi), %ecx
+ movl %ecx, -140(%ebp) # 4-byte Spill
+ sbbl %ecx, %edx
+ movl 48(%esi), %ecx
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ sbbl %ecx, %edi
+ movl 52(%esi), %ecx
+ movl %ecx, -116(%ebp) # 4-byte Spill
+ sbbl %ecx, -80(%ebp) # 4-byte Folded Spill
+ movl 56(%esi), %ecx
+ movl %ecx, -120(%ebp) # 4-byte Spill
+ sbbl %ecx, -88(%ebp) # 4-byte Folded Spill
+ movl 60(%esi), %ecx
+ movl %ecx, -124(%ebp) # 4-byte Spill
+ sbbl %ecx, -84(%ebp) # 4-byte Folded Spill
+ sbbl $0, -92(%ebp) # 4-byte Folded Spill
+ addl -100(%ebp), %eax # 4-byte Folded Reload
+ adcl -112(%ebp), %ebx # 4-byte Folded Reload
+ movl %eax, 16(%esi)
+ movl -96(%ebp), %eax # 4-byte Reload
+ adcl -104(%ebp), %eax # 4-byte Folded Reload
+ movl %ebx, 20(%esi)
+ adcl -108(%ebp), %edx # 4-byte Folded Reload
+ movl %eax, 24(%esi)
+ adcl -132(%ebp), %edi # 4-byte Folded Reload
+ movl %edx, 28(%esi)
+ movl -80(%ebp), %eax # 4-byte Reload
+ adcl -136(%ebp), %eax # 4-byte Folded Reload
+ movl %edi, 32(%esi)
+ movl -88(%ebp), %ecx # 4-byte Reload
+ adcl -128(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%esi)
+ movl -84(%ebp), %eax # 4-byte Reload
+ adcl -140(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 40(%esi)
+ movl -92(%ebp), %ecx # 4-byte Reload
+ adcl -144(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 44(%esi)
+ movl %ecx, 48(%esi)
+ movl -116(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 52(%esi)
+ movl -120(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 56(%esi)
+ movl -124(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 60(%esi)
+ addl $156, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end112:
+ .size mcl_fpDbl_mulPre8L, .Lfunc_end112-mcl_fpDbl_mulPre8L
+
+ .globl mcl_fpDbl_sqrPre8L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre8L,@function
+mcl_fpDbl_sqrPre8L: # @mcl_fpDbl_sqrPre8L
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $156, %esp
+ calll .L113$pb
+.L113$pb:
+ popl %ebx
+.Ltmp4:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp4-.L113$pb), %ebx
+ movl %ebx, -96(%ebp) # 4-byte Spill
+ movl 12(%ebp), %edi
+ movl %edi, 8(%esp)
+ movl %edi, 4(%esp)
+ movl 8(%ebp), %esi
+ movl %esi, (%esp)
+ calll mcl_fpDbl_mulPre4L@PLT
+ leal 16(%edi), %eax
+ movl %eax, 8(%esp)
+ movl %eax, 4(%esp)
+ leal 32(%esi), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre4L@PLT
+ movl (%edi), %esi
+ movl 4(%edi), %ecx
+ addl 16(%edi), %esi
+ movl %esi, -108(%ebp) # 4-byte Spill
+ adcl 20(%edi), %ecx
+ seto %al
+ lahf
+ movl %eax, %edx
+ addl %esi, %esi
+ movl %esi, -84(%ebp) # 4-byte Spill
+ movl %ecx, %esi
+ adcl %esi, %esi
+ movl %esi, -80(%ebp) # 4-byte Spill
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %esi
+ popl %eax
+ movl %esi, -88(%ebp) # 4-byte Spill
+ movl 24(%edi), %esi
+ pushl %eax
+ movl %edx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ adcl 8(%edi), %esi
+ movl 28(%edi), %edx
+ adcl 12(%edi), %edx
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ movl %edi, -100(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -104(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %ebx
+ sbbl %edi, %edi
+ movl %edi, -92(%ebp) # 4-byte Spill
+ pushl %eax
+ movl %ebx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB113_2
+# BB#1:
+ movl $0, -80(%ebp) # 4-byte Folded Spill
+ movl $0, -84(%ebp) # 4-byte Folded Spill
+.LBB113_2:
+ movl %esi, %ebx
+ movl -88(%ebp), %edi # 4-byte Reload
+ movl %edi, %eax
+ addb $127, %al
+ sahf
+ adcl %ebx, %ebx
+ movl %edx, %edi
+ adcl %edi, %edi
+ movl -104(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB113_4
+# BB#3:
+ xorl %edi, %edi
+ xorl %ebx, %ebx
+.LBB113_4:
+ movl %ebx, -88(%ebp) # 4-byte Spill
+ movl -108(%ebp), %eax # 4-byte Reload
+ movl %eax, -60(%ebp)
+ movl %ecx, -56(%ebp)
+ movl %esi, -52(%ebp)
+ movl %edx, -48(%ebp)
+ movl %eax, -76(%ebp)
+ movl %ecx, -72(%ebp)
+ movl %esi, -68(%ebp)
+ movl %edx, -64(%ebp)
+ movl -100(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB113_5
+# BB#6:
+ movl $0, -100(%ebp) # 4-byte Folded Spill
+ jmp .LBB113_7
+.LBB113_5:
+ shrl $31, %edx
+ movl %edx, -100(%ebp) # 4-byte Spill
+.LBB113_7:
+ leal -76(%ebp), %eax
+ movl %eax, 8(%esp)
+ leal -60(%ebp), %eax
+ movl %eax, 4(%esp)
+ leal -44(%ebp), %eax
+ movl %eax, (%esp)
+ movl -92(%ebp), %esi # 4-byte Reload
+ andl $1, %esi
+ movl -96(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre4L@PLT
+ movl -84(%ebp), %eax # 4-byte Reload
+ addl -28(%ebp), %eax
+ movl %eax, -84(%ebp) # 4-byte Spill
+ movl -80(%ebp), %eax # 4-byte Reload
+ adcl -24(%ebp), %eax
+ movl %eax, -80(%ebp) # 4-byte Spill
+ movl -88(%ebp), %eax # 4-byte Reload
+ adcl -20(%ebp), %eax
+ movl %eax, -88(%ebp) # 4-byte Spill
+ adcl -16(%ebp), %edi
+ movl %edi, -92(%ebp) # 4-byte Spill
+ adcl -100(%ebp), %esi # 4-byte Folded Reload
+ movl -44(%ebp), %eax
+ movl 8(%ebp), %edi
+ subl (%edi), %eax
+ movl %eax, -116(%ebp) # 4-byte Spill
+ movl -40(%ebp), %ebx
+ sbbl 4(%edi), %ebx
+ movl -36(%ebp), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, -96(%ebp) # 4-byte Spill
+ movl -32(%ebp), %edx
+ sbbl 12(%edi), %edx
+ movl 16(%edi), %eax
+ movl %eax, -100(%ebp) # 4-byte Spill
+ sbbl %eax, -84(%ebp) # 4-byte Folded Spill
+ movl 20(%edi), %eax
+ movl %eax, -112(%ebp) # 4-byte Spill
+ sbbl %eax, -80(%ebp) # 4-byte Folded Spill
+ movl 24(%edi), %eax
+ movl %eax, -104(%ebp) # 4-byte Spill
+ sbbl %eax, -88(%ebp) # 4-byte Folded Spill
+ movl 28(%edi), %eax
+ movl %eax, -108(%ebp) # 4-byte Spill
+ sbbl %eax, -92(%ebp) # 4-byte Folded Spill
+ sbbl $0, %esi
+ movl 32(%edi), %ecx
+ movl %ecx, -132(%ebp) # 4-byte Spill
+ movl -116(%ebp), %eax # 4-byte Reload
+ subl %ecx, %eax
+ movl 36(%edi), %ecx
+ movl %ecx, -136(%ebp) # 4-byte Spill
+ sbbl %ecx, %ebx
+ movl 40(%edi), %ecx
+ movl %ecx, -128(%ebp) # 4-byte Spill
+ sbbl %ecx, -96(%ebp) # 4-byte Folded Spill
+ movl 44(%edi), %ecx
+ movl %ecx, -140(%ebp) # 4-byte Spill
+ sbbl %ecx, %edx
+ movl 48(%edi), %ecx
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ sbbl %ecx, -84(%ebp) # 4-byte Folded Spill
+ movl 52(%edi), %ecx
+ movl %ecx, -116(%ebp) # 4-byte Spill
+ sbbl %ecx, -80(%ebp) # 4-byte Folded Spill
+ movl 56(%edi), %ecx
+ movl %ecx, -120(%ebp) # 4-byte Spill
+ sbbl %ecx, -88(%ebp) # 4-byte Folded Spill
+ movl 60(%edi), %ecx
+ movl %ecx, -124(%ebp) # 4-byte Spill
+ sbbl %ecx, -92(%ebp) # 4-byte Folded Spill
+ sbbl $0, %esi
+ addl -100(%ebp), %eax # 4-byte Folded Reload
+ adcl -112(%ebp), %ebx # 4-byte Folded Reload
+ movl %eax, 16(%edi)
+ movl -96(%ebp), %eax # 4-byte Reload
+ adcl -104(%ebp), %eax # 4-byte Folded Reload
+ movl %ebx, 20(%edi)
+ adcl -108(%ebp), %edx # 4-byte Folded Reload
+ movl %eax, 24(%edi)
+ movl -84(%ebp), %eax # 4-byte Reload
+ adcl -132(%ebp), %eax # 4-byte Folded Reload
+ movl %edx, 28(%edi)
+ movl -80(%ebp), %ecx # 4-byte Reload
+ adcl -136(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 32(%edi)
+ movl -88(%ebp), %eax # 4-byte Reload
+ adcl -128(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 36(%edi)
+ movl -92(%ebp), %ecx # 4-byte Reload
+ adcl -140(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 40(%edi)
+ adcl -144(%ebp), %esi # 4-byte Folded Reload
+ movl %ecx, 44(%edi)
+ movl %esi, 48(%edi)
+ movl -116(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 52(%edi)
+ movl -120(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 56(%edi)
+ movl -124(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 60(%edi)
+ addl $156, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end113:
+ .size mcl_fpDbl_sqrPre8L, .Lfunc_end113-mcl_fpDbl_sqrPre8L
+
+ .globl mcl_fp_mont8L
+ .align 16, 0x90
+ .type mcl_fp_mont8L,@function
+mcl_fp_mont8L: # @mcl_fp_mont8L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $700, %esp # imm = 0x2BC
+ calll .L114$pb
+.L114$pb:
+ popl %ebx
+.Ltmp5:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp5-.L114$pb), %ebx
+ movl 732(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 664(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 664(%esp), %ebp
+ movl 668(%esp), %edi
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 696(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 692(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 688(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 684(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 680(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 676(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 672(%esp), %esi
+ movl %eax, (%esp)
+ leal 624(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ addl 624(%esp), %ebp
+ adcl 628(%esp), %edi
+ adcl 632(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 656(%esp), %ebp
+ sbbl %eax, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 584(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 60(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 584(%esp), %edi
+ adcl 588(%esp), %esi
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 592(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 596(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 600(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 604(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 608(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 612(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ adcl 616(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %edi, %eax
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 544(%esp), %ecx
+ movl 732(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv256x32
+ andl $1, %ebp
+ addl 544(%esp), %edi
+ adcl 548(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 568(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl 728(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ addl 504(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 524(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 536(%esp), %ebp
+ sbbl %edi, %edi
+ movl %esi, %eax
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 464(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ movl %edi, %eax
+ andl $1, %eax
+ addl 464(%esp), %esi
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 468(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 472(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 476(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 480(%esp), %edi
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 484(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 488(%esp), %esi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 492(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 496(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 724(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv256x32
+ movl 40(%esp), %ecx # 4-byte Reload
+ addl 424(%esp), %ecx
+ movl 28(%esp), %ebp # 4-byte Reload
+ adcl 428(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 436(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 444(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 448(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 384(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ movl 40(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 384(%esp), %esi
+ adcl 388(%esp), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 392(%esp), %esi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 396(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 400(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 404(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 408(%esp), %edi
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 412(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 416(%esp), %ebp
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 344(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 28(%esp), %ecx # 4-byte Reload
+ addl 344(%esp), %ecx
+ adcl 348(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 352(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 364(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 372(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ movl %ebp, %eax
+ andl $1, %eax
+ addl 304(%esp), %edi
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 308(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 312(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 316(%esp), %ebp
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 320(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 324(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 328(%esp), %esi
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 332(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 336(%esp), %edi
+ adcl $0, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 264(%esp), %ecx
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 272(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 276(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 284(%esp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 292(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ movl %edi, %eax
+ andl $1, %eax
+ addl 224(%esp), %esi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 228(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 232(%esp), %esi
+ adcl 236(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 240(%esp), %edi
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 244(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 248(%esp), %ebp
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 252(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 256(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 184(%esp), %ecx
+ adcl 188(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 196(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 204(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %ebp # 4-byte Reload
+ adcl 212(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 144(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ movl %edi, %ecx
+ andl $1, %ecx
+ addl 144(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 152(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 32(%esp), %edi # 4-byte Reload
+ adcl 160(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 172(%esp), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %ebp # 4-byte Reload
+ adcl 176(%esp), %ebp
+ adcl $0, %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 104(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 52(%esp), %ecx # 4-byte Reload
+ addl 104(%esp), %ecx
+ adcl 108(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 116(%esp), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 120(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 124(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %edi # 4-byte Reload
+ adcl 128(%esp), %edi
+ adcl 132(%esp), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 136(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl 24(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %ebp
+ movl %eax, (%esp)
+ leal 64(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ andl $1, %esi
+ addl 64(%esp), %ebp
+ movl 32(%esp), %ebx # 4-byte Reload
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 72(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 76(%esp), %ebx
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 80(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 84(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 88(%esp), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 92(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %eax, %edx
+ movl 732(%esp), %ebp
+ subl (%ebp), %edx
+ movl %ecx, %eax
+ sbbl 4(%ebp), %eax
+ movl %ebx, %ecx
+ sbbl 8(%ebp), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ sbbl 12(%ebp), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ sbbl 16(%ebp), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ sbbl 20(%ebp), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ sbbl 24(%ebp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ sbbl 28(%ebp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ sbbl $0, %esi
+ andl $1, %esi
+ movl %esi, %ecx
+ jne .LBB114_2
+# BB#1:
+ movl %edx, %ebp
+.LBB114_2:
+ movl 720(%esp), %edx
+ movl %ebp, (%edx)
+ testb %cl, %cl
+ movl 60(%esp), %ebp # 4-byte Reload
+ jne .LBB114_4
+# BB#3:
+ movl %eax, %ebp
+.LBB114_4:
+ movl %ebp, 4(%edx)
+ jne .LBB114_6
+# BB#5:
+ movl 12(%esp), %ebx # 4-byte Reload
+.LBB114_6:
+ movl %ebx, 8(%edx)
+ movl 28(%esp), %eax # 4-byte Reload
+ jne .LBB114_8
+# BB#7:
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%esp) # 4-byte Spill
+.LBB114_8:
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%edx)
+ movl 40(%esp), %edi # 4-byte Reload
+ jne .LBB114_10
+# BB#9:
+ movl 20(%esp), %edi # 4-byte Reload
+.LBB114_10:
+ movl %edi, 16(%edx)
+ jne .LBB114_12
+# BB#11:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB114_12:
+ movl %eax, 20(%edx)
+ movl 36(%esp), %eax # 4-byte Reload
+ jne .LBB114_14
+# BB#13:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB114_14:
+ movl %eax, 24(%edx)
+ movl 48(%esp), %eax # 4-byte Reload
+ jne .LBB114_16
+# BB#15:
+ movl 52(%esp), %eax # 4-byte Reload
+.LBB114_16:
+ movl %eax, 28(%edx)
+ addl $700, %esp # imm = 0x2BC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end114:
+ .size mcl_fp_mont8L, .Lfunc_end114-mcl_fp_mont8L
+
+ .globl mcl_fp_montNF8L
+ .align 16, 0x90
+ .type mcl_fp_montNF8L,@function
+mcl_fp_montNF8L: # @mcl_fp_montNF8L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $700, %esp # imm = 0x2BC
+ calll .L115$pb
+.L115$pb:
+ popl %ebx
+.Ltmp6:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp6-.L115$pb), %ebx
+ movl 732(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 664(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 664(%esp), %ebp
+ movl 668(%esp), %edi
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 696(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 692(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 688(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 684(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 680(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 676(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 672(%esp), %esi
+ movl %eax, (%esp)
+ leal 624(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ addl 624(%esp), %ebp
+ adcl 628(%esp), %edi
+ adcl 632(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %ebp # 4-byte Reload
+ adcl 640(%esp), %ebp
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 584(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 616(%esp), %ecx
+ addl 584(%esp), %edi
+ adcl 588(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 596(%esp), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 604(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 544(%esp), %ecx
+ movl 732(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv256x32
+ addl 544(%esp), %edi
+ adcl 548(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 564(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 572(%esp), %edi
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 576(%esp), %ebp
+ movl 728(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 536(%esp), %ecx
+ addl 504(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 528(%esp), %edi
+ adcl 532(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl %esi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 464(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ addl 464(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %ebp # 4-byte Reload
+ adcl 472(%esp), %ebp
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 484(%esp), %esi
+ adcl 488(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %edi # 4-byte Reload
+ adcl 496(%esp), %edi
+ movl 728(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 456(%esp), %eax
+ movl 44(%esp), %edx # 4-byte Reload
+ addl 424(%esp), %edx
+ adcl 428(%esp), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 432(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 436(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 440(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 444(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 448(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 452(%esp), %edi
+ movl %edi, %ebp
+ movl %eax, %edi
+ adcl $0, %edi
+ movl %edx, %eax
+ movl %edx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 384(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ addl 384(%esp), %esi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 396(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 412(%esp), %ebp
+ adcl 416(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 344(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 376(%esp), %edx
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 344(%esp), %ecx
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 352(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 360(%esp), %esi
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 364(%esp), %edi
+ adcl 368(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ addl 304(%esp), %ebp
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 308(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 320(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ adcl 324(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 328(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 336(%esp), %edi
+ movl 728(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 724(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv256x32
+ movl 296(%esp), %edx
+ movl %ebp, %ecx
+ addl 264(%esp), %ecx
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 280(%esp), %ebp
+ adcl 284(%esp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 292(%esp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl %edx, %edi
+ adcl $0, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ addl 224(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 236(%esp), %esi
+ adcl 240(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 256(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 216(%esp), %ebp
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 184(%esp), %ecx
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 192(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 196(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 144(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ addl 144(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 152(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 156(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 160(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 176(%esp), %ebp
+ movl 728(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 104(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 136(%esp), %edi
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 104(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 116(%esp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 120(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 124(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 128(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 132(%esp), %ebp
+ adcl $0, %edi
+ movl 28(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 64(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ addl 64(%esp), %esi
+ movl 32(%esp), %esi # 4-byte Reload
+ movl 56(%esp), %eax # 4-byte Reload
+ movl 44(%esp), %ebx # 4-byte Reload
+ adcl 68(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 72(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 76(%esp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 80(%esp), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 84(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 92(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ adcl 96(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 732(%esp), %eax
+ subl (%eax), %edx
+ sbbl 4(%eax), %ecx
+ sbbl 8(%eax), %esi
+ sbbl 12(%eax), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 36(%esp), %ebx # 4-byte Reload
+ sbbl 16(%eax), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 40(%esp), %ebx # 4-byte Reload
+ sbbl 20(%eax), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ sbbl 24(%eax), %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ sbbl 28(%eax), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ testl %edi, %edi
+ js .LBB115_2
+# BB#1:
+ movl %edx, 56(%esp) # 4-byte Spill
+.LBB115_2:
+ movl 720(%esp), %edx
+ movl 56(%esp), %eax # 4-byte Reload
+ movl %eax, (%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB115_4
+# BB#3:
+ movl %ecx, %eax
+.LBB115_4:
+ movl %eax, 4(%edx)
+ js .LBB115_6
+# BB#5:
+ movl %esi, 32(%esp) # 4-byte Spill
+.LBB115_6:
+ movl 32(%esp), %eax # 4-byte Reload
+ movl %eax, 8(%edx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl 60(%esp), %eax # 4-byte Reload
+ movl 48(%esp), %ecx # 4-byte Reload
+ js .LBB115_8
+# BB#7:
+ movl 12(%esp), %esi # 4-byte Reload
+ movl %esi, 44(%esp) # 4-byte Spill
+.LBB115_8:
+ movl 44(%esp), %esi # 4-byte Reload
+ movl %esi, 12(%edx)
+ js .LBB115_10
+# BB#9:
+ movl 16(%esp), %edi # 4-byte Reload
+.LBB115_10:
+ movl %edi, 16(%edx)
+ js .LBB115_12
+# BB#11:
+ movl 20(%esp), %ebp # 4-byte Reload
+.LBB115_12:
+ movl %ebp, 20(%edx)
+ js .LBB115_14
+# BB#13:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB115_14:
+ movl %eax, 24(%edx)
+ js .LBB115_16
+# BB#15:
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB115_16:
+ movl %ecx, 28(%edx)
+ addl $700, %esp # imm = 0x2BC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end115:
+ .size mcl_fp_montNF8L, .Lfunc_end115-mcl_fp_montNF8L
+
+ .globl mcl_fp_montRed8L
+ .align 16, 0x90
+ .type mcl_fp_montRed8L,@function
+mcl_fp_montRed8L: # @mcl_fp_montRed8L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $428, %esp # imm = 0x1AC
+ calll .L116$pb
+.L116$pb:
+ popl %ebx
+.Ltmp7:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp7-.L116$pb), %ebx
+ movl 456(%esp), %edx
+ movl -4(%edx), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 452(%esp), %eax
+ movl (%eax), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 4(%eax), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl %esi, %ecx
+ imull %edi, %ecx
+ movl 60(%eax), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 56(%eax), %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 52(%eax), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 48(%eax), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 44(%eax), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 40(%eax), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 36(%eax), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ movl 32(%eax), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 28(%eax), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 24(%eax), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 20(%eax), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 16(%eax), %ebp
+ movl 12(%eax), %edi
+ movl 8(%eax), %esi
+ movl (%edx), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%edx), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 24(%edx), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 20(%edx), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 16(%edx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 12(%edx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 8(%edx), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 4(%edx), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ecx, (%esp)
+ leal 392(%esp), %ecx
+ calll .LmulPv256x32
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 392(%esp), %eax
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 396(%esp), %ecx
+ adcl 400(%esp), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 404(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl 408(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ sbbl %eax, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 352(%esp), %ecx
+ movl 456(%esp), %edx
+ calll .LmulPv256x32
+ movl 64(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 352(%esp), %edi
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 356(%esp), %edx
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 360(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 364(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 368(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 372(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 376(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 380(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 384(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 456(%esp), %edx
+ calll .LmulPv256x32
+ addl 312(%esp), %edi
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 316(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, 64(%esp) # 4-byte Folded Spill
+ movl %edi, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 456(%esp), %edx
+ calll .LmulPv256x32
+ addl 272(%esp), %edi
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 276(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 64(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 232(%esp), %ecx
+ movl 456(%esp), %edx
+ calll .LmulPv256x32
+ addl 232(%esp), %ebp
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 236(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %ebp # 4-byte Reload
+ adcl 252(%esp), %ebp
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 192(%esp), %ecx
+ movl 456(%esp), %edx
+ calll .LmulPv256x32
+ addl 192(%esp), %edi
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 196(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 204(%esp), %edi
+ adcl 208(%esp), %ebp
+ movl %ebp, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %ebp # 4-byte Reload
+ adcl 212(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 152(%esp), %ecx
+ movl 456(%esp), %edx
+ calll .LmulPv256x32
+ addl 152(%esp), %esi
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 156(%esp), %ecx
+ adcl 160(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 168(%esp), %ebp
+ movl %ebp, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 172(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 180(%esp), %ebp
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, 64(%esp) # 4-byte Folded Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 112(%esp), %ecx
+ movl 456(%esp), %edx
+ calll .LmulPv256x32
+ addl 112(%esp), %esi
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 116(%esp), %ecx
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 120(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 124(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 128(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl %edi, %ebx
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 132(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl %eax, %esi
+ adcl 136(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 140(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 144(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl %ecx, %edx
+ subl 24(%esp), %edx # 4-byte Folded Reload
+ movl 108(%esp), %eax # 4-byte Reload
+ sbbl 20(%esp), %eax # 4-byte Folded Reload
+ movl 104(%esp), %ebp # 4-byte Reload
+ sbbl 28(%esp), %ebp # 4-byte Folded Reload
+ sbbl 32(%esp), %ebx # 4-byte Folded Reload
+ sbbl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ sbbl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ sbbl 44(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ sbbl 48(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 96(%esp) # 4-byte Spill
+ sbbl $0, %edi
+ andl $1, %edi
+ jne .LBB116_2
+# BB#1:
+ movl %edx, %ecx
+.LBB116_2:
+ movl 448(%esp), %edx
+ movl %ecx, (%edx)
+ movl %edi, %ecx
+ testb %cl, %cl
+ jne .LBB116_4
+# BB#3:
+ movl %eax, 108(%esp) # 4-byte Spill
+.LBB116_4:
+ movl 108(%esp), %eax # 4-byte Reload
+ movl %eax, 4(%edx)
+ movl 104(%esp), %eax # 4-byte Reload
+ jne .LBB116_6
+# BB#5:
+ movl %ebp, %eax
+.LBB116_6:
+ movl %eax, 8(%edx)
+ movl 84(%esp), %eax # 4-byte Reload
+ movl 76(%esp), %ebp # 4-byte Reload
+ jne .LBB116_8
+# BB#7:
+ movl %ebx, %ebp
+.LBB116_8:
+ movl %ebp, 12(%edx)
+ movl 100(%esp), %ebx # 4-byte Reload
+ jne .LBB116_10
+# BB#9:
+ movl 68(%esp), %ebx # 4-byte Reload
+.LBB116_10:
+ movl %ebx, 16(%edx)
+ movl 80(%esp), %edi # 4-byte Reload
+ jne .LBB116_12
+# BB#11:
+ movl 72(%esp), %edi # 4-byte Reload
+.LBB116_12:
+ movl %edi, 20(%edx)
+ movl 88(%esp), %esi # 4-byte Reload
+ jne .LBB116_14
+# BB#13:
+ movl 92(%esp), %esi # 4-byte Reload
+.LBB116_14:
+ movl %esi, 24(%edx)
+ jne .LBB116_16
+# BB#15:
+ movl 96(%esp), %eax # 4-byte Reload
+.LBB116_16:
+ movl %eax, 28(%edx)
+ addl $428, %esp # imm = 0x1AC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end116:
+ .size mcl_fp_montRed8L, .Lfunc_end116-mcl_fp_montRed8L
+
+ .globl mcl_fp_addPre8L
+ .align 16, 0x90
+ .type mcl_fp_addPre8L,@function
+mcl_fp_addPre8L: # @mcl_fp_addPre8L
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ movl 20(%esp), %esi
+ addl (%esi), %ecx
+ adcl 4(%esi), %edx
+ movl 8(%eax), %edi
+ adcl 8(%esi), %edi
+ movl 16(%esp), %ebx
+ movl %ecx, (%ebx)
+ movl 12(%esi), %ecx
+ movl %edx, 4(%ebx)
+ movl 16(%esi), %edx
+ adcl 12(%eax), %ecx
+ adcl 16(%eax), %edx
+ movl %edi, 8(%ebx)
+ movl 20(%eax), %edi
+ movl %ecx, 12(%ebx)
+ movl 20(%esi), %ecx
+ adcl %edi, %ecx
+ movl 24(%eax), %edi
+ movl %edx, 16(%ebx)
+ movl 24(%esi), %edx
+ adcl %edi, %edx
+ movl %ecx, 20(%ebx)
+ movl %edx, 24(%ebx)
+ movl 28(%eax), %eax
+ movl 28(%esi), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 28(%ebx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end117:
+ .size mcl_fp_addPre8L, .Lfunc_end117-mcl_fp_addPre8L
+
+ .globl mcl_fp_subPre8L
+ .align 16, 0x90
+ .type mcl_fp_subPre8L,@function
+mcl_fp_subPre8L: # @mcl_fp_subPre8L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %edx
+ movl 4(%ecx), %esi
+ xorl %eax, %eax
+ movl 28(%esp), %edi
+ subl (%edi), %edx
+ sbbl 4(%edi), %esi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edi), %ebx
+ movl 20(%esp), %ebp
+ movl %edx, (%ebp)
+ movl 12(%ecx), %edx
+ sbbl 12(%edi), %edx
+ movl %esi, 4(%ebp)
+ movl 16(%ecx), %esi
+ sbbl 16(%edi), %esi
+ movl %ebx, 8(%ebp)
+ movl 20(%edi), %ebx
+ movl %edx, 12(%ebp)
+ movl 20(%ecx), %edx
+ sbbl %ebx, %edx
+ movl 24(%edi), %ebx
+ movl %esi, 16(%ebp)
+ movl 24(%ecx), %esi
+ sbbl %ebx, %esi
+ movl %edx, 20(%ebp)
+ movl %esi, 24(%ebp)
+ movl 28(%edi), %edx
+ movl 28(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 28(%ebp)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end118:
+ .size mcl_fp_subPre8L, .Lfunc_end118-mcl_fp_subPre8L
+
+ .globl mcl_fp_shr1_8L
+ .align 16, 0x90
+ .type mcl_fp_shr1_8L,@function
+mcl_fp_shr1_8L: # @mcl_fp_shr1_8L
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl 8(%esp), %esi
+ movl %ecx, (%esi)
+ movl 8(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 4(%esi)
+ movl 12(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 8(%esi)
+ movl 16(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 12(%esi)
+ movl 20(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 16(%esi)
+ movl 24(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 20(%esi)
+ movl 28(%eax), %eax
+ shrdl $1, %eax, %ecx
+ movl %ecx, 24(%esi)
+ shrl %eax
+ movl %eax, 28(%esi)
+ popl %esi
+ retl
+.Lfunc_end119:
+ .size mcl_fp_shr1_8L, .Lfunc_end119-mcl_fp_shr1_8L
+
+ .globl mcl_fp_add8L
+ .align 16, 0x90
+ .type mcl_fp_add8L,@function
+mcl_fp_add8L: # @mcl_fp_add8L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $20, %esp
+ movl 48(%esp), %edi
+ movl (%edi), %ecx
+ movl 4(%edi), %eax
+ movl 44(%esp), %edx
+ addl (%edx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ adcl 4(%edx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 8(%edi), %eax
+ adcl 8(%edx), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 12(%edx), %esi
+ movl 16(%edx), %eax
+ adcl 12(%edi), %esi
+ adcl 16(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ adcl 20(%edi), %ecx
+ movl 24(%edx), %ebx
+ adcl 24(%edi), %ebx
+ movl 28(%edx), %edi
+ movl 48(%esp), %edx
+ adcl 28(%edx), %edi
+ movl 40(%esp), %edx
+ movl %ebp, (%edx)
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%edx)
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%edx)
+ movl %esi, 12(%edx)
+ movl %eax, 16(%edx)
+ movl %ecx, 20(%edx)
+ movl %ebx, 24(%edx)
+ movl %edi, 28(%edx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl 52(%esp), %edx
+ movl 8(%esp), %ebp # 4-byte Reload
+ subl (%edx), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl 52(%esp), %edx
+ sbbl 4(%edx), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl 52(%esp), %edx
+ sbbl 8(%edx), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp
+ sbbl 12(%ebp), %esi
+ movl %esi, (%esp) # 4-byte Spill
+ movl 4(%esp), %edx # 4-byte Reload
+ sbbl 16(%ebp), %edx
+ movl %edx, %esi
+ sbbl 20(%ebp), %ecx
+ sbbl 24(%ebp), %ebx
+ sbbl 28(%ebp), %edi
+ sbbl $0, %eax
+ testb $1, %al
+ jne .LBB120_2
+# BB#1: # %nocarry
+ movl 8(%esp), %edx # 4-byte Reload
+ movl 40(%esp), %ebp
+ movl %edx, (%ebp)
+ movl 16(%esp), %edx # 4-byte Reload
+ movl %edx, 4(%ebp)
+ movl 12(%esp), %edx # 4-byte Reload
+ movl %edx, 8(%ebp)
+ movl (%esp), %eax # 4-byte Reload
+ movl %eax, 12(%ebp)
+ movl %esi, 16(%ebp)
+ movl %ecx, 20(%ebp)
+ movl %ebx, 24(%ebp)
+ movl %edi, 28(%ebp)
+.LBB120_2: # %carry
+ addl $20, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end120:
+ .size mcl_fp_add8L, .Lfunc_end120-mcl_fp_add8L
+
+ .globl mcl_fp_addNF8L
+ .align 16, 0x90
+ .type mcl_fp_addNF8L,@function
+mcl_fp_addNF8L: # @mcl_fp_addNF8L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $56, %esp
+ movl 84(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edi
+ movl 80(%esp), %ebx
+ addl (%ebx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 4(%ebx), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 28(%eax), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 24(%eax), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 20(%eax), %ebp
+ movl 16(%eax), %esi
+ movl 12(%eax), %edx
+ movl 8(%eax), %ecx
+ adcl 8(%ebx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl 12(%ebx), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl 16(%ebx), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl 20(%ebx), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 24(%ebx), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 28(%ebx), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 88(%esp), %ebx
+ movl 24(%esp), %ebp # 4-byte Reload
+ movl %ebp, %eax
+ subl (%ebx), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ sbbl 4(%ebx), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ sbbl 8(%ebx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 12(%ebx), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ sbbl 16(%ebx), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ sbbl 20(%ebx), %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ sbbl 24(%ebx), %ebp
+ movl 48(%esp), %esi # 4-byte Reload
+ sbbl 28(%ebx), %esi
+ testl %esi, %esi
+ js .LBB121_2
+# BB#1:
+ movl (%esp), %eax # 4-byte Reload
+.LBB121_2:
+ movl 76(%esp), %ebx
+ movl %eax, (%ebx)
+ movl 32(%esp), %eax # 4-byte Reload
+ js .LBB121_4
+# BB#3:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB121_4:
+ movl %eax, 4(%ebx)
+ movl 40(%esp), %edx # 4-byte Reload
+ movl 28(%esp), %edi # 4-byte Reload
+ js .LBB121_6
+# BB#5:
+ movl 8(%esp), %edi # 4-byte Reload
+.LBB121_6:
+ movl %edi, 8(%ebx)
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl 36(%esp), %eax # 4-byte Reload
+ js .LBB121_8
+# BB#7:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB121_8:
+ movl %eax, 12(%ebx)
+ movl 48(%esp), %edi # 4-byte Reload
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB121_10
+# BB#9:
+ movl 16(%esp), %edx # 4-byte Reload
+.LBB121_10:
+ movl %edx, 16(%ebx)
+ js .LBB121_12
+# BB#11:
+ movl 20(%esp), %ecx # 4-byte Reload
+.LBB121_12:
+ movl %ecx, 20(%ebx)
+ js .LBB121_14
+# BB#13:
+ movl %ebp, %eax
+.LBB121_14:
+ movl %eax, 24(%ebx)
+ js .LBB121_16
+# BB#15:
+ movl %esi, %edi
+.LBB121_16:
+ movl %edi, 28(%ebx)
+ addl $56, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end121:
+ .size mcl_fp_addNF8L, .Lfunc_end121-mcl_fp_addNF8L
+
+ .globl mcl_fp_sub8L
+ .align 16, 0x90
+ .type mcl_fp_sub8L,@function
+mcl_fp_sub8L: # @mcl_fp_sub8L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $28, %esp
+ movl 52(%esp), %esi
+ movl (%esi), %ecx
+ movl 4(%esi), %eax
+ xorl %ebx, %ebx
+ movl 56(%esp), %ebp
+ subl (%ebp), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ sbbl 4(%ebp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 8(%esi), %edx
+ sbbl 8(%ebp), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ sbbl 12(%ebp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 16(%esi), %ecx
+ sbbl 16(%ebp), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ sbbl 20(%ebp), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 24(%esi), %edi
+ sbbl 24(%ebp), %edi
+ movl 28(%esi), %esi
+ sbbl 28(%ebp), %esi
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 48(%esp), %ebx
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, (%ebx)
+ movl 20(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%ebx)
+ movl %edx, 8(%ebx)
+ movl 24(%esp), %edx # 4-byte Reload
+ movl %edx, 12(%ebx)
+ movl %ecx, 16(%ebx)
+ movl %eax, 20(%ebx)
+ movl %edi, 24(%ebx)
+ movl %esi, 28(%ebx)
+ je .LBB122_2
+# BB#1: # %carry
+ movl %esi, (%esp) # 4-byte Spill
+ movl 60(%esp), %esi
+ movl 16(%esp), %ecx # 4-byte Reload
+ addl (%esi), %ecx
+ movl %ecx, (%ebx)
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 8(%esp), %ebp # 4-byte Reload
+ adcl 8(%esi), %ebp
+ movl 12(%esi), %eax
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %ebp, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl %eax, 20(%ebx)
+ movl 24(%esi), %eax
+ adcl %edi, %eax
+ movl %eax, 24(%ebx)
+ movl 28(%esi), %eax
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+.LBB122_2: # %nocarry
+ addl $28, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end122:
+ .size mcl_fp_sub8L, .Lfunc_end122-mcl_fp_sub8L
+
+ .globl mcl_fp_subNF8L
+ .align 16, 0x90
+ .type mcl_fp_subNF8L,@function
+mcl_fp_subNF8L: # @mcl_fp_subNF8L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $40, %esp
+ movl 64(%esp), %eax
+ movl (%eax), %esi
+ movl 4(%eax), %edx
+ movl 68(%esp), %ecx
+ subl (%ecx), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ sbbl 4(%ecx), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 28(%eax), %edx
+ movl 24(%eax), %esi
+ movl 20(%eax), %edi
+ movl 16(%eax), %ebx
+ movl 12(%eax), %ebp
+ movl 8(%eax), %eax
+ sbbl 8(%ecx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ sbbl 12(%ecx), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ sbbl 16(%ecx), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ sbbl 20(%ecx), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ sbbl 24(%ecx), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl %edx, %edi
+ sbbl 28(%ecx), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ sarl $31, %edi
+ movl 72(%esp), %ebp
+ movl 28(%ebp), %eax
+ andl %edi, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 24(%ebp), %eax
+ andl %edi, %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 20(%ebp), %ebx
+ andl %edi, %ebx
+ movl 16(%ebp), %esi
+ andl %edi, %esi
+ movl 12(%ebp), %edx
+ andl %edi, %edx
+ movl 8(%ebp), %ecx
+ andl %edi, %ecx
+ movl 4(%ebp), %eax
+ andl %edi, %eax
+ andl (%ebp), %edi
+ addl 24(%esp), %edi # 4-byte Folded Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl 60(%esp), %ebp
+ movl %edi, (%ebp)
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 4(%ebp)
+ adcl 12(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 8(%ebp)
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 12(%ebp)
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, 16(%ebp)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %ebx, 20(%ebp)
+ movl %eax, 24(%ebp)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%ebp)
+ addl $40, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end123:
+ .size mcl_fp_subNF8L, .Lfunc_end123-mcl_fp_subNF8L
+
+ .globl mcl_fpDbl_add8L
+ .align 16, 0x90
+ .type mcl_fpDbl_add8L,@function
+mcl_fpDbl_add8L: # @mcl_fpDbl_add8L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $56, %esp
+ movl 84(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edx
+ movl 80(%esp), %ebp
+ addl (%ebp), %esi
+ adcl 4(%ebp), %edx
+ movl 8(%ecx), %edi
+ adcl 8(%ebp), %edi
+ movl 12(%ebp), %ebx
+ movl 76(%esp), %eax
+ movl %esi, (%eax)
+ movl 16(%ebp), %esi
+ adcl 12(%ecx), %ebx
+ adcl 16(%ecx), %esi
+ movl %edx, 4(%eax)
+ movl 40(%ecx), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %edi, 8(%eax)
+ movl 20(%ecx), %edx
+ movl %ebx, 12(%eax)
+ movl 20(%ebp), %edi
+ adcl %edx, %edi
+ movl 24(%ecx), %edx
+ movl %esi, 16(%eax)
+ movl 24(%ebp), %esi
+ adcl %edx, %esi
+ movl 28(%ecx), %edx
+ movl %edi, 20(%eax)
+ movl 28(%ebp), %ebx
+ adcl %edx, %ebx
+ movl 32(%ecx), %edx
+ movl %esi, 24(%eax)
+ movl 32(%ebp), %esi
+ adcl %edx, %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 36(%ecx), %edx
+ movl %ebx, 28(%eax)
+ movl 36(%ebp), %ebx
+ adcl %edx, %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 40(%ebp), %eax
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%ecx), %edx
+ movl 44(%ebp), %edi
+ adcl %edx, %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 48(%ecx), %edx
+ movl 48(%ebp), %eax
+ adcl %edx, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 52(%ecx), %edx
+ movl 52(%ebp), %esi
+ adcl %edx, %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 56(%ecx), %edx
+ movl 56(%ebp), %eax
+ adcl %edx, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%ecx), %ecx
+ movl 60(%ebp), %ebp
+ adcl %ecx, %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl 44(%esp), %eax # 4-byte Reload
+ movl 88(%esp), %edx
+ subl (%edx), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 88(%esp), %eax
+ sbbl 4(%eax), %ebx
+ movl %eax, %edx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ movl %edx, %ebx
+ sbbl 8(%ebx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %edi, %eax
+ movl 24(%esp), %edi # 4-byte Reload
+ sbbl 12(%ebx), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %edi, %eax
+ sbbl 16(%ebx), %eax
+ sbbl 20(%ebx), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ sbbl 24(%ebx), %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ sbbl 28(%ebx), %ebp
+ sbbl $0, %ecx
+ andl $1, %ecx
+ jne .LBB124_2
+# BB#1:
+ movl %eax, %edi
+.LBB124_2:
+ testb %cl, %cl
+ movl 44(%esp), %ecx # 4-byte Reload
+ jne .LBB124_4
+# BB#3:
+ movl (%esp), %ecx # 4-byte Reload
+.LBB124_4:
+ movl 76(%esp), %eax
+ movl %ecx, 32(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl 32(%esp), %edx # 4-byte Reload
+ movl 48(%esp), %esi # 4-byte Reload
+ movl 28(%esp), %ebx # 4-byte Reload
+ jne .LBB124_6
+# BB#5:
+ movl 4(%esp), %ebx # 4-byte Reload
+.LBB124_6:
+ movl %ebx, 36(%eax)
+ jne .LBB124_8
+# BB#7:
+ movl 8(%esp), %esi # 4-byte Reload
+.LBB124_8:
+ movl %esi, 40(%eax)
+ movl 36(%esp), %esi # 4-byte Reload
+ jne .LBB124_10
+# BB#9:
+ movl 12(%esp), %edx # 4-byte Reload
+.LBB124_10:
+ movl %edx, 44(%eax)
+ movl %edi, 48(%eax)
+ movl 52(%esp), %edx # 4-byte Reload
+ jne .LBB124_12
+# BB#11:
+ movl 16(%esp), %esi # 4-byte Reload
+.LBB124_12:
+ movl %esi, 52(%eax)
+ jne .LBB124_14
+# BB#13:
+ movl 20(%esp), %edx # 4-byte Reload
+.LBB124_14:
+ movl %edx, 56(%eax)
+ jne .LBB124_16
+# BB#15:
+ movl %ebp, %ecx
+.LBB124_16:
+ movl %ecx, 60(%eax)
+ addl $56, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end124:
+ .size mcl_fpDbl_add8L, .Lfunc_end124-mcl_fpDbl_add8L
+
+ .globl mcl_fpDbl_sub8L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub8L,@function
+mcl_fpDbl_sub8L: # @mcl_fpDbl_sub8L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $40, %esp
+ movl 64(%esp), %edi
+ movl (%edi), %eax
+ movl 4(%edi), %edx
+ movl 68(%esp), %ebx
+ subl (%ebx), %eax
+ sbbl 4(%ebx), %edx
+ movl 8(%edi), %esi
+ sbbl 8(%ebx), %esi
+ movl 60(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 12(%edi), %eax
+ sbbl 12(%ebx), %eax
+ movl %edx, 4(%ecx)
+ movl 16(%edi), %edx
+ sbbl 16(%ebx), %edx
+ movl %esi, 8(%ecx)
+ movl 20(%ebx), %esi
+ movl %eax, 12(%ecx)
+ movl 20(%edi), %eax
+ sbbl %esi, %eax
+ movl 24(%ebx), %esi
+ movl %edx, 16(%ecx)
+ movl 24(%edi), %edx
+ sbbl %esi, %edx
+ movl 28(%ebx), %esi
+ movl %eax, 20(%ecx)
+ movl 28(%edi), %eax
+ sbbl %esi, %eax
+ movl 32(%ebx), %esi
+ movl %edx, 24(%ecx)
+ movl 32(%edi), %edx
+ sbbl %esi, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 36(%ebx), %edx
+ movl %eax, 28(%ecx)
+ movl 36(%edi), %eax
+ sbbl %edx, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 40(%ebx), %eax
+ movl 40(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 44(%ebx), %eax
+ movl 44(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 48(%ebx), %eax
+ movl 48(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 52(%ebx), %eax
+ movl 52(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 56(%ebx), %eax
+ movl 56(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 60(%ebx), %eax
+ movl 60(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 72(%esp), %ebx
+ jne .LBB125_1
+# BB#2:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB125_3
+.LBB125_1:
+ movl 28(%ebx), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+.LBB125_3:
+ testb %al, %al
+ jne .LBB125_4
+# BB#5:
+ movl $0, %ebp
+ movl $0, %eax
+ jmp .LBB125_6
+.LBB125_4:
+ movl (%ebx), %eax
+ movl 4(%ebx), %ebp
+.LBB125_6:
+ jne .LBB125_7
+# BB#8:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB125_9
+.LBB125_7:
+ movl 24(%ebx), %edx
+ movl %edx, (%esp) # 4-byte Spill
+.LBB125_9:
+ jne .LBB125_10
+# BB#11:
+ movl $0, %edx
+ jmp .LBB125_12
+.LBB125_10:
+ movl 20(%ebx), %edx
+.LBB125_12:
+ jne .LBB125_13
+# BB#14:
+ movl $0, %esi
+ jmp .LBB125_15
+.LBB125_13:
+ movl 16(%ebx), %esi
+.LBB125_15:
+ jne .LBB125_16
+# BB#17:
+ movl $0, %edi
+ jmp .LBB125_18
+.LBB125_16:
+ movl 12(%ebx), %edi
+.LBB125_18:
+ jne .LBB125_19
+# BB#20:
+ xorl %ebx, %ebx
+ jmp .LBB125_21
+.LBB125_19:
+ movl 8(%ebx), %ebx
+.LBB125_21:
+ addl 16(%esp), %eax # 4-byte Folded Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 32(%ecx)
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, 36(%ecx)
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 40(%ecx)
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %edi, 44(%ecx)
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %esi, 48(%ecx)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 52(%ecx)
+ movl %eax, 56(%ecx)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%ecx)
+ addl $40, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end125:
+ .size mcl_fpDbl_sub8L, .Lfunc_end125-mcl_fpDbl_sub8L
+
+ .align 16, 0x90
+ .type .LmulPv288x32,@function
+.LmulPv288x32: # @mulPv288x32
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $56, %esp
+ movl %edx, %esi
+ movl 76(%esp), %edi
+ movl %edi, %eax
+ mull 32(%esi)
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 28(%esi)
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 24(%esi)
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 20(%esi)
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 16(%esi)
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 12(%esi)
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 8(%esi)
+ movl %edx, %ebx
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 4(%esi)
+ movl %edx, %ebp
+ movl %eax, (%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull (%esi)
+ movl %eax, (%ecx)
+ addl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%ecx)
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%ecx)
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%ecx)
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%ecx)
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 36(%ecx)
+ movl %ecx, %eax
+ addl $56, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end126:
+ .size .LmulPv288x32, .Lfunc_end126-.LmulPv288x32
+
+ .globl mcl_fp_mulUnitPre9L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre9L,@function
+mcl_fp_mulUnitPre9L: # @mcl_fp_mulUnitPre9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $76, %esp
+ calll .L127$pb
+.L127$pb:
+ popl %ebx
+.Ltmp8:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp8-.L127$pb), %ebx
+ movl 104(%esp), %eax
+ movl %eax, (%esp)
+ leal 32(%esp), %ecx
+ movl 100(%esp), %edx
+ calll .LmulPv288x32
+ movl 68(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 64(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 52(%esp), %edi
+ movl 48(%esp), %ebx
+ movl 44(%esp), %ebp
+ movl 40(%esp), %esi
+ movl 32(%esp), %edx
+ movl 36(%esp), %ecx
+ movl 96(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %ebp, 12(%eax)
+ movl %ebx, 16(%eax)
+ movl %edi, 20(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ addl $76, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end127:
+ .size mcl_fp_mulUnitPre9L, .Lfunc_end127-mcl_fp_mulUnitPre9L
+
+ .globl mcl_fpDbl_mulPre9L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre9L,@function
+mcl_fpDbl_mulPre9L: # @mcl_fpDbl_mulPre9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $428, %esp # imm = 0x1AC
+ calll .L128$pb
+.L128$pb:
+ popl %esi
+.Ltmp9:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp9-.L128$pb), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 456(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 384(%esp), %ecx
+ movl 452(%esp), %edx
+ movl %edx, %ebp
+ movl %esi, %ebx
+ calll .LmulPv288x32
+ movl 420(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 416(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 412(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 408(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 404(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 400(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 396(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 392(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 384(%esp), %eax
+ movl 388(%esp), %edi
+ movl 448(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 456(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 344(%esp), %ecx
+ movl %ebp, %edx
+ movl %esi, %ebx
+ calll .LmulPv288x32
+ addl 344(%esp), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 380(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 376(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 364(%esp), %ebx
+ movl 360(%esp), %edi
+ movl 356(%esp), %esi
+ movl 348(%esp), %ecx
+ movl 352(%esp), %edx
+ movl 448(%esp), %eax
+ movl 8(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%eax)
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 8(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 456(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 304(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 340(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 336(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 332(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 328(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 324(%esp), %edi
+ movl 320(%esp), %ebp
+ movl 316(%esp), %esi
+ movl 308(%esp), %ecx
+ movl 312(%esp), %edx
+ movl 448(%esp), %eax
+ movl 40(%esp), %ebx # 4-byte Reload
+ movl %ebx, 8(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ movl 456(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 264(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 300(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 296(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 292(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 288(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 284(%esp), %ebx
+ movl 280(%esp), %edi
+ movl 276(%esp), %esi
+ movl 268(%esp), %ecx
+ movl 272(%esp), %edx
+ movl 448(%esp), %eax
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 456(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ addl 224(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 260(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 256(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 252(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 248(%esp), %ebx
+ movl 244(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 240(%esp), %edi
+ movl 236(%esp), %ebp
+ movl 228(%esp), %ecx
+ movl 232(%esp), %edx
+ movl 448(%esp), %eax
+ movl 44(%esp), %esi # 4-byte Reload
+ movl %esi, 16(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 16(%esp) # 4-byte Spill
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ movl 456(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 44(%esp), %eax # 4-byte Reload
+ addl 184(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 220(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 212(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 208(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 204(%esp), %edi
+ movl 200(%esp), %ebx
+ movl 196(%esp), %esi
+ movl 188(%esp), %ecx
+ movl 192(%esp), %edx
+ movl 448(%esp), %eax
+ movl 44(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 8(%esp) # 4-byte Spill
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl %eax, 20(%esp) # 4-byte Folded Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 456(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 144(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ addl 144(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 176(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 168(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 164(%esp), %ebx
+ movl 160(%esp), %edi
+ movl 156(%esp), %esi
+ movl 148(%esp), %ecx
+ movl 152(%esp), %edx
+ movl 448(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%eax)
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 456(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 104(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 12(%esp), %esi # 4-byte Reload
+ addl 104(%esp), %esi
+ movl 140(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 136(%esp), %ebp
+ movl 132(%esp), %edi
+ movl 128(%esp), %ebx
+ movl 124(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 120(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 116(%esp), %edx
+ movl 108(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx
+ movl 448(%esp), %eax
+ movl %esi, 28(%eax)
+ movl 12(%esp), %esi # 4-byte Reload
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 40(%esp) # 4-byte Spill
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 44(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 56(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 456(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 64(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl %esi, %ebp
+ addl 64(%esp), %ebp
+ movl 24(%esp), %edx # 4-byte Reload
+ adcl 68(%esp), %edx
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 88(%esp), %edi
+ movl 84(%esp), %ebx
+ movl 80(%esp), %esi
+ movl 76(%esp), %eax
+ movl 448(%esp), %ecx
+ movl %ebp, 32(%ecx)
+ movl %edx, 36(%ecx)
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl 52(%esp), %edx # 4-byte Reload
+ movl %edx, 40(%ecx)
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %eax, 44(%ecx)
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, 48(%ecx)
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 52(%ecx)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %edi, 56(%ecx)
+ movl %eax, 60(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%ecx)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 68(%ecx)
+ addl $428, %esp # imm = 0x1AC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end128:
+ .size mcl_fpDbl_mulPre9L, .Lfunc_end128-mcl_fpDbl_mulPre9L
+
+ .globl mcl_fpDbl_sqrPre9L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre9L,@function
+mcl_fpDbl_sqrPre9L: # @mcl_fpDbl_sqrPre9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $428, %esp # imm = 0x1AC
+ calll .L129$pb
+.L129$pb:
+ popl %ebx
+.Ltmp10:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp10-.L129$pb), %ebx
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl 452(%esp), %edx
+ movl (%edx), %eax
+ movl %eax, (%esp)
+ leal 384(%esp), %ecx
+ movl %edx, %esi
+ movl %ebx, %edi
+ calll .LmulPv288x32
+ movl 420(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 416(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 412(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 408(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 404(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 400(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 396(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 392(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 384(%esp), %eax
+ movl 388(%esp), %ebp
+ movl 448(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 4(%esi), %eax
+ movl %eax, (%esp)
+ leal 344(%esp), %ecx
+ movl %esi, %edx
+ movl %edi, %ebx
+ calll .LmulPv288x32
+ addl 344(%esp), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 380(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 376(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 364(%esp), %ebx
+ movl 360(%esp), %edi
+ movl 356(%esp), %esi
+ movl 348(%esp), %ecx
+ movl 352(%esp), %edx
+ movl 448(%esp), %eax
+ movl 8(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%eax)
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 8(%edx), %eax
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 304(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 340(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 336(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 332(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 328(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 324(%esp), %edi
+ movl 320(%esp), %ebp
+ movl 316(%esp), %esi
+ movl 308(%esp), %ecx
+ movl 312(%esp), %edx
+ movl 448(%esp), %eax
+ movl 40(%esp), %ebx # 4-byte Reload
+ movl %ebx, 8(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 12(%edx), %eax
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 264(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 300(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 296(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 292(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 288(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 284(%esp), %ebx
+ movl 280(%esp), %edi
+ movl 276(%esp), %esi
+ movl 268(%esp), %ecx
+ movl 272(%esp), %edx
+ movl 448(%esp), %eax
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 20(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 16(%edx), %eax
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ addl 224(%esp), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 260(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 256(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 252(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 248(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 244(%esp), %edi
+ movl 240(%esp), %ebp
+ movl 236(%esp), %esi
+ movl 228(%esp), %ecx
+ movl 232(%esp), %edx
+ movl 448(%esp), %eax
+ movl 12(%esp), %ebx # 4-byte Reload
+ movl %ebx, 16(%eax)
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 20(%edx), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 184(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 220(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 212(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 208(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 204(%esp), %ebx
+ movl 200(%esp), %edi
+ movl 196(%esp), %esi
+ movl 188(%esp), %ecx
+ movl 192(%esp), %edx
+ movl 448(%esp), %eax
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 8(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 24(%edx), %eax
+ movl %eax, (%esp)
+ leal 144(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ addl 144(%esp), %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 176(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 168(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 164(%esp), %edi
+ movl 160(%esp), %ebp
+ movl 156(%esp), %esi
+ movl 148(%esp), %ecx
+ movl 152(%esp), %edx
+ movl 448(%esp), %eax
+ movl 24(%esp), %ebx # 4-byte Reload
+ movl %ebx, 24(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 16(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 28(%edx), %eax
+ movl %eax, (%esp)
+ leal 104(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 4(%esp), %esi # 4-byte Reload
+ addl 104(%esp), %esi
+ movl 140(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 136(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 132(%esp), %ebp
+ movl 128(%esp), %ebx
+ movl 124(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 120(%esp), %edi
+ movl 116(%esp), %edx
+ movl 108(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx
+ movl 448(%esp), %eax
+ movl %esi, 28(%eax)
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ adcl 52(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 32(%edx), %eax
+ movl %eax, (%esp)
+ leal 64(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl %esi, %ebp
+ addl 64(%esp), %ebp
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl 68(%esp), %edx
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 88(%esp), %edi
+ movl 84(%esp), %ebx
+ movl 80(%esp), %esi
+ movl 76(%esp), %eax
+ movl 448(%esp), %ecx
+ movl %ebp, 32(%ecx)
+ movl %edx, 36(%ecx)
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ movl %edx, 40(%ecx)
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %eax, 44(%ecx)
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, 48(%ecx)
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 52(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %edi, 56(%ecx)
+ movl %eax, 60(%ecx)
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%ecx)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 68(%ecx)
+ addl $428, %esp # imm = 0x1AC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end129:
+ .size mcl_fpDbl_sqrPre9L, .Lfunc_end129-mcl_fpDbl_sqrPre9L
+
+ .globl mcl_fp_mont9L
+ .align 16, 0x90
+ .type mcl_fp_mont9L,@function
+mcl_fp_mont9L: # @mcl_fp_mont9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $796, %esp # imm = 0x31C
+ calll .L130$pb
+.L130$pb:
+ popl %ebx
+.Ltmp11:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp11-.L130$pb), %ebx
+ movl 828(%esp), %eax
+ movl -4(%eax), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 752(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 752(%esp), %ebp
+ movl 756(%esp), %esi
+ movl %ebp, %eax
+ imull %edi, %eax
+ movl 788(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 784(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 780(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 776(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 772(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 768(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 764(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 760(%esp), %edi
+ movl %eax, (%esp)
+ leal 712(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ addl 712(%esp), %ebp
+ adcl 716(%esp), %esi
+ adcl 720(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 748(%esp), %ebp
+ sbbl %eax, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 672(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 64(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 672(%esp), %esi
+ adcl 676(%esp), %edi
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 680(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 684(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 688(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 692(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 696(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 700(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 704(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ adcl 708(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %esi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 828(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv288x32
+ andl $1, %ebp
+ addl 632(%esp), %esi
+ adcl 636(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 660(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl 824(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 592(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ addl 592(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 616(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 624(%esp), %esi
+ adcl 628(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %edi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 552(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ andl $1, %ebp
+ addl 552(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 580(%esp), %edi
+ adcl 584(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl 824(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 512(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 512(%esp), %ecx
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 524(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 536(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 544(%esp), %edi
+ adcl 548(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 472(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ movl 40(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 472(%esp), %ebp
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 476(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 480(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 484(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 488(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 492(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 496(%esp), %ebp
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 500(%esp), %esi
+ adcl 504(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 508(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 432(%esp), %ecx
+ movl 820(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv288x32
+ movl 32(%esp), %ecx # 4-byte Reload
+ addl 432(%esp), %ecx
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 444(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 452(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ adcl 456(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 392(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ movl %esi, %eax
+ andl $1, %eax
+ addl 392(%esp), %ebp
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 396(%esp), %esi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 400(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 404(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 408(%esp), %ebp
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 412(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 416(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 420(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 424(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 428(%esp), %edi
+ adcl $0, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 352(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ addl 352(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 364(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 372(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 384(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %esi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ movl %edi, %eax
+ andl $1, %eax
+ addl 312(%esp), %esi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 316(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 320(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 324(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 328(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 332(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 336(%esp), %esi
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 340(%esp), %edi
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 344(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 348(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %ebp
+ movl 824(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 272(%esp), %ecx
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 292(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 296(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 308(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 232(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ movl %edi, %ecx
+ andl $1, %ecx
+ addl 232(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 240(%esp), %esi
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 244(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 260(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 192(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 192(%esp), %ecx
+ adcl 196(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ adcl 200(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 212(%esp), %esi
+ adcl 216(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 152(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ andl $1, %ebp
+ addl 152(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 164(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 172(%esp), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 180(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 112(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 112(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 120(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 124(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 128(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 132(%esp), %ebp
+ adcl 136(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 140(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 144(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl 28(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 72(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ andl $1, %edi
+ addl 72(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 80(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 84(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %edx, %esi
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 92(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 100(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 108(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl 828(%esp), %ebx
+ subl (%ebx), %eax
+ movl %ecx, %edx
+ sbbl 4(%ebx), %edx
+ movl %esi, %ecx
+ sbbl 8(%ebx), %ecx
+ movl 44(%esp), %esi # 4-byte Reload
+ sbbl 12(%ebx), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ sbbl 16(%ebx), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ sbbl 20(%ebx), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ sbbl 24(%ebx), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ sbbl 28(%ebx), %esi
+ movl 60(%esp), %ebp # 4-byte Reload
+ sbbl 32(%ebx), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ sbbl $0, %edi
+ andl $1, %edi
+ movl %edi, %ebx
+ jne .LBB130_2
+# BB#1:
+ movl %esi, 32(%esp) # 4-byte Spill
+.LBB130_2:
+ testb %bl, %bl
+ movl 68(%esp), %esi # 4-byte Reload
+ jne .LBB130_4
+# BB#3:
+ movl %eax, %esi
+.LBB130_4:
+ movl 816(%esp), %ebp
+ movl %esi, (%ebp)
+ movl 64(%esp), %eax # 4-byte Reload
+ jne .LBB130_6
+# BB#5:
+ movl %edx, %eax
+.LBB130_6:
+ movl %eax, 4(%ebp)
+ movl 52(%esp), %eax # 4-byte Reload
+ jne .LBB130_8
+# BB#7:
+ movl %ecx, %eax
+.LBB130_8:
+ movl %eax, 8(%ebp)
+ movl 44(%esp), %eax # 4-byte Reload
+ jne .LBB130_10
+# BB#9:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB130_10:
+ movl %eax, 12(%ebp)
+ jne .LBB130_12
+# BB#11:
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+.LBB130_12:
+ movl 40(%esp), %eax # 4-byte Reload
+ movl %eax, 16(%ebp)
+ movl 36(%esp), %eax # 4-byte Reload
+ jne .LBB130_14
+# BB#13:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB130_14:
+ movl %eax, 20(%ebp)
+ movl 48(%esp), %eax # 4-byte Reload
+ jne .LBB130_16
+# BB#15:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB130_16:
+ movl %eax, 24(%ebp)
+ movl 32(%esp), %eax # 4-byte Reload
+ movl %eax, 28(%ebp)
+ movl 60(%esp), %eax # 4-byte Reload
+ jne .LBB130_18
+# BB#17:
+ movl 56(%esp), %eax # 4-byte Reload
+.LBB130_18:
+ movl %eax, 32(%ebp)
+ addl $796, %esp # imm = 0x31C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end130:
+ .size mcl_fp_mont9L, .Lfunc_end130-mcl_fp_mont9L
+
+ .globl mcl_fp_montNF9L
+ .align 16, 0x90
+ .type mcl_fp_montNF9L,@function
+mcl_fp_montNF9L: # @mcl_fp_montNF9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $796, %esp # imm = 0x31C
+ calll .L131$pb
+.L131$pb:
+ popl %ebx
+.Ltmp12:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp12-.L131$pb), %ebx
+ movl 828(%esp), %eax
+ movl -4(%eax), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 752(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 752(%esp), %esi
+ movl 756(%esp), %ebp
+ movl %esi, %eax
+ imull %edi, %eax
+ movl 788(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 784(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 780(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 776(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 772(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 768(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 764(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 760(%esp), %edi
+ movl %eax, (%esp)
+ leal 712(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ addl 712(%esp), %esi
+ adcl 716(%esp), %ebp
+ adcl 720(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 740(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 672(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 708(%esp), %eax
+ addl 672(%esp), %ebp
+ adcl 676(%esp), %edi
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 680(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 684(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 688(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 692(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 696(%esp), %esi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 700(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 704(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ addl 632(%esp), %ebp
+ adcl 636(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 656(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 664(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 592(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 628(%esp), %eax
+ addl 592(%esp), %edi
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 596(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 600(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 604(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 608(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 612(%esp), %esi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 616(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 620(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 624(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ adcl $0, %ebp
+ movl %edi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 552(%esp), %ecx
+ movl 828(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv288x32
+ addl 552(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 572(%esp), %esi
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 576(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 588(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 512(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 548(%esp), %eax
+ movl 32(%esp), %edx # 4-byte Reload
+ addl 512(%esp), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 516(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 520(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 524(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 528(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ adcl 532(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 536(%esp), %ebp
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 540(%esp), %edi
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 544(%esp), %esi
+ adcl $0, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %edx, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 472(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ movl 32(%esp), %eax # 4-byte Reload
+ addl 472(%esp), %eax
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 496(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ adcl 500(%esp), %edi
+ movl %edi, %ebp
+ adcl 504(%esp), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 432(%esp), %ecx
+ movl 820(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv288x32
+ movl 468(%esp), %eax
+ movl 40(%esp), %ecx # 4-byte Reload
+ addl 432(%esp), %ecx
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 436(%esp), %esi
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 440(%esp), %edi
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 444(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 448(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 452(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 456(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 460(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 464(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 392(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ addl 392(%esp), %ebp
+ adcl 396(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 400(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 412(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 424(%esp), %edi
+ movl 40(%esp), %esi # 4-byte Reload
+ adcl 428(%esp), %esi
+ movl 824(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 352(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 388(%esp), %eax
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 352(%esp), %ecx
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 356(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 360(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 364(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 368(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 372(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 376(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 380(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl 384(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ adcl $0, %ebp
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ addl 312(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 324(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 340(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 348(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 308(%esp), %edx
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 272(%esp), %ecx
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 280(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 292(%esp), %ebp
+ adcl 296(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 232(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ addl 232(%esp), %edi
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 236(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ebp, %edi
+ adcl 252(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 192(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 228(%esp), %ebp
+ movl %esi, %ecx
+ addl 192(%esp), %ecx
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 196(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 208(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 152(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ addl 152(%esp), %edi
+ adcl 156(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 160(%esp), %edi
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 164(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 188(%esp), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 112(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 148(%esp), %ebp
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 112(%esp), %ecx
+ adcl 116(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ adcl 120(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 124(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 128(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %esi # 4-byte Reload
+ adcl 132(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 136(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 140(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 144(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 72(%esp), %ecx
+ movl 828(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv288x32
+ addl 72(%esp), %edi
+ movl 44(%esp), %edi # 4-byte Reload
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ebx # 4-byte Reload
+ adcl 80(%esp), %ebx
+ movl %ebx, 64(%esp) # 4-byte Spill
+ adcl 84(%esp), %edi
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl 92(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 100(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl 108(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 828(%esp), %eax
+ subl (%eax), %edx
+ sbbl 4(%eax), %ebx
+ movl %edi, %ecx
+ sbbl 8(%eax), %ecx
+ movl 52(%esp), %esi # 4-byte Reload
+ sbbl 12(%eax), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 40(%esp), %esi # 4-byte Reload
+ sbbl 16(%eax), %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ sbbl 20(%eax), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ sbbl 24(%eax), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ sbbl 28(%eax), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ sbbl 32(%eax), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ sarl $31, %ebp
+ testl %ebp, %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ js .LBB131_2
+# BB#1:
+ movl %edx, %eax
+.LBB131_2:
+ movl 816(%esp), %edx
+ movl %eax, (%edx)
+ movl 64(%esp), %esi # 4-byte Reload
+ js .LBB131_4
+# BB#3:
+ movl %ebx, %esi
+.LBB131_4:
+ movl %esi, 4(%edx)
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl 40(%esp), %eax # 4-byte Reload
+ js .LBB131_6
+# BB#5:
+ movl %ecx, %edi
+.LBB131_6:
+ movl %edi, 8(%edx)
+ js .LBB131_8
+# BB#7:
+ movl 16(%esp), %ebp # 4-byte Reload
+.LBB131_8:
+ movl %ebp, 12(%edx)
+ js .LBB131_10
+# BB#9:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB131_10:
+ movl %eax, 16(%edx)
+ movl 48(%esp), %eax # 4-byte Reload
+ js .LBB131_12
+# BB#11:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB131_12:
+ movl %eax, 20(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB131_14
+# BB#13:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB131_14:
+ movl %eax, 24(%edx)
+ movl 36(%esp), %eax # 4-byte Reload
+ js .LBB131_16
+# BB#15:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB131_16:
+ movl %eax, 28(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB131_18
+# BB#17:
+ movl 44(%esp), %eax # 4-byte Reload
+.LBB131_18:
+ movl %eax, 32(%edx)
+ addl $796, %esp # imm = 0x31C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end131:
+ .size mcl_fp_montNF9L, .Lfunc_end131-mcl_fp_montNF9L
+
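+# annotation: mcl_fp_montRed9L appears to Montgomery-reduce an 18-limb
+# double-width input to a 9-limb result, with a final conditional subtraction.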
+ .globl mcl_fp_montRed9L
+ .align 16, 0x90
+ .type mcl_fp_montRed9L,@function
+mcl_fp_montRed9L: # @mcl_fp_montRed9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $492, %esp # imm = 0x1EC
+ calll .L132$pb
+.L132$pb:
+ popl %ebx
+.Ltmp13:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp13-.L132$pb), %ebx
+ movl 520(%esp), %edx
+ movl -4(%edx), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 516(%esp), %eax
+ movl (%eax), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 4(%eax), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl %esi, %ecx
+ imull %edi, %ecx
+ movl 68(%eax), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 64(%eax), %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 60(%eax), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 56(%eax), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 52(%eax), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 48(%eax), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 44(%eax), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 40(%eax), %edi
+ movl %edi, 124(%esp) # 4-byte Spill
+ movl 36(%eax), %edi
+ movl %edi, 120(%esp) # 4-byte Spill
+ movl 32(%eax), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 28(%eax), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 24(%eax), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 20(%eax), %ebp
+ movl 16(%eax), %edi
+ movl 12(%eax), %esi
+ movl 8(%eax), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl (%edx), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 32(%edx), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 28(%edx), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%edx), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 20(%edx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 16(%edx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 12(%edx), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 8(%edx), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 4(%edx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, (%esp)
+ leal 448(%esp), %ecx
+ calll .LmulPv288x32
+ movl 76(%esp), %eax # 4-byte Reload
+ addl 448(%esp), %eax
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 452(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 460(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ adcl 464(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 468(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ movl 96(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ movl 108(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ sbbl %eax, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 408(%esp), %ecx
+ movl 520(%esp), %edx
+ calll .LmulPv288x32
+ movl 76(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ movl 52(%esp), %ecx # 4-byte Reload
+ addl 408(%esp), %ecx
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 412(%esp), %edx
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 416(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 420(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 424(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 428(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 432(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 436(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 440(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 444(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ adcl $0, %esi
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ adcl $0, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 368(%esp), %ecx
+ movl 520(%esp), %edx
+ calll .LmulPv288x32
+ addl 368(%esp), %edi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 372(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl 404(%esp), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 328(%esp), %ecx
+ movl 520(%esp), %edx
+ calll .LmulPv288x32
+ addl 328(%esp), %ebp
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 332(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 364(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 288(%esp), %ecx
+ movl 520(%esp), %edx
+ calll .LmulPv288x32
+ movl 64(%esp), %eax # 4-byte Reload
+ addl 288(%esp), %eax
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 292(%esp), %ecx
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 248(%esp), %ecx
+ movl 520(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv288x32
+ addl 248(%esp), %esi
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 252(%esp), %ecx
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl 264(%esp), %ebp
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %edi, %esi
+ adcl $0, %esi
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 208(%esp), %ecx
+ movl 520(%esp), %edx
+ calll .LmulPv288x32
+ addl 208(%esp), %edi
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 212(%esp), %ecx
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 220(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 168(%esp), %ecx
+ movl 520(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv288x32
+ addl 168(%esp), %ebp
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 172(%esp), %ecx
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %ebp # 4-byte Reload
+ adcl 180(%esp), %ebp
+ movl 96(%esp), %esi # 4-byte Reload
+ adcl 184(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 128(%esp), %ecx
+ movl 520(%esp), %edx
+ calll .LmulPv288x32
+ addl 128(%esp), %edi
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 132(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl %eax, %edi
+ adcl 136(%esp), %ebp
+ movl %ebp, 124(%esp) # 4-byte Spill
+ adcl 140(%esp), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 144(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 152(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 164(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ subl 20(%esp), %edi # 4-byte Folded Reload
+ movl 124(%esp), %eax # 4-byte Reload
+ sbbl 16(%esp), %eax # 4-byte Folded Reload
+ sbbl 24(%esp), %esi # 4-byte Folded Reload
+ sbbl 28(%esp), %ecx # 4-byte Folded Reload
+ sbbl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 72(%esp) # 4-byte Spill
+ movl 112(%esp), %ebx # 4-byte Reload
+ sbbl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %ebx # 4-byte Reload
+ sbbl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %ebx # 4-byte Reload
+ sbbl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 92(%esp) # 4-byte Spill
+ movl %edx, %ebx
+ movl %ebp, %edx
+ sbbl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 104(%esp) # 4-byte Spill
+ sbbl $0, %edx
+ andl $1, %edx
+ jne .LBB132_2
+# BB#1:
+ movl %ecx, 116(%esp) # 4-byte Spill
+.LBB132_2:
+ testb %dl, %dl
+ movl 120(%esp), %ecx # 4-byte Reload
+ jne .LBB132_4
+# BB#3:
+ movl %edi, %ecx
+.LBB132_4:
+ movl 512(%esp), %edi
+ movl %ecx, (%edi)
+ movl 88(%esp), %ecx # 4-byte Reload
+ jne .LBB132_6
+# BB#5:
+ movl %eax, 124(%esp) # 4-byte Spill
+.LBB132_6:
+ movl 124(%esp), %eax # 4-byte Reload
+ movl %eax, 4(%edi)
+ movl 96(%esp), %eax # 4-byte Reload
+ jne .LBB132_8
+# BB#7:
+ movl %esi, %eax
+.LBB132_8:
+ movl %eax, 8(%edi)
+ movl 116(%esp), %eax # 4-byte Reload
+ movl %eax, 12(%edi)
+ movl 80(%esp), %eax # 4-byte Reload
+ movl 108(%esp), %ebp # 4-byte Reload
+ jne .LBB132_10
+# BB#9:
+ movl 72(%esp), %ebp # 4-byte Reload
+.LBB132_10:
+ movl %ebp, 16(%edi)
+ movl 112(%esp), %ebx # 4-byte Reload
+ jne .LBB132_12
+# BB#11:
+ movl 76(%esp), %ebx # 4-byte Reload
+.LBB132_12:
+ movl %ebx, 20(%edi)
+ movl 100(%esp), %esi # 4-byte Reload
+ jne .LBB132_14
+# BB#13:
+ movl 84(%esp), %esi # 4-byte Reload
+.LBB132_14:
+ movl %esi, 24(%edi)
+ jne .LBB132_16
+# BB#15:
+ movl 92(%esp), %ecx # 4-byte Reload
+.LBB132_16:
+ movl %ecx, 28(%edi)
+ jne .LBB132_18
+# BB#17:
+ movl 104(%esp), %eax # 4-byte Reload
+.LBB132_18:
+ movl %eax, 32(%edi)
+ addl $492, %esp # imm = 0x1EC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end132:
+ .size mcl_fp_montRed9L, .Lfunc_end132-mcl_fp_montRed9L
+
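+# annotation: mcl_fp_addPre9L adds two 9-limb operands without modular
+# reduction and returns the final carry bit in %eax.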
+ .globl mcl_fp_addPre9L
+ .align 16, 0x90
+ .type mcl_fp_addPre9L,@function
+mcl_fp_addPre9L: # @mcl_fp_addPre9L
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 20(%esp), %ecx
+ addl (%ecx), %edx
+ adcl 4(%ecx), %esi
+ movl 8(%eax), %edi
+ adcl 8(%ecx), %edi
+ movl 16(%esp), %ebx
+ movl %edx, (%ebx)
+ movl 12(%ecx), %edx
+ movl %esi, 4(%ebx)
+ movl 16(%ecx), %esi
+ adcl 12(%eax), %edx
+ adcl 16(%eax), %esi
+ movl %edi, 8(%ebx)
+ movl 20(%eax), %edi
+ movl %edx, 12(%ebx)
+ movl 20(%ecx), %edx
+ adcl %edi, %edx
+ movl 24(%eax), %edi
+ movl %esi, 16(%ebx)
+ movl 24(%ecx), %esi
+ adcl %edi, %esi
+ movl 28(%eax), %edi
+ movl %edx, 20(%ebx)
+ movl 28(%ecx), %edx
+ adcl %edi, %edx
+ movl %esi, 24(%ebx)
+ movl %edx, 28(%ebx)
+ movl 32(%eax), %eax
+ movl 32(%ecx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 32(%ebx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end133:
+ .size mcl_fp_addPre9L, .Lfunc_end133-mcl_fp_addPre9L
+
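+# annotation: mcl_fp_subPre9L subtracts two 9-limb operands without modular
+# reduction and returns the final borrow bit in %eax.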
+ .globl mcl_fp_subPre9L
+ .align 16, 0x90
+ .type mcl_fp_subPre9L,@function
+mcl_fp_subPre9L: # @mcl_fp_subPre9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %eax, %eax
+ movl 28(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edx), %ebx
+ movl 20(%esp), %ebp
+ movl %esi, (%ebp)
+ movl 12(%ecx), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ebp)
+ movl 16(%ecx), %edi
+ sbbl 16(%edx), %edi
+ movl %ebx, 8(%ebp)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%ebp)
+ movl 20(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %edi, 16(%ebp)
+ movl 24(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 28(%edx), %ebx
+ movl %esi, 20(%ebp)
+ movl 28(%ecx), %esi
+ sbbl %ebx, %esi
+ movl %edi, 24(%ebp)
+ movl %esi, 28(%ebp)
+ movl 32(%edx), %edx
+ movl 32(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 32(%ebp)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end134:
+ .size mcl_fp_subPre9L, .Lfunc_end134-mcl_fp_subPre9L
+
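+# annotation: mcl_fp_shr1_9L shifts a 9-limb operand right by one bit,
+# propagating bits across limbs with shrdl.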
+ .globl mcl_fp_shr1_9L
+ .align 16, 0x90
+ .type mcl_fp_shr1_9L,@function
+mcl_fp_shr1_9L: # @mcl_fp_shr1_9L
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl 8(%esp), %esi
+ movl %ecx, (%esi)
+ movl 8(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 4(%esi)
+ movl 12(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 8(%esi)
+ movl 16(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 12(%esi)
+ movl 20(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 16(%esi)
+ movl 24(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 20(%esi)
+ movl 28(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 24(%esi)
+ movl 32(%eax), %eax
+ shrdl $1, %eax, %edx
+ movl %edx, 28(%esi)
+ shrl %eax
+ movl %eax, 32(%esi)
+ popl %esi
+ retl
+.Lfunc_end135:
+ .size mcl_fp_shr1_9L, .Lfunc_end135-mcl_fp_shr1_9L
+
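+# annotation: mcl_fp_add9L appears to compute a modular addition: it adds the
+# 9-limb operands, then keeps the sum minus the modulus when no borrow occurs.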
+ .globl mcl_fp_add9L
+ .align 16, 0x90
+ .type mcl_fp_add9L,@function
+mcl_fp_add9L: # @mcl_fp_add9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $20, %esp
+ movl 48(%esp), %edi
+ movl (%edi), %ecx
+ movl 4(%edi), %eax
+ movl 44(%esp), %ebx
+ addl (%ebx), %ecx
+ movl %ecx, %ebp
+ adcl 4(%ebx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 8(%edi), %eax
+ adcl 8(%ebx), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 12(%ebx), %ecx
+ movl 16(%ebx), %eax
+ adcl 12(%edi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 16(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 20(%ebx), %esi
+ adcl 20(%edi), %esi
+ movl 24(%ebx), %edx
+ adcl 24(%edi), %edx
+ movl 28(%ebx), %ecx
+ adcl 28(%edi), %ecx
+ movl 32(%ebx), %eax
+ adcl 32(%edi), %eax
+ movl 40(%esp), %edi
+ movl %ebp, (%edi)
+ movl 16(%esp), %ebx # 4-byte Reload
+ movl %ebx, 4(%edi)
+ movl 12(%esp), %ebx # 4-byte Reload
+ movl %ebx, 8(%edi)
+ movl 8(%esp), %ebx # 4-byte Reload
+ movl %ebx, 12(%edi)
+ movl 4(%esp), %ebx # 4-byte Reload
+ movl %ebx, 16(%edi)
+ movl %esi, 20(%edi)
+ movl %edx, 24(%edi)
+ movl %ecx, 28(%edi)
+ movl %eax, 32(%edi)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 52(%esp), %edi
+ subl (%edi), %ebp
+ movl %ebp, (%esp) # 4-byte Spill
+ movl 16(%esp), %ebp # 4-byte Reload
+ sbbl 4(%edi), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %ebp # 4-byte Reload
+ sbbl 8(%edi), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 8(%esp), %ebp # 4-byte Reload
+ sbbl 12(%edi), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 4(%esp), %ebp # 4-byte Reload
+ sbbl 16(%edi), %ebp
+ sbbl 20(%edi), %esi
+ sbbl 24(%edi), %edx
+ sbbl 28(%edi), %ecx
+ sbbl 32(%edi), %eax
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB136_2
+# BB#1: # %nocarry
+ movl (%esp), %edi # 4-byte Reload
+ movl 40(%esp), %ebx
+ movl %edi, (%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 8(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl %ebp, 16(%ebx)
+ movl %esi, 20(%ebx)
+ movl %edx, 24(%ebx)
+ movl %ecx, 28(%ebx)
+ movl %eax, 32(%ebx)
+.LBB136_2: # %carry
+ addl $20, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end136:
+ .size mcl_fp_add9L, .Lfunc_end136-mcl_fp_add9L
+
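+# annotation: mcl_fp_addNF9L appears to be the modular-addition variant that
+# selects between the sum and (sum - modulus) by the sign of the subtraction.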
+ .globl mcl_fp_addNF9L
+ .align 16, 0x90
+ .type mcl_fp_addNF9L,@function
+mcl_fp_addNF9L: # @mcl_fp_addNF9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $72, %esp
+ movl 100(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edi
+ movl 96(%esp), %esi
+ addl (%esi), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 4(%esi), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 32(%eax), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 28(%eax), %ebp
+ movl 24(%eax), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 20(%eax), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 16(%eax), %ebx
+ movl 12(%eax), %edx
+ movl 8(%eax), %ecx
+ adcl 8(%esi), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 12(%esi), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 16(%esi), %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 20(%esi), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 24(%esi), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 28(%esi), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 32(%esi), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 104(%esp), %esi
+ movl 36(%esp), %eax # 4-byte Reload
+ movl %eax, %ebp
+ subl (%esi), %ebp
+ movl %ebp, (%esp) # 4-byte Spill
+ sbbl 4(%esi), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ sbbl 8(%esi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 12(%esi), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ sbbl 16(%esi), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 64(%esp), %ebx # 4-byte Reload
+ sbbl 20(%esi), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ sbbl 24(%esi), %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ sbbl 28(%esi), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ movl %ecx, %edx
+ movl %ecx, %ebp
+ sbbl 32(%esi), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %edx, %esi
+ sarl $31, %esi
+ testl %esi, %esi
+ js .LBB137_2
+# BB#1:
+ movl (%esp), %eax # 4-byte Reload
+.LBB137_2:
+ movl 92(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 44(%esp), %eax # 4-byte Reload
+ js .LBB137_4
+# BB#3:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB137_4:
+ movl %eax, 4(%ecx)
+ movl 68(%esp), %esi # 4-byte Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ movl 40(%esp), %eax # 4-byte Reload
+ js .LBB137_6
+# BB#5:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB137_6:
+ movl %eax, 8(%ecx)
+ movl %ebp, %eax
+ js .LBB137_8
+# BB#7:
+ movl 12(%esp), %edx # 4-byte Reload
+.LBB137_8:
+ movl %edx, 12(%ecx)
+ movl 56(%esp), %edx # 4-byte Reload
+ js .LBB137_10
+# BB#9:
+ movl 16(%esp), %ebx # 4-byte Reload
+.LBB137_10:
+ movl %ebx, 16(%ecx)
+ js .LBB137_12
+# BB#11:
+ movl 20(%esp), %edi # 4-byte Reload
+.LBB137_12:
+ movl %edi, 20(%ecx)
+ js .LBB137_14
+# BB#13:
+ movl 24(%esp), %esi # 4-byte Reload
+.LBB137_14:
+ movl %esi, 24(%ecx)
+ js .LBB137_16
+# BB#15:
+ movl 28(%esp), %edx # 4-byte Reload
+.LBB137_16:
+ movl %edx, 28(%ecx)
+ js .LBB137_18
+# BB#17:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB137_18:
+ movl %eax, 32(%ecx)
+ addl $72, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end137:
+ .size mcl_fp_addNF9L, .Lfunc_end137-mcl_fp_addNF9L
+
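+# annotation: mcl_fp_sub9L appears to compute a modular subtraction: on borrow
+# it adds the modulus back to the 9-limb difference (the carry path below).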
+ .globl mcl_fp_sub9L
+ .align 16, 0x90
+ .type mcl_fp_sub9L,@function
+mcl_fp_sub9L: # @mcl_fp_sub9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $28, %esp
+ movl 52(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+ xorl %ebx, %ebx
+ movl 56(%esp), %edi
+ subl (%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ sbbl 4(%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ sbbl 12(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 16(%esi), %edx
+ sbbl 16(%edi), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 20(%esi), %ecx
+ sbbl 20(%edi), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ sbbl 24(%edi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 28(%esi), %ebp
+ sbbl 28(%edi), %ebp
+ movl 32(%esi), %esi
+ sbbl 32(%edi), %esi
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 48(%esp), %ebx
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, (%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl %edx, 16(%ebx)
+ movl %ecx, 20(%ebx)
+ movl %eax, 24(%ebx)
+ movl %ebp, 28(%ebx)
+ movl %esi, 32(%ebx)
+ je .LBB138_2
+# BB#1: # %carry
+ movl %esi, %edi
+ movl 60(%esp), %esi
+ movl 12(%esp), %ecx # 4-byte Reload
+ addl (%esi), %ecx
+ movl %ecx, (%ebx)
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 20(%esp), %ecx # 4-byte Reload
+ adcl 8(%esi), %ecx
+ movl 12(%esi), %eax
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl 24(%esi), %ecx
+ adcl (%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl %ecx, 24(%ebx)
+ movl 28(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 28(%ebx)
+ movl 32(%esi), %eax
+ adcl %edi, %eax
+ movl %eax, 32(%ebx)
+.LBB138_2: # %nocarry
+ addl $28, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end138:
+ .size mcl_fp_sub9L, .Lfunc_end138-mcl_fp_sub9L
+
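+# annotation: mcl_fp_subNF9L appears to be a branch-free modular subtraction:
+# the modulus is masked by the sign of the borrow and added back.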
+ .globl mcl_fp_subNF9L
+ .align 16, 0x90
+ .type mcl_fp_subNF9L,@function
+mcl_fp_subNF9L: # @mcl_fp_subNF9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $48, %esp
+ movl 72(%esp), %ecx
+ movl (%ecx), %edx
+ movl 4(%ecx), %eax
+ movl 76(%esp), %esi
+ subl (%esi), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ sbbl 4(%esi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%ecx), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 28(%ecx), %edx
+ movl 24(%ecx), %edi
+ movl 20(%ecx), %ebx
+ movl 16(%ecx), %ebp
+ movl 12(%ecx), %eax
+ movl 8(%ecx), %ecx
+ movl 76(%esp), %esi
+ sbbl 8(%esi), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx
+ sbbl 12(%ecx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ sbbl 16(%ecx), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ sbbl 20(%ecx), %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ sbbl 24(%ecx), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ sbbl 28(%ecx), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ sbbl 32(%ecx), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %edx, %ecx
+ sarl $31, %ecx
+ movl %ecx, %eax
+ shldl $1, %edx, %eax
+ movl 80(%esp), %ebp
+ movl 12(%ebp), %edx
+ andl %eax, %edx
+ movl %edx, (%esp) # 4-byte Spill
+ movl 4(%ebp), %edi
+ andl %eax, %edi
+ andl (%ebp), %eax
+ movl 32(%ebp), %edx
+ andl %ecx, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 28(%ebp), %edx
+ andl %ecx, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ roll %ecx
+ movl 24(%ebp), %ebx
+ andl %ecx, %ebx
+ movl 20(%ebp), %esi
+ andl %ecx, %esi
+ movl 16(%ebp), %edx
+ andl %ecx, %edx
+ andl 8(%ebp), %ecx
+ addl 32(%esp), %eax # 4-byte Folded Reload
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl 68(%esp), %ebp
+ movl %eax, (%ebp)
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %edi, 4(%ebp)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 8(%ebp)
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 12(%ebp)
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 16(%ebp)
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, 20(%ebp)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %ebx, 24(%ebp)
+ movl %eax, 28(%ebp)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%ebp)
+ addl $48, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end139:
+ .size mcl_fp_subNF9L, .Lfunc_end139-mcl_fp_subNF9L
+
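+# annotation: mcl_fpDbl_add9L appears to add two 18-limb double-width values,
+# conditionally subtracting the modulus from the upper 9 limbs.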
+ .globl mcl_fpDbl_add9L
+ .align 16, 0x90
+ .type mcl_fpDbl_add9L,@function
+mcl_fpDbl_add9L: # @mcl_fpDbl_add9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $68, %esp
+ movl 96(%esp), %edx
+ movl 92(%esp), %edi
+ movl 12(%edi), %esi
+ movl 16(%edi), %ecx
+ movl 8(%edx), %ebx
+ movl (%edx), %ebp
+ addl (%edi), %ebp
+ movl 88(%esp), %eax
+ movl %ebp, (%eax)
+ movl 4(%edx), %ebp
+ adcl 4(%edi), %ebp
+ adcl 8(%edi), %ebx
+ adcl 12(%edx), %esi
+ adcl 16(%edx), %ecx
+ movl %ebp, 4(%eax)
+ movl 44(%edx), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl %ebx, 8(%eax)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%eax)
+ movl 20(%edi), %esi
+ adcl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %ecx, 16(%eax)
+ movl 24(%edi), %ecx
+ adcl %ebx, %ecx
+ movl 28(%edx), %ebx
+ movl %esi, 20(%eax)
+ movl 28(%edi), %esi
+ adcl %ebx, %esi
+ movl 32(%edx), %ebx
+ movl %ecx, 24(%eax)
+ movl 32(%edi), %ecx
+ adcl %ebx, %ecx
+ movl 36(%edx), %ebp
+ movl %esi, 28(%eax)
+ movl 36(%edi), %esi
+ adcl %ebp, %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 40(%edx), %esi
+ movl %ecx, 32(%eax)
+ movl 40(%edi), %eax
+ adcl %esi, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%edi), %eax
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%edx), %ecx
+ movl 48(%edi), %ebx
+ adcl %ecx, %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 52(%edx), %eax
+ movl 52(%edi), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 56(%edx), %esi
+ movl 56(%edi), %eax
+ adcl %esi, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%edx), %ebp
+ movl 60(%edi), %esi
+ adcl %ebp, %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 64(%edx), %eax
+ movl 64(%edi), %ebp
+ adcl %eax, %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 68(%edx), %edx
+ movl 68(%edi), %eax
+ adcl %edx, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ sbbl %edx, %edx
+ andl $1, %edx
+ movl 100(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ subl (%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ sbbl 4(%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ sbbl 8(%edi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ sbbl 12(%edi), %ebx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ sbbl 16(%edi), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ sbbl 20(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ sbbl 24(%edi), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ movl 32(%esp), %ebp # 4-byte Reload
+ sbbl 28(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebp, %ebx
+ sbbl 32(%edi), %ebx
+ sbbl $0, %edx
+ andl $1, %edx
+ jne .LBB140_2
+# BB#1:
+ movl %ebx, %ebp
+.LBB140_2:
+ testb %dl, %dl
+ movl 60(%esp), %edx # 4-byte Reload
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl 36(%esp), %esi # 4-byte Reload
+ movl 56(%esp), %edi # 4-byte Reload
+ movl 52(%esp), %ebx # 4-byte Reload
+ jne .LBB140_4
+# BB#3:
+ movl (%esp), %ecx # 4-byte Reload
+ movl 4(%esp), %esi # 4-byte Reload
+ movl 8(%esp), %edi # 4-byte Reload
+ movl 12(%esp), %ebx # 4-byte Reload
+ movl 16(%esp), %edx # 4-byte Reload
+.LBB140_4:
+ movl 88(%esp), %eax
+ movl %edx, 36(%eax)
+ movl %ebx, 40(%eax)
+ movl %edi, 44(%eax)
+ movl %esi, 48(%eax)
+ movl %ecx, 52(%eax)
+ movl 44(%esp), %edx # 4-byte Reload
+ movl 64(%esp), %ecx # 4-byte Reload
+ jne .LBB140_6
+# BB#5:
+ movl 20(%esp), %ecx # 4-byte Reload
+.LBB140_6:
+ movl %ecx, 56(%eax)
+ movl 48(%esp), %ecx # 4-byte Reload
+ jne .LBB140_8
+# BB#7:
+ movl 24(%esp), %edx # 4-byte Reload
+.LBB140_8:
+ movl %edx, 60(%eax)
+ jne .LBB140_10
+# BB#9:
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB140_10:
+ movl %ecx, 64(%eax)
+ movl %ebp, 68(%eax)
+ addl $68, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end140:
+ .size mcl_fpDbl_add9L, .Lfunc_end140-mcl_fpDbl_add9L
+
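+# annotation: mcl_fpDbl_sub9L appears to subtract two 18-limb double-width
+# values, adding the modulus back into the upper 9 limbs on borrow.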
+ .globl mcl_fpDbl_sub9L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub9L,@function
+mcl_fpDbl_sub9L: # @mcl_fpDbl_sub9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $52, %esp
+ movl 76(%esp), %ebx
+ movl (%ebx), %eax
+ movl 4(%ebx), %edx
+ movl 80(%esp), %ebp
+ subl (%ebp), %eax
+ sbbl 4(%ebp), %edx
+ movl 8(%ebx), %esi
+ sbbl 8(%ebp), %esi
+ movl 72(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 12(%ebx), %eax
+ sbbl 12(%ebp), %eax
+ movl %edx, 4(%ecx)
+ movl 16(%ebx), %edx
+ sbbl 16(%ebp), %edx
+ movl %esi, 8(%ecx)
+ movl 20(%ebp), %esi
+ movl %eax, 12(%ecx)
+ movl 20(%ebx), %eax
+ sbbl %esi, %eax
+ movl 24(%ebp), %esi
+ movl %edx, 16(%ecx)
+ movl 24(%ebx), %edx
+ sbbl %esi, %edx
+ movl 28(%ebp), %esi
+ movl %eax, 20(%ecx)
+ movl 28(%ebx), %eax
+ sbbl %esi, %eax
+ movl 32(%ebp), %esi
+ movl %edx, 24(%ecx)
+ movl 32(%ebx), %edx
+ sbbl %esi, %edx
+ movl 36(%ebp), %esi
+ movl %eax, 28(%ecx)
+ movl 36(%ebx), %eax
+ sbbl %esi, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%ebp), %eax
+ movl %edx, 32(%ecx)
+ movl 40(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 44(%ebp), %eax
+ movl 44(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 48(%ebp), %eax
+ movl 48(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 52(%ebp), %eax
+ movl 52(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 56(%ebp), %eax
+ movl 56(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 60(%ebp), %eax
+ movl 60(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 64(%ebp), %eax
+ movl 64(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 68(%ebp), %eax
+ movl 68(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 84(%esp), %ebp
+ jne .LBB141_1
+# BB#2:
+ movl $0, 12(%esp) # 4-byte Folded Spill
+ jmp .LBB141_3
+.LBB141_1:
+ movl 32(%ebp), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+.LBB141_3:
+ testb %al, %al
+ jne .LBB141_4
+# BB#5:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ movl $0, %esi
+ jmp .LBB141_6
+.LBB141_4:
+ movl (%ebp), %esi
+ movl 4(%ebp), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+.LBB141_6:
+ jne .LBB141_7
+# BB#8:
+ movl $0, 8(%esp) # 4-byte Folded Spill
+ jmp .LBB141_9
+.LBB141_7:
+ movl 28(%ebp), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+.LBB141_9:
+ jne .LBB141_10
+# BB#11:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB141_12
+.LBB141_10:
+ movl 24(%ebp), %eax
+ movl %eax, (%esp) # 4-byte Spill
+.LBB141_12:
+ jne .LBB141_13
+# BB#14:
+ movl $0, %edi
+ jmp .LBB141_15
+.LBB141_13:
+ movl 20(%ebp), %edi
+.LBB141_15:
+ jne .LBB141_16
+# BB#17:
+ movl $0, %ebx
+ jmp .LBB141_18
+.LBB141_16:
+ movl 16(%ebp), %ebx
+.LBB141_18:
+ jne .LBB141_19
+# BB#20:
+ movl %ebp, %eax
+ movl $0, %ebp
+ jmp .LBB141_21
+.LBB141_19:
+ movl %ebp, %eax
+ movl 12(%eax), %ebp
+.LBB141_21:
+ jne .LBB141_22
+# BB#23:
+ xorl %eax, %eax
+ jmp .LBB141_24
+.LBB141_22:
+ movl 8(%eax), %eax
+.LBB141_24:
+ addl 24(%esp), %esi # 4-byte Folded Reload
+ movl 4(%esp), %edx # 4-byte Reload
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %esi, 36(%ecx)
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 40(%ecx)
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 44(%ecx)
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, 48(%ecx)
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 52(%ecx)
+ movl (%esp), %edx # 4-byte Reload
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edi, 56(%ecx)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 60(%ecx)
+ movl %eax, 64(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%ecx)
+ addl $52, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end141:
+ .size mcl_fpDbl_sub9L, .Lfunc_end141-mcl_fpDbl_sub9L
+
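+# annotation: .LmulPv320x32 appears to multiply the 10-limb (320-bit) operand
+# in %edx by the 32-bit word passed on the stack, writing an 11-limb result
+# to the buffer in %ecx and returning that pointer in %eax.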
+ .align 16, 0x90
+ .type .LmulPv320x32,@function
+.LmulPv320x32: # @mulPv320x32
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $64, %esp
+ movl %edx, %esi
+ movl 84(%esp), %edi
+ movl %edi, %eax
+ mull 36(%esi)
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 32(%esi)
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 28(%esi)
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 24(%esi)
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 20(%esi)
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 16(%esi)
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 12(%esi)
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 8(%esi)
+ movl %edx, %ebp
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 4(%esi)
+ movl %edx, %ebx
+ movl %eax, (%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull (%esi)
+ movl %eax, (%ecx)
+ addl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%ecx)
+ adcl 4(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 8(%ecx)
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%ecx)
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%ecx)
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%ecx)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 40(%ecx)
+ movl %ecx, %eax
+ addl $64, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end142:
+ .size .LmulPv320x32, .Lfunc_end142-.LmulPv320x32
+
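+# annotation: mcl_fp_mulUnitPre10L appears to be a thin wrapper that calls
+# .LmulPv320x32 and copies the 11-limb product to the destination.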
+ .globl mcl_fp_mulUnitPre10L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre10L,@function
+mcl_fp_mulUnitPre10L: # @mcl_fp_mulUnitPre10L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $76, %esp
+ calll .L143$pb
+.L143$pb:
+ popl %ebx
+.Ltmp14:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp14-.L143$pb), %ebx
+ movl 104(%esp), %eax
+ movl %eax, (%esp)
+ leal 32(%esp), %ecx
+ movl 100(%esp), %edx
+ calll .LmulPv320x32
+ movl 72(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 68(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 64(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 60(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 56(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 52(%esp), %ebx
+ movl 48(%esp), %ebp
+ movl 44(%esp), %edi
+ movl 40(%esp), %esi
+ movl 32(%esp), %edx
+ movl 36(%esp), %ecx
+ movl 96(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %edi, 12(%eax)
+ movl %ebp, 16(%eax)
+ movl %ebx, 20(%eax)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%eax)
+ addl $76, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end143:
+ .size mcl_fp_mulUnitPre10L, .Lfunc_end143-mcl_fp_mulUnitPre10L
+
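+# annotation: mcl_fpDbl_mulPre10L appears to form the full 20-limb product by
+# splitting each operand into 5-limb halves and combining three calls to
+# mcl_fpDbl_mulPre5L (one-level Karatsuba).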
+ .globl mcl_fpDbl_mulPre10L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre10L,@function
+mcl_fpDbl_mulPre10L: # @mcl_fpDbl_mulPre10L
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $188, %esp
+ calll .L144$pb
+.L144$pb:
+ popl %ebx
+.Ltmp15:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp15-.L144$pb), %ebx
+ movl %ebx, -128(%ebp) # 4-byte Spill
+ movl 16(%ebp), %edi
+ movl %edi, 8(%esp)
+ movl 12(%ebp), %esi
+ movl %esi, 4(%esp)
+ movl 8(%ebp), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre5L@PLT
+ leal 20(%edi), %eax
+ movl %eax, 8(%esp)
+ leal 20(%esi), %eax
+ movl %eax, 4(%esp)
+ movl 8(%ebp), %eax
+ leal 40(%eax), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre5L@PLT
+ movl 28(%esi), %edi
+ movl (%esi), %ebx
+ movl 4(%esi), %eax
+ addl 20(%esi), %ebx
+ movl %ebx, -148(%ebp) # 4-byte Spill
+ adcl 24(%esi), %eax
+ movl %eax, -132(%ebp) # 4-byte Spill
+ adcl 8(%esi), %edi
+ movl %edi, -140(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -96(%ebp) # 4-byte Spill
+ movl 16(%ebp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+ addl 20(%esi), %eax
+ movl %eax, -152(%ebp) # 4-byte Spill
+ adcl 24(%esi), %ecx
+ movl %ecx, -120(%ebp) # 4-byte Spill
+ movl 28(%esi), %eax
+ adcl 8(%esi), %eax
+ movl %eax, -160(%ebp) # 4-byte Spill
+ movl 32(%esi), %eax
+ adcl 12(%esi), %eax
+ movl 36(%esi), %ecx
+ adcl 16(%esi), %ecx
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %esi
+ popl %eax
+ movl %esi, -156(%ebp) # 4-byte Spill
+ movl %ebx, -124(%ebp) # 4-byte Spill
+ jb .LBB144_2
+# BB#1:
+ xorl %edi, %edi
+ movl $0, -124(%ebp) # 4-byte Folded Spill
+.LBB144_2:
+ movl %edi, -136(%ebp) # 4-byte Spill
+ movl 12(%ebp), %esi
+ movl %esi, %ebx
+ movl 36(%ebx), %esi
+ movl 32(%ebx), %edi
+ movl -96(%ebp), %edx # 4-byte Reload
+ pushl %eax
+ movl %edx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ adcl 12(%ebx), %edi
+ movl %edi, -116(%ebp) # 4-byte Spill
+ adcl 16(%ebx), %esi
+ movl %esi, -144(%ebp) # 4-byte Spill
+ movl %ecx, -112(%ebp) # 4-byte Spill
+ movl %eax, -104(%ebp) # 4-byte Spill
+ movl -160(%ebp), %edx # 4-byte Reload
+ movl %edx, -108(%ebp) # 4-byte Spill
+ movl -120(%ebp), %esi # 4-byte Reload
+ movl %esi, -96(%ebp) # 4-byte Spill
+ movl -152(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -100(%ebp) # 4-byte Spill
+ jb .LBB144_4
+# BB#3:
+ movl $0, -112(%ebp) # 4-byte Folded Spill
+ movl $0, -104(%ebp) # 4-byte Folded Spill
+ movl $0, -108(%ebp) # 4-byte Folded Spill
+ movl $0, -96(%ebp) # 4-byte Folded Spill
+ movl $0, -100(%ebp) # 4-byte Folded Spill
+.LBB144_4:
+ movl -148(%ebp), %esi # 4-byte Reload
+ movl %esi, -72(%ebp)
+ movl -132(%ebp), %edi # 4-byte Reload
+ movl %edi, -68(%ebp)
+ movl -140(%ebp), %esi # 4-byte Reload
+ movl %esi, -64(%ebp)
+ movl %ebx, -92(%ebp)
+ movl -120(%ebp), %esi # 4-byte Reload
+ movl %esi, -88(%ebp)
+ movl %edx, -84(%ebp)
+ movl %eax, -80(%ebp)
+ movl %ecx, -76(%ebp)
+ sbbl %edx, %edx
+ movl -116(%ebp), %eax # 4-byte Reload
+ movl %eax, -60(%ebp)
+ movl -144(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -56(%ebp)
+ movl -156(%ebp), %ecx # 4-byte Reload
+ pushl %eax
+ movl %ecx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB144_6
+# BB#5:
+ movl $0, %ebx
+ movl $0, %eax
+ movl $0, %edi
+.LBB144_6:
+ movl %eax, -116(%ebp) # 4-byte Spill
+ sbbl %eax, %eax
+ leal -92(%ebp), %ecx
+ movl %ecx, 8(%esp)
+ leal -72(%ebp), %ecx
+ movl %ecx, 4(%esp)
+ leal -52(%ebp), %ecx
+ movl %ecx, (%esp)
+ andl %eax, %edx
+ movl -124(%ebp), %eax # 4-byte Reload
+ addl %eax, -100(%ebp) # 4-byte Folded Spill
+ adcl %edi, -96(%ebp) # 4-byte Folded Spill
+ movl -108(%ebp), %esi # 4-byte Reload
+ adcl -136(%ebp), %esi # 4-byte Folded Reload
+ movl -116(%ebp), %eax # 4-byte Reload
+ adcl %eax, -104(%ebp) # 4-byte Folded Spill
+ movl -112(%ebp), %edi # 4-byte Reload
+ adcl %ebx, %edi
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, -120(%ebp) # 4-byte Spill
+ andl $1, %edx
+ movl %edx, -116(%ebp) # 4-byte Spill
+ movl -128(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre5L@PLT
+ movl -100(%ebp), %eax # 4-byte Reload
+ addl -32(%ebp), %eax
+ movl %eax, -100(%ebp) # 4-byte Spill
+ movl -96(%ebp), %eax # 4-byte Reload
+ adcl -28(%ebp), %eax
+ movl %eax, -96(%ebp) # 4-byte Spill
+ adcl -24(%ebp), %esi
+ movl %esi, -108(%ebp) # 4-byte Spill
+ movl -104(%ebp), %eax # 4-byte Reload
+ adcl -20(%ebp), %eax
+ movl %eax, -104(%ebp) # 4-byte Spill
+ adcl -16(%ebp), %edi
+ movl %edi, -112(%ebp) # 4-byte Spill
+ movl -120(%ebp), %eax # 4-byte Reload
+ adcl %eax, -116(%ebp) # 4-byte Folded Spill
+ movl -52(%ebp), %ecx
+ movl 8(%ebp), %esi
+ subl (%esi), %ecx
+ movl -48(%ebp), %ebx
+ sbbl 4(%esi), %ebx
+ movl -44(%ebp), %eax
+ sbbl 8(%esi), %eax
+ movl %eax, -120(%ebp) # 4-byte Spill
+ movl -40(%ebp), %edx
+ sbbl 12(%esi), %edx
+ movl -36(%ebp), %edi
+ sbbl 16(%esi), %edi
+ movl 20(%esi), %eax
+ movl %eax, -124(%ebp) # 4-byte Spill
+ sbbl %eax, -100(%ebp) # 4-byte Folded Spill
+ movl 24(%esi), %eax
+ movl %eax, -128(%ebp) # 4-byte Spill
+ sbbl %eax, -96(%ebp) # 4-byte Folded Spill
+ movl 28(%esi), %eax
+ movl %eax, -132(%ebp) # 4-byte Spill
+ sbbl %eax, -108(%ebp) # 4-byte Folded Spill
+ movl 32(%esi), %eax
+ movl %eax, -136(%ebp) # 4-byte Spill
+ sbbl %eax, -104(%ebp) # 4-byte Folded Spill
+ movl 36(%esi), %eax
+ movl %eax, -140(%ebp) # 4-byte Spill
+ sbbl %eax, -112(%ebp) # 4-byte Folded Spill
+ sbbl $0, -116(%ebp) # 4-byte Folded Spill
+ movl 40(%esi), %eax
+ movl %eax, -160(%ebp) # 4-byte Spill
+ subl %eax, %ecx
+ movl 44(%esi), %eax
+ movl %eax, -164(%ebp) # 4-byte Spill
+ sbbl %eax, %ebx
+ movl 48(%esi), %eax
+ movl %eax, -168(%ebp) # 4-byte Spill
+ sbbl %eax, -120(%ebp) # 4-byte Folded Spill
+ movl 52(%esi), %eax
+ movl %eax, -172(%ebp) # 4-byte Spill
+ sbbl %eax, %edx
+ movl 56(%esi), %eax
+ movl %eax, -176(%ebp) # 4-byte Spill
+ sbbl %eax, %edi
+ movl 60(%esi), %eax
+ movl %eax, -180(%ebp) # 4-byte Spill
+ sbbl %eax, -100(%ebp) # 4-byte Folded Spill
+ movl 64(%esi), %eax
+ movl %eax, -144(%ebp) # 4-byte Spill
+ sbbl %eax, -96(%ebp) # 4-byte Folded Spill
+ movl 68(%esi), %eax
+ movl %eax, -148(%ebp) # 4-byte Spill
+ sbbl %eax, -108(%ebp) # 4-byte Folded Spill
+ movl 72(%esi), %eax
+ movl %eax, -152(%ebp) # 4-byte Spill
+ sbbl %eax, -104(%ebp) # 4-byte Folded Spill
+ movl 76(%esi), %eax
+ movl %eax, -156(%ebp) # 4-byte Spill
+ sbbl %eax, -112(%ebp) # 4-byte Folded Spill
+ sbbl $0, -116(%ebp) # 4-byte Folded Spill
+ addl -124(%ebp), %ecx # 4-byte Folded Reload
+ adcl -128(%ebp), %ebx # 4-byte Folded Reload
+ movl %ecx, 20(%esi)
+ movl -120(%ebp), %eax # 4-byte Reload
+ adcl -132(%ebp), %eax # 4-byte Folded Reload
+ movl %ebx, 24(%esi)
+ adcl -136(%ebp), %edx # 4-byte Folded Reload
+ movl %eax, 28(%esi)
+ adcl -140(%ebp), %edi # 4-byte Folded Reload
+ movl %edx, 32(%esi)
+ movl -100(%ebp), %eax # 4-byte Reload
+ adcl -160(%ebp), %eax # 4-byte Folded Reload
+ movl %edi, 36(%esi)
+ movl -96(%ebp), %ecx # 4-byte Reload
+ adcl -164(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 40(%esi)
+ movl -108(%ebp), %eax # 4-byte Reload
+ adcl -168(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 44(%esi)
+ movl -104(%ebp), %ecx # 4-byte Reload
+ adcl -172(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 48(%esi)
+ movl -112(%ebp), %edx # 4-byte Reload
+ adcl -176(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 52(%esi)
+ movl -116(%ebp), %eax # 4-byte Reload
+ adcl -180(%ebp), %eax # 4-byte Folded Reload
+ movl %edx, 56(%esi)
+ movl %eax, 60(%esi)
+ movl -144(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 64(%esi)
+ movl -148(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 68(%esi)
+ movl -152(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 72(%esi)
+ movl -156(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 76(%esi)
+ addl $188, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end144:
+ .size mcl_fpDbl_mulPre10L, .Lfunc_end144-mcl_fpDbl_mulPre10L
+
+ .globl mcl_fpDbl_sqrPre10L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre10L,@function
+mcl_fpDbl_sqrPre10L: # @mcl_fpDbl_sqrPre10L
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $188, %esp
+ calll .L145$pb
+.L145$pb:
+ popl %ebx
+.Ltmp16:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp16-.L145$pb), %ebx
+ movl %ebx, -120(%ebp) # 4-byte Spill
+ movl 12(%ebp), %edi
+ movl %edi, 8(%esp)
+ movl %edi, 4(%esp)
+ movl 8(%ebp), %esi
+ movl %esi, (%esp)
+ calll mcl_fpDbl_mulPre5L@PLT
+ leal 20(%edi), %eax
+ movl %eax, 8(%esp)
+ movl %eax, 4(%esp)
+ leal 40(%esi), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre5L@PLT
+ movl 36(%edi), %eax
+ movl 32(%edi), %ebx
+ movl 28(%edi), %esi
+ movl (%edi), %ecx
+ movl 4(%edi), %edx
+ addl 20(%edi), %ecx
+ adcl 24(%edi), %edx
+ adcl 8(%edi), %esi
+ adcl 12(%edi), %ebx
+ movl %ebx, -124(%ebp) # 4-byte Spill
+ adcl 16(%edi), %eax
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ movl %edi, -128(%ebp) # 4-byte Spill
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ movl %edi, -108(%ebp) # 4-byte Spill
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ movl %edi, -104(%ebp) # 4-byte Spill
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ movl %edi, -100(%ebp) # 4-byte Spill
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ movl %edi, -96(%ebp) # 4-byte Spill
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ sbbl %ebx, %ebx
+ movl %ebx, -116(%ebp) # 4-byte Spill
+ pushl %eax
+ movl %edi, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB145_1
+# BB#2:
+ movl $0, -112(%ebp) # 4-byte Folded Spill
+ jmp .LBB145_3
+.LBB145_1:
+ leal (%ecx,%ecx), %edi
+ movl %edi, -112(%ebp) # 4-byte Spill
+.LBB145_3:
+ movl -96(%ebp), %edi # 4-byte Reload
+ pushl %eax
+ movl %edi, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ movl -124(%ebp), %edi # 4-byte Reload
+ jb .LBB145_4
+# BB#5:
+ movl $0, -96(%ebp) # 4-byte Folded Spill
+ jmp .LBB145_6
+.LBB145_4:
+ movl %edx, %ebx
+ shldl $1, %ecx, %ebx
+ movl %ebx, -96(%ebp) # 4-byte Spill
+.LBB145_6:
+ movl -100(%ebp), %ebx # 4-byte Reload
+ pushl %eax
+ movl %ebx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB145_7
+# BB#8:
+ movl $0, -100(%ebp) # 4-byte Folded Spill
+ jmp .LBB145_9
+.LBB145_7:
+ movl %esi, %ebx
+ shldl $1, %edx, %ebx
+ movl %ebx, -100(%ebp) # 4-byte Spill
+.LBB145_9:
+ movl -104(%ebp), %ebx # 4-byte Reload
+ pushl %eax
+ movl %ebx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB145_10
+# BB#11:
+ movl $0, -104(%ebp) # 4-byte Folded Spill
+ jmp .LBB145_12
+.LBB145_10:
+ movl %edi, %ebx
+ shldl $1, %esi, %ebx
+ movl %ebx, -104(%ebp) # 4-byte Spill
+.LBB145_12:
+ movl -108(%ebp), %ebx # 4-byte Reload
+ pushl %eax
+ movl %ebx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB145_13
+# BB#14:
+ movl $0, -108(%ebp) # 4-byte Folded Spill
+ jmp .LBB145_15
+.LBB145_13:
+ movl %eax, %ebx
+ shldl $1, %edi, %ebx
+ movl %ebx, -108(%ebp) # 4-byte Spill
+.LBB145_15:
+ movl %ecx, -72(%ebp)
+ movl %edx, -68(%ebp)
+ movl %esi, -64(%ebp)
+ movl %edi, -60(%ebp)
+ movl %eax, -56(%ebp)
+ movl %ecx, -92(%ebp)
+ movl %edx, -88(%ebp)
+ movl %esi, -84(%ebp)
+ movl %edi, -80(%ebp)
+ movl %eax, -76(%ebp)
+ movl -128(%ebp), %ecx # 4-byte Reload
+ pushl %eax
+ movl %ecx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB145_16
+# BB#17:
+ movl $0, -124(%ebp) # 4-byte Folded Spill
+ jmp .LBB145_18
+.LBB145_16:
+ shrl $31, %eax
+ movl %eax, -124(%ebp) # 4-byte Spill
+.LBB145_18:
+ leal -52(%ebp), %eax
+ movl %eax, (%esp)
+ leal -72(%ebp), %eax
+ movl %eax, 4(%esp)
+ leal -92(%ebp), %eax
+ movl %eax, 8(%esp)
+ movl -116(%ebp), %esi # 4-byte Reload
+ andl $1, %esi
+ movl -120(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre5L@PLT
+ movl -112(%ebp), %edi # 4-byte Reload
+ addl -32(%ebp), %edi
+ movl -96(%ebp), %eax # 4-byte Reload
+ adcl -28(%ebp), %eax
+ movl %eax, -96(%ebp) # 4-byte Spill
+ movl -100(%ebp), %eax # 4-byte Reload
+ adcl -24(%ebp), %eax
+ movl %eax, -100(%ebp) # 4-byte Spill
+ movl -104(%ebp), %eax # 4-byte Reload
+ adcl -20(%ebp), %eax
+ movl %eax, -104(%ebp) # 4-byte Spill
+ movl -108(%ebp), %eax # 4-byte Reload
+ adcl -16(%ebp), %eax
+ movl %eax, -108(%ebp) # 4-byte Spill
+ adcl -124(%ebp), %esi # 4-byte Folded Reload
+ movl -52(%ebp), %edx
+ movl 8(%ebp), %eax
+ subl (%eax), %edx
+ movl -48(%ebp), %ebx
+ sbbl 4(%eax), %ebx
+ movl -44(%ebp), %ecx
+ sbbl 8(%eax), %ecx
+ movl %ecx, -116(%ebp) # 4-byte Spill
+ movl -40(%ebp), %ecx
+ sbbl 12(%eax), %ecx
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ movl -36(%ebp), %ecx
+ sbbl 16(%eax), %ecx
+ movl %ecx, -120(%ebp) # 4-byte Spill
+ movl 20(%eax), %ecx
+ movl %ecx, -124(%ebp) # 4-byte Spill
+ sbbl %ecx, %edi
+ movl %edi, -112(%ebp) # 4-byte Spill
+ movl 24(%eax), %ecx
+ movl %ecx, -128(%ebp) # 4-byte Spill
+ sbbl %ecx, -96(%ebp) # 4-byte Folded Spill
+ movl 28(%eax), %ecx
+ movl %ecx, -132(%ebp) # 4-byte Spill
+ sbbl %ecx, -100(%ebp) # 4-byte Folded Spill
+ movl 32(%eax), %ecx
+ movl %ecx, -136(%ebp) # 4-byte Spill
+ sbbl %ecx, -104(%ebp) # 4-byte Folded Spill
+ movl 36(%eax), %ecx
+ movl %ecx, -140(%ebp) # 4-byte Spill
+ sbbl %ecx, -108(%ebp) # 4-byte Folded Spill
+ sbbl $0, %esi
+ movl 40(%eax), %ecx
+ movl %ecx, -160(%ebp) # 4-byte Spill
+ subl %ecx, %edx
+ movl 44(%eax), %ecx
+ movl %ecx, -164(%ebp) # 4-byte Spill
+ sbbl %ecx, %ebx
+ movl 48(%eax), %ecx
+ movl %ecx, -168(%ebp) # 4-byte Spill
+ sbbl %ecx, -116(%ebp) # 4-byte Folded Spill
+ movl 52(%eax), %ecx
+ movl %ecx, -172(%ebp) # 4-byte Spill
+ movl -144(%ebp), %edi # 4-byte Reload
+ sbbl %ecx, %edi
+ movl 56(%eax), %ecx
+ movl %ecx, -176(%ebp) # 4-byte Spill
+ sbbl %ecx, -120(%ebp) # 4-byte Folded Spill
+ movl 60(%eax), %ecx
+ movl %ecx, -180(%ebp) # 4-byte Spill
+ sbbl %ecx, -112(%ebp) # 4-byte Folded Spill
+ movl 64(%eax), %ecx
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ sbbl %ecx, -96(%ebp) # 4-byte Folded Spill
+ movl 68(%eax), %ecx
+ movl %ecx, -148(%ebp) # 4-byte Spill
+ sbbl %ecx, -100(%ebp) # 4-byte Folded Spill
+ movl 72(%eax), %ecx
+ movl %ecx, -152(%ebp) # 4-byte Spill
+ sbbl %ecx, -104(%ebp) # 4-byte Folded Spill
+ movl 76(%eax), %ecx
+ movl %ecx, -156(%ebp) # 4-byte Spill
+ sbbl %ecx, -108(%ebp) # 4-byte Folded Spill
+ sbbl $0, %esi
+ addl -124(%ebp), %edx # 4-byte Folded Reload
+ adcl -128(%ebp), %ebx # 4-byte Folded Reload
+ movl %edx, 20(%eax)
+ movl -116(%ebp), %ecx # 4-byte Reload
+ adcl -132(%ebp), %ecx # 4-byte Folded Reload
+ movl %ebx, 24(%eax)
+ adcl -136(%ebp), %edi # 4-byte Folded Reload
+ movl %ecx, 28(%eax)
+ movl -120(%ebp), %edx # 4-byte Reload
+ adcl -140(%ebp), %edx # 4-byte Folded Reload
+ movl %edi, 32(%eax)
+ movl -112(%ebp), %ecx # 4-byte Reload
+ adcl -160(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 36(%eax)
+ movl -96(%ebp), %edx # 4-byte Reload
+ adcl -164(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 40(%eax)
+ movl -100(%ebp), %ecx # 4-byte Reload
+ adcl -168(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 44(%eax)
+ movl -104(%ebp), %edx # 4-byte Reload
+ adcl -172(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 48(%eax)
+ movl -108(%ebp), %ecx # 4-byte Reload
+ adcl -176(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 52(%eax)
+ adcl -180(%ebp), %esi # 4-byte Folded Reload
+ movl %ecx, 56(%eax)
+ movl %esi, 60(%eax)
+ movl -144(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 64(%eax)
+ movl -148(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 68(%eax)
+ movl -152(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 72(%eax)
+ movl -156(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 76(%eax)
+ addl $188, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end145:
+ .size mcl_fpDbl_sqrPre10L, .Lfunc_end145-mcl_fpDbl_sqrPre10L
+
+ .globl mcl_fp_mont10L
+ .align 16, 0x90
+ .type mcl_fp_mont10L,@function
+mcl_fp_mont10L: # @mcl_fp_mont10L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1036, %esp # imm = 0x40C
+ calll .L146$pb
+.L146$pb:
+ popl %ebx
+.Ltmp17:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp17-.L146$pb), %ebx
+ movl 1068(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 992(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ movl 992(%esp), %edi
+ movl 996(%esp), %ebp
+ movl %edi, %eax
+ imull %esi, %eax
+ movl 1032(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 1028(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 1024(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 1020(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1016(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1012(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 1008(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 1004(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1000(%esp), %esi
+ movl %eax, (%esp)
+ leal 944(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ addl 944(%esp), %edi
+ adcl 948(%esp), %ebp
+ adcl 952(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 980(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 984(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl 1064(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 896(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ andl $1, %edi
+ addl 896(%esp), %ebp
+ adcl 900(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 928(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 932(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 936(%esp), %edi
+ sbbl %eax, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 848(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ movl 64(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 848(%esp), %ebp
+ adcl 852(%esp), %esi
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 856(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 860(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 864(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 868(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 872(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 876(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 880(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 884(%esp), %ebp
+ adcl 888(%esp), %edi
+ adcl $0, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 800(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ addl 800(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 832(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ adcl 836(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %esi, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 752(%esp), %ecx
+ movl 1068(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv320x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 752(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 768(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 780(%esp), %esi
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 784(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 704(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 704(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 716(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 728(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ adcl 732(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 736(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 744(%esp), %edi
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 656(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ andl $1, %esi
+ movl %esi, %ecx
+ movl 44(%esp), %eax # 4-byte Reload
+ addl 656(%esp), %eax
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 676(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 688(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 696(%esp), %edi
+ adcl $0, %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 608(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ movl 40(%esp), %ecx # 4-byte Reload
+ addl 608(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 624(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 636(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 640(%esp), %esi
+ adcl 644(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 648(%esp), %edi
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 560(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 560(%esp), %eax
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 572(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 592(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 600(%esp), %edi
+ adcl $0, %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 512(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 512(%esp), %ecx
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ adcl 520(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 548(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 464(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ andl $1, %ebp
+ movl %ebp, %eax
+ addl 464(%esp), %edi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 468(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 472(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 476(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 480(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 484(%esp), %esi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 488(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 492(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 496(%esp), %ebp
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 500(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 504(%esp), %edi
+ adcl $0, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 416(%esp), %ecx
+ movl 1060(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv320x32
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 416(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 432(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 444(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 452(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 368(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 368(%esp), %esi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 380(%esp), %esi
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 384(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 400(%esp), %edi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 320(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 320(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 328(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ adcl 332(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 348(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 360(%esp), %ebp
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 272(%esp), %esi
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 276(%esp), %edi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 288(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 312(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ adcl $0, %ebp
+ movl 1064(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ movl %edi, %ecx
+ addl 224(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 236(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 240(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 264(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 176(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ andl $1, %ebp
+ addl 176(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %edi, %esi
+ adcl 192(%esp), %esi
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 196(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl 1064(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 128(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 128(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 132(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 136(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 140(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ adcl 144(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 152(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 168(%esp), %ebp
+ sbbl %esi, %esi
+ movl 32(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 80(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ andl $1, %esi
+ addl 80(%esp), %edi
+ movl 76(%esp), %eax # 4-byte Reload
+ movl 64(%esp), %ebx # 4-byte Reload
+ adcl 84(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 88(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 92(%esp), %ebx
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 100(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 108(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 112(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 116(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 120(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl 1068(%esp), %edx
+ subl (%edx), %eax
+ sbbl 4(%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl %ebx, %ecx
+ sbbl 8(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ sbbl 12(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ sbbl 16(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ sbbl 20(%edx), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ sbbl 24(%edx), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ sbbl 28(%edx), %ecx
+ movl 68(%esp), %edi # 4-byte Reload
+ sbbl 32(%edx), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ sbbl 36(%edx), %ebp
+ movl %ebp, %edx
+ sbbl $0, %esi
+ andl $1, %esi
+ jne .LBB146_2
+# BB#1:
+ movl %ecx, 48(%esp) # 4-byte Spill
+.LBB146_2:
+ movl %esi, %ecx
+ testb %cl, %cl
+ movl 76(%esp), %esi # 4-byte Reload
+ jne .LBB146_4
+# BB#3:
+ movl %eax, %esi
+.LBB146_4:
+ movl 1056(%esp), %eax
+ movl %esi, (%eax)
+ movl 60(%esp), %edi # 4-byte Reload
+ jne .LBB146_6
+# BB#5:
+ movl 16(%esp), %edi # 4-byte Reload
+.LBB146_6:
+ movl %edi, 4(%eax)
+ jne .LBB146_8
+# BB#7:
+ movl 20(%esp), %ebx # 4-byte Reload
+.LBB146_8:
+ movl %ebx, 8(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl 44(%esp), %ecx # 4-byte Reload
+ jne .LBB146_10
+# BB#9:
+ movl 24(%esp), %ebp # 4-byte Reload
+.LBB146_10:
+ movl %ebp, 12(%eax)
+ jne .LBB146_12
+# BB#11:
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB146_12:
+ movl %ecx, 16(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ jne .LBB146_14
+# BB#13:
+ movl 32(%esp), %ecx # 4-byte Reload
+.LBB146_14:
+ movl %ecx, 20(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ jne .LBB146_16
+# BB#15:
+ movl 56(%esp), %ecx # 4-byte Reload
+.LBB146_16:
+ movl %ecx, 24(%eax)
+ movl 48(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 68(%esp), %ecx # 4-byte Reload
+ jne .LBB146_18
+# BB#17:
+ movl 64(%esp), %ecx # 4-byte Reload
+.LBB146_18:
+ movl %ecx, 32(%eax)
+ movl 72(%esp), %ecx # 4-byte Reload
+ jne .LBB146_20
+# BB#19:
+ movl %edx, %ecx
+.LBB146_20:
+ movl %ecx, 36(%eax)
+ addl $1036, %esp # imm = 0x40C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end146:
+ .size mcl_fp_mont10L, .Lfunc_end146-mcl_fp_mont10L
+
+ .globl mcl_fp_montNF10L
+ .align 16, 0x90
+ .type mcl_fp_montNF10L,@function
+mcl_fp_montNF10L: # @mcl_fp_montNF10L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1020, %esp # imm = 0x3FC
+ calll .L147$pb
+.L147$pb:
+ popl %ebx
+.Ltmp18:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp18-.L147$pb), %ebx
+ movl 1052(%esp), %eax
+ movl -4(%eax), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 976(%esp), %ecx
+ movl 1044(%esp), %edx
+ calll .LmulPv320x32
+ movl 976(%esp), %edi
+ movl 980(%esp), %esi
+ movl %edi, %eax
+ imull %ebp, %eax
+ movl 1016(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1012(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1008(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1004(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 1000(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 996(%esp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 992(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 988(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 984(%esp), %ebp
+ movl %eax, (%esp)
+ leal 928(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 928(%esp), %edi
+ adcl 932(%esp), %esi
+ adcl 936(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 940(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 944(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 948(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 952(%esp), %edi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 880(%esp), %ecx
+ movl 1044(%esp), %edx
+ calll .LmulPv320x32
+ movl 920(%esp), %ecx
+ addl 880(%esp), %esi
+ adcl 884(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ adcl 900(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl %esi, %eax
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 832(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 832(%esp), %esi
+ adcl 836(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %edi # 4-byte Reload
+ adcl 848(%esp), %edi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 856(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 872(%esp), %esi
+ movl 1048(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 784(%esp), %ecx
+ movl 1044(%esp), %edx
+ calll .LmulPv320x32
+ movl 824(%esp), %ecx
+ addl 784(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 796(%esp), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 808(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 820(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 736(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 736(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 760(%esp), %edi
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 764(%esp), %ebp
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 768(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 688(%esp), %ecx
+ movl 1044(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv320x32
+ movl 728(%esp), %eax
+ movl 28(%esp), %edx # 4-byte Reload
+ addl 688(%esp), %edx
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 692(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %ecx # 4-byte Reload
+ adcl 696(%esp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 700(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 704(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 708(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 712(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ adcl 716(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 720(%esp), %ebp
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 724(%esp), %esi
+ adcl $0, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %edx, %eax
+ movl %edx, %edi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 640(%esp), %ecx
+ movl 1052(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv320x32
+ addl 640(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 656(%esp), %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 672(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl 676(%esp), %esi
+ movl %esi, %ebp
+ movl 28(%esp), %esi # 4-byte Reload
+ adcl 680(%esp), %esi
+ movl 1048(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 592(%esp), %ecx
+ movl 1044(%esp), %edx
+ calll .LmulPv320x32
+ movl 632(%esp), %edx
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 592(%esp), %ecx
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 604(%esp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 624(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ adcl 628(%esp), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 544(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 544(%esp), %esi
+ movl 24(%esp), %edi # 4-byte Reload
+ adcl 548(%esp), %edi
+ movl 40(%esp), %esi # 4-byte Reload
+ adcl 552(%esp), %esi
+ movl 36(%esp), %ebp # 4-byte Reload
+ adcl 556(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 496(%esp), %ecx
+ movl 1044(%esp), %edx
+ calll .LmulPv320x32
+ movl 536(%esp), %edx
+ addl 496(%esp), %edi
+ adcl 500(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl 504(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %esi # 4-byte Reload
+ adcl 528(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 448(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 448(%esp), %edi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 456(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 464(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 480(%esp), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %esi # 4-byte Reload
+ adcl 488(%esp), %esi
+ movl 1048(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 400(%esp), %ecx
+ movl 1044(%esp), %edx
+ calll .LmulPv320x32
+ movl 440(%esp), %eax
+ movl 40(%esp), %ecx # 4-byte Reload
+ addl 400(%esp), %ecx
+ adcl 404(%esp), %ebp
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 408(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 412(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 416(%esp), %edi
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 420(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %edx # 4-byte Reload
+ adcl 424(%esp), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl 428(%esp), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 432(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 436(%esp), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 352(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 352(%esp), %esi
+ adcl 356(%esp), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 360(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %edi, %esi
+ adcl 368(%esp), %esi
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 372(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 1044(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv320x32
+ movl 344(%esp), %edx
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 304(%esp), %ecx
+ adcl 308(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 316(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ adcl 320(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %ebp # 4-byte Reload
+ adcl 324(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 332(%esp), %esi
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 256(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 256(%esp), %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 272(%esp), %edi
+ adcl 276(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ adcl 284(%esp), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %esi # 4-byte Reload
+ adcl 288(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 208(%esp), %ecx
+ movl 1044(%esp), %edx
+ calll .LmulPv320x32
+ movl 248(%esp), %edx
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 208(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 220(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl 224(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 236(%esp), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 160(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 160(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ebp, %edi
+ adcl 176(%esp), %edi
+ movl 28(%esp), %esi # 4-byte Reload
+ adcl 180(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 192(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 112(%esp), %ecx
+ movl 1044(%esp), %edx
+ calll .LmulPv320x32
+ movl 152(%esp), %edx
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 112(%esp), %ecx
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 120(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 124(%esp), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 128(%esp), %esi
+ movl %esi, %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 132(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 136(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ adcl 140(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 144(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %ebp
+ movl %eax, (%esp)
+ leal 64(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 64(%esp), %ebp
+ movl %edi, %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ movl 32(%esp), %ebx # 4-byte Reload
+ adcl 68(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 72(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 76(%esp), %ebx
+ adcl 80(%esp), %ebp
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 84(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 92(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 96(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 100(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 1052(%esp), %edi
+ subl (%edi), %edx
+ sbbl 4(%edi), %ecx
+ movl %ebx, %eax
+ sbbl 8(%edi), %eax
+ movl %ebp, %esi
+ sbbl 12(%edi), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ sbbl 16(%edi), %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ movl 24(%esp), %esi # 4-byte Reload
+ sbbl 20(%edi), %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ movl 40(%esp), %esi # 4-byte Reload
+ sbbl 24(%edi), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ sbbl 28(%edi), %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ sbbl 32(%edi), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ sbbl 36(%edi), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl %esi, %edi
+ sarl $31, %edi
+ testl %edi, %edi
+ movl 60(%esp), %edi # 4-byte Reload
+ js .LBB147_2
+# BB#1:
+ movl %edx, %edi
+.LBB147_2:
+ movl 1040(%esp), %edx
+ movl %edi, (%edx)
+ movl 52(%esp), %edi # 4-byte Reload
+ js .LBB147_4
+# BB#3:
+ movl %ecx, %edi
+.LBB147_4:
+ movl %edi, 4(%edx)
+ js .LBB147_6
+# BB#5:
+ movl %eax, %ebx
+.LBB147_6:
+ movl %ebx, 8(%edx)
+ js .LBB147_8
+# BB#7:
+ movl 4(%esp), %ebp # 4-byte Reload
+.LBB147_8:
+ movl %ebp, 12(%edx)
+ movl 44(%esp), %esi # 4-byte Reload
+ movl 24(%esp), %eax # 4-byte Reload
+ js .LBB147_10
+# BB#9:
+ movl 8(%esp), %esi # 4-byte Reload
+.LBB147_10:
+ movl %esi, 16(%edx)
+ js .LBB147_12
+# BB#11:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB147_12:
+ movl %eax, 20(%edx)
+ movl 40(%esp), %eax # 4-byte Reload
+ js .LBB147_14
+# BB#13:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB147_14:
+ movl %eax, 24(%edx)
+ movl 36(%esp), %eax # 4-byte Reload
+ js .LBB147_16
+# BB#15:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB147_16:
+ movl %eax, 28(%edx)
+ movl 48(%esp), %eax # 4-byte Reload
+ js .LBB147_18
+# BB#17:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB147_18:
+ movl %eax, 32(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB147_20
+# BB#19:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB147_20:
+ movl %eax, 36(%edx)
+ addl $1020, %esp # imm = 0x3FC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end147:
+ .size mcl_fp_montNF10L, .Lfunc_end147-mcl_fp_montNF10L
+
+ .globl mcl_fp_montRed10L
+ .align 16, 0x90
+ .type mcl_fp_montRed10L,@function
+mcl_fp_montRed10L: # @mcl_fp_montRed10L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $604, %esp # imm = 0x25C
+ calll .L148$pb
+.L148$pb:
+ popl %eax
+.Ltmp19:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp19-.L148$pb), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 632(%esp), %edx
+ movl -4(%edx), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 628(%esp), %ecx
+ movl (%ecx), %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 4(%ecx), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ imull %esi, %ebx
+ movl 76(%ecx), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 72(%ecx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%ecx), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 64(%ecx), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 60(%ecx), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 56(%ecx), %esi
+ movl %esi, 120(%esp) # 4-byte Spill
+ movl 52(%ecx), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 48(%ecx), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 44(%ecx), %esi
+ movl %esi, 124(%esp) # 4-byte Spill
+ movl 40(%ecx), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 36(%ecx), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 32(%ecx), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 28(%ecx), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 24(%ecx), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 20(%ecx), %ebp
+ movl 16(%ecx), %edi
+ movl 12(%ecx), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 8(%ecx), %esi
+ movl (%edx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 36(%edx), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 32(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 28(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 24(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 16(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 12(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 8(%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 4(%edx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl %ebx, (%esp)
+ leal 560(%esp), %ecx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 560(%esp), %eax
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 564(%esp), %ecx
+ adcl 568(%esp), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 576(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 580(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ sbbl %eax, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 512(%esp), %ecx
+ movl 632(%esp), %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ movl 68(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 512(%esp), %esi
+ movl 4(%esp), %edx # 4-byte Reload
+ adcl 516(%esp), %edx
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 520(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 524(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 528(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 532(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 536(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 540(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 544(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 548(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 552(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 464(%esp), %ecx
+ movl 632(%esp), %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ addl 464(%esp), %esi
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 468(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 492(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 416(%esp), %ecx
+ movl 632(%esp), %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ movl 52(%esp), %eax # 4-byte Reload
+ addl 416(%esp), %eax
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 420(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 440(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ movl 60(%esp), %edi # 4-byte Reload
+ imull %edi, %eax
+ movl %eax, (%esp)
+ leal 368(%esp), %ecx
+ movl 632(%esp), %eax
+ movl %eax, %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ addl 368(%esp), %ebp
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 372(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 404(%esp), %ebp
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull %edi, %eax
+ movl %eax, (%esp)
+ leal 320(%esp), %ecx
+ movl 632(%esp), %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ addl 320(%esp), %esi
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 324(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %edi # 4-byte Reload
+ adcl 344(%esp), %edi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 352(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 360(%esp), %esi
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 632(%esp), %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ addl 272(%esp), %ebp
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 276(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl 288(%esp), %ebp
+ adcl 292(%esp), %edi
+ movl %edi, 124(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl 296(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 308(%esp), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 312(%esp), %esi
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 632(%esp), %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ movl 96(%esp), %eax # 4-byte Reload
+ addl 224(%esp), %eax
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 232(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 236(%esp), %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 240(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ adcl 244(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 248(%esp), %ebp
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 252(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 256(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ adcl 260(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 264(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ movl %eax, %edi
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 176(%esp), %ecx
+ movl 632(%esp), %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ addl 176(%esp), %edi
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 180(%esp), %ecx
+ movl 112(%esp), %edi # 4-byte Reload
+ adcl 184(%esp), %edi
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 196(%esp), %ebp
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 128(%esp), %ecx
+ movl 632(%esp), %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ addl 128(%esp), %esi
+ movl %edi, %eax
+ adcl 132(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 136(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 116(%esp), %esi # 4-byte Reload
+ adcl 140(%esp), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ adcl 144(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl %ebp, %edx
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 152(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 76(%esp), %ebx # 4-byte Reload
+ adcl 164(%esp), %ebx
+ movl %ebx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ subl 12(%esp), %edi # 4-byte Folded Reload
+ sbbl 8(%esp), %ecx # 4-byte Folded Reload
+ sbbl 16(%esp), %esi # 4-byte Folded Reload
+ sbbl 20(%esp), %edx # 4-byte Folded Reload
+ movl 120(%esp), %eax # 4-byte Reload
+ sbbl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ sbbl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ sbbl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ sbbl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ sbbl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %ebx # 4-byte Reload
+ sbbl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 96(%esp) # 4-byte Spill
+ sbbl $0, %eax
+ andl $1, %eax
+ jne .LBB148_2
+# BB#1:
+ movl %edx, 80(%esp) # 4-byte Spill
+.LBB148_2:
+ testb %al, %al
+ movl 112(%esp), %edx # 4-byte Reload
+ jne .LBB148_4
+# BB#3:
+ movl %edi, %edx
+.LBB148_4:
+ movl 624(%esp), %edi
+ movl %edx, (%edi)
+ movl 108(%esp), %edx # 4-byte Reload
+ jne .LBB148_6
+# BB#5:
+ movl %ecx, 124(%esp) # 4-byte Spill
+.LBB148_6:
+ movl 124(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%edi)
+ movl 116(%esp), %ecx # 4-byte Reload
+ jne .LBB148_8
+# BB#7:
+ movl %esi, %ecx
+.LBB148_8:
+ movl %ecx, 8(%edi)
+ movl 80(%esp), %eax # 4-byte Reload
+ movl %eax, 12(%edi)
+ movl 76(%esp), %ecx # 4-byte Reload
+ movl 120(%esp), %eax # 4-byte Reload
+ jne .LBB148_10
+# BB#9:
+ movl 64(%esp), %eax # 4-byte Reload
+.LBB148_10:
+ movl %eax, 16(%edi)
+ movl 84(%esp), %eax # 4-byte Reload
+ movl 104(%esp), %ebp # 4-byte Reload
+ jne .LBB148_12
+# BB#11:
+ movl 68(%esp), %ebp # 4-byte Reload
+.LBB148_12:
+ movl %ebp, 20(%edi)
+ movl 88(%esp), %ebx # 4-byte Reload
+ jne .LBB148_14
+# BB#13:
+ movl 72(%esp), %ebx # 4-byte Reload
+.LBB148_14:
+ movl %ebx, 24(%edi)
+ jne .LBB148_16
+# BB#15:
+ movl 92(%esp), %edx # 4-byte Reload
+.LBB148_16:
+ movl %edx, 28(%edi)
+ jne .LBB148_18
+# BB#17:
+ movl 100(%esp), %ecx # 4-byte Reload
+.LBB148_18:
+ movl %ecx, 32(%edi)
+ jne .LBB148_20
+# BB#19:
+ movl 96(%esp), %eax # 4-byte Reload
+.LBB148_20:
+ movl %eax, 36(%edi)
+ addl $604, %esp # imm = 0x25C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end148:
+ .size mcl_fp_montRed10L, .Lfunc_end148-mcl_fp_montRed10L
+
+ .globl mcl_fp_addPre10L
+ .align 16, 0x90
+ .type mcl_fp_addPre10L,@function
+mcl_fp_addPre10L: # @mcl_fp_addPre10L
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 20(%esp), %ecx
+ addl (%ecx), %edx
+ adcl 4(%ecx), %esi
+ movl 8(%eax), %edi
+ adcl 8(%ecx), %edi
+ movl 16(%esp), %ebx
+ movl %edx, (%ebx)
+ movl 12(%ecx), %edx
+ movl %esi, 4(%ebx)
+ movl 16(%ecx), %esi
+ adcl 12(%eax), %edx
+ adcl 16(%eax), %esi
+ movl %edi, 8(%ebx)
+ movl 20(%eax), %edi
+ movl %edx, 12(%ebx)
+ movl 20(%ecx), %edx
+ adcl %edi, %edx
+ movl 24(%eax), %edi
+ movl %esi, 16(%ebx)
+ movl 24(%ecx), %esi
+ adcl %edi, %esi
+ movl 28(%eax), %edi
+ movl %edx, 20(%ebx)
+ movl 28(%ecx), %edx
+ adcl %edi, %edx
+ movl 32(%eax), %edi
+ movl %esi, 24(%ebx)
+ movl 32(%ecx), %esi
+ adcl %edi, %esi
+ movl %edx, 28(%ebx)
+ movl %esi, 32(%ebx)
+ movl 36(%eax), %eax
+ movl 36(%ecx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 36(%ebx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end149:
+ .size mcl_fp_addPre10L, .Lfunc_end149-mcl_fp_addPre10L
+
+ .globl mcl_fp_subPre10L
+ .align 16, 0x90
+ .type mcl_fp_subPre10L,@function
+mcl_fp_subPre10L: # @mcl_fp_subPre10L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %eax, %eax
+ movl 28(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edx), %ebx
+ movl 20(%esp), %ebp
+ movl %esi, (%ebp)
+ movl 12(%ecx), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ebp)
+ movl 16(%ecx), %edi
+ sbbl 16(%edx), %edi
+ movl %ebx, 8(%ebp)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%ebp)
+ movl 20(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %edi, 16(%ebp)
+ movl 24(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 28(%edx), %ebx
+ movl %esi, 20(%ebp)
+ movl 28(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 32(%edx), %ebx
+ movl %edi, 24(%ebp)
+ movl 32(%ecx), %edi
+ sbbl %ebx, %edi
+ movl %esi, 28(%ebp)
+ movl %edi, 32(%ebp)
+ movl 36(%edx), %edx
+ movl 36(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 36(%ebp)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end150:
+ .size mcl_fp_subPre10L, .Lfunc_end150-mcl_fp_subPre10L
+
+ .globl mcl_fp_shr1_10L
+ .align 16, 0x90
+ .type mcl_fp_shr1_10L,@function
+mcl_fp_shr1_10L: # @mcl_fp_shr1_10L
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl 8(%esp), %esi
+ movl %ecx, (%esi)
+ movl 8(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 4(%esi)
+ movl 12(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 8(%esi)
+ movl 16(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 12(%esi)
+ movl 20(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 16(%esi)
+ movl 24(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 20(%esi)
+ movl 28(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 24(%esi)
+ movl 32(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 28(%esi)
+ movl 36(%eax), %eax
+ shrdl $1, %eax, %ecx
+ movl %ecx, 32(%esi)
+ shrl %eax
+ movl %eax, 36(%esi)
+ popl %esi
+ retl
+.Lfunc_end151:
+ .size mcl_fp_shr1_10L, .Lfunc_end151-mcl_fp_shr1_10L
+
+ .globl mcl_fp_add10L
+ .align 16, 0x90
+ .type mcl_fp_add10L,@function
+mcl_fp_add10L: # @mcl_fp_add10L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $24, %esp
+ movl 52(%esp), %edi
+ movl (%edi), %ecx
+ movl 4(%edi), %eax
+ movl 48(%esp), %ebx
+ addl (%ebx), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ adcl 4(%ebx), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 8(%edi), %eax
+ adcl 8(%ebx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 12(%ebx), %ecx
+ movl 16(%ebx), %eax
+ adcl 12(%edi), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl 16(%edi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 20(%ebx), %eax
+ adcl 20(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 24(%ebx), %esi
+ adcl 24(%edi), %esi
+ movl 28(%ebx), %ebp
+ adcl 28(%edi), %ebp
+ movl 32(%ebx), %edx
+ adcl 32(%edi), %edx
+ movl 36(%ebx), %ecx
+ adcl 36(%edi), %ecx
+ movl 44(%esp), %edi
+ movl (%esp), %ebx # 4-byte Reload
+ movl %ebx, (%edi)
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 4(%edi)
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 8(%edi)
+ movl 12(%esp), %eax # 4-byte Reload
+ movl %eax, 12(%edi)
+ movl 8(%esp), %eax # 4-byte Reload
+ movl %eax, 16(%edi)
+ movl 4(%esp), %eax # 4-byte Reload
+ movl %eax, 20(%edi)
+ movl %esi, 24(%edi)
+ movl %ebp, 28(%edi)
+ movl %edx, 32(%edi)
+ movl %ecx, 36(%edi)
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl 56(%esp), %edi
+ subl (%edi), %ebx
+ movl %ebx, (%esp) # 4-byte Spill
+ movl 20(%esp), %ebx # 4-byte Reload
+ sbbl 4(%edi), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 16(%esp), %ebx # 4-byte Reload
+ sbbl 8(%edi), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %ebx # 4-byte Reload
+ sbbl 12(%edi), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 8(%esp), %ebx # 4-byte Reload
+ sbbl 16(%edi), %ebx
+ movl %ebx, 8(%esp) # 4-byte Spill
+ movl 4(%esp), %ebx # 4-byte Reload
+ sbbl 20(%edi), %ebx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ sbbl 24(%edi), %esi
+ sbbl 28(%edi), %ebp
+ sbbl 32(%edi), %edx
+ sbbl 36(%edi), %ecx
+ sbbl $0, %eax
+ testb $1, %al
+ jne .LBB152_2
+# BB#1: # %nocarry
+ movl (%esp), %edi # 4-byte Reload
+ movl 44(%esp), %ebx
+ movl %edi, (%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl 8(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%ebx)
+ movl 4(%esp), %eax # 4-byte Reload
+ movl %eax, 20(%ebx)
+ movl %esi, 24(%ebx)
+ movl %ebp, 28(%ebx)
+ movl %edx, 32(%ebx)
+ movl %ecx, 36(%ebx)
+.LBB152_2: # %carry
+ addl $24, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end152:
+ .size mcl_fp_add10L, .Lfunc_end152-mcl_fp_add10L
+
+ .globl mcl_fp_addNF10L
+ .align 16, 0x90
+ .type mcl_fp_addNF10L,@function
+mcl_fp_addNF10L: # @mcl_fp_addNF10L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $72, %esp
+ movl 100(%esp), %ecx
+ movl (%ecx), %eax
+ movl 4(%ecx), %esi
+ movl 96(%esp), %edx
+ addl (%edx), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 4(%edx), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 36(%ecx), %edi
+ movl 32(%ecx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 28(%ecx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 24(%ecx), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 20(%ecx), %ebp
+ movl 16(%ecx), %ebx
+ movl 12(%ecx), %eax
+ movl 8(%ecx), %esi
+ adcl 8(%edx), %esi
+ adcl 12(%edx), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 16(%edx), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ adcl 20(%edx), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 24(%edx), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 28(%edx), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 32(%edx), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl %esi, %ecx
+ adcl 36(%edx), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 104(%esp), %edi
+ movl 52(%esp), %edx # 4-byte Reload
+ subl (%edi), %edx
+ movl 56(%esp), %esi # 4-byte Reload
+ sbbl 4(%edi), %esi
+ movl %esi, (%esp) # 4-byte Spill
+ movl %ecx, %esi
+ sbbl 8(%edi), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ sbbl 12(%edi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ sbbl 16(%edi), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ sbbl 20(%edi), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ movl %esi, %eax
+ movl %esi, %ebp
+ sbbl 24(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ sbbl 28(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ movl %esi, %eax
+ movl %esi, %ebx
+ sbbl 32(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ movl %eax, %esi
+ sbbl 36(%edi), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl %esi, %edi
+ movl 52(%esp), %esi # 4-byte Reload
+ sarl $31, %edi
+ testl %edi, %edi
+ js .LBB153_2
+# BB#1:
+ movl %edx, %esi
+.LBB153_2:
+ movl 92(%esp), %edx
+ movl %esi, (%edx)
+ movl 56(%esp), %esi # 4-byte Reload
+ js .LBB153_4
+# BB#3:
+ movl (%esp), %esi # 4-byte Reload
+.LBB153_4:
+ movl %esi, 4(%edx)
+ movl %ebp, %edi
+ movl 40(%esp), %esi # 4-byte Reload
+ js .LBB153_6
+# BB#5:
+ movl 4(%esp), %ecx # 4-byte Reload
+.LBB153_6:
+ movl %ecx, 8(%edx)
+ movl %ebx, %ecx
+ movl 44(%esp), %ebp # 4-byte Reload
+ js .LBB153_8
+# BB#7:
+ movl 8(%esp), %esi # 4-byte Reload
+.LBB153_8:
+ movl %esi, 12(%edx)
+ movl 68(%esp), %esi # 4-byte Reload
+ movl 48(%esp), %ebx # 4-byte Reload
+ js .LBB153_10
+# BB#9:
+ movl 12(%esp), %ebp # 4-byte Reload
+.LBB153_10:
+ movl %ebp, 16(%edx)
+ js .LBB153_12
+# BB#11:
+ movl 16(%esp), %ebx # 4-byte Reload
+.LBB153_12:
+ movl %ebx, 20(%edx)
+ js .LBB153_14
+# BB#13:
+ movl 20(%esp), %edi # 4-byte Reload
+.LBB153_14:
+ movl %edi, 24(%edx)
+ js .LBB153_16
+# BB#15:
+ movl 24(%esp), %esi # 4-byte Reload
+.LBB153_16:
+ movl %esi, 28(%edx)
+ js .LBB153_18
+# BB#17:
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB153_18:
+ movl %ecx, 32(%edx)
+ js .LBB153_20
+# BB#19:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB153_20:
+ movl %eax, 36(%edx)
+ addl $72, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end153:
+ .size mcl_fp_addNF10L, .Lfunc_end153-mcl_fp_addNF10L
+
+ .globl mcl_fp_sub10L
+ .align 16, 0x90
+ .type mcl_fp_sub10L,@function
+mcl_fp_sub10L: # @mcl_fp_sub10L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $32, %esp
+ movl 56(%esp), %esi
+ movl (%esi), %ecx
+ movl 4(%esi), %eax
+ xorl %ebx, %ebx
+ movl 60(%esp), %edi
+ subl (%edi), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ sbbl 4(%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ sbbl 12(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ sbbl 16(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 20(%esi), %edx
+ sbbl 20(%edi), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 24(%esi), %ecx
+ sbbl 24(%edi), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 28(%esi), %eax
+ sbbl 28(%edi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 32(%esi), %ebp
+ sbbl 32(%edi), %ebp
+ movl 36(%esi), %esi
+ sbbl 36(%edi), %esi
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 52(%esp), %ebx
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, (%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%ebx)
+ movl %edx, 20(%ebx)
+ movl %ecx, 24(%ebx)
+ movl %eax, 28(%ebx)
+ movl %ebp, 32(%ebx)
+ movl %esi, 36(%ebx)
+ je .LBB154_2
+# BB#1: # %carry
+ movl %esi, %edi
+ movl 64(%esp), %esi
+ movl 12(%esp), %ecx # 4-byte Reload
+ addl (%esi), %ecx
+ movl %ecx, (%ebx)
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 24(%esp), %ecx # 4-byte Reload
+ adcl 8(%esi), %ecx
+ movl 12(%esi), %eax
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl 24(%esi), %ecx
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl 28(%esi), %eax
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%ebx)
+ movl %eax, 28(%ebx)
+ movl 32(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 32(%ebx)
+ movl 36(%esi), %eax
+ adcl %edi, %eax
+ movl %eax, 36(%ebx)
+.LBB154_2: # %nocarry
+ addl $32, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end154:
+ .size mcl_fp_sub10L, .Lfunc_end154-mcl_fp_sub10L
+
+ .globl mcl_fp_subNF10L
+ .align 16, 0x90
+ .type mcl_fp_subNF10L,@function
+mcl_fp_subNF10L: # @mcl_fp_subNF10L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $56, %esp
+ movl 80(%esp), %eax
+ movl 36(%eax), %esi
+ movl (%eax), %edi
+ movl 4(%eax), %edx
+ movl 84(%esp), %ecx
+ subl (%ecx), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ sbbl 4(%ecx), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 32(%eax), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 28(%eax), %edi
+ movl 24(%eax), %ebx
+ movl 20(%eax), %ebp
+ movl 16(%eax), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 12(%eax), %edx
+ movl 8(%eax), %eax
+ sbbl 8(%ecx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ sbbl 12(%ecx), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ sbbl 16(%ecx), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ sbbl 20(%ecx), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ sbbl 24(%ecx), %ebx
+ movl %ebx, 32(%esp) # 4-byte Spill
+ sbbl 28(%ecx), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ sbbl 32(%ecx), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ sbbl 36(%ecx), %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl %esi, %eax
+ sarl $31, %eax
+ movl %eax, %edx
+ addl %edx, %edx
+ movl %eax, %ecx
+ adcl %ecx, %ecx
+ movl %esi, %ebx
+ shrl $31, %ebx
+ orl %edx, %ebx
+ movl 88(%esp), %edi
+ movl 20(%edi), %edx
+ andl %ecx, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 12(%edi), %edx
+ andl %ecx, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ andl 4(%edi), %ecx
+ movl 16(%edi), %edx
+ andl %ebx, %edx
+ movl %edx, (%esp) # 4-byte Spill
+ movl 8(%edi), %edx
+ andl %ebx, %edx
+ andl (%edi), %ebx
+ movl 36(%edi), %esi
+ andl %eax, %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ movl 32(%edi), %ebp
+ andl %eax, %ebp
+ movl 28(%edi), %esi
+ andl %eax, %esi
+ andl 24(%edi), %eax
+ addl 36(%esp), %ebx # 4-byte Folded Reload
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl 76(%esp), %edi
+ movl %ebx, (%edi)
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 4(%edi)
+ movl 4(%esp), %ecx # 4-byte Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 8(%edi)
+ movl (%esp), %edx # 4-byte Reload
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 12(%edi)
+ movl 12(%esp), %ecx # 4-byte Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 16(%edi)
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 20(%edi)
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ movl %eax, 24(%edi)
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ movl %esi, 28(%edi)
+ movl %ebp, 32(%edi)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%edi)
+ addl $56, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end155:
+ .size mcl_fp_subNF10L, .Lfunc_end155-mcl_fp_subNF10L
+
+ .globl mcl_fpDbl_add10L
+ .align 16, 0x90
+ .type mcl_fpDbl_add10L,@function
+mcl_fpDbl_add10L: # @mcl_fpDbl_add10L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $72, %esp
+ movl 100(%esp), %edx
+ movl 96(%esp), %edi
+ movl 12(%edi), %esi
+ movl 16(%edi), %ecx
+ movl 8(%edx), %ebx
+ movl (%edx), %ebp
+ addl (%edi), %ebp
+ movl 92(%esp), %eax
+ movl %ebp, (%eax)
+ movl 4(%edx), %ebp
+ adcl 4(%edi), %ebp
+ adcl 8(%edi), %ebx
+ adcl 12(%edx), %esi
+ adcl 16(%edx), %ecx
+ movl %ebp, 4(%eax)
+ movl 48(%edx), %ebp
+ movl %ebx, 8(%eax)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%eax)
+ movl 20(%edi), %esi
+ adcl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %ecx, 16(%eax)
+ movl 24(%edi), %ecx
+ adcl %ebx, %ecx
+ movl 28(%edx), %ebx
+ movl %esi, 20(%eax)
+ movl 28(%edi), %esi
+ adcl %ebx, %esi
+ movl 32(%edx), %ebx
+ movl %ecx, 24(%eax)
+ movl 32(%edi), %ecx
+ adcl %ebx, %ecx
+ movl 36(%edx), %ebx
+ movl %esi, 28(%eax)
+ movl 36(%edi), %esi
+ adcl %ebx, %esi
+ movl 40(%edx), %ebx
+ movl %ecx, 32(%eax)
+ movl 40(%edi), %ecx
+ adcl %ebx, %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 44(%edx), %ebx
+ movl %esi, 36(%eax)
+ movl 44(%edi), %eax
+ adcl %ebx, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 48(%edi), %eax
+ adcl %ebp, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 52(%edx), %eax
+ movl 52(%edi), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 56(%edx), %eax
+ movl 56(%edi), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 60(%edx), %eax
+ movl 60(%edi), %ecx
+ adcl %eax, %ecx
+ movl 64(%edx), %esi
+ movl 64(%edi), %eax
+ adcl %esi, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 68(%edx), %ebx
+ movl 68(%edi), %esi
+ adcl %ebx, %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 72(%edx), %ebx
+ movl 72(%edi), %ebp
+ adcl %ebx, %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 76(%edx), %edx
+ movl 76(%edi), %edi
+ adcl %edx, %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ sbbl %edx, %edx
+ andl $1, %edx
+ movl 104(%esp), %ebx
+ movl 64(%esp), %edi # 4-byte Reload
+ subl (%ebx), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ sbbl 4(%ebx), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ sbbl 8(%ebx), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ sbbl 12(%ebx), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ sbbl 16(%ebx), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl %ecx, %edi
+ sbbl 20(%ebx), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ sbbl 24(%ebx), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ sbbl 28(%ebx), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ movl 36(%esp), %ebp # 4-byte Reload
+ sbbl 32(%ebx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebp, %edi
+ sbbl 36(%ebx), %edi
+ sbbl $0, %edx
+ andl $1, %edx
+ jne .LBB156_2
+# BB#1:
+ movl %edi, %ebp
+.LBB156_2:
+ testb %dl, %dl
+ movl 64(%esp), %edx # 4-byte Reload
+ movl 60(%esp), %esi # 4-byte Reload
+ movl 56(%esp), %edi # 4-byte Reload
+ movl 52(%esp), %ebx # 4-byte Reload
+ jne .LBB156_4
+# BB#3:
+ movl (%esp), %ecx # 4-byte Reload
+ movl 4(%esp), %esi # 4-byte Reload
+ movl 8(%esp), %edi # 4-byte Reload
+ movl 12(%esp), %ebx # 4-byte Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 20(%esp), %edx # 4-byte Reload
+.LBB156_4:
+ movl 92(%esp), %eax
+ movl %edx, 40(%eax)
+ movl 68(%esp), %edx # 4-byte Reload
+ movl %edx, 44(%eax)
+ movl %ebx, 48(%eax)
+ movl %edi, 52(%eax)
+ movl %esi, 56(%eax)
+ movl %ecx, 60(%eax)
+ movl 44(%esp), %edx # 4-byte Reload
+ movl 40(%esp), %ecx # 4-byte Reload
+ jne .LBB156_6
+# BB#5:
+ movl 24(%esp), %ecx # 4-byte Reload
+.LBB156_6:
+ movl %ecx, 64(%eax)
+ movl 48(%esp), %ecx # 4-byte Reload
+ jne .LBB156_8
+# BB#7:
+ movl 28(%esp), %edx # 4-byte Reload
+.LBB156_8:
+ movl %edx, 68(%eax)
+ jne .LBB156_10
+# BB#9:
+ movl 32(%esp), %ecx # 4-byte Reload
+.LBB156_10:
+ movl %ecx, 72(%eax)
+ movl %ebp, 76(%eax)
+ addl $72, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end156:
+ .size mcl_fpDbl_add10L, .Lfunc_end156-mcl_fpDbl_add10L
+
+ .globl mcl_fpDbl_sub10L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub10L,@function
+mcl_fpDbl_sub10L: # @mcl_fpDbl_sub10L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $60, %esp
+ movl 84(%esp), %ebp
+ movl (%ebp), %edx
+ movl 4(%ebp), %esi
+ movl 88(%esp), %eax
+ subl (%eax), %edx
+ sbbl 4(%eax), %esi
+ movl 8(%ebp), %edi
+ sbbl 8(%eax), %edi
+ movl 80(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 12(%ebp), %edx
+ sbbl 12(%eax), %edx
+ movl %esi, 4(%ecx)
+ movl 16(%ebp), %esi
+ sbbl 16(%eax), %esi
+ movl %edi, 8(%ecx)
+ movl 20(%eax), %edi
+ movl %edx, 12(%ecx)
+ movl 20(%ebp), %edx
+ sbbl %edi, %edx
+ movl 24(%eax), %edi
+ movl %esi, 16(%ecx)
+ movl 24(%ebp), %esi
+ sbbl %edi, %esi
+ movl 28(%eax), %edi
+ movl %edx, 20(%ecx)
+ movl 28(%ebp), %edx
+ sbbl %edi, %edx
+ movl 32(%eax), %edi
+ movl %esi, 24(%ecx)
+ movl 32(%ebp), %esi
+ sbbl %edi, %esi
+ movl 36(%eax), %edi
+ movl %edx, 28(%ecx)
+ movl 36(%ebp), %edx
+ sbbl %edi, %edx
+ movl 40(%eax), %edi
+ movl %esi, 32(%ecx)
+ movl 40(%ebp), %esi
+ sbbl %edi, %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 44(%eax), %esi
+ movl %edx, 36(%ecx)
+ movl 44(%ebp), %edx
+ sbbl %esi, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 48(%eax), %edx
+ movl 48(%ebp), %esi
+ sbbl %edx, %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 52(%eax), %edx
+ movl 52(%ebp), %esi
+ sbbl %edx, %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 56(%eax), %edx
+ movl 56(%ebp), %esi
+ sbbl %edx, %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 60(%eax), %edx
+ movl 60(%ebp), %esi
+ sbbl %edx, %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 64(%eax), %edx
+ movl 64(%ebp), %esi
+ sbbl %edx, %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 68(%eax), %edx
+ movl 68(%ebp), %esi
+ sbbl %edx, %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 72(%eax), %edx
+ movl 72(%ebp), %esi
+ sbbl %edx, %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 76(%eax), %eax
+ movl 76(%ebp), %edx
+ sbbl %eax, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 92(%esp), %esi
+ jne .LBB157_1
+# BB#2:
+ movl $0, 16(%esp) # 4-byte Folded Spill
+ jmp .LBB157_3
+.LBB157_1:
+ movl 36(%esi), %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+.LBB157_3:
+ testb %al, %al
+ jne .LBB157_4
+# BB#5:
+ movl $0, 8(%esp) # 4-byte Folded Spill
+ movl $0, %ebx
+ jmp .LBB157_6
+.LBB157_4:
+ movl (%esi), %ebx
+ movl 4(%esi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+.LBB157_6:
+ jne .LBB157_7
+# BB#8:
+ movl $0, 12(%esp) # 4-byte Folded Spill
+ jmp .LBB157_9
+.LBB157_7:
+ movl 32(%esi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+.LBB157_9:
+ jne .LBB157_10
+# BB#11:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB157_12
+.LBB157_10:
+ movl 28(%esi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+.LBB157_12:
+ jne .LBB157_13
+# BB#14:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB157_15
+.LBB157_13:
+ movl 24(%esi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+.LBB157_15:
+ jne .LBB157_16
+# BB#17:
+ movl $0, %ebp
+ jmp .LBB157_18
+.LBB157_16:
+ movl 20(%esi), %ebp
+.LBB157_18:
+ jne .LBB157_19
+# BB#20:
+ movl $0, %eax
+ jmp .LBB157_21
+.LBB157_19:
+ movl 16(%esi), %eax
+.LBB157_21:
+ jne .LBB157_22
+# BB#23:
+ movl $0, %edx
+ jmp .LBB157_24
+.LBB157_22:
+ movl 12(%esi), %edx
+.LBB157_24:
+ jne .LBB157_25
+# BB#26:
+ xorl %esi, %esi
+ jmp .LBB157_27
+.LBB157_25:
+ movl 8(%esi), %esi
+.LBB157_27:
+ addl 28(%esp), %ebx # 4-byte Folded Reload
+ movl 8(%esp), %edi # 4-byte Reload
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 40(%ecx)
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %edi, 44(%ecx)
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %esi, 48(%ecx)
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 52(%ecx)
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 56(%ecx)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %ebp, 60(%ecx)
+ movl 4(%esp), %edx # 4-byte Reload
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 64(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 68(%ecx)
+ movl %eax, 72(%ecx)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%ecx)
+ addl $60, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end157:
+ .size mcl_fpDbl_sub10L, .Lfunc_end157-mcl_fpDbl_sub10L
+
+ .align 16, 0x90
+ .type .LmulPv352x32,@function
+.LmulPv352x32: # @mulPv352x32
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $72, %esp
+ movl %edx, %ebx
+ movl 92(%esp), %edi
+ movl %edi, %eax
+ mull 40(%ebx)
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 36(%ebx)
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 32(%ebx)
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 28(%ebx)
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 24(%ebx)
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 20(%ebx)
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 16(%ebx)
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 12(%ebx)
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 8(%ebx)
+ movl %edx, %esi
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 4(%ebx)
+ movl %edx, %ebp
+ movl %eax, (%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull (%ebx)
+ movl %eax, (%ecx)
+ addl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%ecx)
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%ecx)
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%ecx)
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%ecx)
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%ecx)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%ecx)
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 44(%ecx)
+ movl %ecx, %eax
+ addl $72, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end158:
+ .size .LmulPv352x32, .Lfunc_end158-.LmulPv352x32
+
+ .globl mcl_fp_mulUnitPre11L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre11L,@function
+mcl_fp_mulUnitPre11L: # @mcl_fp_mulUnitPre11L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $92, %esp
+ calll .L159$pb
+.L159$pb:
+ popl %ebx
+.Ltmp20:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp20-.L159$pb), %ebx
+ movl 120(%esp), %eax
+ movl %eax, (%esp)
+ leal 40(%esp), %ecx
+ movl 116(%esp), %edx
+ calll .LmulPv352x32
+ movl 84(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 80(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 76(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 72(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 68(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 64(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp
+ movl 56(%esp), %ebx
+ movl 52(%esp), %edi
+ movl 48(%esp), %esi
+ movl 40(%esp), %edx
+ movl 44(%esp), %ecx
+ movl 112(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %edi, 12(%eax)
+ movl %ebx, 16(%eax)
+ movl %ebp, 20(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%eax)
+ addl $92, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end159:
+ .size mcl_fp_mulUnitPre11L, .Lfunc_end159-mcl_fp_mulUnitPre11L
+
+ .globl mcl_fpDbl_mulPre11L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre11L,@function
+mcl_fpDbl_mulPre11L: # @mcl_fpDbl_mulPre11L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $620, %esp # imm = 0x26C
+ calll .L160$pb
+.L160$pb:
+ popl %eax
+.Ltmp21:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp21-.L160$pb), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl 648(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 644(%esp), %edx
+ movl %edx, %ebp
+ movl %ebx, %edi
+ calll .LmulPv352x32
+ movl 612(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 608(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 604(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 600(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 596(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 592(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 588(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 584(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 580(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 576(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 568(%esp), %eax
+ movl 572(%esp), %esi
+ movl 640(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 648(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 520(%esp), %ecx
+ movl %ebp, %edx
+ movl %edi, %ebx
+ calll .LmulPv352x32
+ addl 520(%esp), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 564(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 560(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 556(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 552(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 548(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 544(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 540(%esp), %ebx
+ movl 536(%esp), %edi
+ movl 532(%esp), %esi
+ movl 524(%esp), %ecx
+ movl 528(%esp), %edx
+ movl 640(%esp), %eax
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%eax)
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 648(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 472(%esp), %ecx
+ movl 644(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 472(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 516(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 512(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 508(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 504(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 500(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 496(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 492(%esp), %ebp
+ movl 488(%esp), %edi
+ movl 484(%esp), %esi
+ movl 476(%esp), %ecx
+ movl 480(%esp), %edx
+ movl 640(%esp), %eax
+ movl 56(%esp), %ebx # 4-byte Reload
+ movl %ebx, 8(%eax)
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 648(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 644(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 424(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 468(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 464(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 460(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 456(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 452(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 448(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 444(%esp), %ebx
+ movl 440(%esp), %edi
+ movl 436(%esp), %esi
+ movl 428(%esp), %ecx
+ movl 432(%esp), %edx
+ movl 640(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 648(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 376(%esp), %ecx
+ movl 644(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 376(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 420(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 416(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 412(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 408(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 404(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 400(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 396(%esp), %ebp
+ movl 392(%esp), %edi
+ movl 388(%esp), %esi
+ movl 380(%esp), %ecx
+ movl 384(%esp), %edx
+ movl 640(%esp), %eax
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl %ebx, 16(%eax)
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 648(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 328(%esp), %ecx
+ movl 644(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 328(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 364(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 360(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 356(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 352(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 348(%esp), %ebx
+ movl 344(%esp), %edi
+ movl 340(%esp), %esi
+ movl 332(%esp), %ecx
+ movl 336(%esp), %edx
+ movl 640(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 648(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 280(%esp), %ecx
+ movl 644(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 280(%esp), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 324(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 320(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 316(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 312(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 308(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 304(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 300(%esp), %ebp
+ movl 296(%esp), %edi
+ movl 292(%esp), %esi
+ movl 284(%esp), %ecx
+ movl 288(%esp), %edx
+ movl 640(%esp), %eax
+ movl 16(%esp), %ebx # 4-byte Reload
+ movl %ebx, 24(%eax)
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 648(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 232(%esp), %ecx
+ movl 644(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 232(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 276(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 272(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 268(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 264(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 260(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 256(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 252(%esp), %ebx
+ movl 248(%esp), %edi
+ movl 244(%esp), %esi
+ movl 236(%esp), %ecx
+ movl 240(%esp), %edx
+ movl 640(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 648(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 644(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 184(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl 228(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 224(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 220(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 212(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 208(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 204(%esp), %ebp
+ movl 200(%esp), %edi
+ movl 196(%esp), %esi
+ movl 188(%esp), %ecx
+ movl 192(%esp), %edx
+ movl 640(%esp), %eax
+ movl 32(%esp), %ebx # 4-byte Reload
+ movl %ebx, 32(%eax)
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ movl 648(%esp), %edi
+ movl 36(%edi), %eax
+ movl %eax, (%esp)
+ leal 136(%esp), %ecx
+ movl 644(%esp), %eax
+ movl %eax, %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 136(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 176(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 168(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 164(%esp), %ebp
+ movl 160(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 156(%esp), %edi
+ movl 152(%esp), %esi
+ movl 148(%esp), %edx
+ movl 140(%esp), %ecx
+ movl 144(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 640(%esp), %eax
+ movl 56(%esp), %ebx # 4-byte Reload
+ movl %ebx, 36(%eax)
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ adcl 60(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 648(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 88(%esp), %ecx
+ movl 644(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 52(%esp), %eax # 4-byte Reload
+ addl 88(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 92(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 132(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 128(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 124(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 120(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 116(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 112(%esp), %edi
+ movl 108(%esp), %esi
+ movl 104(%esp), %edx
+ movl 100(%esp), %ecx
+ movl 640(%esp), %eax
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl %ebx, 40(%eax)
+ movl %ebp, 44(%eax)
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 48(%eax)
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 52(%eax)
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 56(%eax)
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 60(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 72(%esp), %ecx # 4-byte Folded Reload
+ movl %edi, 64(%eax)
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 68(%eax)
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 80(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 72(%eax)
+ movl %ecx, 76(%eax)
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 80(%eax)
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 84(%eax)
+ addl $620, %esp # imm = 0x26C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end160:
+ .size mcl_fpDbl_mulPre11L, .Lfunc_end160-mcl_fpDbl_mulPre11L
+
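The two routines around this point operate on 11-limb (11 x 32-bit = 352-bit) integers: mcl_fpDbl_mulPre11L, which ends just above, builds the full 22-limb product one 32-bit word of the multiplier at a time through the .LmulPv352x32 helper, and mcl_fpDbl_sqrPre11L below repeats the same row loop with its single input used as both operands. A minimal Go sketch of that schoolbook structure, with hypothetical names (mulPre), is:

package main

import (
	"fmt"
	"math/bits"
)

const n = 11 // limbs: 11 x 32-bit words = 352 bits, matching .LmulPv352x32

// mulPre accumulates the full 2n-limb product z += x*y without any modular
// reduction, mirroring the row structure of mcl_fpDbl_mulPre11L above: each
// outer iteration multiplies x by one 32-bit word of y (the job done by the
// .LmulPv352x32 helper) and folds the row into z with carry propagation.
// z must start zeroed.  mcl_fpDbl_sqrPre11L is the same loop with y = x.
func mulPre(z *[2 * n]uint32, x, y *[n]uint32) {
	for i := 0; i < n; i++ {
		var carry uint32
		for j := 0; j < n; j++ {
			hi, lo := bits.Mul32(x[j], y[i])
			var c1, c2 uint32
			lo, c1 = bits.Add32(lo, z[i+j], 0)
			lo, c2 = bits.Add32(lo, carry, 0)
			z[i+j] = lo
			// x[j]*y[i] + z[i+j] + carry < 2^64, so this cannot wrap.
			carry = hi + c1 + c2
		}
		z[i+n] = carry
	}
}

func main() {
	x := [n]uint32{1: 3} // x = 3 << 32
	y := [n]uint32{0: 5} // y = 5
	var z [2 * n]uint32
	mulPre(&z, &x, &y)
	fmt.Println(z[1]) // 15
}
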
+ .globl mcl_fpDbl_sqrPre11L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre11L,@function
+mcl_fpDbl_sqrPre11L: # @mcl_fpDbl_sqrPre11L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $620, %esp # imm = 0x26C
+ calll .L161$pb
+.L161$pb:
+ popl %ebx
+.Ltmp22:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp22-.L161$pb), %ebx
+ movl %ebx, 84(%esp) # 4-byte Spill
+ movl 644(%esp), %edx
+ movl (%edx), %eax
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl %edx, %esi
+ movl %ebx, %edi
+ calll .LmulPv352x32
+ movl 612(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 608(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 604(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 600(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 596(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 592(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 588(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 584(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 580(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 576(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 568(%esp), %eax
+ movl 572(%esp), %ebp
+ movl 640(%esp), %ecx
+ movl %eax, (%ecx)
+ movl %esi, %edx
+ movl 4(%edx), %eax
+ movl %eax, (%esp)
+ leal 520(%esp), %ecx
+ movl %edi, %ebx
+ calll .LmulPv352x32
+ addl 520(%esp), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 564(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 560(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 556(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 552(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 548(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 544(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 540(%esp), %ebx
+ movl 536(%esp), %edi
+ movl 532(%esp), %esi
+ movl 524(%esp), %ecx
+ movl 528(%esp), %edx
+ movl 640(%esp), %eax
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%eax)
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 8(%edx), %eax
+ movl %eax, (%esp)
+ leal 472(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 60(%esp), %eax # 4-byte Reload
+ addl 472(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 516(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 512(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 508(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 504(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 500(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 496(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 492(%esp), %ebp
+ movl 488(%esp), %edi
+ movl 484(%esp), %esi
+ movl 476(%esp), %ecx
+ movl 480(%esp), %edx
+ movl 640(%esp), %eax
+ movl 60(%esp), %ebx # 4-byte Reload
+ movl %ebx, 8(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 12(%edx), %eax
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 48(%esp), %eax # 4-byte Reload
+ addl 424(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 468(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 464(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 460(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 456(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 452(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 448(%esp), %ebx
+ movl 444(%esp), %edi
+ movl 440(%esp), %esi
+ movl 436(%esp), %edx
+ movl 428(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 432(%esp), %ecx
+ movl 640(%esp), %eax
+ movl 48(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 80(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 16(%edx), %eax
+ movl %eax, (%esp)
+ leal 376(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 80(%esp), %eax # 4-byte Reload
+ addl 376(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 420(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 416(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 412(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 408(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 404(%esp), %ebx
+ movl 400(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 396(%esp), %edi
+ movl 392(%esp), %esi
+ movl 388(%esp), %edx
+ movl 380(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 384(%esp), %ecx
+ movl 640(%esp), %eax
+ movl 80(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%eax)
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 20(%edx), %eax
+ movl %eax, (%esp)
+ leal 328(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 328(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 364(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 360(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 356(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 352(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 348(%esp), %ebp
+ movl 344(%esp), %edi
+ movl 340(%esp), %esi
+ movl 332(%esp), %ecx
+ movl 336(%esp), %edx
+ movl 640(%esp), %eax
+ movl 48(%esp), %ebx # 4-byte Reload
+ movl %ebx, 20(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 80(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 24(%edx), %eax
+ movl %eax, (%esp)
+ leal 280(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 48(%esp), %eax # 4-byte Reload
+ addl 280(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 324(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 320(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 316(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 312(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 308(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 304(%esp), %ebx
+ movl 300(%esp), %edi
+ movl 296(%esp), %esi
+ movl 292(%esp), %edx
+ movl 284(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 288(%esp), %ecx
+ movl 640(%esp), %eax
+ movl 48(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%eax)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 80(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 8(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 28(%edx), %eax
+ movl %eax, (%esp)
+ leal 232(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 80(%esp), %eax # 4-byte Reload
+ addl 232(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 276(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 272(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 268(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 264(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 260(%esp), %ebx
+ movl 256(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 252(%esp), %edi
+ movl 248(%esp), %esi
+ movl 244(%esp), %edx
+ movl 236(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 240(%esp), %ecx
+ movl 640(%esp), %eax
+ movl 80(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 32(%edx), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 184(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 228(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 224(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 220(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 212(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 208(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 204(%esp), %ebp
+ movl 200(%esp), %edi
+ movl 196(%esp), %esi
+ movl 188(%esp), %ecx
+ movl 192(%esp), %edx
+ movl 640(%esp), %eax
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl %ebx, 32(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 36(%edx), %eax
+ movl %eax, (%esp)
+ leal 136(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 52(%esp), %eax # 4-byte Reload
+ addl 136(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 176(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 168(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 164(%esp), %ebp
+ movl 160(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 156(%esp), %edi
+ movl 152(%esp), %esi
+ movl 148(%esp), %edx
+ movl 140(%esp), %ecx
+ movl 144(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 640(%esp), %eax
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl %ebx, 36(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ adcl 64(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 40(%edx), %eax
+ movl %eax, (%esp)
+ leal 88(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 52(%esp), %eax # 4-byte Reload
+ addl 88(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 92(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 132(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 128(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 124(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 120(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 116(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 112(%esp), %edi
+ movl 108(%esp), %esi
+ movl 104(%esp), %edx
+ movl 100(%esp), %ecx
+ movl 640(%esp), %eax
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl %ebx, 40(%eax)
+ movl %ebp, 44(%eax)
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 48(%eax)
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 52(%eax)
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 56(%eax)
+ adcl 64(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 60(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ movl %edi, 64(%eax)
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 60(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 68(%eax)
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 72(%eax)
+ movl %ecx, 76(%eax)
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 80(%eax)
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 84(%eax)
+ addl $620, %esp # imm = 0x26C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end161:
+ .size mcl_fpDbl_sqrPre11L, .Lfunc_end161-mcl_fpDbl_sqrPre11L
+
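mcl_fpDbl_sqrPre11L above produces the plain double-width square; mcl_fp_mont11L below interleaves the same word-by-word products with Montgomery reduction. Assuming mcl's usual convention, the word the assembly loads from -4(%eax) at the top of the function and feeds to imull before each reduction call is n' = -p^{-1} mod 2^32, and the routine returns x*y*R^{-1} mod p with R = 2^352 for operands kept in Montgomery form. The Go model below states only that end-to-end relation, not the interleaved word-level loop used in the assembly:

package main

import (
	"fmt"
	"math/big"
)

// Reference semantics assumed for mcl_fp_mont11L: 352-bit operands in
// Montgomery form, result x*y*R^{-1} mod p with R = 2^352.  This big.Int
// model is a specification sketch, not the limb-level CIOS loop above.
func montMul(x, y, p *big.Int) *big.Int {
	R := new(big.Int).Lsh(big.NewInt(1), 352) // R = 2^352
	rInv := new(big.Int).ModInverse(R, p)     // exists because p is odd
	z := new(big.Int).Mul(x, y)
	z.Mul(z, rInv)
	return z.Mod(z, p)
}

func main() {
	// Tiny odd modulus, purely illustrative (a real modulus fills 11 limbs).
	p := big.NewInt(1000003)
	R := new(big.Int).Lsh(big.NewInt(1), 352)
	toMont := func(a *big.Int) *big.Int {
		return new(big.Int).Mod(new(big.Int).Mul(a, R), p)
	}
	a, b := big.NewInt(12345), big.NewInt(67890)
	got := montMul(toMont(a), toMont(b), p)
	want := toMont(new(big.Int).Mod(new(big.Int).Mul(a, b), p))
	fmt.Println(got.Cmp(want) == 0) // true: montMul(aR, bR) == abR mod p
}
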
+ .globl mcl_fp_mont11L
+ .align 16, 0x90
+ .type mcl_fp_mont11L,@function
+mcl_fp_mont11L: # @mcl_fp_mont11L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1132, %esp # imm = 0x46C
+ calll .L162$pb
+.L162$pb:
+ popl %ebx
+.Ltmp23:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp23-.L162$pb), %ebx
+ movl 1164(%esp), %eax
+ movl -4(%eax), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1080(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 1080(%esp), %edi
+ movl 1084(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull %ebp, %eax
+ movl 1124(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 1120(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 1116(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1112(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1108(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 1100(%esp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 1096(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 1092(%esp), %esi
+ movl 1088(%esp), %ebp
+ movl %eax, (%esp)
+ leal 1032(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 1032(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1040(%esp), %ebp
+ adcl 1044(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1068(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1072(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 1076(%esp), %esi
+ sbbl %edi, %edi
+ movl 1160(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 984(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ andl $1, %edi
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 984(%esp), %ecx
+ adcl 988(%esp), %ebp
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 1000(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 1004(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1024(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ adcl 1028(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 936(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 936(%esp), %esi
+ adcl 940(%esp), %ebp
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 944(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 948(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 952(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 964(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 980(%esp), %esi
+ adcl $0, %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 888(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ addl 888(%esp), %ebp
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 912(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 928(%esp), %esi
+ movl %esi, %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 932(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ebp, %eax
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 840(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ movl %esi, %eax
+ andl $1, %eax
+ addl 840(%esp), %ebp
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 844(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 848(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %ecx # 4-byte Reload
+ adcl 852(%esp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 856(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 860(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 864(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 868(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 872(%esp), %ebp
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 876(%esp), %esi
+ adcl 880(%esp), %edi
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 884(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 792(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 40(%esp), %ecx # 4-byte Reload
+ addl 792(%esp), %ecx
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 820(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ adcl 824(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ adcl 828(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 836(%esp), %esi
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 744(%esp), %ecx
+ movl 1164(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv352x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 744(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 768(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 776(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 780(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 788(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 696(%esp), %ecx
+ movl 1156(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv352x32
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 696(%esp), %ecx
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 716(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 724(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 728(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 648(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 648(%esp), %ebp
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %ebp # 4-byte Reload
+ adcl 656(%esp), %ebp
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 676(%esp), %edi
+ adcl 680(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %esi # 4-byte Reload
+ adcl 692(%esp), %esi
+ adcl $0, %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 600(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 24(%esp), %ecx # 4-byte Reload
+ addl 600(%esp), %ecx
+ adcl 604(%esp), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %ebp # 4-byte Reload
+ adcl 608(%esp), %ebp
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 624(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 640(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 552(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ andl $1, %esi
+ movl %esi, %eax
+ addl 552(%esp), %edi
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 556(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl %ebp, %edi
+ adcl 560(%esp), %edi
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 564(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 568(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 572(%esp), %esi
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 576(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 580(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 584(%esp), %ebp
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 588(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 592(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 596(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 28(%esp), %ecx # 4-byte Reload
+ addl 504(%esp), %ecx
+ adcl 508(%esp), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 520(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 532(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 536(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 456(%esp), %ecx
+ movl 1164(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv352x32
+ andl $1, %ebp
+ movl %ebp, %eax
+ addl 456(%esp), %edi
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 460(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 464(%esp), %ebp
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 468(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 472(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 476(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 480(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 484(%esp), %edi
+ adcl 488(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 492(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 496(%esp), %esi
+ movl 24(%esp), %ecx # 4-byte Reload
+ adcl 500(%esp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 408(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 32(%esp), %ecx # 4-byte Reload
+ addl 408(%esp), %ecx
+ adcl 412(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 428(%esp), %ebp
+ adcl 432(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 444(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 360(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 360(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 368(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 380(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 384(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 392(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 312(%esp), %ecx
+ adcl 316(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 332(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 340(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %edi # 4-byte Reload
+ adcl 348(%esp), %edi
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 264(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 276(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 284(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 300(%esp), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %edi # 4-byte Reload
+ adcl 304(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 216(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 216(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 224(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 232(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ adcl 252(%esp), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 168(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ movl %esi, %ecx
+ andl $1, %ecx
+ addl 168(%esp), %ebp
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 172(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 184(%esp), %ebp
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 188(%esp), %edi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 120(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl %esi, %ecx
+ addl 120(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 124(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 128(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 132(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl 136(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 140(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 144(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 152(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %ebp # 4-byte Reload
+ adcl 156(%esp), %ebp
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl 20(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 72(%esp), %ecx
+ movl 1164(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv352x32
+ andl $1, %esi
+ addl 72(%esp), %edi
+ movl 48(%esp), %edi # 4-byte Reload
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 80(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 84(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl 88(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 92(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %edx # 4-byte Reload
+ adcl 100(%esp), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 108(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 112(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 116(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl 1164(%esp), %ebp
+ subl (%ebp), %eax
+ movl %ecx, %edx
+ sbbl 4(%ebp), %edx
+ movl 52(%esp), %ecx # 4-byte Reload
+ sbbl 8(%ebp), %ecx
+ sbbl 12(%ebp), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ sbbl 16(%ebp), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ sbbl 20(%ebp), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 24(%esp), %edi # 4-byte Reload
+ sbbl 24(%ebp), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl 28(%esp), %ebx # 4-byte Reload
+ sbbl 28(%ebp), %ebx
+ movl 32(%esp), %edi # 4-byte Reload
+ sbbl 32(%ebp), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ sbbl 36(%ebp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ sbbl 40(%ebp), %edi
+ movl %edi, %ebp
+ sbbl $0, %esi
+ andl $1, %esi
+ jne .LBB162_2
+# BB#1:
+ movl %ebx, 28(%esp) # 4-byte Spill
+.LBB162_2:
+ movl %esi, %ebx
+ testb %bl, %bl
+ movl 68(%esp), %ebx # 4-byte Reload
+ jne .LBB162_4
+# BB#3:
+ movl %eax, %ebx
+.LBB162_4:
+ movl 1152(%esp), %eax
+ movl %ebx, (%eax)
+ movl 56(%esp), %edi # 4-byte Reload
+ jne .LBB162_6
+# BB#5:
+ movl %edx, %edi
+.LBB162_6:
+ movl %edi, 4(%eax)
+ movl 52(%esp), %edx # 4-byte Reload
+ jne .LBB162_8
+# BB#7:
+ movl %ecx, %edx
+.LBB162_8:
+ movl %edx, 8(%eax)
+ jne .LBB162_10
+# BB#9:
+ movl 4(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+.LBB162_10:
+ movl 48(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ jne .LBB162_12
+# BB#11:
+ movl 8(%esp), %ecx # 4-byte Reload
+.LBB162_12:
+ movl %ecx, 16(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ jne .LBB162_14
+# BB#13:
+ movl 12(%esp), %ecx # 4-byte Reload
+.LBB162_14:
+ movl %ecx, 20(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ jne .LBB162_16
+# BB#15:
+ movl 16(%esp), %ecx # 4-byte Reload
+.LBB162_16:
+ movl %ecx, 24(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ jne .LBB162_18
+# BB#17:
+ movl 20(%esp), %ecx # 4-byte Reload
+.LBB162_18:
+ movl %ecx, 32(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ jne .LBB162_20
+# BB#19:
+ movl 60(%esp), %ecx # 4-byte Reload
+.LBB162_20:
+ movl %ecx, 36(%eax)
+ movl 64(%esp), %ecx # 4-byte Reload
+ jne .LBB162_22
+# BB#21:
+ movl %ebp, %ecx
+.LBB162_22:
+ movl %ecx, 40(%eax)
+ addl $1132, %esp # imm = 0x46C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end162:
+ .size mcl_fp_mont11L, .Lfunc_end162-mcl_fp_mont11L
+
+ .globl mcl_fp_montNF11L
+ .align 16, 0x90
+ .type mcl_fp_montNF11L,@function
+mcl_fp_montNF11L: # @mcl_fp_montNF11L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1132, %esp # imm = 0x46C
+ calll .L163$pb
+.L163$pb:
+ popl %ebx
+.Ltmp24:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp24-.L163$pb), %ebx
+ movl 1164(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1080(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 1080(%esp), %ebp
+ movl 1084(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 1124(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 1120(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 1116(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1112(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1108(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 1100(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 1096(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 1092(%esp), %esi
+ movl 1088(%esp), %edi
+ movl %eax, (%esp)
+ leal 1032(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 1032(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1040(%esp), %edi
+ adcl 1044(%esp), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 1048(%esp), %ebp
+ movl 28(%esp), %esi # 4-byte Reload
+ adcl 1052(%esp), %esi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1068(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1072(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1076(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 984(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 1028(%esp), %edx
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 984(%esp), %ecx
+ adcl 988(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 996(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ adcl 1000(%esp), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 1004(%esp), %esi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 936(%esp), %ecx
+ movl 1164(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv352x32
+ addl 936(%esp), %ebp
+ adcl 940(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 944(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 948(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 952(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ adcl 956(%esp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 960(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 980(%esp), %ebp
+ movl 1160(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 888(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 932(%esp), %eax
+ addl 888(%esp), %edi
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 892(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 896(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 900(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 904(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ adcl 908(%esp), %esi
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 912(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 916(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 920(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 924(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 928(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ adcl $0, %ebp
+ movl %edi, %eax
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 840(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 840(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 856(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 860(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 872(%esp), %edi
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 876(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 884(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 792(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 836(%esp), %eax
+ movl 44(%esp), %edx # 4-byte Reload
+ addl 792(%esp), %edx
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 796(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 800(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 804(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 808(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 812(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 816(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 820(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 824(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 828(%esp), %ebp
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 832(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 744(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 744(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 764(%esp), %esi
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 768(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 780(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 784(%esp), %ebp
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 696(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 740(%esp), %edx
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 696(%esp), %eax
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 700(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 704(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 708(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 712(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 716(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 720(%esp), %edi
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 724(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 728(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 732(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 736(%esp), %esi
+ adcl $0, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 648(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 648(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %ebp # 4-byte Reload
+ adcl 656(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 672(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 688(%esp), %esi
+ movl %esi, %edi
+ movl 40(%esp), %esi # 4-byte Reload
+ adcl 692(%esp), %esi
+ movl 1160(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 600(%esp), %ecx
+ movl 1156(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv352x32
+ movl 644(%esp), %eax
+ movl 28(%esp), %ecx # 4-byte Reload
+ addl 600(%esp), %ecx
+ adcl 604(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 608(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 612(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 616(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 620(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 624(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 628(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 632(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 636(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ adcl 640(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ adcl $0, %ebp
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 552(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 552(%esp), %esi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 560(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 576(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 596(%esp), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl 1160(%esp), %ecx
+ movl %ecx, %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 548(%esp), %edx
+ movl 32(%esp), %eax # 4-byte Reload
+ addl 504(%esp), %eax
+ adcl 508(%esp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 512(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 516(%esp), %ebp
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 520(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 524(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 528(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 532(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 536(%esp), %edi
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 540(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 544(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, %esi
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 456(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 456(%esp), %esi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 468(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 480(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 488(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %edi # 4-byte Reload
+ adcl 496(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 408(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 452(%esp), %edx
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 408(%esp), %ecx
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 412(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 428(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 444(%esp), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %edi # 4-byte Reload
+ adcl 448(%esp), %edi
+ adcl $0, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 360(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 360(%esp), %esi
+ adcl 364(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 372(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 392(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ adcl 400(%esp), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 356(%esp), %edx
+ movl 52(%esp), %ecx # 4-byte Reload
+ addl 312(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 320(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 332(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 340(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 264(%esp), %esi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 276(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %edi, %esi
+ adcl 284(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 292(%esp), %edi
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 216(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 260(%esp), %edx
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 216(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 224(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 232(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 240(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %ebp # 4-byte Reload
+ adcl 244(%esp), %ebp
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 168(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 168(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 176(%esp), %esi
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 180(%esp), %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 196(%esp), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ebp # 4-byte Reload
+ adcl 204(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 120(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 164(%esp), %edx
+ movl 64(%esp), %ecx # 4-byte Reload
+ addl 120(%esp), %ecx
+ adcl 124(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ adcl 128(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 132(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 136(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 140(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 144(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 152(%esp), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 156(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 72(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 72(%esp), %edi
+ movl 48(%esp), %edi # 4-byte Reload
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 80(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 84(%esp), %edi
+ adcl 88(%esp), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 92(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %edx # 4-byte Reload
+ adcl 100(%esp), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl 108(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 112(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 116(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 1164(%esp), %ebx
+ subl (%ebx), %edx
+ movl %ecx, %esi
+ sbbl 4(%ebx), %esi
+ movl %edi, %ecx
+ sbbl 8(%ebx), %ecx
+ movl 44(%esp), %eax # 4-byte Reload
+ sbbl 12(%ebx), %eax
+ movl 40(%esp), %ebp # 4-byte Reload
+ sbbl 16(%ebx), %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ movl 28(%esp), %ebp # 4-byte Reload
+ sbbl 20(%ebx), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 32(%esp), %ebp # 4-byte Reload
+ sbbl 24(%ebx), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 36(%esp), %ebp # 4-byte Reload
+ sbbl 28(%ebx), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ sbbl 32(%ebx), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ sbbl 36(%ebx), %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ sbbl 40(%ebx), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl %ebp, %ebx
+ sarl $31, %ebx
+ testl %ebx, %ebx
+ movl 68(%esp), %ebx # 4-byte Reload
+ js .LBB163_2
+# BB#1:
+ movl %edx, %ebx
+.LBB163_2:
+ movl 1152(%esp), %edx
+ movl %ebx, (%edx)
+ movl 60(%esp), %ebp # 4-byte Reload
+ js .LBB163_4
+# BB#3:
+ movl %esi, %ebp
+.LBB163_4:
+ movl %ebp, 4(%edx)
+ js .LBB163_6
+# BB#5:
+ movl %ecx, %edi
+.LBB163_6:
+ movl %edi, 8(%edx)
+ movl 44(%esp), %ecx # 4-byte Reload
+ js .LBB163_8
+# BB#7:
+ movl %eax, %ecx
+.LBB163_8:
+ movl %ecx, 12(%edx)
+ movl 40(%esp), %eax # 4-byte Reload
+ js .LBB163_10
+# BB#9:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB163_10:
+ movl %eax, 16(%edx)
+ movl 28(%esp), %eax # 4-byte Reload
+ js .LBB163_12
+# BB#11:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB163_12:
+ movl %eax, 20(%edx)
+ movl 32(%esp), %eax # 4-byte Reload
+ js .LBB163_14
+# BB#13:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB163_14:
+ movl %eax, 24(%edx)
+ movl 36(%esp), %eax # 4-byte Reload
+ js .LBB163_16
+# BB#15:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB163_16:
+ movl %eax, 28(%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB163_18
+# BB#17:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB163_18:
+ movl %eax, 32(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB163_20
+# BB#19:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB163_20:
+ movl %eax, 36(%edx)
+ movl 64(%esp), %eax # 4-byte Reload
+ js .LBB163_22
+# BB#21:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB163_22:
+ movl %eax, 40(%edx)
+ addl $1132, %esp # imm = 0x46C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end163:
+ .size mcl_fp_montNF11L, .Lfunc_end163-mcl_fp_montNF11L
+
+ .globl mcl_fp_montRed11L
+ .align 16, 0x90
+ .type mcl_fp_montRed11L,@function
+mcl_fp_montRed11L: # @mcl_fp_montRed11L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $668, %esp # imm = 0x29C
+ calll .L164$pb
+.L164$pb:
+ popl %eax
+.Ltmp25:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp25-.L164$pb), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 696(%esp), %edx
+ movl -4(%edx), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 692(%esp), %ecx
+ movl (%ecx), %ebx
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl 4(%ecx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ imull %esi, %ebx
+ movl 84(%ecx), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 80(%ecx), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 76(%ecx), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 72(%ecx), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 68(%ecx), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 64(%ecx), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 60(%ecx), %esi
+ movl %esi, 124(%esp) # 4-byte Spill
+ movl 56(%ecx), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 52(%ecx), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 48(%ecx), %esi
+ movl %esi, 128(%esp) # 4-byte Spill
+ movl 44(%ecx), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 40(%ecx), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 32(%ecx), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 28(%ecx), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 24(%ecx), %ebp
+ movl 20(%ecx), %edi
+ movl 16(%ecx), %esi
+ movl 12(%ecx), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 8(%ecx), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl (%edx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 40(%edx), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 36(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 32(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 28(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 24(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 16(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 12(%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 8(%edx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 4(%edx), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl %ebx, (%esp)
+ leal 616(%esp), %ecx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 60(%esp), %eax # 4-byte Reload
+ addl 616(%esp), %eax
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 620(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 632(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 636(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 640(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 568(%esp), %esi
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 572(%esp), %edx
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 600(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ movl %ebp, %eax
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 520(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 520(%esp), %ebp
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 524(%esp), %ecx
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 548(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl 124(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl 120(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 472(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 472(%esp), %esi
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 476(%esp), %ecx
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 124(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 120(%esp) # 4-byte Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 424(%esp), %ebp
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 428(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl 464(%esp), %ebp
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 376(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 376(%esp), %esi
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 380(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %esi # 4-byte Reload
+ adcl 404(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 412(%esp), %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 328(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 328(%esp), %edi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 332(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 344(%esp), %edi
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl 352(%esp), %esi
+ movl %esi, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %esi # 4-byte Reload
+ adcl 356(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 280(%esp), %ecx
+ movl 696(%esp), %eax
+ movl %eax, %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 280(%esp), %ebp
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 284(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 292(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 132(%esp), %edi # 4-byte Reload
+ adcl 296(%esp), %edi
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ adcl 304(%esp), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl %ebp, %eax
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 232(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 232(%esp), %ebp
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 236(%esp), %ebp
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 244(%esp), %edi
+ movl %edi, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 276(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl %ebp, %eax
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 184(%esp), %ebp
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 188(%esp), %ecx
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 136(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 136(%esp), %esi
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 140(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl 128(%esp), %edx # 4-byte Reload
+ adcl 144(%esp), %edx
+ movl %edx, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 148(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 152(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 80(%esp), %ebx # 4-byte Reload
+ adcl 180(%esp), %ebx
+ movl %ebx, 80(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ subl 12(%esp), %edi # 4-byte Folded Reload
+ sbbl 4(%esp), %edx # 4-byte Folded Reload
+ sbbl 8(%esp), %ecx # 4-byte Folded Reload
+ sbbl 16(%esp), %esi # 4-byte Folded Reload
+ movl 124(%esp), %eax # 4-byte Reload
+ sbbl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ sbbl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ sbbl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ sbbl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ sbbl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ sbbl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ movl %ebp, %ebx
+ sbbl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB164_2
+# BB#1:
+ movl %esi, 112(%esp) # 4-byte Spill
+.LBB164_2:
+ testb %bl, %bl
+ movl 132(%esp), %esi # 4-byte Reload
+ jne .LBB164_4
+# BB#3:
+ movl %edi, %esi
+.LBB164_4:
+ movl 688(%esp), %edi
+ movl %esi, (%edi)
+ movl 104(%esp), %esi # 4-byte Reload
+ jne .LBB164_6
+# BB#5:
+ movl %edx, 128(%esp) # 4-byte Spill
+.LBB164_6:
+ movl 128(%esp), %edx # 4-byte Reload
+ movl %edx, 4(%edi)
+ movl 116(%esp), %edx # 4-byte Reload
+ jne .LBB164_8
+# BB#7:
+ movl %ecx, %edx
+.LBB164_8:
+ movl %edx, 8(%edi)
+ movl 112(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%edi)
+ movl 92(%esp), %edx # 4-byte Reload
+ movl 124(%esp), %ecx # 4-byte Reload
+ jne .LBB164_10
+# BB#9:
+ movl 64(%esp), %ecx # 4-byte Reload
+.LBB164_10:
+ movl %ecx, 16(%edi)
+ movl 96(%esp), %ecx # 4-byte Reload
+ movl 120(%esp), %eax # 4-byte Reload
+ jne .LBB164_12
+# BB#11:
+ movl 68(%esp), %eax # 4-byte Reload
+.LBB164_12:
+ movl %eax, 20(%edi)
+ movl 80(%esp), %eax # 4-byte Reload
+ movl 108(%esp), %ebp # 4-byte Reload
+ jne .LBB164_14
+# BB#13:
+ movl 72(%esp), %ebp # 4-byte Reload
+.LBB164_14:
+ movl %ebp, 24(%edi)
+ jne .LBB164_16
+# BB#15:
+ movl 76(%esp), %esi # 4-byte Reload
+.LBB164_16:
+ movl %esi, 28(%edi)
+ jne .LBB164_18
+# BB#17:
+ movl 84(%esp), %edx # 4-byte Reload
+.LBB164_18:
+ movl %edx, 32(%edi)
+ jne .LBB164_20
+# BB#19:
+ movl 88(%esp), %ecx # 4-byte Reload
+.LBB164_20:
+ movl %ecx, 36(%edi)
+ jne .LBB164_22
+# BB#21:
+ movl 100(%esp), %eax # 4-byte Reload
+.LBB164_22:
+ movl %eax, 40(%edi)
+ addl $668, %esp # imm = 0x29C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end164:
+ .size mcl_fp_montRed11L, .Lfunc_end164-mcl_fp_montRed11L
+
+ .globl mcl_fp_addPre11L
+ .align 16, 0x90
+ .type mcl_fp_addPre11L,@function
+mcl_fp_addPre11L: # @mcl_fp_addPre11L
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 20(%esp), %ecx
+ addl (%ecx), %edx
+ adcl 4(%ecx), %esi
+ movl 8(%eax), %edi
+ adcl 8(%ecx), %edi
+ movl 16(%esp), %ebx
+ movl %edx, (%ebx)
+ movl 12(%ecx), %edx
+ movl %esi, 4(%ebx)
+ movl 16(%ecx), %esi
+ adcl 12(%eax), %edx
+ adcl 16(%eax), %esi
+ movl %edi, 8(%ebx)
+ movl 20(%eax), %edi
+ movl %edx, 12(%ebx)
+ movl 20(%ecx), %edx
+ adcl %edi, %edx
+ movl 24(%eax), %edi
+ movl %esi, 16(%ebx)
+ movl 24(%ecx), %esi
+ adcl %edi, %esi
+ movl 28(%eax), %edi
+ movl %edx, 20(%ebx)
+ movl 28(%ecx), %edx
+ adcl %edi, %edx
+ movl 32(%eax), %edi
+ movl %esi, 24(%ebx)
+ movl 32(%ecx), %esi
+ adcl %edi, %esi
+ movl 36(%eax), %edi
+ movl %edx, 28(%ebx)
+ movl 36(%ecx), %edx
+ adcl %edi, %edx
+ movl %esi, 32(%ebx)
+ movl %edx, 36(%ebx)
+ movl 40(%eax), %eax
+ movl 40(%ecx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 40(%ebx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end165:
+ .size mcl_fp_addPre11L, .Lfunc_end165-mcl_fp_addPre11L
+
+ .globl mcl_fp_subPre11L
+ .align 16, 0x90
+ .type mcl_fp_subPre11L,@function
+mcl_fp_subPre11L: # @mcl_fp_subPre11L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %eax, %eax
+ movl 28(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edx), %ebx
+ movl 20(%esp), %ebp
+ movl %esi, (%ebp)
+ movl 12(%ecx), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ebp)
+ movl 16(%ecx), %edi
+ sbbl 16(%edx), %edi
+ movl %ebx, 8(%ebp)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%ebp)
+ movl 20(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %edi, 16(%ebp)
+ movl 24(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 28(%edx), %ebx
+ movl %esi, 20(%ebp)
+ movl 28(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 32(%edx), %ebx
+ movl %edi, 24(%ebp)
+ movl 32(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 36(%edx), %ebx
+ movl %esi, 28(%ebp)
+ movl 36(%ecx), %esi
+ sbbl %ebx, %esi
+ movl %edi, 32(%ebp)
+ movl %esi, 36(%ebp)
+ movl 40(%edx), %edx
+ movl 40(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 40(%ebp)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end166:
+ .size mcl_fp_subPre11L, .Lfunc_end166-mcl_fp_subPre11L
+
+ .globl mcl_fp_shr1_11L
+ .align 16, 0x90
+ .type mcl_fp_shr1_11L,@function
+mcl_fp_shr1_11L: # @mcl_fp_shr1_11L
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl 8(%esp), %esi
+ movl %ecx, (%esi)
+ movl 8(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 4(%esi)
+ movl 12(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 8(%esi)
+ movl 16(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 12(%esi)
+ movl 20(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 16(%esi)
+ movl 24(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 20(%esi)
+ movl 28(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 24(%esi)
+ movl 32(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 28(%esi)
+ movl 36(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 32(%esi)
+ movl 40(%eax), %eax
+ shrdl $1, %eax, %edx
+ movl %edx, 36(%esi)
+ shrl %eax
+ movl %eax, 40(%esi)
+ popl %esi
+ retl
+.Lfunc_end167:
+ .size mcl_fp_shr1_11L, .Lfunc_end167-mcl_fp_shr1_11L
+
+ .globl mcl_fp_add11L
+ .align 16, 0x90
+ .type mcl_fp_add11L,@function
+mcl_fp_add11L: # @mcl_fp_add11L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $32, %esp
+ movl 60(%esp), %edi
+ movl (%edi), %ecx
+ movl 4(%edi), %eax
+ movl 56(%esp), %esi
+ addl (%esi), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ adcl 4(%esi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 8(%edi), %eax
+ adcl 8(%esi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ movl 16(%esi), %ecx
+ adcl 12(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ adcl 16(%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ adcl 20(%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ adcl 24(%edi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 28(%esi), %ebx
+ adcl 28(%edi), %ebx
+ movl %ebx, (%esp) # 4-byte Spill
+ movl 32(%esi), %ecx
+ adcl 32(%edi), %ecx
+ movl 36(%esi), %eax
+ adcl 36(%edi), %eax
+ movl 40(%esi), %edx
+ adcl 40(%edi), %edx
+ movl 52(%esp), %esi
+ movl %ebp, (%esi)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%esi)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%esi)
+ movl 20(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%esi)
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%esi)
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%esi)
+ movl 8(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%esi)
+ movl %ebx, 28(%esi)
+ movl %ecx, 32(%esi)
+ movl %eax, 36(%esi)
+ movl %edx, 40(%esi)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 64(%esp), %ebp
+ movl 4(%esp), %edi # 4-byte Reload
+ subl (%ebp), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl 28(%esp), %edi # 4-byte Reload
+ sbbl 4(%ebp), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 24(%esp), %edi # 4-byte Reload
+ sbbl 8(%ebp), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 20(%esp), %edi # 4-byte Reload
+ sbbl 12(%ebp), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 16(%esp), %edi # 4-byte Reload
+ sbbl 16(%ebp), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %edi # 4-byte Reload
+ sbbl 20(%ebp), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 8(%esp), %edi # 4-byte Reload
+ sbbl 24(%ebp), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl (%esp), %edi # 4-byte Reload
+ sbbl 28(%ebp), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ sbbl 32(%ebp), %ecx
+ sbbl 36(%ebp), %eax
+ sbbl 40(%ebp), %edx
+ movl %edx, %edi
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB168_2
+# BB#1: # %nocarry
+ movl 4(%esp), %ebx # 4-byte Reload
+ movl %ebx, (%esi)
+ movl 28(%esp), %ebx # 4-byte Reload
+ movl %ebx, 4(%esi)
+ movl 24(%esp), %ebx # 4-byte Reload
+ movl %ebx, 8(%esi)
+ movl 20(%esp), %ebx # 4-byte Reload
+ movl %ebx, 12(%esi)
+ movl 16(%esp), %ebx # 4-byte Reload
+ movl %ebx, 16(%esi)
+ movl 12(%esp), %ebx # 4-byte Reload
+ movl %ebx, 20(%esi)
+ movl 8(%esp), %ebx # 4-byte Reload
+ movl %ebx, 24(%esi)
+ movl (%esp), %edx # 4-byte Reload
+ movl %edx, 28(%esi)
+ movl %ecx, 32(%esi)
+ movl %eax, 36(%esi)
+ movl %edi, 40(%esi)
+.LBB168_2: # %carry
+ addl $32, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end168:
+ .size mcl_fp_add11L, .Lfunc_end168-mcl_fp_add11L
+
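+# mcl_fp_addNF11L: 11-limb addition that selects between the raw sum and sum-minus-modulus
+# by the sign of the trial subtraction (sarl $31) rather than a carry test; the NF variants
+# appear to assume the modulus leaves the top bit of the highest limb clear.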
+ .globl mcl_fp_addNF11L
+ .align 16, 0x90
+ .type mcl_fp_addNF11L,@function
+mcl_fp_addNF11L: # @mcl_fp_addNF11L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $80, %esp
+ movl 108(%esp), %edx
+ movl (%edx), %eax
+ movl 4(%edx), %ecx
+ movl 104(%esp), %esi
+ addl (%esi), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 4(%esi), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 40(%edx), %ebx
+ movl 36(%edx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 32(%edx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 28(%edx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 24(%edx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 20(%edx), %ebp
+ movl 16(%edx), %edi
+ movl 12(%edx), %eax
+ movl 8(%edx), %ecx
+ adcl 8(%esi), %ecx
+ adcl 12(%esi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 16(%esi), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 20(%esi), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 24(%esi), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 28(%esi), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 32(%esi), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 36(%esi), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ adcl 40(%esi), %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 112(%esp), %ebx
+ movl 52(%esp), %esi # 4-byte Reload
+ subl (%ebx), %esi
+ movl 60(%esp), %ecx # 4-byte Reload
+ sbbl 4(%ebx), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl %edx, %ecx
+ sbbl 8(%ebx), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ sbbl 12(%ebx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ sbbl 16(%ebx), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ sbbl 20(%ebx), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ movl %eax, %ecx
+ sbbl 24(%ebx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ sbbl 28(%ebx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ sbbl 32(%ebx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ movl %edi, %ecx
+ movl %edi, %ebp
+ sbbl 36(%ebx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ movl %ecx, %edi
+ sbbl 40(%ebx), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl %edi, %ebx
+ movl 52(%esp), %edi # 4-byte Reload
+ sarl $31, %ebx
+ testl %ebx, %ebx
+ js .LBB169_2
+# BB#1:
+ movl %esi, %edi
+.LBB169_2:
+ movl 100(%esp), %esi
+ movl %edi, (%esi)
+ movl 60(%esp), %edi # 4-byte Reload
+ js .LBB169_4
+# BB#3:
+ movl (%esp), %edi # 4-byte Reload
+.LBB169_4:
+ movl %edi, 4(%esi)
+ movl %eax, %edi
+ js .LBB169_6
+# BB#5:
+ movl 4(%esp), %edx # 4-byte Reload
+.LBB169_6:
+ movl %edx, 8(%esi)
+ movl %ebp, %ecx
+ movl 72(%esp), %edx # 4-byte Reload
+ movl 40(%esp), %eax # 4-byte Reload
+ js .LBB169_8
+# BB#7:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB169_8:
+ movl %eax, 12(%esi)
+ movl 76(%esp), %eax # 4-byte Reload
+ movl 44(%esp), %ebp # 4-byte Reload
+ js .LBB169_10
+# BB#9:
+ movl 12(%esp), %ebx # 4-byte Reload
+ movl %ebx, 48(%esp) # 4-byte Spill
+.LBB169_10:
+ movl 48(%esp), %ebx # 4-byte Reload
+ movl %ebx, 16(%esi)
+ js .LBB169_12
+# BB#11:
+ movl 16(%esp), %ebp # 4-byte Reload
+.LBB169_12:
+ movl %ebp, 20(%esi)
+ js .LBB169_14
+# BB#13:
+ movl 20(%esp), %edi # 4-byte Reload
+.LBB169_14:
+ movl %edi, 24(%esi)
+ js .LBB169_16
+# BB#15:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB169_16:
+ movl %eax, 28(%esi)
+ js .LBB169_18
+# BB#17:
+ movl 28(%esp), %edx # 4-byte Reload
+.LBB169_18:
+ movl %edx, 32(%esi)
+ js .LBB169_20
+# BB#19:
+ movl 32(%esp), %ecx # 4-byte Reload
+.LBB169_20:
+ movl %ecx, 36(%esi)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB169_22
+# BB#21:
+ movl 36(%esp), %eax # 4-byte Reload
+.LBB169_22:
+ movl %eax, 40(%esi)
+ addl $80, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end169:
+ .size mcl_fp_addNF11L, .Lfunc_end169-mcl_fp_addNF11L
+
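+# mcl_fp_sub11L: 11-limb subtraction; the raw difference is stored and, if the
+# subtraction borrows, the modulus (fourth argument) is added back in.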
+ .globl mcl_fp_sub11L
+ .align 16, 0x90
+ .type mcl_fp_sub11L,@function
+mcl_fp_sub11L: # @mcl_fp_sub11L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $40, %esp
+ movl 64(%esp), %ebp
+ movl (%ebp), %ecx
+ movl 4(%ebp), %eax
+ movl 68(%esp), %edi
+ subl (%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ sbbl 4(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 8(%ebp), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 12(%ebp), %ebx
+ sbbl 12(%edi), %ebx
+ movl 16(%ebp), %eax
+ sbbl 16(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 20(%ebp), %eax
+ sbbl 20(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 24(%ebp), %eax
+ sbbl 24(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 28(%ebp), %edx
+ sbbl 28(%edi), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl 32(%ebp), %ecx
+ sbbl 32(%edi), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 36(%ebp), %eax
+ sbbl 36(%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 40(%ebp), %eax
+ sbbl 40(%edi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebx, %ebp
+ movl 16(%esp), %esi # 4-byte Reload
+ movl $0, %ebx
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 60(%esp), %ebx
+ movl %esi, (%ebx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl %ebp, 12(%ebx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%ebx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%ebx)
+ movl %edx, 28(%ebx)
+ movl %ecx, 32(%ebx)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%ebx)
+ movl %ecx, %edi
+ movl %eax, 40(%ebx)
+ je .LBB170_2
+# BB#1: # %carry
+ movl 72(%esp), %eax
+ addl (%eax), %esi
+ movl %esi, (%ebx)
+ movl 28(%esp), %edx # 4-byte Reload
+ movl %eax, %esi
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 8(%esi), %ecx
+ movl 12(%esi), %eax
+ adcl %ebp, %eax
+ movl %ecx, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl 24(%esi), %ecx
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl 28(%esi), %eax
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%ebx)
+ movl 32(%esi), %ecx
+ adcl (%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+ movl %ecx, 32(%ebx)
+ movl 36(%esi), %eax
+ adcl %edi, %eax
+ movl %eax, 36(%ebx)
+ movl 40(%esi), %eax
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%ebx)
+.LBB170_2: # %nocarry
+ addl $40, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end170:
+ .size mcl_fp_sub11L, .Lfunc_end170-mcl_fp_sub11L
+
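+# mcl_fp_subNF11L: branchless 11-limb subtraction; the modulus is masked with the
+# sign of the borrow (sarl $31) and the masked value is added to the difference.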
+ .globl mcl_fp_subNF11L
+ .align 16, 0x90
+ .type mcl_fp_subNF11L,@function
+mcl_fp_subNF11L: # @mcl_fp_subNF11L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $64, %esp
+ movl 88(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %ecx
+ movl 92(%esp), %edi
+ subl (%edi), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ sbbl 4(%edi), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 40(%eax), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 36(%eax), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 32(%eax), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 28(%eax), %ebx
+ movl 24(%eax), %ebp
+ movl 20(%eax), %esi
+ movl 16(%eax), %edx
+ movl 12(%eax), %ecx
+ movl 8(%eax), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ sbbl 12(%edi), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ sbbl 16(%edi), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ sbbl 20(%edi), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ sbbl 24(%edi), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ sbbl 28(%edi), %ebx
+ movl %ebx, 40(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ sbbl 32(%edi), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ sbbl 36(%edi), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ sbbl 40(%edi), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %edx, %esi
+ sarl $31, %esi
+ movl %esi, %eax
+ shldl $1, %edx, %eax
+ movl 96(%esp), %edx
+ movl 4(%edx), %ecx
+ andl %eax, %ecx
+ movl %ecx, %ebx
+ andl (%edx), %eax
+ movl 40(%edx), %ecx
+ andl %esi, %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 36(%edx), %ecx
+ andl %esi, %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 32(%edx), %ecx
+ andl %esi, %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 28(%edx), %ecx
+ andl %esi, %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 24(%edx), %ecx
+ andl %esi, %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 20(%edx), %ebp
+ andl %esi, %ebp
+ roll %esi
+ movl 16(%edx), %edi
+ andl %esi, %edi
+ movl 12(%edx), %ecx
+ andl %esi, %ecx
+ andl 8(%edx), %esi
+ addl 44(%esp), %eax # 4-byte Folded Reload
+ movl %ebx, %edx
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl 84(%esp), %ebx
+ movl %eax, (%ebx)
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 4(%ebx)
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %esi, 8(%ebx)
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl %ecx, 12(%ebx)
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ movl %edi, 16(%ebx)
+ movl (%esp), %ecx # 4-byte Reload
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ebp, 20(%ebx)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%ebx)
+ movl 8(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%ebx)
+ movl %eax, 36(%ebx)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%ebx)
+ addl $64, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end171:
+ .size mcl_fp_subNF11L, .Lfunc_end171-mcl_fp_subNF11L
+
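+# mcl_fpDbl_add11L: addition of two 22-limb (double-width) values; the low 11 limbs
+# are written directly and the high half is conditionally reduced by the modulus.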
+ .globl mcl_fpDbl_add11L
+ .align 16, 0x90
+ .type mcl_fpDbl_add11L,@function
+mcl_fpDbl_add11L: # @mcl_fpDbl_add11L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $80, %esp
+ movl 108(%esp), %ecx
+ movl 104(%esp), %edi
+ movl 12(%edi), %esi
+ movl 16(%edi), %edx
+ movl 8(%ecx), %ebx
+ movl (%ecx), %ebp
+ addl (%edi), %ebp
+ movl 100(%esp), %eax
+ movl %ebp, (%eax)
+ movl 4(%ecx), %ebp
+ adcl 4(%edi), %ebp
+ adcl 8(%edi), %ebx
+ adcl 12(%ecx), %esi
+ adcl 16(%ecx), %edx
+ movl %ebp, 4(%eax)
+ movl 52(%ecx), %ebp
+ movl %ebx, 8(%eax)
+ movl 20(%ecx), %ebx
+ movl %esi, 12(%eax)
+ movl 20(%edi), %esi
+ adcl %ebx, %esi
+ movl 24(%ecx), %ebx
+ movl %edx, 16(%eax)
+ movl 24(%edi), %edx
+ adcl %ebx, %edx
+ movl 28(%ecx), %ebx
+ movl %esi, 20(%eax)
+ movl 28(%edi), %esi
+ adcl %ebx, %esi
+ movl 32(%ecx), %ebx
+ movl %edx, 24(%eax)
+ movl 32(%edi), %edx
+ adcl %ebx, %edx
+ movl 36(%ecx), %ebx
+ movl %esi, 28(%eax)
+ movl 36(%edi), %esi
+ adcl %ebx, %esi
+ movl 40(%ecx), %ebx
+ movl %edx, 32(%eax)
+ movl 40(%edi), %edx
+ adcl %ebx, %edx
+ movl 44(%ecx), %ebx
+ movl %esi, 36(%eax)
+ movl 44(%edi), %esi
+ adcl %ebx, %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 48(%ecx), %esi
+ movl %edx, 40(%eax)
+ movl 48(%edi), %eax
+ adcl %esi, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 52(%edi), %eax
+ adcl %ebp, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 56(%ecx), %edx
+ movl 56(%edi), %eax
+ adcl %edx, %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%ecx), %edx
+ movl 60(%edi), %eax
+ adcl %edx, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%ecx), %edx
+ movl 64(%edi), %eax
+ adcl %edx, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%ecx), %eax
+ movl 68(%edi), %edx
+ adcl %eax, %edx
+ movl 72(%ecx), %esi
+ movl 72(%edi), %eax
+ adcl %esi, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 76(%ecx), %ebx
+ movl 76(%edi), %esi
+ adcl %ebx, %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 80(%ecx), %ebp
+ movl 80(%edi), %ebx
+ adcl %ebp, %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 84(%ecx), %ecx
+ movl 84(%edi), %edi
+ adcl %ecx, %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl 112(%esp), %ebp
+ movl 68(%esp), %edi # 4-byte Reload
+ subl (%ebp), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ sbbl 4(%ebp), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ sbbl 8(%ebp), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ sbbl 12(%ebp), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ sbbl 16(%ebp), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ sbbl 20(%ebp), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl %edx, %edi
+ sbbl 24(%ebp), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ sbbl 28(%ebp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ sbbl 32(%ebp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ movl 40(%esp), %ebx # 4-byte Reload
+ sbbl 36(%ebp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ebx, %edi
+ sbbl 40(%ebp), %edi
+ sbbl $0, %ecx
+ andl $1, %ecx
+ jne .LBB172_2
+# BB#1:
+ movl %edi, %ebx
+.LBB172_2:
+ testb %cl, %cl
+ movl 68(%esp), %ecx # 4-byte Reload
+ movl 64(%esp), %esi # 4-byte Reload
+ movl 60(%esp), %edi # 4-byte Reload
+ movl 56(%esp), %ebp # 4-byte Reload
+ jne .LBB172_4
+# BB#3:
+ movl (%esp), %edx # 4-byte Reload
+ movl 4(%esp), %esi # 4-byte Reload
+ movl 8(%esp), %edi # 4-byte Reload
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 24(%esp), %ecx # 4-byte Reload
+.LBB172_4:
+ movl 100(%esp), %eax
+ movl %ecx, 44(%eax)
+ movl 72(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%eax)
+ movl 76(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%eax)
+ movl %ebp, 56(%eax)
+ movl %edi, 60(%eax)
+ movl %esi, 64(%eax)
+ movl %edx, 68(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ movl 44(%esp), %edx # 4-byte Reload
+ jne .LBB172_6
+# BB#5:
+ movl 28(%esp), %edx # 4-byte Reload
+.LBB172_6:
+ movl %edx, 72(%eax)
+ movl 48(%esp), %edx # 4-byte Reload
+ jne .LBB172_8
+# BB#7:
+ movl 32(%esp), %edx # 4-byte Reload
+.LBB172_8:
+ movl %edx, 76(%eax)
+ jne .LBB172_10
+# BB#9:
+ movl 36(%esp), %ecx # 4-byte Reload
+.LBB172_10:
+ movl %ecx, 80(%eax)
+ movl %ebx, 84(%eax)
+ addl $80, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end172:
+ .size mcl_fpDbl_add11L, .Lfunc_end172-mcl_fpDbl_add11L
+
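+# mcl_fpDbl_sub11L: subtraction of two 22-limb values; the low half is written as-is
+# and, when the overall subtraction borrows, the modulus is added back into the high half.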
+ .globl mcl_fpDbl_sub11L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub11L,@function
+mcl_fpDbl_sub11L: # @mcl_fpDbl_sub11L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $72, %esp
+ movl 96(%esp), %edx
+ movl (%edx), %eax
+ movl 4(%edx), %esi
+ movl 100(%esp), %ebp
+ subl (%ebp), %eax
+ sbbl 4(%ebp), %esi
+ movl 8(%edx), %edi
+ sbbl 8(%ebp), %edi
+ movl 92(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 12(%edx), %eax
+ sbbl 12(%ebp), %eax
+ movl %esi, 4(%ecx)
+ movl 16(%edx), %esi
+ sbbl 16(%ebp), %esi
+ movl %edi, 8(%ecx)
+ movl 20(%ebp), %edi
+ movl %eax, 12(%ecx)
+ movl 20(%edx), %eax
+ sbbl %edi, %eax
+ movl 24(%ebp), %edi
+ movl %esi, 16(%ecx)
+ movl 24(%edx), %esi
+ sbbl %edi, %esi
+ movl 28(%ebp), %edi
+ movl %eax, 20(%ecx)
+ movl 28(%edx), %eax
+ sbbl %edi, %eax
+ movl 32(%ebp), %edi
+ movl %esi, 24(%ecx)
+ movl 32(%edx), %esi
+ sbbl %edi, %esi
+ movl 36(%ebp), %edi
+ movl %eax, 28(%ecx)
+ movl 36(%edx), %eax
+ sbbl %edi, %eax
+ movl 40(%ebp), %edi
+ movl %esi, 32(%ecx)
+ movl 40(%edx), %esi
+ sbbl %edi, %esi
+ movl 44(%ebp), %edi
+ movl %eax, 36(%ecx)
+ movl 44(%edx), %eax
+ sbbl %edi, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%ebp), %eax
+ movl %esi, 40(%ecx)
+ movl 48(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 52(%ebp), %eax
+ movl 52(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 56(%ebp), %eax
+ movl 56(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 60(%ebp), %eax
+ movl 60(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 64(%ebp), %eax
+ movl 64(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 68(%ebp), %eax
+ movl 68(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 72(%ebp), %eax
+ movl 72(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 76(%ebp), %eax
+ movl 76(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 80(%ebp), %eax
+ movl 80(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 84(%ebp), %eax
+ movl 84(%edx), %edx
+ sbbl %eax, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 104(%esp), %ebp
+ jne .LBB173_1
+# BB#2:
+ movl $0, 28(%esp) # 4-byte Folded Spill
+ jmp .LBB173_3
+.LBB173_1:
+ movl 40(%ebp), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+.LBB173_3:
+ testb %al, %al
+ jne .LBB173_4
+# BB#5:
+ movl $0, 16(%esp) # 4-byte Folded Spill
+ movl $0, 8(%esp) # 4-byte Folded Spill
+ jmp .LBB173_6
+.LBB173_4:
+ movl (%ebp), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 4(%ebp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+.LBB173_6:
+ jne .LBB173_7
+# BB#8:
+ movl $0, 20(%esp) # 4-byte Folded Spill
+ jmp .LBB173_9
+.LBB173_7:
+ movl 36(%ebp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+.LBB173_9:
+ jne .LBB173_10
+# BB#11:
+ movl $0, 12(%esp) # 4-byte Folded Spill
+ jmp .LBB173_12
+.LBB173_10:
+ movl 32(%ebp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+.LBB173_12:
+ jne .LBB173_13
+# BB#14:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB173_15
+.LBB173_13:
+ movl 28(%ebp), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+.LBB173_15:
+ jne .LBB173_16
+# BB#17:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB173_18
+.LBB173_16:
+ movl 24(%ebp), %eax
+ movl %eax, (%esp) # 4-byte Spill
+.LBB173_18:
+ jne .LBB173_19
+# BB#20:
+ movl $0, %edx
+ jmp .LBB173_21
+.LBB173_19:
+ movl 20(%ebp), %edx
+.LBB173_21:
+ jne .LBB173_22
+# BB#23:
+ movl $0, %edi
+ jmp .LBB173_24
+.LBB173_22:
+ movl 16(%ebp), %edi
+.LBB173_24:
+ jne .LBB173_25
+# BB#26:
+ movl $0, %ebx
+ jmp .LBB173_27
+.LBB173_25:
+ movl 12(%ebp), %ebx
+.LBB173_27:
+ jne .LBB173_28
+# BB#29:
+ xorl %ebp, %ebp
+ jmp .LBB173_30
+.LBB173_28:
+ movl 8(%ebp), %ebp
+.LBB173_30:
+ movl 8(%esp), %esi # 4-byte Reload
+ addl 36(%esp), %esi # 4-byte Folded Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %esi, 44(%ecx)
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 48(%ecx)
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, 52(%ecx)
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 56(%ecx)
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edi, 60(%ecx)
+ movl (%esp), %esi # 4-byte Reload
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 64(%ecx)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %esi, 68(%ecx)
+ movl 12(%esp), %edx # 4-byte Reload
+ adcl 60(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 72(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 76(%ecx)
+ movl %eax, 80(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%ecx)
+ addl $72, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end173:
+ .size mcl_fpDbl_sub11L, .Lfunc_end173-mcl_fpDbl_sub11L
+
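+# .LmulPv384x32: local helper that multiplies a 12-limb (384-bit) operand by one
+# 32-bit word, producing a 13-limb result (12 product limbs plus the final carry).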
+ .align 16, 0x90
+ .type .LmulPv384x32,@function
+.LmulPv384x32: # @mulPv384x32
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $80, %esp
+ movl %edx, %ebx
+ movl 100(%esp), %ebp
+ movl %ebp, %eax
+ mull 44(%ebx)
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 40(%ebx)
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 36(%ebx)
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 32(%ebx)
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 28(%ebx)
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 24(%ebx)
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 20(%ebx)
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 16(%ebx)
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 12(%ebx)
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 8(%ebx)
+ movl %edx, %edi
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 4(%ebx)
+ movl %edx, %esi
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull (%ebx)
+ movl %eax, (%ecx)
+ addl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%ecx)
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 8(%ecx)
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%ecx)
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%ecx)
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%ecx)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%ecx)
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%ecx)
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 48(%ecx)
+ movl %ecx, %eax
+ addl $80, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end174:
+ .size .LmulPv384x32, .Lfunc_end174-.LmulPv384x32
+
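+# mcl_fp_mulUnitPre12L: multiplies a 12-limb value by a single 32-bit unit via
+# .LmulPv384x32 and copies the 13-limb product to the destination.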
+ .globl mcl_fp_mulUnitPre12L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre12L,@function
+mcl_fp_mulUnitPre12L: # @mcl_fp_mulUnitPre12L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $92, %esp
+ calll .L175$pb
+.L175$pb:
+ popl %ebx
+.Ltmp26:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp26-.L175$pb), %ebx
+ movl 120(%esp), %eax
+ movl %eax, (%esp)
+ leal 40(%esp), %ecx
+ movl 116(%esp), %edx
+ calll .LmulPv384x32
+ movl 88(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 80(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 76(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 72(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 68(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 64(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp
+ movl 56(%esp), %ebx
+ movl 52(%esp), %edi
+ movl 48(%esp), %esi
+ movl 40(%esp), %edx
+ movl 44(%esp), %ecx
+ movl 112(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %edi, 12(%eax)
+ movl %ebx, 16(%eax)
+ movl %ebp, 20(%eax)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%eax)
+ addl $92, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end175:
+ .size mcl_fp_mulUnitPre12L, .Lfunc_end175-mcl_fp_mulUnitPre12L
+
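+# mcl_fpDbl_mulPre12L: full 12x12-limb multiplication assembled from three calls to
+# mcl_fpDbl_mulPre6L (low halves, high halves, and the sums of the halves), i.e. a
+# one-level Karatsuba-style split.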
+ .globl mcl_fpDbl_mulPre12L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre12L,@function
+mcl_fpDbl_mulPre12L: # @mcl_fpDbl_mulPre12L
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $220, %esp
+ calll .L176$pb
+.L176$pb:
+ popl %ebx
+.Ltmp27:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp27-.L176$pb), %ebx
+ movl %ebx, -164(%ebp) # 4-byte Spill
+ movl 16(%ebp), %esi
+ movl %esi, 8(%esp)
+ movl 12(%ebp), %edi
+ movl %edi, 4(%esp)
+ movl 8(%ebp), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre6L@PLT
+ leal 24(%esi), %eax
+ movl %eax, 8(%esp)
+ leal 24(%edi), %eax
+ movl %eax, 4(%esp)
+ movl 8(%ebp), %eax
+ leal 48(%eax), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre6L@PLT
+ movl 40(%edi), %ebx
+ movl 36(%edi), %eax
+ movl 32(%edi), %edx
+ movl (%edi), %esi
+ movl 4(%edi), %ecx
+ addl 24(%edi), %esi
+ adcl 28(%edi), %ecx
+ movl %ecx, -172(%ebp) # 4-byte Spill
+ adcl 8(%edi), %edx
+ movl %edx, -188(%ebp) # 4-byte Spill
+ adcl 12(%edi), %eax
+ movl %eax, -168(%ebp) # 4-byte Spill
+ adcl 16(%edi), %ebx
+ movl %ebx, -180(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -112(%ebp) # 4-byte Spill
+ movl 16(%ebp), %edi
+ movl (%edi), %eax
+ addl 24(%edi), %eax
+ movl %eax, -136(%ebp) # 4-byte Spill
+ movl 4(%edi), %eax
+ adcl 28(%edi), %eax
+ movl %eax, -140(%ebp) # 4-byte Spill
+ movl 32(%edi), %eax
+ adcl 8(%edi), %eax
+ movl %eax, -144(%ebp) # 4-byte Spill
+ movl 36(%edi), %eax
+ adcl 12(%edi), %eax
+ movl %eax, -148(%ebp) # 4-byte Spill
+ movl 40(%edi), %ecx
+ adcl 16(%edi), %ecx
+ movl 44(%edi), %eax
+ adcl 20(%edi), %eax
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ movl %edi, -184(%ebp) # 4-byte Spill
+ movl %ebx, %edi
+ movl %edx, -156(%ebp) # 4-byte Spill
+ movl %esi, -160(%ebp) # 4-byte Spill
+ movl %esi, %edx
+ jb .LBB176_2
+# BB#1:
+ xorl %edi, %edi
+ movl $0, -156(%ebp) # 4-byte Folded Spill
+ movl $0, -160(%ebp) # 4-byte Folded Spill
+.LBB176_2:
+ movl %edi, -176(%ebp) # 4-byte Spill
+ movl 12(%ebp), %esi
+ movl 44(%esi), %edi
+ movl -112(%ebp), %ebx # 4-byte Reload
+ pushl %eax
+ movl %ebx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ adcl 20(%esi), %edi
+ movl %edi, -132(%ebp) # 4-byte Spill
+ movl %eax, -124(%ebp) # 4-byte Spill
+ movl %ecx, -112(%ebp) # 4-byte Spill
+ movl -148(%ebp), %esi # 4-byte Reload
+ movl %esi, -116(%ebp) # 4-byte Spill
+ movl -144(%ebp), %esi # 4-byte Reload
+ movl %esi, -120(%ebp) # 4-byte Spill
+ movl -140(%ebp), %esi # 4-byte Reload
+ movl %esi, -128(%ebp) # 4-byte Spill
+ movl -136(%ebp), %esi # 4-byte Reload
+ movl %esi, -152(%ebp) # 4-byte Spill
+ jb .LBB176_4
+# BB#3:
+ movl $0, -124(%ebp) # 4-byte Folded Spill
+ movl $0, -112(%ebp) # 4-byte Folded Spill
+ movl $0, -116(%ebp) # 4-byte Folded Spill
+ movl $0, -120(%ebp) # 4-byte Folded Spill
+ movl $0, -128(%ebp) # 4-byte Folded Spill
+ movl $0, -152(%ebp) # 4-byte Folded Spill
+.LBB176_4:
+ movl %edx, -84(%ebp)
+ movl -172(%ebp), %esi # 4-byte Reload
+ movl %esi, -80(%ebp)
+ movl -188(%ebp), %edx # 4-byte Reload
+ movl %edx, -76(%ebp)
+ movl -168(%ebp), %edi # 4-byte Reload
+ movl %edi, -72(%ebp)
+ movl -180(%ebp), %edx # 4-byte Reload
+ movl %edx, -68(%ebp)
+ movl -136(%ebp), %edx # 4-byte Reload
+ movl %edx, -108(%ebp)
+ movl -140(%ebp), %edx # 4-byte Reload
+ movl %edx, -104(%ebp)
+ movl -144(%ebp), %edx # 4-byte Reload
+ movl %edx, -100(%ebp)
+ movl -148(%ebp), %edx # 4-byte Reload
+ movl %edx, -96(%ebp)
+ movl %ecx, -92(%ebp)
+ movl %eax, -88(%ebp)
+ movl %edi, %ebx
+ sbbl %edx, %edx
+ movl -132(%ebp), %eax # 4-byte Reload
+ movl %eax, -64(%ebp)
+ movl -184(%ebp), %ecx # 4-byte Reload
+ pushl %eax
+ movl %ecx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB176_6
+# BB#5:
+ movl $0, %eax
+ movl $0, %ebx
+ movl $0, %esi
+.LBB176_6:
+ movl %eax, -132(%ebp) # 4-byte Spill
+ sbbl %eax, %eax
+ leal -108(%ebp), %ecx
+ movl %ecx, 8(%esp)
+ leal -84(%ebp), %ecx
+ movl %ecx, 4(%esp)
+ leal -60(%ebp), %ecx
+ movl %ecx, (%esp)
+ andl %eax, %edx
+ movl -152(%ebp), %edi # 4-byte Reload
+ addl -160(%ebp), %edi # 4-byte Folded Reload
+ adcl %esi, -128(%ebp) # 4-byte Folded Spill
+ movl -156(%ebp), %eax # 4-byte Reload
+ adcl %eax, -120(%ebp) # 4-byte Folded Spill
+ adcl %ebx, -116(%ebp) # 4-byte Folded Spill
+ movl -176(%ebp), %eax # 4-byte Reload
+ adcl %eax, -112(%ebp) # 4-byte Folded Spill
+ movl -132(%ebp), %eax # 4-byte Reload
+ adcl %eax, -124(%ebp) # 4-byte Folded Spill
+ sbbl %esi, %esi
+ andl $1, %esi
+ andl $1, %edx
+ movl %edx, -132(%ebp) # 4-byte Spill
+ movl -164(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre6L@PLT
+ addl -36(%ebp), %edi
+ movl -128(%ebp), %eax # 4-byte Reload
+ adcl -32(%ebp), %eax
+ movl %eax, -128(%ebp) # 4-byte Spill
+ movl -120(%ebp), %eax # 4-byte Reload
+ adcl -28(%ebp), %eax
+ movl %eax, -120(%ebp) # 4-byte Spill
+ movl -116(%ebp), %eax # 4-byte Reload
+ adcl -24(%ebp), %eax
+ movl %eax, -116(%ebp) # 4-byte Spill
+ movl -112(%ebp), %eax # 4-byte Reload
+ adcl -20(%ebp), %eax
+ movl %eax, -112(%ebp) # 4-byte Spill
+ movl -124(%ebp), %eax # 4-byte Reload
+ adcl -16(%ebp), %eax
+ movl %eax, -124(%ebp) # 4-byte Spill
+ adcl %esi, -132(%ebp) # 4-byte Folded Spill
+ movl -60(%ebp), %ecx
+ movl 8(%ebp), %eax
+ subl (%eax), %ecx
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ movl -56(%ebp), %esi
+ sbbl 4(%eax), %esi
+ movl -52(%ebp), %ecx
+ sbbl 8(%eax), %ecx
+ movl %ecx, -136(%ebp) # 4-byte Spill
+ movl -48(%ebp), %edx
+ sbbl 12(%eax), %edx
+ movl -44(%ebp), %ebx
+ sbbl 16(%eax), %ebx
+ movl -40(%ebp), %ecx
+ sbbl 20(%eax), %ecx
+ movl %ecx, -140(%ebp) # 4-byte Spill
+ movl 24(%eax), %ecx
+ movl %ecx, -148(%ebp) # 4-byte Spill
+ sbbl %ecx, %edi
+ movl 28(%eax), %ecx
+ movl %ecx, -152(%ebp) # 4-byte Spill
+ sbbl %ecx, -128(%ebp) # 4-byte Folded Spill
+ movl 32(%eax), %ecx
+ movl %ecx, -156(%ebp) # 4-byte Spill
+ sbbl %ecx, -120(%ebp) # 4-byte Folded Spill
+ movl 36(%eax), %ecx
+ movl %ecx, -160(%ebp) # 4-byte Spill
+ sbbl %ecx, -116(%ebp) # 4-byte Folded Spill
+ movl 40(%eax), %ecx
+ movl %ecx, -164(%ebp) # 4-byte Spill
+ sbbl %ecx, -112(%ebp) # 4-byte Folded Spill
+ movl 44(%eax), %ecx
+ movl %ecx, -168(%ebp) # 4-byte Spill
+ sbbl %ecx, -124(%ebp) # 4-byte Folded Spill
+ sbbl $0, -132(%ebp) # 4-byte Folded Spill
+ movl 48(%eax), %ecx
+ movl %ecx, -192(%ebp) # 4-byte Spill
+ subl %ecx, -144(%ebp) # 4-byte Folded Spill
+ movl 52(%eax), %ecx
+ movl %ecx, -196(%ebp) # 4-byte Spill
+ sbbl %ecx, %esi
+ movl 56(%eax), %ecx
+ movl %ecx, -200(%ebp) # 4-byte Spill
+ sbbl %ecx, -136(%ebp) # 4-byte Folded Spill
+ movl 60(%eax), %ecx
+ movl %ecx, -204(%ebp) # 4-byte Spill
+ sbbl %ecx, %edx
+ movl 64(%eax), %ecx
+ movl %ecx, -208(%ebp) # 4-byte Spill
+ sbbl %ecx, %ebx
+ movl 68(%eax), %ecx
+ movl %ecx, -212(%ebp) # 4-byte Spill
+ sbbl %ecx, -140(%ebp) # 4-byte Folded Spill
+ movl 72(%eax), %ecx
+ movl %ecx, -216(%ebp) # 4-byte Spill
+ sbbl %ecx, %edi
+ movl 76(%eax), %ecx
+ movl %ecx, -172(%ebp) # 4-byte Spill
+ sbbl %ecx, -128(%ebp) # 4-byte Folded Spill
+ movl 80(%eax), %ecx
+ movl %ecx, -176(%ebp) # 4-byte Spill
+ sbbl %ecx, -120(%ebp) # 4-byte Folded Spill
+ movl 84(%eax), %ecx
+ movl %ecx, -180(%ebp) # 4-byte Spill
+ sbbl %ecx, -116(%ebp) # 4-byte Folded Spill
+ movl 88(%eax), %ecx
+ movl %ecx, -184(%ebp) # 4-byte Spill
+ sbbl %ecx, -112(%ebp) # 4-byte Folded Spill
+ movl 92(%eax), %ecx
+ movl %ecx, -188(%ebp) # 4-byte Spill
+ sbbl %ecx, -124(%ebp) # 4-byte Folded Spill
+ sbbl $0, -132(%ebp) # 4-byte Folded Spill
+ movl -144(%ebp), %ecx # 4-byte Reload
+ addl -148(%ebp), %ecx # 4-byte Folded Reload
+ adcl -152(%ebp), %esi # 4-byte Folded Reload
+ movl %ecx, 24(%eax)
+ movl -136(%ebp), %ecx # 4-byte Reload
+ adcl -156(%ebp), %ecx # 4-byte Folded Reload
+ movl %esi, 28(%eax)
+ adcl -160(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 32(%eax)
+ adcl -164(%ebp), %ebx # 4-byte Folded Reload
+ movl %edx, 36(%eax)
+ movl -140(%ebp), %ecx # 4-byte Reload
+ adcl -168(%ebp), %ecx # 4-byte Folded Reload
+ movl %ebx, 40(%eax)
+ adcl -192(%ebp), %edi # 4-byte Folded Reload
+ movl %ecx, 44(%eax)
+ movl -128(%ebp), %ecx # 4-byte Reload
+ adcl -196(%ebp), %ecx # 4-byte Folded Reload
+ movl %edi, 48(%eax)
+ movl -120(%ebp), %edx # 4-byte Reload
+ adcl -200(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 52(%eax)
+ movl -116(%ebp), %ecx # 4-byte Reload
+ adcl -204(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 56(%eax)
+ movl -112(%ebp), %edx # 4-byte Reload
+ adcl -208(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 60(%eax)
+ movl -124(%ebp), %ecx # 4-byte Reload
+ adcl -212(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 64(%eax)
+ movl -132(%ebp), %edx # 4-byte Reload
+ adcl -216(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 68(%eax)
+ movl %edx, 72(%eax)
+ movl -172(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 76(%eax)
+ movl -176(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 80(%eax)
+ movl -180(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 84(%eax)
+ movl -184(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 88(%eax)
+ movl -188(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 92(%eax)
+ addl $220, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end176:
+ .size mcl_fpDbl_mulPre12L, .Lfunc_end176-mcl_fpDbl_mulPre12L
+
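+# mcl_fpDbl_sqrPre12L: 12-limb squaring with the same one-level Karatsuba-style
+# structure as mcl_fpDbl_mulPre12L, passing the same operand for both mcl_fpDbl_mulPre6L inputs.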
+ .globl mcl_fpDbl_sqrPre12L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre12L,@function
+mcl_fpDbl_sqrPre12L: # @mcl_fpDbl_sqrPre12L
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $220, %esp
+ calll .L177$pb
+.L177$pb:
+ popl %ebx
+.Ltmp28:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp28-.L177$pb), %ebx
+ movl %ebx, -152(%ebp) # 4-byte Spill
+ movl 12(%ebp), %edi
+ movl %edi, 8(%esp)
+ movl %edi, 4(%esp)
+ movl 8(%ebp), %esi
+ movl %esi, (%esp)
+ calll mcl_fpDbl_mulPre6L@PLT
+ leal 24(%edi), %eax
+ movl %eax, 8(%esp)
+ movl %eax, 4(%esp)
+ leal 48(%esi), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre6L@PLT
+ movl 44(%edi), %eax
+ movl %eax, -136(%ebp) # 4-byte Spill
+ movl 40(%edi), %edx
+ movl 36(%edi), %eax
+ movl (%edi), %ebx
+ movl 4(%edi), %esi
+ addl 24(%edi), %ebx
+ adcl 28(%edi), %esi
+ movl 32(%edi), %ecx
+ adcl 8(%edi), %ecx
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ adcl 12(%edi), %eax
+ movl %eax, -140(%ebp) # 4-byte Spill
+ adcl 16(%edi), %edx
+ movl %edx, %ecx
+ movl -136(%ebp), %eax # 4-byte Reload
+ adcl 20(%edi), %eax
+ movl %eax, -136(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %edx
+ movl %edx, -156(%ebp) # 4-byte Spill
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edx
+ popl %eax
+ movl %edx, -124(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -120(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %edx
+ sbbl %edi, %edi
+ movl %edi, -148(%ebp) # 4-byte Spill
+ movl %ebx, %edi
+ addl %edi, %edi
+ movl %edi, -112(%ebp) # 4-byte Spill
+ movl %esi, %edi
+ movl %esi, %eax
+ adcl %edi, %edi
+ movl %edi, -132(%ebp) # 4-byte Spill
+ pushl %eax
+ movl %edx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB177_2
+# BB#1:
+ movl $0, -132(%ebp) # 4-byte Folded Spill
+ movl $0, -112(%ebp) # 4-byte Folded Spill
+.LBB177_2:
+ movl -144(%ebp), %esi # 4-byte Reload
+ addl %esi, %esi
+ movl -140(%ebp), %edx # 4-byte Reload
+ adcl %edx, %edx
+ movl %edx, -116(%ebp) # 4-byte Spill
+ movl -120(%ebp), %edx # 4-byte Reload
+ pushl %eax
+ movl %edx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB177_3
+# BB#4:
+ movl $0, -116(%ebp) # 4-byte Folded Spill
+ movl $0, -120(%ebp) # 4-byte Folded Spill
+ jmp .LBB177_5
+.LBB177_3:
+ movl %eax, %edx
+ shrl $31, %edx
+ orl %esi, %edx
+ movl %edx, -120(%ebp) # 4-byte Spill
+.LBB177_5:
+ movl -136(%ebp), %edx # 4-byte Reload
+ movl %ecx, %esi
+ addl %esi, %esi
+ adcl %edx, %edx
+ movl -124(%ebp), %edi # 4-byte Reload
+ pushl %eax
+ movl %edi, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB177_6
+# BB#7:
+ xorl %edx, %edx
+ movl $0, -128(%ebp) # 4-byte Folded Spill
+ movl -140(%ebp), %edi # 4-byte Reload
+ jmp .LBB177_8
+.LBB177_6:
+ movl %ecx, -124(%ebp) # 4-byte Spill
+ movl -140(%ebp), %edi # 4-byte Reload
+ movl %edi, %ecx
+ shrl $31, %ecx
+ orl %esi, %ecx
+ movl %ecx, -128(%ebp) # 4-byte Spill
+ movl -124(%ebp), %ecx # 4-byte Reload
+.LBB177_8:
+ movl %edx, -124(%ebp) # 4-byte Spill
+ movl %ebx, -84(%ebp)
+ movl %eax, -80(%ebp)
+ movl -144(%ebp), %esi # 4-byte Reload
+ movl %esi, -76(%ebp)
+ movl %edi, -72(%ebp)
+ movl %ecx, -68(%ebp)
+ movl -136(%ebp), %edx # 4-byte Reload
+ movl %edx, -64(%ebp)
+ movl %ebx, -108(%ebp)
+ movl %eax, -104(%ebp)
+ movl %esi, -100(%ebp)
+ movl %edi, -96(%ebp)
+ movl %ecx, -92(%ebp)
+ movl %edx, -88(%ebp)
+ movl -156(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB177_9
+# BB#10:
+ movl $0, -136(%ebp) # 4-byte Folded Spill
+ jmp .LBB177_11
+.LBB177_9:
+ shrl $31, %edx
+ movl %edx, -136(%ebp) # 4-byte Spill
+.LBB177_11:
+ leal -108(%ebp), %eax
+ movl %eax, 8(%esp)
+ leal -84(%ebp), %eax
+ movl %eax, 4(%esp)
+ leal -60(%ebp), %eax
+ movl %eax, (%esp)
+ movl -148(%ebp), %esi # 4-byte Reload
+ andl $1, %esi
+ movl -152(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre6L@PLT
+ movl -112(%ebp), %eax # 4-byte Reload
+ addl -36(%ebp), %eax
+ movl %eax, -112(%ebp) # 4-byte Spill
+ movl -132(%ebp), %edi # 4-byte Reload
+ adcl -32(%ebp), %edi
+ movl -120(%ebp), %eax # 4-byte Reload
+ adcl -28(%ebp), %eax
+ movl %eax, -120(%ebp) # 4-byte Spill
+ movl -116(%ebp), %eax # 4-byte Reload
+ adcl -24(%ebp), %eax
+ movl %eax, -116(%ebp) # 4-byte Spill
+ movl -128(%ebp), %eax # 4-byte Reload
+ adcl -20(%ebp), %eax
+ movl %eax, -128(%ebp) # 4-byte Spill
+ movl -124(%ebp), %eax # 4-byte Reload
+ adcl -16(%ebp), %eax
+ movl %eax, -124(%ebp) # 4-byte Spill
+ adcl -136(%ebp), %esi # 4-byte Folded Reload
+ movl -60(%ebp), %edx
+ movl 8(%ebp), %eax
+ subl (%eax), %edx
+ movl -56(%ebp), %ebx
+ sbbl 4(%eax), %ebx
+ movl -52(%ebp), %ecx
+ sbbl 8(%eax), %ecx
+ movl %ecx, -136(%ebp) # 4-byte Spill
+ movl -48(%ebp), %ecx
+ sbbl 12(%eax), %ecx
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ movl -44(%ebp), %ecx
+ sbbl 16(%eax), %ecx
+ movl %ecx, -172(%ebp) # 4-byte Spill
+ movl -40(%ebp), %ecx
+ sbbl 20(%eax), %ecx
+ movl %ecx, -140(%ebp) # 4-byte Spill
+ movl 24(%eax), %ecx
+ movl %ecx, -148(%ebp) # 4-byte Spill
+ sbbl %ecx, -112(%ebp) # 4-byte Folded Spill
+ movl 28(%eax), %ecx
+ movl %ecx, -152(%ebp) # 4-byte Spill
+ sbbl %ecx, %edi
+ movl %edi, -132(%ebp) # 4-byte Spill
+ movl 32(%eax), %ecx
+ movl %ecx, -156(%ebp) # 4-byte Spill
+ sbbl %ecx, -120(%ebp) # 4-byte Folded Spill
+ movl 36(%eax), %ecx
+ movl %ecx, -160(%ebp) # 4-byte Spill
+ sbbl %ecx, -116(%ebp) # 4-byte Folded Spill
+ movl 40(%eax), %ecx
+ movl %ecx, -164(%ebp) # 4-byte Spill
+ sbbl %ecx, -128(%ebp) # 4-byte Folded Spill
+ movl 44(%eax), %ecx
+ movl %ecx, -168(%ebp) # 4-byte Spill
+ sbbl %ecx, -124(%ebp) # 4-byte Folded Spill
+ sbbl $0, %esi
+ movl 48(%eax), %ecx
+ movl %ecx, -192(%ebp) # 4-byte Spill
+ subl %ecx, %edx
+ movl 52(%eax), %ecx
+ movl %ecx, -196(%ebp) # 4-byte Spill
+ sbbl %ecx, %ebx
+ movl 56(%eax), %ecx
+ movl %ecx, -200(%ebp) # 4-byte Spill
+ sbbl %ecx, -136(%ebp) # 4-byte Folded Spill
+ movl 60(%eax), %ecx
+ movl %ecx, -204(%ebp) # 4-byte Spill
+ sbbl %ecx, -144(%ebp) # 4-byte Folded Spill
+ movl 64(%eax), %ecx
+ movl %ecx, -208(%ebp) # 4-byte Spill
+ movl -172(%ebp), %edi # 4-byte Reload
+ sbbl %ecx, %edi
+ movl 68(%eax), %ecx
+ movl %ecx, -212(%ebp) # 4-byte Spill
+ sbbl %ecx, -140(%ebp) # 4-byte Folded Spill
+ movl 72(%eax), %ecx
+ movl %ecx, -216(%ebp) # 4-byte Spill
+ sbbl %ecx, -112(%ebp) # 4-byte Folded Spill
+ movl 76(%eax), %ecx
+ movl %ecx, -172(%ebp) # 4-byte Spill
+ sbbl %ecx, -132(%ebp) # 4-byte Folded Spill
+ movl 80(%eax), %ecx
+ movl %ecx, -176(%ebp) # 4-byte Spill
+ sbbl %ecx, -120(%ebp) # 4-byte Folded Spill
+ movl 84(%eax), %ecx
+ movl %ecx, -180(%ebp) # 4-byte Spill
+ sbbl %ecx, -116(%ebp) # 4-byte Folded Spill
+ movl 88(%eax), %ecx
+ movl %ecx, -184(%ebp) # 4-byte Spill
+ sbbl %ecx, -128(%ebp) # 4-byte Folded Spill
+ movl 92(%eax), %ecx
+ movl %ecx, -188(%ebp) # 4-byte Spill
+ sbbl %ecx, -124(%ebp) # 4-byte Folded Spill
+ sbbl $0, %esi
+ addl -148(%ebp), %edx # 4-byte Folded Reload
+ adcl -152(%ebp), %ebx # 4-byte Folded Reload
+ movl %edx, 24(%eax)
+ movl -136(%ebp), %ecx # 4-byte Reload
+ adcl -156(%ebp), %ecx # 4-byte Folded Reload
+ movl %ebx, 28(%eax)
+ movl -144(%ebp), %edx # 4-byte Reload
+ adcl -160(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 32(%eax)
+ adcl -164(%ebp), %edi # 4-byte Folded Reload
+ movl %edx, 36(%eax)
+ movl -140(%ebp), %edx # 4-byte Reload
+ adcl -168(%ebp), %edx # 4-byte Folded Reload
+ movl %edi, 40(%eax)
+ movl -112(%ebp), %ecx # 4-byte Reload
+ adcl -192(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 44(%eax)
+ movl -132(%ebp), %edi # 4-byte Reload
+ adcl -196(%ebp), %edi # 4-byte Folded Reload
+ movl %ecx, 48(%eax)
+ movl -120(%ebp), %edx # 4-byte Reload
+ adcl -200(%ebp), %edx # 4-byte Folded Reload
+ movl %edi, 52(%eax)
+ movl -116(%ebp), %ecx # 4-byte Reload
+ adcl -204(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 56(%eax)
+ movl -128(%ebp), %edx # 4-byte Reload
+ adcl -208(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 60(%eax)
+ movl -124(%ebp), %ecx # 4-byte Reload
+ adcl -212(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 64(%eax)
+ adcl -216(%ebp), %esi # 4-byte Folded Reload
+ movl %ecx, 68(%eax)
+ movl %esi, 72(%eax)
+ movl -172(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 76(%eax)
+ movl -176(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 80(%eax)
+ movl -180(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 84(%eax)
+ movl -184(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 88(%eax)
+ movl -188(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 92(%eax)
+ addl $220, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end177:
+ .size mcl_fpDbl_sqrPre12L, .Lfunc_end177-mcl_fpDbl_sqrPre12L
+
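+# mcl_fp_mont12L: 12-limb Montgomery multiplication; each round multiplies by one word
+# of the second operand via .LmulPv384x32, then by the constant stored just below the
+# modulus (presumably -p^-1 mod 2^32) to cancel the low word before accumulating.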
+ .globl mcl_fp_mont12L
+ .align 16, 0x90
+ .type mcl_fp_mont12L,@function
+mcl_fp_mont12L: # @mcl_fp_mont12L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1436, %esp # imm = 0x59C
+ calll .L178$pb
+.L178$pb:
+ popl %ebx
+.Ltmp29:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp29-.L178$pb), %ebx
+ movl 1468(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1384(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 1384(%esp), %ebp
+ movl 1388(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 1432(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 1428(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 1424(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 1420(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 1416(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1412(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1408(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1404(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1400(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1396(%esp), %edi
+ movl 1392(%esp), %esi
+ movl %eax, (%esp)
+ leal 1328(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ addl 1328(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1332(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1336(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ adcl 1340(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1344(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1348(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1352(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1356(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1360(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1364(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1368(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 1372(%esp), %esi
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 1376(%esp), %ebp
+ sbbl %edi, %edi
+ movl 1464(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1272(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ andl $1, %edi
+ movl 84(%esp), %ecx # 4-byte Reload
+ addl 1272(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1276(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1280(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1284(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1288(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1292(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1296(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1300(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1304(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1308(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 1312(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl 1316(%esp), %ebp
+ adcl 1320(%esp), %edi
+ sbbl %eax, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1216(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ movl 84(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1216(%esp), %esi
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1220(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 1224(%esp), %esi
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1228(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1232(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1236(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1240(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1244(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1248(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1252(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1256(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 1260(%esp), %ebp
+ adcl 1264(%esp), %edi
+ adcl $0, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1160(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 1160(%esp), %ecx
+ adcl 1164(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1168(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1180(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1184(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1188(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1192(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1196(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 1200(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ adcl 1204(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1208(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1104(%esp), %ecx
+ movl 1468(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv384x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 1104(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1108(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1112(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1116(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1120(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1124(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1128(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 1140(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1144(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1148(%esp), %edi
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 1152(%esp), %ebp
+ adcl $0, %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1048(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 68(%esp), %ecx # 4-byte Reload
+ addl 1048(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1068(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1072(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1076(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 1080(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1084(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1088(%esp), %edi
+ adcl 1092(%esp), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1096(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 992(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ andl $1, %esi
+ movl %esi, %eax
+ addl 992(%esp), %ebp
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 996(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1000(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 1004(%esp), %ebp
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1008(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1012(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1016(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 1020(%esp), %esi
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1024(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1028(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 1032(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1036(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1040(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl %eax, %edi
+ adcl $0, %edi
+ movl 1464(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 936(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 936(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 940(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 944(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 948(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 952(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 960(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 980(%esp), %esi
+ adcl 984(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 880(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ andl $1, %edi
+ movl %edi, %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 880(%esp), %eax
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 892(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 912(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 924(%esp), %esi
+ movl %esi, %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 928(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 824(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 824(%esp), %ecx
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 840(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 852(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 856(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 864(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 768(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 768(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 780(%esp), %ebp
+ adcl 784(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 800(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 808(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 712(%esp), %ecx
+ movl 1460(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv384x32
+ movl 44(%esp), %eax # 4-byte Reload
+ addl 712(%esp), %eax
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 716(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 720(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 724(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 728(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 732(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 736(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 740(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 744(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl 748(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 752(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 756(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 760(%esp), %edi
+ sbbl %ebp, %ebp
+ movl %eax, %esi
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 656(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ andl $1, %ebp
+ movl %ebp, %eax
+ addl 656(%esp), %esi
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 660(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 664(%esp), %esi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 668(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 672(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 676(%esp), %ebp
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 680(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 684(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 688(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 692(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 696(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 700(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 704(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl %eax, %edi
+ adcl $0, %edi
+ movl 1464(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 600(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 52(%esp), %ecx # 4-byte Reload
+ addl 600(%esp), %ecx
+ adcl 604(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 616(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 620(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 636(%esp), %esi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 648(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 544(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ movl 44(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 544(%esp), %edi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 548(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 552(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 556(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 560(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 564(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 568(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 572(%esp), %edi
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 576(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 580(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 584(%esp), %esi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 588(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 592(%esp), %ebp
+ adcl $0, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 488(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 488(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 512(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 524(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 532(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 536(%esp), %ebp
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 432(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 432(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ adcl 440(%esp), %edi
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 444(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 480(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 376(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 376(%esp), %ecx
+ adcl 380(%esp), %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ adcl 384(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 392(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 416(%esp), %ebp
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 320(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ movl 76(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 320(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 332(%esp), %esi
+ adcl 336(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 344(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 360(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 88(%esp), %ecx # 4-byte Reload
+ addl 264(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 272(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 284(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 288(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 296(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 208(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ movl 88(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 208(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 224(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 232(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 236(%esp), %edi
+ adcl 240(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 248(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 152(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 152(%esp), %ecx
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 164(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 176(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 188(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 196(%esp), %ebp
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 96(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ andl $1, %esi
+ addl 96(%esp), %edi
+ movl 84(%esp), %ebx # 4-byte Reload
+ movl 92(%esp), %eax # 4-byte Reload
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 100(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %edx, %edi
+ adcl 108(%esp), %ebx
+ adcl 112(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 116(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 120(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 124(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 128(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 132(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 136(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 140(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 144(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ adcl $0, %esi
+ movl 1468(%esp), %edx
+ subl (%edx), %eax
+ sbbl 4(%edx), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl %ebx, %edi
+ sbbl 8(%edx), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ sbbl 12(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ sbbl 16(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ sbbl 20(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ sbbl 24(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ sbbl 28(%edx), %ecx
+ movl 44(%esp), %edi # 4-byte Reload
+ sbbl 32(%edx), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ sbbl 36(%edx), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ sbbl 40(%edx), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ sbbl 44(%edx), %ebp
+ movl %ebp, %edx
+ sbbl $0, %esi
+ andl $1, %esi
+ jne .LBB178_2
+# BB#1:
+ movl %ecx, 52(%esp) # 4-byte Spill
+.LBB178_2:
+ movl %esi, %ecx
+ testb %cl, %cl
+ movl 92(%esp), %ecx # 4-byte Reload
+ jne .LBB178_4
+# BB#3:
+ movl %eax, %ecx
+.LBB178_4:
+ movl 1456(%esp), %eax
+ movl %ecx, (%eax)
+ movl 68(%esp), %edi # 4-byte Reload
+ jne .LBB178_6
+# BB#5:
+ movl 16(%esp), %edi # 4-byte Reload
+.LBB178_6:
+ movl %edi, 4(%eax)
+ movl 64(%esp), %ebp # 4-byte Reload
+ jne .LBB178_8
+# BB#7:
+ movl 20(%esp), %ebx # 4-byte Reload
+.LBB178_8:
+ movl %ebx, 8(%eax)
+ jne .LBB178_10
+# BB#9:
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 72(%esp) # 4-byte Spill
+.LBB178_10:
+ movl 72(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%eax)
+ jne .LBB178_12
+# BB#11:
+ movl 28(%esp), %ebp # 4-byte Reload
+.LBB178_12:
+ movl %ebp, 16(%eax)
+ movl 56(%esp), %ecx # 4-byte Reload
+ jne .LBB178_14
+# BB#13:
+ movl 32(%esp), %ecx # 4-byte Reload
+.LBB178_14:
+ movl %ecx, 20(%eax)
+ movl 48(%esp), %ecx # 4-byte Reload
+ jne .LBB178_16
+# BB#15:
+ movl 36(%esp), %ecx # 4-byte Reload
+.LBB178_16:
+ movl %ecx, 24(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ jne .LBB178_18
+# BB#17:
+ movl 40(%esp), %ecx # 4-byte Reload
+.LBB178_18:
+ movl %ecx, 32(%eax)
+ movl 60(%esp), %ecx # 4-byte Reload
+ jne .LBB178_20
+# BB#19:
+ movl 80(%esp), %ecx # 4-byte Reload
+.LBB178_20:
+ movl %ecx, 36(%eax)
+ movl 76(%esp), %ecx # 4-byte Reload
+ jne .LBB178_22
+# BB#21:
+ movl 84(%esp), %ecx # 4-byte Reload
+.LBB178_22:
+ movl %ecx, 40(%eax)
+ movl 88(%esp), %ecx # 4-byte Reload
+ jne .LBB178_24
+# BB#23:
+ movl %edx, %ecx
+.LBB178_24:
+ movl %ecx, 44(%eax)
+ addl $1436, %esp # imm = 0x59C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end178:
+ .size mcl_fp_mont12L, .Lfunc_end178-mcl_fp_mont12L
+
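+ # mcl_fp_montNF12L below appears to be the 12-limb (384-bit) Montgomery
+ # multiplication variant that avoids the borrow-masked final correction used
+ # by mcl_fp_mont12L above: after the last .LmulPv384x32 round it subtracts
+ # the modulus once and selects between the two candidates with a signed test
+ # (sarl $31 / js) on the top borrow word, whereas mcl_fp_mont12L tracks the
+ # carry with sbbl / andl $1 and selects with jne.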
+ .globl mcl_fp_montNF12L
+ .align 16, 0x90
+ .type mcl_fp_montNF12L,@function
+mcl_fp_montNF12L: # @mcl_fp_montNF12L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1420, %esp # imm = 0x58C
+ calll .L179$pb
+.L179$pb:
+ popl %ebx
+.Ltmp30:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp30-.L179$pb), %ebx
+ movl 1452(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 1448(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1368(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 1368(%esp), %ebp
+ movl 1372(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 1416(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 1412(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1408(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 1404(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 1400(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1396(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1392(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 1388(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 1384(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 1380(%esp), %edi
+ movl 1376(%esp), %esi
+ movl %eax, (%esp)
+ leal 1312(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 1312(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1316(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1320(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ adcl 1324(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1328(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1332(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1336(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1340(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 1344(%esp), %edi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1348(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1352(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 1356(%esp), %esi
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 1360(%esp), %ebp
+ movl 1448(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1256(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 1304(%esp), %eax
+ movl 56(%esp), %edx # 4-byte Reload
+ addl 1256(%esp), %edx
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1260(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1264(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1268(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 1272(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 1276(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1280(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 1284(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1288(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1292(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 1296(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ adcl 1300(%esp), %ebp
+ adcl $0, %eax
+ movl %eax, %edi
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1200(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 1200(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1204(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 1208(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1212(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1216(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1220(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1224(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1228(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1232(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1236(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1240(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1244(%esp), %ebp
+ adcl 1248(%esp), %edi
+ movl 1448(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1144(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 1192(%esp), %eax
+ movl 68(%esp), %edx # 4-byte Reload
+ addl 1144(%esp), %edx
+ adcl 1148(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1152(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 1156(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 1160(%esp), %esi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1164(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1168(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1172(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1176(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1180(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 1184(%esp), %ebp
+ adcl 1188(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1088(%esp), %ecx
+ movl 1452(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv384x32
+ addl 1088(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1092(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1096(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1100(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %esi, %edi
+ adcl 1104(%esp), %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1108(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1112(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1116(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1120(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 1124(%esp), %esi
+ adcl 1128(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 1136(%esp), %ebp
+ movl 1448(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1032(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 1080(%esp), %eax
+ movl 52(%esp), %edx # 4-byte Reload
+ addl 1032(%esp), %edx
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1036(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 1040(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 1044(%esp), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1048(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1052(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1056(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1060(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 1064(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1068(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1072(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 1076(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %edi
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 976(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 976(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 980(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 984(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1000(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 1004(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 1012(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1024(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 1448(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 920(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 968(%esp), %eax
+ movl 40(%esp), %edx # 4-byte Reload
+ addl 920(%esp), %edx
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 924(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 928(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 932(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 936(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 940(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 944(%esp), %esi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 948(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 952(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 956(%esp), %edi
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 960(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 964(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ movl %ebp, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 864(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 864(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 876(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 884(%esp), %ebp
+ adcl 888(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 900(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 912(%esp), %edi
+ movl 1448(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 808(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 856(%esp), %edx
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 808(%esp), %ecx
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 824(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ adcl 828(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 832(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 852(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 752(%esp), %ecx
+ movl 1452(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv384x32
+ addl 752(%esp), %esi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 760(%esp), %edi
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 764(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 776(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 780(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 792(%esp), %ebp
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1448(%esp), %ecx
+ movl %ecx, %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 696(%esp), %ecx
+ movl 1444(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv384x32
+ movl 744(%esp), %ecx
+ movl 32(%esp), %eax # 4-byte Reload
+ addl 696(%esp), %eax
+ adcl 700(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 704(%esp), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 708(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 712(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 716(%esp), %esi
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 720(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 724(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 728(%esp), %edi
+ adcl 732(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 736(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 740(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 640(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 640(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 648(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 660(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 668(%esp), %esi
+ adcl 672(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 676(%esp), %edi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1448(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 584(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 632(%esp), %edx
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 584(%esp), %ecx
+ adcl 588(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 596(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 608(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 616(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 620(%esp), %edi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 528(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 528(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 540(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 564(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 568(%esp), %edi
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 572(%esp), %esi
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 576(%esp), %ebp
+ movl 1448(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 472(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 520(%esp), %edx
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 472(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 508(%esp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 512(%esp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 516(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 416(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 416(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 432(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 440(%esp), %ebp
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 444(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1448(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 360(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 408(%esp), %edx
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 360(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 372(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 380(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ adcl 384(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 304(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 312(%esp), %edi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 320(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 328(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %esi # 4-byte Reload
+ adcl 332(%esp), %esi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1448(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 248(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 296(%esp), %edx
+ movl 64(%esp), %ecx # 4-byte Reload
+ addl 248(%esp), %ecx
+ adcl 252(%esp), %edi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 260(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %esi, %ebp
+ adcl 272(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 192(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 192(%esp), %esi
+ adcl 196(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 200(%esp), %edi
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 204(%esp), %esi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 216(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %ebp # 4-byte Reload
+ adcl 224(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 1448(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 136(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 184(%esp), %edx
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 136(%esp), %ecx
+ adcl 140(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ adcl 144(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 152(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 160(%esp), %edi
+ adcl 164(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 168(%esp), %ebp
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 80(%esp), %ecx
+ movl 1452(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv384x32
+ addl 80(%esp), %esi
+ movl 56(%esp), %esi # 4-byte Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 88(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 92(%esp), %esi
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 100(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 104(%esp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %edx # 4-byte Reload
+ adcl 108(%esp), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl 112(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 116(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 120(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 124(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 128(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 1452(%esp), %ebp
+ subl (%ebp), %edx
+ movl %ecx, %eax
+ sbbl 4(%ebp), %eax
+ movl %esi, %ebx
+ sbbl 8(%ebp), %ebx
+ movl 52(%esp), %ecx # 4-byte Reload
+ sbbl 12(%ebp), %ecx
+ movl 40(%esp), %edi # 4-byte Reload
+ sbbl 16(%ebp), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ sbbl 20(%ebp), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 32(%esp), %edi # 4-byte Reload
+ sbbl 24(%ebp), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ sbbl 28(%ebp), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ sbbl 32(%ebp), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ sbbl 36(%ebp), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ sbbl 40(%ebp), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ sbbl 44(%ebp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl %edi, %ebp
+ sarl $31, %ebp
+ testl %ebp, %ebp
+ movl 76(%esp), %ebp # 4-byte Reload
+ js .LBB179_2
+# BB#1:
+ movl %edx, %ebp
+.LBB179_2:
+ movl 1440(%esp), %edx
+ movl %ebp, (%edx)
+ movl 68(%esp), %edi # 4-byte Reload
+ js .LBB179_4
+# BB#3:
+ movl %eax, %edi
+.LBB179_4:
+ movl %edi, 4(%edx)
+ js .LBB179_6
+# BB#5:
+ movl %ebx, %esi
+.LBB179_6:
+ movl %esi, 8(%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB179_8
+# BB#7:
+ movl %ecx, %eax
+.LBB179_8:
+ movl %eax, 12(%edx)
+ movl 40(%esp), %eax # 4-byte Reload
+ js .LBB179_10
+# BB#9:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB179_10:
+ movl %eax, 16(%edx)
+ movl 36(%esp), %eax # 4-byte Reload
+ js .LBB179_12
+# BB#11:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB179_12:
+ movl %eax, 20(%edx)
+ movl 32(%esp), %eax # 4-byte Reload
+ js .LBB179_14
+# BB#13:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB179_14:
+ movl %eax, 24(%edx)
+ movl 48(%esp), %eax # 4-byte Reload
+ js .LBB179_16
+# BB#15:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB179_16:
+ movl %eax, 28(%edx)
+ movl 44(%esp), %eax # 4-byte Reload
+ js .LBB179_18
+# BB#17:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB179_18:
+ movl %eax, 32(%edx)
+ movl 72(%esp), %eax # 4-byte Reload
+ js .LBB179_20
+# BB#19:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB179_20:
+ movl %eax, 36(%edx)
+ movl 64(%esp), %eax # 4-byte Reload
+ js .LBB179_22
+# BB#21:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB179_22:
+ movl %eax, 40(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB179_24
+# BB#23:
+ movl 56(%esp), %eax # 4-byte Reload
+.LBB179_24:
+ movl %eax, 44(%edx)
+ addl $1420, %esp # imm = 0x58C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end179:
+ .size mcl_fp_montNF12L, .Lfunc_end179-mcl_fp_montNF12L
+
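+ # mcl_fp_montRed12L below appears to implement Montgomery reduction for a
+ # 12-limb (384-bit) modulus: it loads the inverse word stored at -4(modulus)
+ # (presumably the precomputed -p^{-1} mod 2^32), multiplies it into the low
+ # word of the running value, calls .LmulPv384x32 against the modulus, and
+ # folds the partial product back in with carried adds, one round per limb.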
+ .globl mcl_fp_montRed12L
+ .align 16, 0x90
+ .type mcl_fp_montRed12L,@function
+mcl_fp_montRed12L: # @mcl_fp_montRed12L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $828, %esp # imm = 0x33C
+ calll .L180$pb
+.L180$pb:
+ popl %eax
+.Ltmp31:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp31-.L180$pb), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 856(%esp), %edx
+ movl -4(%edx), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 852(%esp), %ecx
+ movl (%ecx), %ebx
+ movl %ebx, 88(%esp) # 4-byte Spill
+ movl 4(%ecx), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ imull %esi, %ebx
+ movl 92(%ecx), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 88(%ecx), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 84(%ecx), %esi
+ movl %esi, 120(%esp) # 4-byte Spill
+ movl 80(%ecx), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 76(%ecx), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 72(%ecx), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 68(%ecx), %edi
+ movl %edi, 140(%esp) # 4-byte Spill
+ movl 64(%ecx), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 60(%ecx), %esi
+ movl %esi, 148(%esp) # 4-byte Spill
+ movl 56(%ecx), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 52(%ecx), %esi
+ movl %esi, 156(%esp) # 4-byte Spill
+ movl 48(%ecx), %edi
+ movl %edi, 152(%esp) # 4-byte Spill
+ movl 44(%ecx), %edi
+ movl %edi, 132(%esp) # 4-byte Spill
+ movl 40(%ecx), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 32(%ecx), %edi
+ movl 28(%ecx), %esi
+ movl 24(%ecx), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 20(%ecx), %ebp
+ movl 16(%ecx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 12(%ecx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 8(%ecx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl (%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 44(%edx), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 40(%edx), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 36(%edx), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 32(%edx), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 28(%edx), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 24(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 16(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 12(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 8(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 4(%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl %ebx, (%esp)
+ leal 776(%esp), %ecx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ movl 88(%esp), %eax # 4-byte Reload
+ addl 776(%esp), %eax
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 780(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 796(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 804(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ adcl 808(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ movl 136(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 720(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 720(%esp), %esi
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 724(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 96(%esp), %esi # 4-byte Reload
+ adcl 752(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ adcl $0, %edi
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 664(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 664(%esp), %ebp
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 668(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 692(%esp), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl 696(%esp), %ebp
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ adcl 712(%esp), %edi
+ movl %edi, 136(%esp) # 4-byte Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ movl 144(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 608(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 608(%esp), %esi
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 612(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 636(%esp), %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 144(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl 108(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ movl %esi, %eax
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 552(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 552(%esp), %esi
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 556(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, %esi
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 496(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 496(%esp), %edi
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 500(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %ebp # 4-byte Reload
+ adcl 528(%esp), %ebp
+ movl 136(%esp), %edi # 4-byte Reload
+ adcl 532(%esp), %edi
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 440(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 440(%esp), %esi
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 444(%esp), %ecx
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ adcl 468(%esp), %ebp
+ movl %ebp, 156(%esp) # 4-byte Spill
+ adcl 472(%esp), %edi
+ movl %edi, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %esi # 4-byte Reload
+ adcl 476(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 384(%esp), %ecx
+ movl 856(%esp), %eax
+ movl %eax, %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 384(%esp), %edi
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 388(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %ebp # 4-byte Reload
+ adcl 400(%esp), %ebp
+ movl 152(%esp), %edi # 4-byte Reload
+ adcl 404(%esp), %edi
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ adcl 416(%esp), %esi
+ movl %esi, 148(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 140(%esp), %esi # 4-byte Reload
+ adcl 424(%esp), %esi
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 328(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ movl 100(%esp), %eax # 4-byte Reload
+ addl 328(%esp), %eax
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 336(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ adcl 340(%esp), %ebp
+ movl %ebp, 132(%esp) # 4-byte Spill
+ adcl 344(%esp), %edi
+ movl %edi, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %ecx # 4-byte Reload
+ adcl 348(%esp), %ecx
+ movl %ecx, 156(%esp) # 4-byte Spill
+ movl 136(%esp), %ecx # 4-byte Reload
+ adcl 352(%esp), %ecx
+ movl %ecx, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %ecx # 4-byte Reload
+ adcl 356(%esp), %ecx
+ movl %ecx, 148(%esp) # 4-byte Spill
+ movl 116(%esp), %ebp # 4-byte Reload
+ adcl 360(%esp), %ebp
+ adcl 364(%esp), %esi
+ movl %esi, 140(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 368(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 144(%esp), %ecx # 4-byte Reload
+ adcl 372(%esp), %ecx
+ movl %ecx, 144(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 376(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl %eax, %esi
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 272(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 280(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 152(%esp), %ecx # 4-byte Reload
+ adcl 284(%esp), %ecx
+ movl %ecx, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %ecx # 4-byte Reload
+ adcl 288(%esp), %ecx
+ movl %ecx, 156(%esp) # 4-byte Spill
+ movl 136(%esp), %ecx # 4-byte Reload
+ adcl 292(%esp), %ecx
+ movl %ecx, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %ecx # 4-byte Reload
+ adcl 296(%esp), %ecx
+ movl %ecx, 148(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ adcl 300(%esp), %esi
+ movl 140(%esp), %ecx # 4-byte Reload
+ adcl 304(%esp), %ecx
+ movl %ecx, 140(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 308(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 144(%esp), %ecx # 4-byte Reload
+ adcl 312(%esp), %ecx
+ movl %ecx, 144(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 316(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 320(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, %ebp
+ movl %eax, %edi
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 216(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 216(%esp), %edi
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 220(%esp), %ecx
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ adcl 240(%esp), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 124(%esp), %esi # 4-byte Reload
+ adcl 248(%esp), %esi
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 160(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 160(%esp), %edi
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 136(%esp), %edx # 4-byte Reload
+ adcl 172(%esp), %edx
+ movl %edx, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %ebx # 4-byte Reload
+ adcl 176(%esp), %ebx
+ movl %ebx, 148(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 180(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ adcl 188(%esp), %esi
+ movl %esi, 124(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ subl 24(%esp), %edi # 4-byte Folded Reload
+ movl 156(%esp), %esi # 4-byte Reload
+ sbbl 16(%esp), %esi # 4-byte Folded Reload
+ sbbl 20(%esp), %edx # 4-byte Folded Reload
+ sbbl 28(%esp), %ebx # 4-byte Folded Reload
+ sbbl 32(%esp), %ecx # 4-byte Folded Reload
+ movl 140(%esp), %eax # 4-byte Reload
+ sbbl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ sbbl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ sbbl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ sbbl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ sbbl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ sbbl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ sbbl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 132(%esp) # 4-byte Spill
+ sbbl $0, %ebp
+ andl $1, %ebp
+ jne .LBB180_2
+# BB#1:
+ movl %ebx, 148(%esp) # 4-byte Spill
+.LBB180_2:
+ movl %ebp, %ebx
+ testb %bl, %bl
+ movl 152(%esp), %ebx # 4-byte Reload
+ jne .LBB180_4
+# BB#3:
+ movl %edi, %ebx
+.LBB180_4:
+ movl 848(%esp), %edi
+ movl %ebx, (%edi)
+ movl 144(%esp), %ebx # 4-byte Reload
+ jne .LBB180_6
+# BB#5:
+ movl %esi, 156(%esp) # 4-byte Spill
+.LBB180_6:
+ movl 156(%esp), %esi # 4-byte Reload
+ movl %esi, 4(%edi)
+ movl 136(%esp), %esi # 4-byte Reload
+ jne .LBB180_8
+# BB#7:
+ movl %edx, %esi
+.LBB180_8:
+ movl %esi, 8(%edi)
+ movl 148(%esp), %edx # 4-byte Reload
+ movl %edx, 12(%edi)
+ movl 128(%esp), %esi # 4-byte Reload
+ movl 116(%esp), %edx # 4-byte Reload
+ jne .LBB180_10
+# BB#9:
+ movl %ecx, %edx
+.LBB180_10:
+ movl %edx, 16(%edi)
+ movl 120(%esp), %edx # 4-byte Reload
+ movl 140(%esp), %ecx # 4-byte Reload
+ jne .LBB180_12
+# BB#11:
+ movl 84(%esp), %ecx # 4-byte Reload
+.LBB180_12:
+ movl %ecx, 20(%edi)
+ movl 108(%esp), %ecx # 4-byte Reload
+ movl 124(%esp), %eax # 4-byte Reload
+ jne .LBB180_14
+# BB#13:
+ movl 88(%esp), %eax # 4-byte Reload
+.LBB180_14:
+ movl %eax, 24(%edi)
+ movl 104(%esp), %eax # 4-byte Reload
+ jne .LBB180_16
+# BB#15:
+ movl 92(%esp), %ebx # 4-byte Reload
+.LBB180_16:
+ movl %ebx, 28(%edi)
+ jne .LBB180_18
+# BB#17:
+ movl 96(%esp), %esi # 4-byte Reload
+.LBB180_18:
+ movl %esi, 32(%edi)
+ jne .LBB180_20
+# BB#19:
+ movl 100(%esp), %edx # 4-byte Reload
+.LBB180_20:
+ movl %edx, 36(%edi)
+ jne .LBB180_22
+# BB#21:
+ movl 112(%esp), %ecx # 4-byte Reload
+.LBB180_22:
+ movl %ecx, 40(%edi)
+ jne .LBB180_24
+# BB#23:
+ movl 132(%esp), %eax # 4-byte Reload
+.LBB180_24:
+ movl %eax, 44(%edi)
+ addl $828, %esp # imm = 0x33C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end180:
+ .size mcl_fp_montRed12L, .Lfunc_end180-mcl_fp_montRed12L
+
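+# mcl_fp_addPre12L(z, x, y): 12-limb (384-bit) addition without reduction;
+# z = x + y, with the carry-out returned in %eax.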
+ .globl mcl_fp_addPre12L
+ .align 16, 0x90
+ .type mcl_fp_addPre12L,@function
+mcl_fp_addPre12L: # @mcl_fp_addPre12L
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 20(%esp), %ecx
+ addl (%ecx), %edx
+ adcl 4(%ecx), %esi
+ movl 8(%eax), %edi
+ adcl 8(%ecx), %edi
+ movl 16(%esp), %ebx
+ movl %edx, (%ebx)
+ movl 12(%ecx), %edx
+ movl %esi, 4(%ebx)
+ movl 16(%ecx), %esi
+ adcl 12(%eax), %edx
+ adcl 16(%eax), %esi
+ movl %edi, 8(%ebx)
+ movl 20(%eax), %edi
+ movl %edx, 12(%ebx)
+ movl 20(%ecx), %edx
+ adcl %edi, %edx
+ movl 24(%eax), %edi
+ movl %esi, 16(%ebx)
+ movl 24(%ecx), %esi
+ adcl %edi, %esi
+ movl 28(%eax), %edi
+ movl %edx, 20(%ebx)
+ movl 28(%ecx), %edx
+ adcl %edi, %edx
+ movl 32(%eax), %edi
+ movl %esi, 24(%ebx)
+ movl 32(%ecx), %esi
+ adcl %edi, %esi
+ movl 36(%eax), %edi
+ movl %edx, 28(%ebx)
+ movl 36(%ecx), %edx
+ adcl %edi, %edx
+ movl 40(%eax), %edi
+ movl %esi, 32(%ebx)
+ movl 40(%ecx), %esi
+ adcl %edi, %esi
+ movl %edx, 36(%ebx)
+ movl %esi, 40(%ebx)
+ movl 44(%eax), %eax
+ movl 44(%ecx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 44(%ebx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end181:
+ .size mcl_fp_addPre12L, .Lfunc_end181-mcl_fp_addPre12L
+
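+# mcl_fp_subPre12L(z, x, y): 12-limb subtraction without reduction;
+# z = x - y, with the borrow returned in %eax.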
+ .globl mcl_fp_subPre12L
+ .align 16, 0x90
+ .type mcl_fp_subPre12L,@function
+mcl_fp_subPre12L: # @mcl_fp_subPre12L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %eax, %eax
+ movl 28(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edx), %ebx
+ movl 20(%esp), %ebp
+ movl %esi, (%ebp)
+ movl 12(%ecx), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ebp)
+ movl 16(%ecx), %edi
+ sbbl 16(%edx), %edi
+ movl %ebx, 8(%ebp)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%ebp)
+ movl 20(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %edi, 16(%ebp)
+ movl 24(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 28(%edx), %ebx
+ movl %esi, 20(%ebp)
+ movl 28(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 32(%edx), %ebx
+ movl %edi, 24(%ebp)
+ movl 32(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 36(%edx), %ebx
+ movl %esi, 28(%ebp)
+ movl 36(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 40(%edx), %ebx
+ movl %edi, 32(%ebp)
+ movl 40(%ecx), %edi
+ sbbl %ebx, %edi
+ movl %esi, 36(%ebp)
+ movl %edi, 40(%ebp)
+ movl 44(%edx), %edx
+ movl 44(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 44(%ebp)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end182:
+ .size mcl_fp_subPre12L, .Lfunc_end182-mcl_fp_subPre12L
+
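+# mcl_fp_shr1_12L(z, x): logical right shift of a 12-limb value by one bit.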
+ .globl mcl_fp_shr1_12L
+ .align 16, 0x90
+ .type mcl_fp_shr1_12L,@function
+mcl_fp_shr1_12L: # @mcl_fp_shr1_12L
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl 8(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 8(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 4(%ecx)
+ movl 12(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 8(%ecx)
+ movl 16(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 12(%ecx)
+ movl 20(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 16(%ecx)
+ movl 24(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 20(%ecx)
+ movl 28(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 24(%ecx)
+ movl 32(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 28(%ecx)
+ movl 36(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 32(%ecx)
+ movl 40(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 36(%ecx)
+ movl 44(%eax), %eax
+ shrdl $1, %eax, %edx
+ movl %edx, 40(%ecx)
+ shrl %eax
+ movl %eax, 44(%ecx)
+ popl %esi
+ retl
+.Lfunc_end183:
+ .size mcl_fp_shr1_12L, .Lfunc_end183-mcl_fp_shr1_12L
+
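+# mcl_fp_add12L(z, x, y, p): modular addition; the provisional sum is stored,
+# then p is subtracted and the reduced value written back when no borrow occurs.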
+ .globl mcl_fp_add12L
+ .align 16, 0x90
+ .type mcl_fp_add12L,@function
+mcl_fp_add12L: # @mcl_fp_add12L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $36, %esp
+ movl 64(%esp), %ebx
+ movl (%ebx), %edx
+ movl 4(%ebx), %ecx
+ movl 60(%esp), %eax
+ addl (%eax), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl 4(%eax), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 8(%ebx), %ecx
+ adcl 8(%eax), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 12(%eax), %edx
+ movl 16(%eax), %ecx
+ adcl 12(%ebx), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 16(%ebx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 20(%eax), %ecx
+ adcl 20(%ebx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 24(%eax), %ecx
+ adcl 24(%ebx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 28(%eax), %ecx
+ adcl 28(%ebx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 32(%eax), %ebp
+ adcl 32(%ebx), %ebp
+ movl %ebp, (%esp) # 4-byte Spill
+ movl 36(%eax), %edi
+ adcl 36(%ebx), %edi
+ movl 40(%eax), %esi
+ adcl 40(%ebx), %esi
+ movl 44(%eax), %edx
+ adcl 44(%ebx), %edx
+ movl 56(%esp), %ebx
+ movl 4(%esp), %eax # 4-byte Reload
+ movl %eax, (%ebx)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%ebx)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 8(%ebx)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%ebx)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 16(%ebx)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 20(%ebx)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%ebx)
+ movl 8(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%ebx)
+ movl %ebp, 32(%ebx)
+ movl %edi, 36(%ebx)
+ movl %esi, 40(%ebx)
+ movl %edx, 44(%ebx)
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl 68(%esp), %ebp
+ subl (%ebp), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ sbbl 4(%ebp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ sbbl 8(%ebp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ sbbl 12(%ebp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ sbbl 16(%ebp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ sbbl 20(%ebp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ sbbl 24(%ebp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 8(%esp), %eax # 4-byte Reload
+ sbbl 28(%ebp), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl (%esp), %eax # 4-byte Reload
+ sbbl 32(%ebp), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ sbbl 36(%ebp), %edi
+ sbbl 40(%ebp), %esi
+ sbbl 44(%ebp), %edx
+ sbbl $0, %ecx
+ testb $1, %cl
+ jne .LBB184_2
+# BB#1: # %nocarry
+ movl 4(%esp), %eax # 4-byte Reload
+ movl %eax, (%ebx)
+ movl 32(%esp), %eax # 4-byte Reload
+ movl %eax, 4(%ebx)
+ movl 28(%esp), %eax # 4-byte Reload
+ movl %eax, 8(%ebx)
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 16(%ebx)
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 20(%ebx)
+ movl 12(%esp), %eax # 4-byte Reload
+ movl %eax, 24(%ebx)
+ movl 8(%esp), %eax # 4-byte Reload
+ movl %eax, 28(%ebx)
+ movl (%esp), %eax # 4-byte Reload
+ movl %eax, 32(%ebx)
+ movl %edi, 36(%ebx)
+ movl %esi, 40(%ebx)
+ movl %edx, 44(%ebx)
+.LBB184_2: # %carry
+ addl $36, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end184:
+ .size mcl_fp_add12L, .Lfunc_end184-mcl_fp_add12L
+
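+# mcl_fp_addNF12L(z, x, y, p): modular addition variant that selects x+y or
+# x+y-p by the sign of the trial subtraction instead of the carry flag.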
+ .globl mcl_fp_addNF12L
+ .align 16, 0x90
+ .type mcl_fp_addNF12L,@function
+mcl_fp_addNF12L: # @mcl_fp_addNF12L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $88, %esp
+ movl 116(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+ movl 112(%esp), %edx
+ addl (%edx), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 4(%edx), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 44(%esi), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 40(%esi), %ebp
+ movl 36(%esi), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 32(%esi), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 28(%esi), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 20(%esi), %ebx
+ movl 16(%esi), %edi
+ movl 12(%esi), %ecx
+ movl 8(%esi), %eax
+ adcl 8(%edx), %eax
+ adcl 12(%edx), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 16(%edx), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl 20(%edx), %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 24(%edx), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 28(%edx), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 32(%edx), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 36(%edx), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl %eax, %esi
+ adcl 40(%edx), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 44(%edx), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 120(%esp), %ebp
+ movl 60(%esp), %edx # 4-byte Reload
+ subl (%ebp), %edx
+ movl 64(%esp), %eax # 4-byte Reload
+ sbbl 4(%ebp), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl %esi, %eax
+ sbbl 8(%ebp), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ sbbl 12(%ebp), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 16(%ebp), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ sbbl 20(%ebp), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ movl %eax, %ecx
+ sbbl 24(%ebp), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ sbbl 28(%ebp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ sbbl 32(%ebp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ sbbl 36(%ebp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl %ecx, %edi
+ sbbl 40(%ebp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ sbbl 44(%ebp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl %edi, %ebp
+ movl 60(%esp), %edi # 4-byte Reload
+ sarl $31, %ebp
+ testl %ebp, %ebp
+ js .LBB185_2
+# BB#1:
+ movl %edx, %edi
+.LBB185_2:
+ movl 108(%esp), %edx
+ movl %edi, (%edx)
+ movl 64(%esp), %edi # 4-byte Reload
+ js .LBB185_4
+# BB#3:
+ movl (%esp), %edi # 4-byte Reload
+.LBB185_4:
+ movl %edi, 4(%edx)
+ movl %eax, %ebp
+ js .LBB185_6
+# BB#5:
+ movl 4(%esp), %esi # 4-byte Reload
+.LBB185_6:
+ movl %esi, 8(%edx)
+ movl %ecx, %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ movl 48(%esp), %ecx # 4-byte Reload
+ js .LBB185_8
+# BB#7:
+ movl 8(%esp), %ecx # 4-byte Reload
+.LBB185_8:
+ movl %ecx, 12(%edx)
+ movl 76(%esp), %ebx # 4-byte Reload
+ movl 84(%esp), %edi # 4-byte Reload
+ js .LBB185_10
+# BB#9:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB185_10:
+ movl %eax, 16(%edx)
+ movl 80(%esp), %ecx # 4-byte Reload
+ js .LBB185_12
+# BB#11:
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+.LBB185_12:
+ movl 56(%esp), %eax # 4-byte Reload
+ movl %eax, 20(%edx)
+ js .LBB185_14
+# BB#13:
+ movl 20(%esp), %ebp # 4-byte Reload
+.LBB185_14:
+ movl %ebp, 24(%edx)
+ js .LBB185_16
+# BB#15:
+ movl 24(%esp), %edi # 4-byte Reload
+.LBB185_16:
+ movl %edi, 28(%edx)
+ js .LBB185_18
+# BB#17:
+ movl 28(%esp), %ebx # 4-byte Reload
+.LBB185_18:
+ movl %ebx, 32(%edx)
+ movl 72(%esp), %eax # 4-byte Reload
+ js .LBB185_20
+# BB#19:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB185_20:
+ movl %eax, 36(%edx)
+ js .LBB185_22
+# BB#21:
+ movl 36(%esp), %esi # 4-byte Reload
+.LBB185_22:
+ movl %esi, 40(%edx)
+ js .LBB185_24
+# BB#23:
+ movl 40(%esp), %ecx # 4-byte Reload
+.LBB185_24:
+ movl %ecx, 44(%edx)
+ addl $88, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end185:
+ .size mcl_fp_addNF12L, .Lfunc_end185-mcl_fp_addNF12L
+
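+# mcl_fp_sub12L(z, x, y, p): modular subtraction; p is added back to the
+# difference when the subtraction borrows.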
+ .globl mcl_fp_sub12L
+ .align 16, 0x90
+ .type mcl_fp_sub12L,@function
+mcl_fp_sub12L: # @mcl_fp_sub12L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $40, %esp
+ movl 64(%esp), %esi
+ movl (%esi), %ecx
+ movl 4(%esi), %eax
+ xorl %ebx, %ebx
+ movl 68(%esp), %edi
+ subl (%edi), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ sbbl 4(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ sbbl 12(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ sbbl 16(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ sbbl 20(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ sbbl 24(%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 28(%esi), %edx
+ sbbl 28(%edi), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 32(%esi), %ecx
+ sbbl 32(%edi), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 36(%esi), %eax
+ sbbl 36(%edi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 40(%esi), %ebp
+ sbbl 40(%edi), %ebp
+ movl 44(%esi), %esi
+ sbbl 44(%edi), %esi
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 60(%esp), %ebx
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, (%ebx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%ebx)
+ movl %edx, 28(%ebx)
+ movl %ecx, 32(%ebx)
+ movl %eax, 36(%ebx)
+ movl %ebp, 40(%ebx)
+ movl %esi, 44(%ebx)
+ je .LBB186_2
+# BB#1: # %carry
+ movl %esi, %edi
+ movl 72(%esp), %esi
+ movl 12(%esp), %ecx # 4-byte Reload
+ addl (%esi), %ecx
+ movl %ecx, (%ebx)
+ movl 24(%esp), %edx # 4-byte Reload
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 8(%esi), %ecx
+ movl 12(%esi), %eax
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl 24(%esi), %ecx
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl 28(%esi), %eax
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%ebx)
+ movl 32(%esi), %ecx
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+ movl 36(%esi), %eax
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%ebx)
+ movl %eax, 36(%ebx)
+ movl 40(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 40(%ebx)
+ movl 44(%esi), %eax
+ adcl %edi, %eax
+ movl %eax, 44(%ebx)
+.LBB186_2: # %nocarry
+ addl $40, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end186:
+ .size mcl_fp_sub12L, .Lfunc_end186-mcl_fp_sub12L
+
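+# mcl_fp_subNF12L(z, x, y, p): modular subtraction variant that adds p masked
+# by the sign of x - y, avoiding a branch on the borrow.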
+ .globl mcl_fp_subNF12L
+ .align 16, 0x90
+ .type mcl_fp_subNF12L,@function
+mcl_fp_subNF12L: # @mcl_fp_subNF12L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $72, %esp
+ movl 96(%esp), %ecx
+ movl (%ecx), %edx
+ movl 4(%ecx), %eax
+ movl 100(%esp), %edi
+ subl (%edi), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ sbbl 4(%edi), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%ecx), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 40(%ecx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 32(%ecx), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 28(%ecx), %ebp
+ movl 24(%ecx), %ebx
+ movl 20(%ecx), %esi
+ movl 16(%ecx), %edx
+ movl 12(%ecx), %eax
+ movl 8(%ecx), %ecx
+ sbbl 8(%edi), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ sbbl 12(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ sbbl 16(%edi), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ sbbl 20(%edi), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ sbbl 24(%edi), %ebx
+ movl %ebx, 40(%esp) # 4-byte Spill
+ sbbl 28(%edi), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ sbbl 32(%edi), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ sbbl 36(%edi), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ sbbl 44(%edi), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ sarl $31, %eax
+ movl %eax, %edx
+ addl %edx, %edx
+ movl %eax, %edi
+ adcl %edi, %edi
+ movl %eax, %ebp
+ adcl %ebp, %ebp
+ movl %eax, %esi
+ adcl %esi, %esi
+ shrl $31, %ecx
+ orl %edx, %ecx
+ movl 104(%esp), %edx
+ andl 12(%edx), %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ andl 8(%edx), %ebp
+ andl 4(%edx), %edi
+ andl (%edx), %ecx
+ movl 44(%edx), %esi
+ andl %eax, %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl 40(%edx), %esi
+ andl %eax, %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 36(%edx), %esi
+ andl %eax, %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ movl 32(%edx), %esi
+ andl %eax, %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl 28(%edx), %esi
+ andl %eax, %esi
+ movl %esi, (%esp) # 4-byte Spill
+ movl 24(%edx), %ebx
+ andl %eax, %ebx
+ movl 20(%edx), %esi
+ andl %eax, %esi
+ andl 16(%edx), %eax
+ addl 48(%esp), %ecx # 4-byte Folded Reload
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl 92(%esp), %edx
+ movl %ecx, (%edx)
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl %edi, 4(%edx)
+ movl 8(%esp), %ecx # 4-byte Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ebp, 8(%edx)
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 12(%edx)
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %eax, 16(%edx)
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, 20(%edx)
+ movl (%esp), %ecx # 4-byte Reload
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ebx, 24(%edx)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 28(%edx)
+ movl 12(%esp), %ecx # 4-byte Reload
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 32(%edx)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 36(%edx)
+ movl %eax, 40(%edx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%edx)
+ addl $72, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end187:
+ .size mcl_fp_subNF12L, .Lfunc_end187-mcl_fp_subNF12L
+
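+# mcl_fpDbl_add12L(z, x, y, p): double-width (24-limb) addition; the upper
+# half of the result is conditionally reduced by p.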
+ .globl mcl_fpDbl_add12L
+ .align 16, 0x90
+ .type mcl_fpDbl_add12L,@function
+mcl_fpDbl_add12L: # @mcl_fpDbl_add12L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $88, %esp
+ movl 116(%esp), %ecx
+ movl 112(%esp), %edi
+ movl 12(%edi), %esi
+ movl 16(%edi), %edx
+ movl 8(%ecx), %ebx
+ movl (%ecx), %ebp
+ addl (%edi), %ebp
+ movl 108(%esp), %eax
+ movl %ebp, (%eax)
+ movl 4(%ecx), %ebp
+ adcl 4(%edi), %ebp
+ adcl 8(%edi), %ebx
+ adcl 12(%ecx), %esi
+ adcl 16(%ecx), %edx
+ movl %ebp, 4(%eax)
+ movl 56(%ecx), %ebp
+ movl %ebx, 8(%eax)
+ movl 20(%ecx), %ebx
+ movl %esi, 12(%eax)
+ movl 20(%edi), %esi
+ adcl %ebx, %esi
+ movl 24(%ecx), %ebx
+ movl %edx, 16(%eax)
+ movl 24(%edi), %edx
+ adcl %ebx, %edx
+ movl 28(%ecx), %ebx
+ movl %esi, 20(%eax)
+ movl 28(%edi), %esi
+ adcl %ebx, %esi
+ movl 32(%ecx), %ebx
+ movl %edx, 24(%eax)
+ movl 32(%edi), %edx
+ adcl %ebx, %edx
+ movl 36(%ecx), %ebx
+ movl %esi, 28(%eax)
+ movl 36(%edi), %esi
+ adcl %ebx, %esi
+ movl 40(%ecx), %ebx
+ movl %edx, 32(%eax)
+ movl 40(%edi), %edx
+ adcl %ebx, %edx
+ movl 44(%ecx), %ebx
+ movl %esi, 36(%eax)
+ movl 44(%edi), %esi
+ adcl %ebx, %esi
+ movl 48(%ecx), %ebx
+ movl %edx, 40(%eax)
+ movl 48(%edi), %edx
+ adcl %ebx, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 52(%ecx), %ebx
+ movl %esi, 44(%eax)
+ movl 52(%edi), %eax
+ adcl %ebx, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 56(%edi), %eax
+ adcl %ebp, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%ecx), %eax
+ movl 60(%edi), %edx
+ adcl %eax, %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 64(%ecx), %eax
+ movl 64(%edi), %edx
+ adcl %eax, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 68(%ecx), %eax
+ movl 68(%edi), %edx
+ adcl %eax, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 72(%ecx), %eax
+ movl 72(%edi), %edx
+ adcl %eax, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 76(%ecx), %eax
+ movl 76(%edi), %edx
+ adcl %eax, %edx
+ movl 80(%ecx), %esi
+ movl 80(%edi), %eax
+ adcl %esi, %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 84(%ecx), %ebx
+ movl 84(%edi), %esi
+ adcl %ebx, %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 88(%ecx), %ebp
+ movl 88(%edi), %ebx
+ adcl %ebp, %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 92(%ecx), %ecx
+ movl 92(%edi), %edi
+ adcl %ecx, %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl 120(%esp), %ebp
+ movl 72(%esp), %edi # 4-byte Reload
+ subl (%ebp), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ sbbl 4(%ebp), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ sbbl 8(%ebp), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ sbbl 12(%ebp), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ sbbl 16(%ebp), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ sbbl 20(%ebp), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ sbbl 24(%ebp), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl %edx, %edi
+ sbbl 28(%ebp), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ sbbl 32(%ebp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ sbbl 36(%ebp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ movl 44(%esp), %ebx # 4-byte Reload
+ sbbl 40(%ebp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebx, %edi
+ sbbl 44(%ebp), %edi
+ sbbl $0, %ecx
+ andl $1, %ecx
+ jne .LBB188_2
+# BB#1:
+ movl %edi, %ebx
+.LBB188_2:
+ testb %cl, %cl
+ movl 72(%esp), %ecx # 4-byte Reload
+ movl 68(%esp), %esi # 4-byte Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ movl 60(%esp), %ebp # 4-byte Reload
+ jne .LBB188_4
+# BB#3:
+ movl (%esp), %edx # 4-byte Reload
+ movl 4(%esp), %esi # 4-byte Reload
+ movl 8(%esp), %edi # 4-byte Reload
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB188_4:
+ movl 108(%esp), %eax
+ movl %ecx, 48(%eax)
+ movl 76(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%eax)
+ movl 80(%esp), %ecx # 4-byte Reload
+ movl %ecx, 56(%eax)
+ movl 84(%esp), %ecx # 4-byte Reload
+ movl %ecx, 60(%eax)
+ movl %ebp, 64(%eax)
+ movl %edi, 68(%eax)
+ movl %esi, 72(%eax)
+ movl %edx, 76(%eax)
+ movl 56(%esp), %ecx # 4-byte Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ jne .LBB188_6
+# BB#5:
+ movl 32(%esp), %edx # 4-byte Reload
+.LBB188_6:
+ movl %edx, 80(%eax)
+ movl 52(%esp), %edx # 4-byte Reload
+ jne .LBB188_8
+# BB#7:
+ movl 36(%esp), %edx # 4-byte Reload
+.LBB188_8:
+ movl %edx, 84(%eax)
+ jne .LBB188_10
+# BB#9:
+ movl 40(%esp), %ecx # 4-byte Reload
+.LBB188_10:
+ movl %ecx, 88(%eax)
+ movl %ebx, 92(%eax)
+ addl $88, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end188:
+ .size mcl_fpDbl_add12L, .Lfunc_end188-mcl_fpDbl_add12L
+
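+# mcl_fpDbl_sub12L(z, x, y, p): double-width (24-limb) subtraction; p is
+# conditionally added back to the upper half on borrow.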
+ .globl mcl_fpDbl_sub12L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub12L,@function
+mcl_fpDbl_sub12L: # @mcl_fpDbl_sub12L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $76, %esp
+ movl 100(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %edx
+ movl 104(%esp), %ebx
+ subl (%ebx), %eax
+ sbbl 4(%ebx), %edx
+ movl 8(%esi), %edi
+ sbbl 8(%ebx), %edi
+ movl 96(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 12(%esi), %eax
+ sbbl 12(%ebx), %eax
+ movl %edx, 4(%ecx)
+ movl 16(%esi), %edx
+ sbbl 16(%ebx), %edx
+ movl %edi, 8(%ecx)
+ movl 20(%ebx), %edi
+ movl %eax, 12(%ecx)
+ movl 20(%esi), %eax
+ sbbl %edi, %eax
+ movl 24(%ebx), %edi
+ movl %edx, 16(%ecx)
+ movl 24(%esi), %edx
+ sbbl %edi, %edx
+ movl 28(%ebx), %edi
+ movl %eax, 20(%ecx)
+ movl 28(%esi), %eax
+ sbbl %edi, %eax
+ movl 32(%ebx), %edi
+ movl %edx, 24(%ecx)
+ movl 32(%esi), %edx
+ sbbl %edi, %edx
+ movl 36(%ebx), %edi
+ movl %eax, 28(%ecx)
+ movl 36(%esi), %eax
+ sbbl %edi, %eax
+ movl 40(%ebx), %edi
+ movl %edx, 32(%ecx)
+ movl 40(%esi), %edx
+ sbbl %edi, %edx
+ movl 44(%ebx), %edi
+ movl %eax, 36(%ecx)
+ movl 44(%esi), %eax
+ sbbl %edi, %eax
+ movl 48(%ebx), %edi
+ movl %edx, 40(%ecx)
+ movl 48(%esi), %edx
+ sbbl %edi, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 52(%ebx), %edx
+ movl %eax, 44(%ecx)
+ movl 52(%esi), %eax
+ sbbl %edx, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 56(%ebx), %eax
+ movl 56(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 60(%ebx), %eax
+ movl 60(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 64(%ebx), %eax
+ movl 64(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 68(%ebx), %eax
+ movl 68(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 72(%ebx), %eax
+ movl 72(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 76(%ebx), %eax
+ movl 76(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 80(%ebx), %eax
+ movl 80(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 84(%ebx), %eax
+ movl 84(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 88(%ebx), %eax
+ movl 88(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 92(%ebx), %eax
+ movl 92(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 108(%esp), %ebp
+ jne .LBB189_1
+# BB#2:
+ movl $0, 36(%esp) # 4-byte Folded Spill
+ jmp .LBB189_3
+.LBB189_1:
+ movl 44(%ebp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+.LBB189_3:
+ testb %al, %al
+ jne .LBB189_4
+# BB#5:
+ movl $0, 12(%esp) # 4-byte Folded Spill
+ movl $0, %esi
+ jmp .LBB189_6
+.LBB189_4:
+ movl (%ebp), %esi
+ movl 4(%ebp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+.LBB189_6:
+ jne .LBB189_7
+# BB#8:
+ movl $0, 20(%esp) # 4-byte Folded Spill
+ jmp .LBB189_9
+.LBB189_7:
+ movl 40(%ebp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+.LBB189_9:
+ jne .LBB189_10
+# BB#11:
+ movl $0, 16(%esp) # 4-byte Folded Spill
+ jmp .LBB189_12
+.LBB189_10:
+ movl 36(%ebp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+.LBB189_12:
+ jne .LBB189_13
+# BB#14:
+ movl $0, 8(%esp) # 4-byte Folded Spill
+ jmp .LBB189_15
+.LBB189_13:
+ movl 32(%ebp), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+.LBB189_15:
+ jne .LBB189_16
+# BB#17:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB189_18
+.LBB189_16:
+ movl 28(%ebp), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+.LBB189_18:
+ jne .LBB189_19
+# BB#20:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB189_21
+.LBB189_19:
+ movl 24(%ebp), %eax
+ movl %eax, (%esp) # 4-byte Spill
+.LBB189_21:
+ jne .LBB189_22
+# BB#23:
+ movl $0, %ebx
+ jmp .LBB189_24
+.LBB189_22:
+ movl 20(%ebp), %ebx
+.LBB189_24:
+ jne .LBB189_25
+# BB#26:
+ movl $0, %eax
+ jmp .LBB189_27
+.LBB189_25:
+ movl 16(%ebp), %eax
+.LBB189_27:
+ jne .LBB189_28
+# BB#29:
+ movl %ebp, %edx
+ movl $0, %ebp
+ jmp .LBB189_30
+.LBB189_28:
+ movl %ebp, %edx
+ movl 12(%edx), %ebp
+.LBB189_30:
+ jne .LBB189_31
+# BB#32:
+ xorl %edx, %edx
+ jmp .LBB189_33
+.LBB189_31:
+ movl 8(%edx), %edx
+.LBB189_33:
+ addl 32(%esp), %esi # 4-byte Folded Reload
+ movl 12(%esp), %edi # 4-byte Reload
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 48(%ecx)
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edi, 52(%ecx)
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %edx, 56(%ecx)
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %ebp, 60(%ecx)
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %eax, 64(%ecx)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %ebx, 68(%ecx)
+ movl 4(%esp), %edx # 4-byte Reload
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 72(%ecx)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 76(%ecx)
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 80(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 84(%ecx)
+ movl %eax, 88(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%ecx)
+ addl $76, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end189:
+ .size mcl_fpDbl_sub12L, .Lfunc_end189-mcl_fpDbl_sub12L
+
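+# .LmulPv416x32: local helper that multiplies a 13-limb (416-bit) operand
+# (pointer in %edx) by a 32-bit scalar (stack argument) and writes the
+# 14-limb product to the buffer addressed by %ecx.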
+ .align 16, 0x90
+ .type .LmulPv416x32,@function
+.LmulPv416x32: # @mulPv416x32
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $88, %esp
+ movl %edx, %edi
+ movl 108(%esp), %ebp
+ movl %ebp, %eax
+ mull 48(%edi)
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 44(%edi)
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 40(%edi)
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 36(%edi)
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 32(%edi)
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 28(%edi)
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 24(%edi)
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 20(%edi)
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 16(%edi)
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 12(%edi)
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 8(%edi)
+ movl %edx, %esi
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 4(%edi)
+ movl %edx, %ebx
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull (%edi)
+ movl %eax, (%ecx)
+ addl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%ecx)
+ adcl 4(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 8(%ecx)
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%ecx)
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%ecx)
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%ecx)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%ecx)
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%ecx)
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%ecx)
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 52(%ecx)
+ movl %ecx, %eax
+ addl $88, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end190:
+ .size .LmulPv416x32, .Lfunc_end190-.LmulPv416x32
+
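+# mcl_fp_mulUnitPre13L(z, x, y): multiplies the 13-limb value x by the 32-bit
+# unit y via .LmulPv416x32 and stores the 14-limb result in z.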
+ .globl mcl_fp_mulUnitPre13L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre13L,@function
+mcl_fp_mulUnitPre13L: # @mcl_fp_mulUnitPre13L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $108, %esp
+ calll .L191$pb
+.L191$pb:
+ popl %ebx
+.Ltmp32:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp32-.L191$pb), %ebx
+ movl 136(%esp), %eax
+ movl %eax, (%esp)
+ leal 48(%esp), %ecx
+ movl 132(%esp), %edx
+ calll .LmulPv416x32
+ movl 100(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 88(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 80(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 76(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 72(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp
+ movl 64(%esp), %ebx
+ movl 60(%esp), %edi
+ movl 56(%esp), %esi
+ movl 48(%esp), %edx
+ movl 52(%esp), %ecx
+ movl 128(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %edi, 12(%eax)
+ movl %ebx, 16(%eax)
+ movl %ebp, 20(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%eax)
+ addl $108, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end191:
+ .size mcl_fp_mulUnitPre13L, .Lfunc_end191-mcl_fp_mulUnitPre13L
+
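+# mcl_fpDbl_mulPre13L(z, x, y): 13-limb by 13-limb multiplication; one
+# .LmulPv416x32 call per limb of y accumulates the double-width product in z.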
+ .globl mcl_fpDbl_mulPre13L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre13L,@function
+mcl_fpDbl_mulPre13L: # @mcl_fpDbl_mulPre13L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $844, %esp # imm = 0x34C
+ calll .L192$pb
+.L192$pb:
+ popl %edi
+.Ltmp33:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp33-.L192$pb), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ movl 872(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 784(%esp), %ecx
+ movl 868(%esp), %edx
+ movl %edx, %esi
+ movl %edi, %ebx
+ calll .LmulPv416x32
+ movl 836(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 832(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 828(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 820(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 816(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 812(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 808(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 804(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 800(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 796(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 792(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 784(%esp), %eax
+ movl 788(%esp), %ebp
+ movl 864(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 872(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 728(%esp), %ecx
+ movl %esi, %edx
+ movl %edi, %ebx
+ calll .LmulPv416x32
+ addl 728(%esp), %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 780(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 776(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 772(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 768(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 764(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 760(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 756(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 752(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 748(%esp), %edi
+ movl 744(%esp), %esi
+ movl 740(%esp), %edx
+ movl 732(%esp), %eax
+ movl 736(%esp), %ecx
+ movl 864(%esp), %ebp
+ movl 24(%esp), %ebx # 4-byte Reload
+ movl %ebx, 4(%ebp)
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 672(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 60(%esp), %eax # 4-byte Reload
+ addl 672(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 724(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 720(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 716(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 712(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 708(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 704(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 700(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 696(%esp), %ebx
+ movl 692(%esp), %edi
+ movl 688(%esp), %esi
+ movl 684(%esp), %edx
+ movl 676(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 680(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 60(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%eax)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl %eax, 104(%esp) # 4-byte Folded Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 616(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 616(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 668(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 664(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 660(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 656(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 652(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 648(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 644(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 640(%esp), %ebx
+ movl 636(%esp), %edi
+ movl 632(%esp), %esi
+ movl 628(%esp), %edx
+ movl 620(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 624(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 104(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 560(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 560(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 612(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 608(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 604(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 600(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 596(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 592(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 588(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 584(%esp), %ebx
+ movl 580(%esp), %edi
+ movl 576(%esp), %esi
+ movl 572(%esp), %edx
+ movl 564(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 568(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%eax)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 104(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 504(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 556(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 552(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 548(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 544(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 540(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 536(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 532(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 528(%esp), %ebx
+ movl 524(%esp), %edi
+ movl 520(%esp), %esi
+ movl 516(%esp), %edx
+ movl 508(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 512(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 104(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 448(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 448(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 500(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 496(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 492(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 488(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 484(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 480(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 476(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 472(%esp), %ebp
+ movl 468(%esp), %edi
+ movl 464(%esp), %esi
+ movl 460(%esp), %edx
+ movl 452(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 456(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 56(%esp), %ebx # 4-byte Reload
+ movl %ebx, 24(%eax)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 104(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 392(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 392(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 444(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 440(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 436(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 432(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 428(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 424(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 420(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 416(%esp), %ebx
+ movl 412(%esp), %edi
+ movl 408(%esp), %esi
+ movl 404(%esp), %edx
+ movl 396(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 400(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 104(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 336(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 336(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 388(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 384(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 380(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 376(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 364(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 360(%esp), %ebp
+ movl 356(%esp), %edi
+ movl 352(%esp), %esi
+ movl 348(%esp), %edx
+ movl 340(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 344(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 60(%esp), %ebx # 4-byte Reload
+ movl %ebx, 32(%eax)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl %eax, 104(%esp) # 4-byte Folded Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 280(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 280(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 332(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 328(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 324(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 320(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 316(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 312(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 308(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 304(%esp), %ebx
+ movl 300(%esp), %edi
+ movl 296(%esp), %esi
+ movl 292(%esp), %edx
+ movl 284(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 288(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 104(%esp), %ebp # 4-byte Reload
+ movl %ebp, 36(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 224(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 276(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 272(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 268(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 264(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 260(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 256(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 252(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 248(%esp), %ebx
+ movl 244(%esp), %edi
+ movl 240(%esp), %esi
+ movl 236(%esp), %edx
+ movl 228(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 232(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 40(%eax)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 40(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %edi
+ movl 44(%edi), %eax
+ movl %eax, (%esp)
+ leal 168(%esp), %ecx
+ movl 868(%esp), %eax
+ movl %eax, %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 60(%esp), %esi # 4-byte Reload
+ addl 168(%esp), %esi
+ movl 220(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 212(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 208(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 204(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 200(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 196(%esp), %ebp
+ movl 192(%esp), %ebx
+ movl 188(%esp), %edi
+ movl 184(%esp), %edx
+ movl 180(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 176(%esp), %ecx
+ movl 864(%esp), %eax
+ movl %esi, 44(%eax)
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ adcl 104(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 52(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 112(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 112(%esp), %esi
+ movl %esi, %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 120(%esp), %edi
+ movl 164(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 156(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 152(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 148(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 144(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 140(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 136(%esp), %ebx
+ movl 132(%esp), %esi
+ movl 128(%esp), %edx
+ movl 124(%esp), %ecx
+ movl 864(%esp), %eax
+ movl %ebp, 48(%eax)
+ movl 68(%esp), %ebp # 4-byte Reload
+ movl %ebp, 52(%eax)
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %edi, 56(%eax)
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 60(%eax)
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 64(%eax)
+ adcl 104(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, 68(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ebx, 72(%eax)
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 76(%eax)
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 88(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 80(%eax)
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 80(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 84(%eax)
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 100(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 88(%eax)
+ movl %ecx, 92(%eax)
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 96(%eax)
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 100(%eax)
+ addl $844, %esp # imm = 0x34C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end192:
+ .size mcl_fpDbl_mulPre13L, .Lfunc_end192-mcl_fpDbl_mulPre13L
+
+ .globl mcl_fpDbl_sqrPre13L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre13L,@function
+mcl_fpDbl_sqrPre13L: # @mcl_fpDbl_sqrPre13L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $844, %esp # imm = 0x34C
+ calll .L193$pb
+.L193$pb:
+ popl %ebx
+.Ltmp34:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp34-.L193$pb), %ebx
+ movl %ebx, 108(%esp) # 4-byte Spill
+ movl 868(%esp), %edx
+ movl (%edx), %eax
+ movl %eax, (%esp)
+ leal 784(%esp), %ecx
+ movl %edx, %edi
+ movl %ebx, %esi
+ calll .LmulPv416x32
+ movl 836(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 832(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 828(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 820(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 816(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 812(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 808(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 804(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 800(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 796(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 792(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 784(%esp), %eax
+ movl 788(%esp), %ebp
+ movl 864(%esp), %ecx
+ movl %eax, (%ecx)
+ movl %edi, %edx
+ movl 4(%edx), %eax
+ movl %eax, (%esp)
+ leal 728(%esp), %ecx
+ movl %esi, %ebx
+ calll .LmulPv416x32
+ addl 728(%esp), %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 780(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 776(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 772(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 768(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 764(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 760(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 756(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 752(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 748(%esp), %edi
+ movl 744(%esp), %esi
+ movl 740(%esp), %edx
+ movl 732(%esp), %eax
+ movl 736(%esp), %ecx
+ movl 864(%esp), %ebp
+ movl 24(%esp), %ebx # 4-byte Reload
+ movl %ebx, 4(%ebp)
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 8(%edx), %eax
+ movl %eax, (%esp)
+ leal 672(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 60(%esp), %eax # 4-byte Reload
+ addl 672(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 724(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 720(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 716(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 712(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 708(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 704(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 700(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 696(%esp), %ebx
+ movl 692(%esp), %edi
+ movl 688(%esp), %esi
+ movl 684(%esp), %edx
+ movl 676(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 680(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 60(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%eax)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl %eax, 104(%esp) # 4-byte Folded Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 12(%edx), %eax
+ movl %eax, (%esp)
+ leal 616(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 616(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 668(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 664(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 660(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 656(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 652(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 648(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 644(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 640(%esp), %ebx
+ movl 636(%esp), %edi
+ movl 632(%esp), %esi
+ movl 628(%esp), %edx
+ movl 620(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 624(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 104(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 16(%edx), %eax
+ movl %eax, (%esp)
+ leal 560(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 560(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 612(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 608(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 604(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 600(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 596(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 592(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 588(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 584(%esp), %ebx
+ movl 580(%esp), %edi
+ movl 576(%esp), %esi
+ movl 572(%esp), %edx
+ movl 564(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 568(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%eax)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 104(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 20(%edx), %eax
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 504(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 556(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 552(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 548(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 544(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 540(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 536(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 532(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 528(%esp), %ebx
+ movl 524(%esp), %edi
+ movl 520(%esp), %esi
+ movl 516(%esp), %edx
+ movl 508(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 512(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 104(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 24(%edx), %eax
+ movl %eax, (%esp)
+ leal 448(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 448(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 500(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 496(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 492(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 488(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 484(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 480(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 476(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 472(%esp), %ebp
+ movl 468(%esp), %edi
+ movl 464(%esp), %esi
+ movl 460(%esp), %edx
+ movl 452(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 456(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 56(%esp), %ebx # 4-byte Reload
+ movl %ebx, 24(%eax)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 104(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 28(%edx), %eax
+ movl %eax, (%esp)
+ leal 392(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 392(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 444(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 440(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 436(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 432(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 428(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 424(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 420(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 416(%esp), %ebx
+ movl 412(%esp), %edi
+ movl 408(%esp), %esi
+ movl 404(%esp), %edx
+ movl 396(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 400(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 104(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 32(%edx), %eax
+ movl %eax, (%esp)
+ leal 336(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 336(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 388(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 384(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 380(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 376(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 364(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 360(%esp), %ebp
+ movl 356(%esp), %edi
+ movl 352(%esp), %esi
+ movl 348(%esp), %edx
+ movl 340(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 344(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 60(%esp), %ebx # 4-byte Reload
+ movl %ebx, 32(%eax)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl %eax, 104(%esp) # 4-byte Folded Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 36(%edx), %eax
+ movl %eax, (%esp)
+ leal 280(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 280(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 332(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 328(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 324(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 320(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 316(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 312(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 308(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 304(%esp), %ebx
+ movl 300(%esp), %edi
+ movl 296(%esp), %esi
+ movl 292(%esp), %edx
+ movl 284(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 288(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 104(%esp), %ebp # 4-byte Reload
+ movl %ebp, 36(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 40(%edx), %eax
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 224(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 276(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 272(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 268(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 264(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 260(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 256(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 252(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 248(%esp), %ebx
+ movl 244(%esp), %edi
+ movl 240(%esp), %esi
+ movl 236(%esp), %edx
+ movl 228(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 232(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 40(%eax)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 40(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 44(%edx), %eax
+ movl %eax, (%esp)
+ leal 168(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 60(%esp), %esi # 4-byte Reload
+ addl 168(%esp), %esi
+ movl 220(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 212(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 208(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 204(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 200(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 196(%esp), %ebp
+ movl 192(%esp), %ebx
+ movl 188(%esp), %edi
+ movl 184(%esp), %edx
+ movl 180(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 176(%esp), %ecx
+ movl 864(%esp), %eax
+ movl %esi, 44(%eax)
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ adcl 104(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 52(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 48(%edx), %eax
+ movl %eax, (%esp)
+ leal 112(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 112(%esp), %esi
+ movl %esi, %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 120(%esp), %edi
+ movl 164(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 156(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 152(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 148(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 144(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 140(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 136(%esp), %ebx
+ movl 132(%esp), %esi
+ movl 128(%esp), %edx
+ movl 124(%esp), %ecx
+ movl 864(%esp), %eax
+ movl %ebp, 48(%eax)
+ movl 68(%esp), %ebp # 4-byte Reload
+ movl %ebp, 52(%eax)
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %edi, 56(%eax)
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 60(%eax)
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 64(%eax)
+ adcl 104(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, 68(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ebx, 72(%eax)
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 76(%eax)
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 88(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 80(%eax)
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 80(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 84(%eax)
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 100(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 88(%eax)
+ movl %ecx, 92(%eax)
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 96(%eax)
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 100(%eax)
+ addl $844, %esp # imm = 0x34C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end193:
+ .size mcl_fpDbl_sqrPre13L, .Lfunc_end193-mcl_fpDbl_sqrPre13L
+
+ .globl mcl_fp_mont13L
+ .align 16, 0x90
+ .type mcl_fp_mont13L,@function
+mcl_fp_mont13L: # @mcl_fp_mont13L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1548, %esp # imm = 0x60C
+ calll .L194$pb
+.L194$pb:
+ popl %ebx
+.Ltmp35:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp35-.L194$pb), %ebx
+ movl 1580(%esp), %eax
+ movl -4(%eax), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1488(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 1488(%esp), %esi
+ movl 1492(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %esi, %eax
+ imull %edi, %eax
+ movl 1540(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 1536(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 1532(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 1528(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1524(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1520(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1516(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 1512(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 1508(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1504(%esp), %edi
+ movl 1500(%esp), %ebp
+ movl 1496(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl %eax, (%esp)
+ leal 1432(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 1432(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1436(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1440(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1444(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ adcl 1448(%esp), %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1452(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1456(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1460(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1464(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1468(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 1472(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1476(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1480(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1484(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl 1576(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1376(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ andl $1, %esi
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 1376(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1380(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1384(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1388(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1392(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1396(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1400(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 1404(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1408(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1412(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1416(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 1420(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1424(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 1428(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1320(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ andl $1, %esi
+ movl %esi, %eax
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 1320(%esp), %ecx
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1324(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1328(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1332(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1336(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 1340(%esp), %esi
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 1344(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 1348(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1352(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1356(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1360(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 1364(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 1368(%esp), %ebp
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 1372(%esp), %edi
+ adcl $0, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1264(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 84(%esp), %ecx # 4-byte Reload
+ addl 1264(%esp), %ecx
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1268(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1272(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1276(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 1280(%esp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1284(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1288(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1292(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1296(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 1300(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1304(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1308(%esp), %ebp
+ adcl 1312(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1316(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1208(%esp), %ecx
+ movl 1580(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv416x32
+ movl 84(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1208(%esp), %edi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1212(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1216(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1220(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 1224(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 1228(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1232(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1236(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1240(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 1244(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 1248(%esp), %edi
+ adcl 1252(%esp), %ebp
+ movl %ebp, %esi
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1256(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 1260(%esp), %ebp
+ adcl $0, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1152(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 1152(%esp), %ecx
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1156(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1168(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1180(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1184(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 1188(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ adcl 1192(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1196(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1200(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1204(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1096(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ andl $1, %ebp
+ movl %ebp, %eax
+ addl 1096(%esp), %esi
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1100(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1104(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 1108(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 1112(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1116(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1120(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 1124(%esp), %ebp
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 1128(%esp), %edi
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 1132(%esp), %esi
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1136(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1140(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1144(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1148(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1040(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 40(%esp), %ecx # 4-byte Reload
+ addl 1040(%esp), %ecx
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1044(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1064(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl 1068(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ adcl 1072(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1076(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1080(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 1084(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1088(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1092(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 984(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ andl $1, %edi
+ addl 984(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ebp # 4-byte Reload
+ adcl 996(%esp), %ebp
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1000(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1004(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1028(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl 1576(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 928(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 928(%esp), %ecx
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 932(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 936(%esp), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 940(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 944(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 948(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 952(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 980(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 872(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ andl $1, %edi
+ addl 872(%esp), %ebp
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 876(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 884(%esp), %ebp
+ adcl 888(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 904(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 816(%esp), %ecx
+ movl 1572(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv416x32
+ movl 32(%esp), %ecx # 4-byte Reload
+ addl 816(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 824(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 844(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 848(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 856(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 760(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ andl $1, %esi
+ movl %esi, %eax
+ movl 32(%esp), %ecx # 4-byte Reload
+ addl 760(%esp), %ecx
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 764(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 768(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 772(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 776(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 780(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 784(%esp), %esi
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 788(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 792(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 796(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl 800(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 804(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 808(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 812(%esp), %edi
+ adcl $0, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 704(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 36(%esp), %eax # 4-byte Reload
+ addl 704(%esp), %eax
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 708(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 712(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 716(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 720(%esp), %ebp
+ adcl 724(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 728(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 732(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 736(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 740(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 744(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 748(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 752(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 756(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %eax, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 648(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ movl %edi, %eax
+ andl $1, %eax
+ addl 648(%esp), %esi
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 652(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 656(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 660(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 664(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 668(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 672(%esp), %edi
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 676(%esp), %esi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 680(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 684(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 688(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 692(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 696(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 700(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 592(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 592(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 600(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 612(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ adcl 616(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 620(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 536(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ movl 44(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 536(%esp), %esi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 544(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 560(%esp), %esi
+ adcl 564(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 572(%esp), %edi
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 576(%esp), %ebp
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 480(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 480(%esp), %ecx
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 500(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 512(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 516(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ movl %edi, %ecx
+ andl $1, %ecx
+ addl 424(%esp), %esi
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 428(%esp), %ebp
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 432(%esp), %edi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 444(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 368(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ addl 368(%esp), %ebp
+ adcl 372(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 376(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 384(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 392(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ movl 52(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 312(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 320(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 328(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 336(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 340(%esp), %edi
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 344(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 256(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 68(%esp), %ecx # 4-byte Reload
+ addl 256(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 268(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 280(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 284(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 288(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 200(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 200(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 208(%esp), %ebp
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 212(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 232(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %edi # 4-byte Reload
+ adcl 236(%esp), %edi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 144(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 144(%esp), %ecx
+ adcl 148(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ adcl 152(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 156(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 176(%esp), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl 28(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 88(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ andl $1, %edi
+ addl 88(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 92(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 96(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 100(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ adcl 104(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 108(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ebx # 4-byte Reload
+ adcl 112(%esp), %ebx
+ movl %ebx, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %ebx # 4-byte Reload
+ adcl 116(%esp), %ebx
+ movl %ebx, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %ebx # 4-byte Reload
+ adcl 120(%esp), %ebx
+ movl %ebx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ebx # 4-byte Reload
+ adcl 124(%esp), %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %ebx # 4-byte Reload
+ adcl 128(%esp), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %ebx # 4-byte Reload
+ adcl 132(%esp), %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebx # 4-byte Reload
+ adcl 136(%esp), %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %ebx # 4-byte Reload
+ adcl 140(%esp), %ebx
+ movl %ebx, 68(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl 1580(%esp), %ebx
+ subl (%ebx), %eax
+ sbbl 4(%ebx), %ecx
+ sbbl 8(%ebx), %ebp
+ sbbl 12(%ebx), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ sbbl 16(%ebx), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ sbbl 20(%ebx), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ sbbl 24(%ebx), %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 32(%esp), %edx # 4-byte Reload
+ sbbl 28(%ebx), %edx
+ movl 36(%esp), %esi # 4-byte Reload
+ sbbl 32(%ebx), %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ sbbl 36(%ebx), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ sbbl 40(%ebx), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ sbbl 44(%ebx), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ sbbl 48(%ebx), %esi
+ movl %esi, %ebx
+ sbbl $0, %edi
+ andl $1, %edi
+ jne .LBB194_2
+# BB#1:
+ movl %edx, 32(%esp) # 4-byte Spill
+.LBB194_2:
+ movl %edi, %edx
+ testb %dl, %dl
+ movl 80(%esp), %edx # 4-byte Reload
+ jne .LBB194_4
+# BB#3:
+ movl %eax, %edx
+.LBB194_4:
+ movl 1568(%esp), %eax
+ movl %edx, (%eax)
+ movl 64(%esp), %esi # 4-byte Reload
+ jne .LBB194_6
+# BB#5:
+ movl %ecx, %esi
+.LBB194_6:
+ movl %esi, 4(%eax)
+ jne .LBB194_8
+# BB#7:
+ movl %ebp, 76(%esp) # 4-byte Spill
+.LBB194_8:
+ movl 76(%esp), %ecx # 4-byte Reload
+ movl %ecx, 8(%eax)
+ movl 60(%esp), %ebp # 4-byte Reload
+ jne .LBB194_10
+# BB#9:
+ movl 4(%esp), %ecx # 4-byte Reload
+ movl %ecx, 84(%esp) # 4-byte Spill
+.LBB194_10:
+ movl 84(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%eax)
+ jne .LBB194_12
+# BB#11:
+ movl 8(%esp), %ebp # 4-byte Reload
+.LBB194_12:
+ movl %ebp, 16(%eax)
+ movl 48(%esp), %ecx # 4-byte Reload
+ jne .LBB194_14
+# BB#13:
+ movl 12(%esp), %ecx # 4-byte Reload
+.LBB194_14:
+ movl %ecx, 20(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ jne .LBB194_16
+# BB#15:
+ movl 16(%esp), %ecx # 4-byte Reload
+.LBB194_16:
+ movl %ecx, 24(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ jne .LBB194_18
+# BB#17:
+ movl 20(%esp), %ecx # 4-byte Reload
+.LBB194_18:
+ movl %ecx, 32(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ jne .LBB194_20
+# BB#19:
+ movl 24(%esp), %ecx # 4-byte Reload
+.LBB194_20:
+ movl %ecx, 36(%eax)
+ movl 56(%esp), %ecx # 4-byte Reload
+ jne .LBB194_22
+# BB#21:
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB194_22:
+ movl %ecx, 40(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ jne .LBB194_24
+# BB#23:
+ movl 72(%esp), %ecx # 4-byte Reload
+.LBB194_24:
+ movl %ecx, 44(%eax)
+ movl 68(%esp), %ecx # 4-byte Reload
+ jne .LBB194_26
+# BB#25:
+ movl %ebx, %ecx
+.LBB194_26:
+ movl %ecx, 48(%eax)
+ addl $1548, %esp # imm = 0x60C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end194:
+ .size mcl_fp_mont13L, .Lfunc_end194-mcl_fp_mont13L
+
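Editorial note: the routine above (mcl_fp_mont13L) and the mcl_fp_montNF13L routine that follows are generated x86 assembly for Montgomery multiplication over 13 limbs of 32 bits (416 bits). Each .LmulPv416x32 call accumulates one 32-bit partial product into the running total, the imull against the word spilled at 28(%esp) (32(%esp) in the NF variant) produces the per-iteration reduction factor m = t0 * n' mod 2^32, and the sbbl/jne block starting at .LBB194_2 performs the conditional final subtraction of the modulus. The C sketch below is not part of mcl; it is a minimal word-by-word (CIOS) rendition of the same operation written for this note, and the names mont_mul_13 and nprime are illustrative, not identifiers from the library.

/* Illustrative sketch only -- not mcl's source.  Computes
 * z = x * y * R^-1 mod p with R = 2^(32*13), the operation the
 * generated mcl_fp_mont13L code above performs.  Arrays are
 * little-endian 32-bit limbs; nprime = -p^-1 mod 2^32. */
#include <stdint.h>

#define N 13  /* 13 limbs x 32 bits = 416 bits */

static void mont_mul_13(uint32_t z[N], const uint32_t x[N],
                        const uint32_t y[N], const uint32_t p[N],
                        uint32_t nprime)
{
    uint32_t t[N + 2] = {0};

    for (int i = 0; i < N; i++) {
        /* t += x * y[i]  (the role of one .LmulPv416x32 call plus its adcl chain) */
        uint64_t carry = 0;
        for (int j = 0; j < N; j++) {
            uint64_t acc = (uint64_t)x[j] * y[i] + t[j] + carry;
            t[j] = (uint32_t)acc;
            carry = acc >> 32;
        }
        uint64_t acc = (uint64_t)t[N] + carry;
        t[N] = (uint32_t)acc;
        t[N + 1] = (uint32_t)(acc >> 32);

        /* m = t[0] * n' mod 2^32  (the imull against the spilled inverse) */
        uint32_t m = t[0] * nprime;

        /* t += m * p, then drop the (now zero) low limb */
        carry = 0;
        for (int j = 0; j < N; j++) {
            uint64_t acc2 = (uint64_t)m * p[j] + t[j] + carry;
            if (j > 0)
                t[j - 1] = (uint32_t)acc2;  /* shift down one limb as we go */
            carry = acc2 >> 32;
        }
        acc = (uint64_t)t[N] + carry;
        t[N - 1] = (uint32_t)acc;
        t[N] = t[N + 1] + (uint32_t)(acc >> 32);
        t[N + 1] = 0;
    }

    /* conditional final subtraction, like the sbbl/jne block at .LBB194_2 */
    uint32_t s[N];
    uint32_t borrow = 0;
    for (int j = 0; j < N; j++) {
        uint64_t d = (uint64_t)t[j] - p[j] - borrow;
        s[j] = (uint32_t)d;
        borrow = (uint32_t)((d >> 63) & 1);  /* 1 if the subtraction underflowed */
    }
    for (int j = 0; j < N; j++)
        z[j] = (borrow && !t[N]) ? t[j] : s[j];  /* keep t if t < p, else t - p */
}

The branchy selection at the end mirrors the jne-based choice in the assembly between the accumulated sum and the subtracted value; it is shown only to match the structure above, not as a hardened (constant-time) implementation.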
+ .globl mcl_fp_montNF13L
+ .align 16, 0x90
+ .type mcl_fp_montNF13L,@function
+mcl_fp_montNF13L: # @mcl_fp_montNF13L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1548, %esp # imm = 0x60C
+ calll .L195$pb
+.L195$pb:
+ popl %ebx
+.Ltmp36:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp36-.L195$pb), %ebx
+ movl 1580(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1488(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 1488(%esp), %edi
+ movl 1492(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull %esi, %eax
+ movl 1540(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 1536(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 1532(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 1528(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1524(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1520(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1516(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1512(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 1508(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1504(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 1500(%esp), %esi
+ movl 1496(%esp), %ebp
+ movl %eax, (%esp)
+ leal 1432(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 1432(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1436(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 1440(%esp), %ebp
+ adcl 1444(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1448(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1452(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1456(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1460(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1464(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1468(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 1472(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1476(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1480(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 1484(%esp), %edi
+ movl 1576(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1376(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 1428(%esp), %ecx
+ movl 80(%esp), %edx # 4-byte Reload
+ addl 1376(%esp), %edx
+ adcl 1380(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1384(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1388(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1392(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1396(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1400(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1404(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1408(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1412(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1416(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1420(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1424(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1320(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 1320(%esp), %esi
+ adcl 1324(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1328(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1332(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1336(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 1340(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1344(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1348(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1352(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1356(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 1360(%esp), %edi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1364(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1368(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1372(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1264(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 1316(%esp), %eax
+ addl 1264(%esp), %ebp
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1268(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1272(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1276(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 1280(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 1284(%esp), %esi
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1288(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1292(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1296(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 1300(%esp), %edi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1304(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1308(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1312(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1208(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 1208(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1212(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1216(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1220(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1224(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 1228(%esp), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1232(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1236(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1240(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1244(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 1248(%esp), %esi
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 1252(%esp), %edi
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 1256(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1260(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1152(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 1204(%esp), %eax
+ movl 64(%esp), %edx # 4-byte Reload
+ addl 1152(%esp), %edx
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1156(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1160(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 1164(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1168(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1172(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1176(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1180(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1184(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 1188(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ adcl 1192(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ adcl 1196(%esp), %ebp
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1200(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1096(%esp), %ecx
+ movl 1580(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv416x32
+ addl 1096(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1100(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1104(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1108(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1112(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 1116(%esp), %esi
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 1120(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1124(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1128(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1140(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1144(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 1148(%esp), %ebp
+ movl 1576(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1040(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 1092(%esp), %eax
+ movl 40(%esp), %edx # 4-byte Reload
+ addl 1040(%esp), %edx
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1044(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 1048(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1052(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 1056(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 1060(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1064(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1068(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1072(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1076(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1080(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1084(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 1088(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl %eax, %esi
+ adcl $0, %esi
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 984(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 984(%esp), %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 996(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1000(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1004(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 1008(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1028(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1036(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 928(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 980(%esp), %eax
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 928(%esp), %ecx
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 932(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl 936(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 940(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 944(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 948(%esp), %ebp
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 952(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 956(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 960(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 964(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 968(%esp), %esi
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 972(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 976(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 872(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 872(%esp), %edi
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 876(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 892(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 912(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 816(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 868(%esp), %edx
+ addl 816(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 832(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 836(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 856(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 860(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 760(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 760(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 780(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 784(%esp), %esi
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 788(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 804(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 704(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 756(%esp), %eax
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 704(%esp), %ecx
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 708(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 712(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 716(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 720(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ adcl 724(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ adcl 728(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 732(%esp), %esi
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 736(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 740(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl 744(%esp), %ebp
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 748(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 752(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 648(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 648(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 676(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 688(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 696(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 592(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 644(%esp), %edx
+ movl 52(%esp), %ecx # 4-byte Reload
+ addl 592(%esp), %ecx
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 596(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 624(%esp), %ebp
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 636(%esp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 536(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 536(%esp), %edi
+ adcl 540(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 556(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 564(%esp), %esi
+ adcl 568(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 572(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 480(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 532(%esp), %edx
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 480(%esp), %ecx
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 496(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 504(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 508(%esp), %edi
+ adcl 512(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 528(%esp), %ebp
+ adcl $0, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 424(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edi, %esi
+ adcl 452(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 460(%esp), %edi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 472(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 368(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 420(%esp), %edx
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 368(%esp), %ecx
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 372(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 392(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 400(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 404(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 312(%esp), %esi
+ adcl 316(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 320(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 332(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 348(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 256(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 308(%esp), %edx
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 256(%esp), %ecx
+ adcl 260(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 272(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 288(%esp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 200(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 200(%esp), %esi
+ adcl 204(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 212(%esp), %ebp
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 216(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 228(%esp), %edi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 144(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 196(%esp), %edx
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 144(%esp), %ecx
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 152(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ adcl 156(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 164(%esp), %ebp
+ adcl 168(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 88(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 88(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 92(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 96(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 100(%esp), %edi
+ movl 64(%esp), %ebx # 4-byte Reload
+ adcl 104(%esp), %ebx
+ movl %ebx, 64(%esp) # 4-byte Spill
+ adcl 108(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 112(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 116(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 120(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 124(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 128(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 132(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 136(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 140(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 1580(%esp), %eax
+ subl (%eax), %edx
+ movl %ecx, %ebp
+ sbbl 4(%eax), %ebp
+ movl %edi, %ecx
+ sbbl 8(%eax), %ecx
+ sbbl 12(%eax), %ebx
+ sbbl 16(%eax), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ sbbl 20(%eax), %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ sbbl 24(%eax), %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ sbbl 28(%eax), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ sbbl 32(%eax), %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ sbbl 36(%eax), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ sbbl 40(%eax), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ sbbl 44(%eax), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ sbbl 48(%eax), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl %esi, %eax
+ sarl $31, %eax
+ testl %eax, %eax
+ movl 84(%esp), %eax # 4-byte Reload
+ js .LBB195_2
+# BB#1:
+ movl %edx, %eax
+.LBB195_2:
+ movl 1568(%esp), %edx
+ movl %eax, (%edx)
+ movl 80(%esp), %esi # 4-byte Reload
+ js .LBB195_4
+# BB#3:
+ movl %ebp, %esi
+.LBB195_4:
+ movl %esi, 4(%edx)
+ movl 64(%esp), %eax # 4-byte Reload
+ js .LBB195_6
+# BB#5:
+ movl %ecx, %edi
+.LBB195_6:
+ movl %edi, 8(%edx)
+ js .LBB195_8
+# BB#7:
+ movl %ebx, %eax
+.LBB195_8:
+ movl %eax, 12(%edx)
+ movl 40(%esp), %eax # 4-byte Reload
+ js .LBB195_10
+# BB#9:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB195_10:
+ movl %eax, 16(%edx)
+ movl 48(%esp), %eax # 4-byte Reload
+ js .LBB195_12
+# BB#11:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB195_12:
+ movl %eax, 20(%edx)
+ movl 36(%esp), %eax # 4-byte Reload
+ js .LBB195_14
+# BB#13:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB195_14:
+ movl %eax, 24(%edx)
+ movl 44(%esp), %eax # 4-byte Reload
+ js .LBB195_16
+# BB#15:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB195_16:
+ movl %eax, 28(%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB195_18
+# BB#17:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB195_18:
+ movl %eax, 32(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB195_20
+# BB#19:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB195_20:
+ movl %eax, 36(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB195_22
+# BB#21:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB195_22:
+ movl %eax, 40(%edx)
+ movl 72(%esp), %eax # 4-byte Reload
+ js .LBB195_24
+# BB#23:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB195_24:
+ movl %eax, 44(%edx)
+ movl 76(%esp), %eax # 4-byte Reload
+ js .LBB195_26
+# BB#25:
+ movl 68(%esp), %eax # 4-byte Reload
+.LBB195_26:
+ movl %eax, 48(%edx)
+ addl $1548, %esp # imm = 0x60C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end195:
+ .size mcl_fp_montNF13L, .Lfunc_end195-mcl_fp_montNF13L
+
+ .globl mcl_fp_montRed13L
+ .align 16, 0x90
+ .type mcl_fp_montRed13L,@function
+mcl_fp_montRed13L: # @mcl_fp_montRed13L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $892, %esp # imm = 0x37C
+ calll .L196$pb
+.L196$pb:
+ popl %eax
+.Ltmp37:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp37-.L196$pb), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 920(%esp), %edx
+ movl -4(%edx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 916(%esp), %ecx
+ movl (%ecx), %ebx
+ movl %ebx, 76(%esp) # 4-byte Spill
+ movl 4(%ecx), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ imull %eax, %ebx
+ movl 100(%ecx), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 96(%ecx), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%ecx), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 88(%ecx), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 84(%ecx), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 80(%ecx), %esi
+ movl %esi, 136(%esp) # 4-byte Spill
+ movl 76(%ecx), %esi
+ movl %esi, 144(%esp) # 4-byte Spill
+ movl 72(%ecx), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 68(%ecx), %esi
+ movl %esi, 128(%esp) # 4-byte Spill
+ movl 64(%ecx), %esi
+ movl %esi, 148(%esp) # 4-byte Spill
+ movl 60(%ecx), %esi
+ movl %esi, 152(%esp) # 4-byte Spill
+ movl 56(%ecx), %esi
+ movl %esi, 140(%esp) # 4-byte Spill
+ movl 52(%ecx), %esi
+ movl %esi, 156(%esp) # 4-byte Spill
+ movl 48(%ecx), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 44(%ecx), %ebp
+ movl %ebp, 124(%esp) # 4-byte Spill
+ movl 40(%ecx), %ebp
+ movl %ebp, 108(%esp) # 4-byte Spill
+ movl 36(%ecx), %ebp
+ movl %ebp, 104(%esp) # 4-byte Spill
+ movl 32(%ecx), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 28(%ecx), %ebp
+ movl 24(%ecx), %edi
+ movl 20(%ecx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 16(%ecx), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 12(%ecx), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 8(%ecx), %esi
+ movl (%edx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 48(%edx), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 44(%edx), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 40(%edx), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 36(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 32(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 28(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 24(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 16(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 12(%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 8(%edx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 4(%edx), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl %ebx, (%esp)
+ leal 832(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 76(%esp), %eax # 4-byte Reload
+ addl 832(%esp), %eax
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 836(%esp), %ecx
+ adcl 840(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 856(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ adcl 860(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 876(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 776(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ andl $1, %esi
+ addl 776(%esp), %edi
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 780(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, %edi
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 720(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 720(%esp), %esi
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 724(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 132(%esp) # 4-byte Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl %esi, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 664(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 664(%esp), %esi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 668(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 696(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 716(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 608(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 608(%esp), %edi
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 612(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ movl 144(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 552(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 552(%esp), %esi
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 556(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 144(%esp) # 4-byte Spill
+ movl 136(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 496(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 496(%esp), %edi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 500(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 152(%esp), %ebp # 4-byte Reload
+ adcl 532(%esp), %ebp
+ movl 148(%esp), %edi # 4-byte Reload
+ adcl 536(%esp), %edi
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 136(%esp) # 4-byte Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 440(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 440(%esp), %esi
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 444(%esp), %ecx
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ adcl 472(%esp), %ebp
+ movl %ebp, 152(%esp) # 4-byte Spill
+ adcl 476(%esp), %edi
+ movl %edi, 148(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 384(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 384(%esp), %esi
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 388(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 156(%esp), %ebp # 4-byte Reload
+ adcl 404(%esp), %ebp
+ movl 140(%esp), %edi # 4-byte Reload
+ adcl 408(%esp), %edi
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 128(%esp), %esi # 4-byte Reload
+ adcl 420(%esp), %esi
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 328(%esp), %ecx
+ movl 920(%esp), %eax
+ movl %eax, %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 328(%esp), %eax
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 332(%esp), %ecx
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 344(%esp), %ebp
+ movl %ebp, 156(%esp) # 4-byte Spill
+ adcl 348(%esp), %edi
+ movl %edi, 140(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ adcl 360(%esp), %esi
+ movl %esi, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl %ecx, %edi
+ movl %edi, %eax
+ movl 72(%esp), %esi # 4-byte Reload
+ imull %esi, %eax
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 272(%esp), %edi
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl 120(%esp), %edi # 4-byte Reload
+ adcl 280(%esp), %edi
+ movl 156(%esp), %ecx # 4-byte Reload
+ adcl 284(%esp), %ecx
+ movl %ecx, 156(%esp) # 4-byte Spill
+ movl 140(%esp), %ecx # 4-byte Reload
+ adcl 288(%esp), %ecx
+ movl %ecx, 140(%esp) # 4-byte Spill
+ movl 152(%esp), %ecx # 4-byte Reload
+ adcl 292(%esp), %ecx
+ movl %ecx, 152(%esp) # 4-byte Spill
+ movl 148(%esp), %ecx # 4-byte Reload
+ adcl 296(%esp), %ecx
+ movl %ecx, 148(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 300(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 304(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %ecx # 4-byte Reload
+ adcl 308(%esp), %ecx
+ movl %ecx, 144(%esp) # 4-byte Spill
+ movl 136(%esp), %ecx # 4-byte Reload
+ adcl 312(%esp), %ecx
+ movl %ecx, 136(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 316(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 320(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 324(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ imull %esi, %eax
+ movl %eax, (%esp)
+ leal 216(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 216(%esp), %ebp
+ movl %edi, %ecx
+ adcl 220(%esp), %ecx
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 140(%esp), %ebp # 4-byte Reload
+ adcl 228(%esp), %ebp
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %edi # 4-byte Reload
+ adcl 244(%esp), %edi
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl %esi, %eax
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 160(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 160(%esp), %esi
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ adcl 168(%esp), %ebp
+ movl %ebp, 140(%esp) # 4-byte Spill
+ movl %ebp, %ebx
+ movl 152(%esp), %ecx # 4-byte Reload
+ adcl 172(%esp), %ecx
+ movl %ecx, 152(%esp) # 4-byte Spill
+ movl 148(%esp), %ebp # 4-byte Reload
+ adcl 176(%esp), %ebp
+ movl %ebp, 148(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl %edi, %eax
+ adcl 184(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ movl 156(%esp), %edi # 4-byte Reload
+ subl 12(%esp), %edi # 4-byte Folded Reload
+ sbbl 4(%esp), %ebx # 4-byte Folded Reload
+ sbbl 8(%esp), %ecx # 4-byte Folded Reload
+ sbbl 16(%esp), %ebp # 4-byte Folded Reload
+ sbbl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 132(%esp), %edx # 4-byte Reload
+ sbbl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 144(%esp), %edx # 4-byte Reload
+ sbbl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 136(%esp), %edx # 4-byte Reload
+ sbbl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 116(%esp), %edx # 4-byte Reload
+ sbbl 36(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 112(%esp), %edx # 4-byte Reload
+ sbbl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ sbbl 44(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ sbbl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 120(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl %esi, %eax
+ sbbl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 124(%esp) # 4-byte Spill
+ sbbl $0, %eax
+ andl $1, %eax
+ jne .LBB196_2
+# BB#1:
+ movl %ebp, 148(%esp) # 4-byte Spill
+.LBB196_2:
+ testb %al, %al
+ movl 156(%esp), %ebp # 4-byte Reload
+ jne .LBB196_4
+# BB#3:
+ movl %edi, %ebp
+.LBB196_4:
+ movl 912(%esp), %edi
+ movl %ebp, (%edi)
+ movl 140(%esp), %ebp # 4-byte Reload
+ jne .LBB196_6
+# BB#5:
+ movl %ebx, %ebp
+.LBB196_6:
+ movl %ebp, 4(%edi)
+ movl 152(%esp), %ebx # 4-byte Reload
+ jne .LBB196_8
+# BB#7:
+ movl %ecx, %ebx
+.LBB196_8:
+ movl %ebx, 8(%edi)
+ movl 148(%esp), %esi # 4-byte Reload
+ movl %esi, 12(%edi)
+ movl 116(%esp), %ebx # 4-byte Reload
+ movl 128(%esp), %esi # 4-byte Reload
+ jne .LBB196_10
+# BB#9:
+ movl 72(%esp), %esi # 4-byte Reload
+.LBB196_10:
+ movl %esi, 16(%edi)
+ movl 112(%esp), %esi # 4-byte Reload
+ movl 132(%esp), %edx # 4-byte Reload
+ jne .LBB196_12
+# BB#11:
+ movl 76(%esp), %edx # 4-byte Reload
+.LBB196_12:
+ movl %edx, 20(%edi)
+ movl 96(%esp), %edx # 4-byte Reload
+ movl 144(%esp), %ecx # 4-byte Reload
+ jne .LBB196_14
+# BB#13:
+ movl 80(%esp), %ecx # 4-byte Reload
+.LBB196_14:
+ movl %ecx, 24(%edi)
+ movl 100(%esp), %ecx # 4-byte Reload
+ movl 136(%esp), %eax # 4-byte Reload
+ jne .LBB196_16
+# BB#15:
+ movl 84(%esp), %eax # 4-byte Reload
+.LBB196_16:
+ movl %eax, 28(%edi)
+ movl 92(%esp), %eax # 4-byte Reload
+ jne .LBB196_18
+# BB#17:
+ movl 88(%esp), %ebx # 4-byte Reload
+.LBB196_18:
+ movl %ebx, 32(%edi)
+ jne .LBB196_20
+# BB#19:
+ movl 104(%esp), %esi # 4-byte Reload
+.LBB196_20:
+ movl %esi, 36(%edi)
+ jne .LBB196_22
+# BB#21:
+ movl 108(%esp), %edx # 4-byte Reload
+.LBB196_22:
+ movl %edx, 40(%edi)
+ jne .LBB196_24
+# BB#23:
+ movl 120(%esp), %ecx # 4-byte Reload
+.LBB196_24:
+ movl %ecx, 44(%edi)
+ jne .LBB196_26
+# BB#25:
+ movl 124(%esp), %eax # 4-byte Reload
+.LBB196_26:
+ movl %eax, 48(%edi)
+ addl $892, %esp # imm = 0x37C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end196:
+ .size mcl_fp_montRed13L, .Lfunc_end196-mcl_fp_montRed13L
+
+ .globl mcl_fp_addPre13L
+ .align 16, 0x90
+ .type mcl_fp_addPre13L,@function
+mcl_fp_addPre13L: # @mcl_fp_addPre13L
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 20(%esp), %ecx
+ addl (%ecx), %edx
+ adcl 4(%ecx), %esi
+ movl 8(%eax), %edi
+ adcl 8(%ecx), %edi
+ movl 16(%esp), %ebx
+ movl %edx, (%ebx)
+ movl 12(%ecx), %edx
+ movl %esi, 4(%ebx)
+ movl 16(%ecx), %esi
+ adcl 12(%eax), %edx
+ adcl 16(%eax), %esi
+ movl %edi, 8(%ebx)
+ movl 20(%eax), %edi
+ movl %edx, 12(%ebx)
+ movl 20(%ecx), %edx
+ adcl %edi, %edx
+ movl 24(%eax), %edi
+ movl %esi, 16(%ebx)
+ movl 24(%ecx), %esi
+ adcl %edi, %esi
+ movl 28(%eax), %edi
+ movl %edx, 20(%ebx)
+ movl 28(%ecx), %edx
+ adcl %edi, %edx
+ movl 32(%eax), %edi
+ movl %esi, 24(%ebx)
+ movl 32(%ecx), %esi
+ adcl %edi, %esi
+ movl 36(%eax), %edi
+ movl %edx, 28(%ebx)
+ movl 36(%ecx), %edx
+ adcl %edi, %edx
+ movl 40(%eax), %edi
+ movl %esi, 32(%ebx)
+ movl 40(%ecx), %esi
+ adcl %edi, %esi
+ movl 44(%eax), %edi
+ movl %edx, 36(%ebx)
+ movl 44(%ecx), %edx
+ adcl %edi, %edx
+ movl %esi, 40(%ebx)
+ movl %edx, 44(%ebx)
+ movl 48(%eax), %eax
+ movl 48(%ecx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 48(%ebx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end197:
+ .size mcl_fp_addPre13L, .Lfunc_end197-mcl_fp_addPre13L
+
+ .globl mcl_fp_subPre13L
+ .align 16, 0x90
+ .type mcl_fp_subPre13L,@function
+mcl_fp_subPre13L: # @mcl_fp_subPre13L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %eax, %eax
+ movl 28(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edx), %ebx
+ movl 20(%esp), %ebp
+ movl %esi, (%ebp)
+ movl 12(%ecx), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ebp)
+ movl 16(%ecx), %edi
+ sbbl 16(%edx), %edi
+ movl %ebx, 8(%ebp)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%ebp)
+ movl 20(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %edi, 16(%ebp)
+ movl 24(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 28(%edx), %ebx
+ movl %esi, 20(%ebp)
+ movl 28(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 32(%edx), %ebx
+ movl %edi, 24(%ebp)
+ movl 32(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 36(%edx), %ebx
+ movl %esi, 28(%ebp)
+ movl 36(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 40(%edx), %ebx
+ movl %edi, 32(%ebp)
+ movl 40(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 44(%edx), %ebx
+ movl %esi, 36(%ebp)
+ movl 44(%ecx), %esi
+ sbbl %ebx, %esi
+ movl %edi, 40(%ebp)
+ movl %esi, 44(%ebp)
+ movl 48(%edx), %edx
+ movl 48(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 48(%ebp)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end198:
+ .size mcl_fp_subPre13L, .Lfunc_end198-mcl_fp_subPre13L
+
+ .globl mcl_fp_shr1_13L
+ .align 16, 0x90
+ .type mcl_fp_shr1_13L,@function
+mcl_fp_shr1_13L: # @mcl_fp_shr1_13L
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl 8(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 8(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 4(%ecx)
+ movl 12(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 8(%ecx)
+ movl 16(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 12(%ecx)
+ movl 20(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 16(%ecx)
+ movl 24(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 20(%ecx)
+ movl 28(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 24(%ecx)
+ movl 32(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 28(%ecx)
+ movl 36(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 32(%ecx)
+ movl 40(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 36(%ecx)
+ movl 44(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 40(%ecx)
+ movl 48(%eax), %eax
+ shrdl $1, %eax, %esi
+ movl %esi, 44(%ecx)
+ shrl %eax
+ movl %eax, 48(%ecx)
+ popl %esi
+ retl
+.Lfunc_end199:
+ .size mcl_fp_shr1_13L, .Lfunc_end199-mcl_fp_shr1_13L
+
+ .globl mcl_fp_add13L
+ .align 16, 0x90
+ .type mcl_fp_add13L,@function
+mcl_fp_add13L: # @mcl_fp_add13L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $40, %esp
+ movl 68(%esp), %ebp
+ movl (%ebp), %ecx
+ movl 4(%ebp), %eax
+ movl 64(%esp), %ebx
+ addl (%ebx), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 4(%ebx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 8(%ebp), %eax
+ adcl 8(%ebx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 12(%ebx), %ecx
+ movl 16(%ebx), %eax
+ adcl 12(%ebp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl 16(%ebp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 20(%ebx), %eax
+ adcl 20(%ebp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 24(%ebx), %eax
+ adcl 24(%ebp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 28(%ebx), %eax
+ adcl 28(%ebp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 32(%ebx), %eax
+ adcl 32(%ebp), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 36(%ebx), %ecx
+ adcl 36(%ebp), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 40(%ebx), %edi
+ adcl 40(%ebp), %edi
+ movl 44(%ebx), %edx
+ adcl 44(%ebp), %edx
+ movl 48(%ebx), %esi
+ adcl 48(%ebp), %esi
+ movl 60(%esp), %ebp
+ movl 4(%esp), %ebx # 4-byte Reload
+ movl %ebx, (%ebp)
+ movl 36(%esp), %eax # 4-byte Reload
+ movl %eax, 4(%ebp)
+ movl 32(%esp), %eax # 4-byte Reload
+ movl %eax, 8(%ebp)
+ movl 28(%esp), %eax # 4-byte Reload
+ movl %eax, 12(%ebp)
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 16(%ebp)
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 20(%ebp)
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 24(%ebp)
+ movl 12(%esp), %eax # 4-byte Reload
+ movl %eax, 28(%ebp)
+ movl 8(%esp), %eax # 4-byte Reload
+ movl %eax, 32(%ebp)
+ movl %ecx, 36(%ebp)
+ movl %edi, 40(%ebp)
+ movl %edx, 44(%ebp)
+ movl %esi, 48(%ebp)
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl 72(%esp), %ecx
+ subl (%ecx), %ebx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 36(%esp), %ebx # 4-byte Reload
+ sbbl 4(%ecx), %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %ebx # 4-byte Reload
+ sbbl 8(%ecx), %ebx
+ movl %ebx, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %ebx # 4-byte Reload
+ sbbl 12(%ecx), %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 24(%esp), %ebx # 4-byte Reload
+ sbbl 16(%ecx), %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 20(%esp), %ebx # 4-byte Reload
+ sbbl 20(%ecx), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 16(%esp), %ebx # 4-byte Reload
+ sbbl 24(%ecx), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %ebx # 4-byte Reload
+ sbbl 28(%ecx), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 8(%esp), %ebx # 4-byte Reload
+ sbbl 32(%ecx), %ebx
+ movl %ebx, 8(%esp) # 4-byte Spill
+ movl (%esp), %ebx # 4-byte Reload
+ sbbl 36(%ecx), %ebx
+ sbbl 40(%ecx), %edi
+ sbbl 44(%ecx), %edx
+ sbbl 48(%ecx), %esi
+ sbbl $0, %eax
+ testb $1, %al
+ jne .LBB200_2
+# BB#1: # %nocarry
+ movl 4(%esp), %eax # 4-byte Reload
+ movl %eax, (%ebp)
+ movl 36(%esp), %eax # 4-byte Reload
+ movl %eax, 4(%ebp)
+ movl 32(%esp), %eax # 4-byte Reload
+ movl %eax, 8(%ebp)
+ movl 28(%esp), %eax # 4-byte Reload
+ movl %eax, 12(%ebp)
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 16(%ebp)
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 20(%ebp)
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 24(%ebp)
+ movl 12(%esp), %eax # 4-byte Reload
+ movl %eax, 28(%ebp)
+ movl 8(%esp), %eax # 4-byte Reload
+ movl %eax, 32(%ebp)
+ movl %ebx, 36(%ebp)
+ movl %edi, 40(%ebp)
+ movl %edx, 44(%ebp)
+ movl %esi, 48(%ebp)
+.LBB200_2: # %carry
+ addl $40, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end200:
+ .size mcl_fp_add13L, .Lfunc_end200-mcl_fp_add13L
+
+ .globl mcl_fp_addNF13L
+ .align 16, 0x90
+ .type mcl_fp_addNF13L,@function
+mcl_fp_addNF13L: # @mcl_fp_addNF13L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $100, %esp
+ movl 128(%esp), %esi
+ movl (%esi), %ecx
+ movl 4(%esi), %eax
+ movl 124(%esp), %edx
+ addl (%edx), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 4(%edx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 48(%esi), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 44(%esi), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 40(%esi), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 36(%esi), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 32(%esi), %ebp
+ movl 28(%esi), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ movl 20(%esi), %ebx
+ movl 16(%esi), %edi
+ movl 12(%esi), %ecx
+ movl 8(%esi), %esi
+ adcl 8(%edx), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 12(%edx), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 16(%edx), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ adcl 20(%edx), %ebx
+ movl %ebx, 60(%esp) # 4-byte Spill
+ adcl 24(%edx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 28(%edx), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 32(%edx), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 36(%edx), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 40(%edx), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%edx), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 48(%edx), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 132(%esp), %edx
+ movl 64(%esp), %eax # 4-byte Reload
+ subl (%edx), %eax
+ movl 68(%esp), %ebp # 4-byte Reload
+ sbbl 4(%edx), %ebp
+ movl %ebp, (%esp) # 4-byte Spill
+ sbbl 8(%edx), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ sbbl 12(%edx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 16(%edx), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ sbbl 20(%edx), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ sbbl 24(%edx), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ sbbl 28(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ sbbl 32(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ movl %esi, %ecx
+ movl %esi, %ebp
+ sbbl 36(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ movl %esi, %ecx
+ movl %esi, %edi
+ sbbl 40(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ sbbl 44(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 88(%esp), %ebx # 4-byte Reload
+ sbbl 48(%edx), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ sarl $31, %ebx
+ testl %ebx, %ebx
+ movl 64(%esp), %edx # 4-byte Reload
+ js .LBB201_2
+# BB#1:
+ movl %eax, %edx
+.LBB201_2:
+ movl 120(%esp), %esi
+ movl %edx, (%esi)
+ movl 68(%esp), %edx # 4-byte Reload
+ js .LBB201_4
+# BB#3:
+ movl (%esp), %edx # 4-byte Reload
+.LBB201_4:
+ movl %edx, 4(%esi)
+ movl %edi, %edx
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl 48(%esp), %eax # 4-byte Reload
+ js .LBB201_6
+# BB#5:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB201_6:
+ movl %eax, 8(%esi)
+ movl %ebp, %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB201_8
+# BB#7:
+ movl 8(%esp), %ebx # 4-byte Reload
+.LBB201_8:
+ movl %ebx, 12(%esi)
+ movl 96(%esp), %ebp # 4-byte Reload
+ movl 56(%esp), %ecx # 4-byte Reload
+ js .LBB201_10
+# BB#9:
+ movl 12(%esp), %ecx # 4-byte Reload
+.LBB201_10:
+ movl %ecx, 16(%esi)
+ movl 92(%esp), %ecx # 4-byte Reload
+ js .LBB201_12
+# BB#11:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB201_12:
+ movl %eax, 20(%esi)
+ movl 72(%esp), %ebx # 4-byte Reload
+ js .LBB201_14
+# BB#13:
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+.LBB201_14:
+ movl 76(%esp), %eax # 4-byte Reload
+ movl %eax, 24(%esi)
+ js .LBB201_16
+# BB#15:
+ movl 24(%esp), %ebp # 4-byte Reload
+.LBB201_16:
+ movl %ebp, 28(%esi)
+ js .LBB201_18
+# BB#17:
+ movl 28(%esp), %ebx # 4-byte Reload
+.LBB201_18:
+ movl %ebx, 32(%esi)
+ js .LBB201_20
+# BB#19:
+ movl 32(%esp), %edi # 4-byte Reload
+.LBB201_20:
+ movl %edi, 36(%esi)
+ js .LBB201_22
+# BB#21:
+ movl 36(%esp), %edx # 4-byte Reload
+.LBB201_22:
+ movl %edx, 40(%esi)
+ js .LBB201_24
+# BB#23:
+ movl 40(%esp), %ecx # 4-byte Reload
+.LBB201_24:
+ movl %ecx, 44(%esi)
+ movl 88(%esp), %eax # 4-byte Reload
+ js .LBB201_26
+# BB#25:
+ movl 44(%esp), %eax # 4-byte Reload
+.LBB201_26:
+ movl %eax, 48(%esi)
+ addl $100, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end201:
+ .size mcl_fp_addNF13L, .Lfunc_end201-mcl_fp_addNF13L
+
+ .globl mcl_fp_sub13L
+ .align 16, 0x90
+ .type mcl_fp_sub13L,@function
+mcl_fp_sub13L: # @mcl_fp_sub13L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $44, %esp
+ movl 68(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+ xorl %ebx, %ebx
+ movl 72(%esp), %edi
+ subl (%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ sbbl 4(%edi), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ sbbl 12(%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ sbbl 16(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ sbbl 20(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ sbbl 24(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 28(%esi), %eax
+ sbbl 28(%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 32(%esi), %edx
+ sbbl 32(%edi), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 36(%esi), %ecx
+ sbbl 36(%edi), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 40(%esi), %eax
+ sbbl 40(%edi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 44(%esi), %ebp
+ sbbl 44(%edi), %ebp
+ movl 48(%esi), %esi
+ sbbl 48(%edi), %esi
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 64(%esp), %ebx
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, (%ebx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 40(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%ebx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 28(%ebx)
+ movl %edx, 32(%ebx)
+ movl %ecx, 36(%ebx)
+ movl %eax, 40(%ebx)
+ movl %ebp, 44(%ebx)
+ movl %esi, 48(%ebx)
+ je .LBB202_2
+# BB#1: # %carry
+ movl %esi, %edi
+ movl 76(%esp), %esi
+ movl 12(%esp), %ecx # 4-byte Reload
+ addl (%esi), %ecx
+ movl %ecx, (%ebx)
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 8(%esi), %ecx
+ movl 12(%esi), %eax
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl 24(%esi), %ecx
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl 28(%esi), %eax
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%ebx)
+ movl 32(%esi), %ecx
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+ movl 36(%esi), %eax
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%ebx)
+ movl 40(%esi), %ecx
+ adcl (%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%ebx)
+ movl %ecx, 40(%ebx)
+ movl 44(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 44(%ebx)
+ movl 48(%esi), %eax
+ adcl %edi, %eax
+ movl %eax, 48(%ebx)
+.LBB202_2: # %nocarry
+ addl $44, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end202:
+ .size mcl_fp_sub13L, .Lfunc_end202-mcl_fp_sub13L
+
+ .globl mcl_fp_subNF13L
+ .align 16, 0x90
+ .type mcl_fp_subNF13L,@function
+mcl_fp_subNF13L: # @mcl_fp_subNF13L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $84, %esp
+ movl 108(%esp), %ecx
+ movl (%ecx), %edx
+ movl 4(%ecx), %eax
+ movl 112(%esp), %edi
+ subl (%edi), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ sbbl 4(%edi), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%ecx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 44(%ecx), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 40(%ecx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 32(%ecx), %ebp
+ movl 28(%ecx), %ebx
+ movl 24(%ecx), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 20(%ecx), %esi
+ movl 16(%ecx), %edx
+ movl 12(%ecx), %eax
+ movl 8(%ecx), %ecx
+ sbbl 8(%edi), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ sbbl 12(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl 16(%edi), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ sbbl 20(%edi), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ sbbl 24(%edi), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ sbbl 28(%edi), %ebx
+ movl %ebx, 48(%esp) # 4-byte Spill
+ sbbl 32(%edi), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ sbbl 36(%edi), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ sbbl 44(%edi), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ sbbl 48(%edi), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl %edx, %eax
+ sarl $31, %edi
+ movl %edi, %edx
+ shldl $1, %eax, %edx
+ movl 116(%esp), %esi
+ movl 4(%esi), %eax
+ andl %edx, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ andl (%esi), %edx
+ movl 48(%esi), %eax
+ andl %edi, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esi), %eax
+ andl %edi, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esi), %eax
+ andl %edi, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 36(%esi), %eax
+ andl %edi, %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 32(%esi), %eax
+ andl %edi, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 28(%esi), %eax
+ andl %edi, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ andl %edi, %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 20(%esi), %ebp
+ andl %edi, %ebp
+ movl 16(%esi), %ebx
+ andl %edi, %ebx
+ movl 12(%esi), %ecx
+ andl %edi, %ecx
+ roll %edi
+ andl 8(%esi), %edi
+ addl 56(%esp), %edx # 4-byte Folded Reload
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl 104(%esp), %esi
+ movl %edx, (%esi)
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %eax, 4(%esi)
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %edi, 8(%esi)
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ecx, 12(%esi)
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ movl %ebx, 16(%esi)
+ movl (%esp), %ecx # 4-byte Reload
+ adcl 80(%esp), %ecx # 4-byte Folded Reload
+ movl %ebp, 20(%esi)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%esi)
+ movl 12(%esp), %ecx # 4-byte Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%esi)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%esi)
+ movl 20(%esp), %ecx # 4-byte Reload
+ adcl 72(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%esi)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 40(%esi)
+ movl %eax, 44(%esi)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esi)
+ addl $84, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end203:
+ .size mcl_fp_subNF13L, .Lfunc_end203-mcl_fp_subNF13L
+
+ .globl mcl_fpDbl_add13L
+ .align 16, 0x90
+ .type mcl_fpDbl_add13L,@function
+mcl_fpDbl_add13L: # @mcl_fpDbl_add13L
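+# double-width (26-limb) addition: add all limbs of the two inputs, store the
+# low 13 limbs to the output as they are produced, then conditionally
+# subtract the modulus (fifth argument, 128(%esp)) from the high 13 limbs
+# depending on the final carry/comparison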
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $96, %esp
+ movl 124(%esp), %ecx
+ movl 120(%esp), %esi
+ movl 12(%esi), %edi
+ movl 16(%esi), %edx
+ movl 8(%ecx), %ebx
+ movl (%ecx), %ebp
+ addl (%esi), %ebp
+ movl 116(%esp), %eax
+ movl %ebp, (%eax)
+ movl 4(%ecx), %ebp
+ adcl 4(%esi), %ebp
+ adcl 8(%esi), %ebx
+ adcl 12(%ecx), %edi
+ adcl 16(%ecx), %edx
+ movl %ebp, 4(%eax)
+ movl 60(%ecx), %ebp
+ movl %ebx, 8(%eax)
+ movl 20(%ecx), %ebx
+ movl %edi, 12(%eax)
+ movl 20(%esi), %edi
+ adcl %ebx, %edi
+ movl 24(%ecx), %ebx
+ movl %edx, 16(%eax)
+ movl 24(%esi), %edx
+ adcl %ebx, %edx
+ movl 28(%ecx), %ebx
+ movl %edi, 20(%eax)
+ movl 28(%esi), %edi
+ adcl %ebx, %edi
+ movl 32(%ecx), %ebx
+ movl %edx, 24(%eax)
+ movl 32(%esi), %edx
+ adcl %ebx, %edx
+ movl 36(%ecx), %ebx
+ movl %edi, 28(%eax)
+ movl 36(%esi), %edi
+ adcl %ebx, %edi
+ movl 40(%ecx), %ebx
+ movl %edx, 32(%eax)
+ movl 40(%esi), %edx
+ adcl %ebx, %edx
+ movl 44(%ecx), %ebx
+ movl %edi, 36(%eax)
+ movl 44(%esi), %edi
+ adcl %ebx, %edi
+ movl 48(%ecx), %ebx
+ movl %edx, 40(%eax)
+ movl 48(%esi), %edx
+ adcl %ebx, %edx
+ movl 52(%ecx), %ebx
+ movl %edi, 44(%eax)
+ movl 52(%esi), %edi
+ adcl %ebx, %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 56(%ecx), %edi
+ movl %edx, 48(%eax)
+ movl 56(%esi), %eax
+ adcl %edi, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 64(%ecx), %edx
+ movl 64(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 68(%ecx), %edx
+ movl 68(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 72(%ecx), %edx
+ movl 72(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%ecx), %edx
+ movl 76(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%ecx), %edx
+ movl 80(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%ecx), %edx
+ movl 84(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%ecx), %edx
+ movl 88(%esi), %edi
+ adcl %edx, %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 92(%ecx), %edx
+ movl 92(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 96(%ecx), %edx
+ movl 96(%esi), %ebx
+ adcl %edx, %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 100(%ecx), %ecx
+ movl 100(%esi), %esi
+ adcl %ecx, %esi
+ sbbl %edx, %edx
+ andl $1, %edx
+ movl 128(%esp), %ebp
+ movl 76(%esp), %ecx # 4-byte Reload
+ subl (%ebp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ sbbl 4(%ebp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ sbbl 8(%ebp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ sbbl 12(%ebp), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ sbbl 16(%ebp), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ sbbl 20(%ebp), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ sbbl 24(%ebp), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ sbbl 28(%ebp), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ sbbl 32(%ebp), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ sbbl 36(%ebp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ sbbl 40(%ebp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ movl %esi, %ebx
+ sbbl 44(%ebp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ebx, %ecx
+ sbbl 48(%ebp), %ecx
+ sbbl $0, %edx
+ andl $1, %edx
+ jne .LBB204_2
+# BB#1:
+ movl %ecx, %ebx
+.LBB204_2:
+ testb %dl, %dl
+ movl 76(%esp), %ecx # 4-byte Reload
+ movl 72(%esp), %edx # 4-byte Reload
+ movl 68(%esp), %esi # 4-byte Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ movl 60(%esp), %ebp # 4-byte Reload
+ jne .LBB204_4
+# BB#3:
+ movl (%esp), %edx # 4-byte Reload
+ movl 4(%esp), %esi # 4-byte Reload
+ movl 8(%esp), %edi # 4-byte Reload
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+.LBB204_4:
+ movl 116(%esp), %eax
+ movl %ecx, 52(%eax)
+ movl 80(%esp), %ecx # 4-byte Reload
+ movl %ecx, 56(%eax)
+ movl 84(%esp), %ecx # 4-byte Reload
+ movl %ecx, 60(%eax)
+ movl 88(%esp), %ecx # 4-byte Reload
+ movl %ecx, 64(%eax)
+ movl 92(%esp), %ecx # 4-byte Reload
+ movl %ecx, 68(%eax)
+ movl %ebp, 72(%eax)
+ movl %edi, 76(%eax)
+ movl %esi, 80(%eax)
+ movl %edx, 84(%eax)
+ movl 56(%esp), %ecx # 4-byte Reload
+ movl 52(%esp), %edx # 4-byte Reload
+ movl 48(%esp), %esi # 4-byte Reload
+ jne .LBB204_6
+# BB#5:
+ movl 36(%esp), %esi # 4-byte Reload
+.LBB204_6:
+ movl %esi, 88(%eax)
+ jne .LBB204_8
+# BB#7:
+ movl 40(%esp), %edx # 4-byte Reload
+.LBB204_8:
+ movl %edx, 92(%eax)
+ jne .LBB204_10
+# BB#9:
+ movl 44(%esp), %ecx # 4-byte Reload
+.LBB204_10:
+ movl %ecx, 96(%eax)
+ movl %ebx, 100(%eax)
+ addl $96, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end204:
+ .size mcl_fpDbl_add13L, .Lfunc_end204-mcl_fpDbl_add13L
+
+ .globl mcl_fpDbl_sub13L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub13L,@function
+mcl_fpDbl_sub13L: # @mcl_fpDbl_sub13L
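+# double-width (26-limb) subtraction: subtract limb by limb, store the low 13
+# limbs directly, then add either the modulus (fourth argument, 116(%esp)) or
+# zero to the high 13 limbs depending on whether the subtraction borrowed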
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $84, %esp
+ movl 108(%esp), %edi
+ movl (%edi), %eax
+ movl 4(%edi), %edx
+ movl 112(%esp), %ebx
+ subl (%ebx), %eax
+ sbbl 4(%ebx), %edx
+ movl 8(%edi), %esi
+ sbbl 8(%ebx), %esi
+ movl 104(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 12(%edi), %eax
+ sbbl 12(%ebx), %eax
+ movl %edx, 4(%ecx)
+ movl 16(%edi), %edx
+ sbbl 16(%ebx), %edx
+ movl %esi, 8(%ecx)
+ movl 20(%ebx), %esi
+ movl %eax, 12(%ecx)
+ movl 20(%edi), %eax
+ sbbl %esi, %eax
+ movl 24(%ebx), %esi
+ movl %edx, 16(%ecx)
+ movl 24(%edi), %edx
+ sbbl %esi, %edx
+ movl 28(%ebx), %esi
+ movl %eax, 20(%ecx)
+ movl 28(%edi), %eax
+ sbbl %esi, %eax
+ movl 32(%ebx), %esi
+ movl %edx, 24(%ecx)
+ movl 32(%edi), %edx
+ sbbl %esi, %edx
+ movl 36(%ebx), %esi
+ movl %eax, 28(%ecx)
+ movl 36(%edi), %eax
+ sbbl %esi, %eax
+ movl 40(%ebx), %esi
+ movl %edx, 32(%ecx)
+ movl 40(%edi), %edx
+ sbbl %esi, %edx
+ movl 44(%ebx), %esi
+ movl %eax, 36(%ecx)
+ movl 44(%edi), %eax
+ sbbl %esi, %eax
+ movl 48(%ebx), %esi
+ movl %edx, 40(%ecx)
+ movl 48(%edi), %edx
+ sbbl %esi, %edx
+ movl 52(%ebx), %esi
+ movl %eax, 44(%ecx)
+ movl 52(%edi), %eax
+ sbbl %esi, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 56(%ebx), %eax
+ movl %edx, 48(%ecx)
+ movl 56(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 60(%ebx), %eax
+ movl 60(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 64(%ebx), %eax
+ movl 64(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 68(%ebx), %eax
+ movl 68(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 72(%ebx), %eax
+ movl 72(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 76(%ebx), %eax
+ movl 76(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 80(%ebx), %eax
+ movl 80(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 84(%ebx), %eax
+ movl 84(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 88(%ebx), %eax
+ movl 88(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 92(%ebx), %eax
+ movl 92(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 96(%ebx), %eax
+ movl 96(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 100(%ebx), %eax
+ movl 100(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 116(%esp), %edi
+ jne .LBB205_1
+# BB#2:
+ movl $0, 44(%esp) # 4-byte Folded Spill
+ jmp .LBB205_3
+.LBB205_1:
+ movl 48(%edi), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+.LBB205_3:
+ testb %al, %al
+ jne .LBB205_4
+# BB#5:
+ movl $0, 16(%esp) # 4-byte Folded Spill
+ movl $0, %ebx
+ jmp .LBB205_6
+.LBB205_4:
+ movl (%edi), %ebx
+ movl 4(%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+.LBB205_6:
+ jne .LBB205_7
+# BB#8:
+ movl $0, 24(%esp) # 4-byte Folded Spill
+ jmp .LBB205_9
+.LBB205_7:
+ movl 44(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+.LBB205_9:
+ jne .LBB205_10
+# BB#11:
+ movl $0, 20(%esp) # 4-byte Folded Spill
+ jmp .LBB205_12
+.LBB205_10:
+ movl 40(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+.LBB205_12:
+ jne .LBB205_13
+# BB#14:
+ movl $0, 12(%esp) # 4-byte Folded Spill
+ jmp .LBB205_15
+.LBB205_13:
+ movl 36(%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+.LBB205_15:
+ jne .LBB205_16
+# BB#17:
+ movl $0, 8(%esp) # 4-byte Folded Spill
+ jmp .LBB205_18
+.LBB205_16:
+ movl 32(%edi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+.LBB205_18:
+ jne .LBB205_19
+# BB#20:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB205_21
+.LBB205_19:
+ movl 28(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+.LBB205_21:
+ jne .LBB205_22
+# BB#23:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB205_24
+.LBB205_22:
+ movl 24(%edi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+.LBB205_24:
+ jne .LBB205_25
+# BB#26:
+ movl $0, %eax
+ jmp .LBB205_27
+.LBB205_25:
+ movl 20(%edi), %eax
+.LBB205_27:
+ jne .LBB205_28
+# BB#29:
+ movl $0, %edx
+ jmp .LBB205_30
+.LBB205_28:
+ movl 16(%edi), %edx
+.LBB205_30:
+ jne .LBB205_31
+# BB#32:
+ movl $0, %esi
+ jmp .LBB205_33
+.LBB205_31:
+ movl 12(%edi), %esi
+.LBB205_33:
+ jne .LBB205_34
+# BB#35:
+ xorl %edi, %edi
+ jmp .LBB205_36
+.LBB205_34:
+ movl 8(%edi), %edi
+.LBB205_36:
+ addl 36(%esp), %ebx # 4-byte Folded Reload
+ movl 16(%esp), %ebp # 4-byte Reload
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ movl %ebx, 52(%ecx)
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %ebp, 56(%ecx)
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %edi, 60(%ecx)
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %esi, 64(%ecx)
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 68(%ecx)
+ movl (%esp), %edx # 4-byte Reload
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 72(%ecx)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 76(%ecx)
+ movl 8(%esp), %edx # 4-byte Reload
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 80(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 84(%ecx)
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 88(%ecx)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 92(%ecx)
+ movl %eax, 96(%ecx)
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%ecx)
+ addl $84, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end205:
+ .size mcl_fpDbl_sub13L, .Lfunc_end205-mcl_fpDbl_sub13L
+
+ .align 16, 0x90
+ .type .LmulPv448x32,@function
+.LmulPv448x32: # @mulPv448x32
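+# local helper: multiply the 14-limb (448-bit) operand pointed to by %edx by
+# the 32-bit scalar at 116(%esp); the carries of the 14 partial products are
+# chained and the 15-limb result is written to the buffer in %ecx, which is
+# also returned in %eax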
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $96, %esp
+ movl %edx, %edi
+ movl 116(%esp), %esi
+ movl %esi, %eax
+ mull 52(%edi)
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 48(%edi)
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 44(%edi)
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 40(%edi)
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 36(%edi)
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 32(%edi)
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 28(%edi)
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 24(%edi)
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 20(%edi)
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 16(%edi)
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 12(%edi)
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 8(%edi)
+ movl %edx, %ebx
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 4(%edi)
+ movl %edx, %ebp
+ movl %eax, (%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull (%edi)
+ movl %eax, (%ecx)
+ addl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%ecx)
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%ecx)
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%ecx)
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%ecx)
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%ecx)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%ecx)
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%ecx)
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%ecx)
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%ecx)
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 56(%ecx)
+ movl %ecx, %eax
+ addl $96, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end206:
+ .size .LmulPv448x32, .Lfunc_end206-.LmulPv448x32
+
+ .globl mcl_fp_mulUnitPre14L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre14L,@function
+mcl_fp_mulUnitPre14L: # @mcl_fp_mulUnitPre14L
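+# multiply a 14-limb operand by a single 32-bit word: set up the GOT pointer
+# for PIC, call .LmulPv448x32 into a stack buffer, then copy the 15-limb
+# result to the destination (first argument)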
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $108, %esp
+ calll .L207$pb
+.L207$pb:
+ popl %ebx
+.Ltmp38:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp38-.L207$pb), %ebx
+ movl 136(%esp), %eax
+ movl %eax, (%esp)
+ leal 48(%esp), %ecx
+ movl 132(%esp), %edx
+ calll .LmulPv448x32
+ movl 104(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 88(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 80(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 76(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 72(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp
+ movl 64(%esp), %ebx
+ movl 60(%esp), %edi
+ movl 56(%esp), %esi
+ movl 48(%esp), %edx
+ movl 52(%esp), %ecx
+ movl 128(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %edi, 12(%eax)
+ movl %ebx, 16(%eax)
+ movl %ebp, 20(%eax)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl %ecx, 56(%eax)
+ addl $108, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end207:
+ .size mcl_fp_mulUnitPre14L, .Lfunc_end207-mcl_fp_mulUnitPre14L
+
+ .globl mcl_fpDbl_mulPre14L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre14L,@function
+mcl_fpDbl_mulPre14L: # @mcl_fpDbl_mulPre14L
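+# 14-limb full multiplication (no modular reduction) via one level of
+# Karatsuba: multiply the low and high 7-limb halves with mcl_fpDbl_mulPre7L,
+# then form the middle term from (x_lo + x_hi) * (y_lo + y_hi) and fold the
+# saved carries into the combined result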
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $268, %esp # imm = 0x10C
+ calll .L208$pb
+.L208$pb:
+ popl %ebx
+.Ltmp39:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp39-.L208$pb), %ebx
+ movl %ebx, -192(%ebp) # 4-byte Spill
+ movl 16(%ebp), %esi
+ movl %esi, 8(%esp)
+ movl 12(%ebp), %edi
+ movl %edi, 4(%esp)
+ movl 8(%ebp), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre7L@PLT
+ leal 28(%esi), %eax
+ movl %eax, 8(%esp)
+ leal 28(%edi), %eax
+ movl %eax, 4(%esp)
+ movl 8(%ebp), %eax
+ leal 56(%eax), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre7L@PLT
+ movl 44(%edi), %eax
+ movl %eax, -156(%ebp) # 4-byte Spill
+ movl 40(%edi), %eax
+ movl 36(%edi), %edx
+ movl (%edi), %edi
+ movl 12(%ebp), %ecx
+ movl 4(%ecx), %ecx
+ movl 12(%ebp), %ebx
+ addl 28(%ebx), %edi
+ movl %edi, -180(%ebp) # 4-byte Spill
+ movl 12(%ebp), %edi
+ adcl 32(%edi), %ecx
+ movl %ecx, -200(%ebp) # 4-byte Spill
+ adcl 8(%edi), %edx
+ movl %edx, -212(%ebp) # 4-byte Spill
+ adcl 12(%edi), %eax
+ movl %eax, -196(%ebp) # 4-byte Spill
+ movl -156(%ebp), %eax # 4-byte Reload
+ adcl 16(%edi), %eax
+ movl %eax, -156(%ebp) # 4-byte Spill
+ movl %eax, %ebx
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -128(%ebp) # 4-byte Spill
+ movl (%esi), %eax
+ addl 28(%esi), %eax
+ movl %eax, -216(%ebp) # 4-byte Spill
+ movl 4(%esi), %eax
+ adcl 32(%esi), %eax
+ movl %eax, -164(%ebp) # 4-byte Spill
+ movl 36(%esi), %eax
+ adcl 8(%esi), %eax
+ movl %eax, -168(%ebp) # 4-byte Spill
+ movl 40(%esi), %eax
+ adcl 12(%esi), %eax
+ movl %eax, -172(%ebp) # 4-byte Spill
+ movl 44(%esi), %eax
+ adcl 16(%esi), %eax
+ movl %eax, -176(%ebp) # 4-byte Spill
+ movl 48(%esi), %ecx
+ adcl 20(%esi), %ecx
+ movl 52(%esi), %eax
+ adcl 24(%esi), %eax
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %esi
+ popl %eax
+ movl %esi, -220(%ebp) # 4-byte Spill
+ movl %ebx, %esi
+ movl %edx, -184(%ebp) # 4-byte Spill
+ movl -180(%ebp), %edx # 4-byte Reload
+ movl %edx, -188(%ebp) # 4-byte Spill
+ jb .LBB208_2
+# BB#1:
+ xorl %esi, %esi
+ movl $0, -184(%ebp) # 4-byte Folded Spill
+ movl $0, -188(%ebp) # 4-byte Folded Spill
+.LBB208_2:
+ movl %esi, -204(%ebp) # 4-byte Spill
+ movl 52(%edi), %esi
+ movl 48(%edi), %ebx
+ movl -128(%ebp), %edx # 4-byte Reload
+ pushl %eax
+ movl %edx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ adcl 20(%edi), %ebx
+ movl %ebx, -160(%ebp) # 4-byte Spill
+ adcl 24(%edi), %esi
+ movl %esi, -208(%ebp) # 4-byte Spill
+ movl %eax, -148(%ebp) # 4-byte Spill
+ movl %ecx, -152(%ebp) # 4-byte Spill
+ movl -176(%ebp), %esi # 4-byte Reload
+ movl %esi, -128(%ebp) # 4-byte Spill
+ movl -172(%ebp), %esi # 4-byte Reload
+ movl %esi, -132(%ebp) # 4-byte Spill
+ movl -168(%ebp), %esi # 4-byte Reload
+ movl %esi, -136(%ebp) # 4-byte Spill
+ movl -164(%ebp), %esi # 4-byte Reload
+ movl %esi, -140(%ebp) # 4-byte Spill
+ movl -216(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -144(%ebp) # 4-byte Spill
+ jb .LBB208_4
+# BB#3:
+ movl $0, -148(%ebp) # 4-byte Folded Spill
+ movl $0, -152(%ebp) # 4-byte Folded Spill
+ movl $0, -128(%ebp) # 4-byte Folded Spill
+ movl $0, -132(%ebp) # 4-byte Folded Spill
+ movl $0, -136(%ebp) # 4-byte Folded Spill
+ movl $0, -140(%ebp) # 4-byte Folded Spill
+ movl $0, -144(%ebp) # 4-byte Folded Spill
+.LBB208_4:
+ movl -180(%ebp), %edx # 4-byte Reload
+ movl %edx, -96(%ebp)
+ movl -200(%ebp), %esi # 4-byte Reload
+ movl %esi, -92(%ebp)
+ movl -212(%ebp), %edx # 4-byte Reload
+ movl %edx, -88(%ebp)
+ movl -196(%ebp), %edi # 4-byte Reload
+ movl %edi, -84(%ebp)
+ movl -156(%ebp), %edx # 4-byte Reload
+ movl %edx, -80(%ebp)
+ movl %ebx, -124(%ebp)
+ movl -164(%ebp), %edx # 4-byte Reload
+ movl %edx, -120(%ebp)
+ movl -168(%ebp), %edx # 4-byte Reload
+ movl %edx, -116(%ebp)
+ movl -172(%ebp), %edx # 4-byte Reload
+ movl %edx, -112(%ebp)
+ movl -176(%ebp), %edx # 4-byte Reload
+ movl %edx, -108(%ebp)
+ movl %ecx, -104(%ebp)
+ movl %edi, %ebx
+ movl %esi, %edi
+ movl %eax, -100(%ebp)
+ sbbl %edx, %edx
+ movl -160(%ebp), %eax # 4-byte Reload
+ movl %eax, -76(%ebp)
+ movl -208(%ebp), %esi # 4-byte Reload
+ movl %esi, -72(%ebp)
+ movl -220(%ebp), %ecx # 4-byte Reload
+ pushl %eax
+ movl %ecx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB208_6
+# BB#5:
+ movl $0, %esi
+ movl $0, %eax
+ movl $0, %ebx
+ movl $0, %edi
+.LBB208_6:
+ movl %eax, -160(%ebp) # 4-byte Spill
+ sbbl %eax, %eax
+ leal -124(%ebp), %ecx
+ movl %ecx, 8(%esp)
+ leal -96(%ebp), %ecx
+ movl %ecx, 4(%esp)
+ leal -68(%ebp), %ecx
+ movl %ecx, (%esp)
+ andl %eax, %edx
+ movl -188(%ebp), %eax # 4-byte Reload
+ addl %eax, -144(%ebp) # 4-byte Folded Spill
+ adcl %edi, -140(%ebp) # 4-byte Folded Spill
+ movl -184(%ebp), %eax # 4-byte Reload
+ adcl %eax, -136(%ebp) # 4-byte Folded Spill
+ adcl %ebx, -132(%ebp) # 4-byte Folded Spill
+ movl -204(%ebp), %eax # 4-byte Reload
+ adcl %eax, -128(%ebp) # 4-byte Folded Spill
+ movl -152(%ebp), %edi # 4-byte Reload
+ adcl -160(%ebp), %edi # 4-byte Folded Reload
+ adcl %esi, -148(%ebp) # 4-byte Folded Spill
+ sbbl %esi, %esi
+ andl $1, %esi
+ andl $1, %edx
+ movl %edx, -156(%ebp) # 4-byte Spill
+ movl -192(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre7L@PLT
+ movl -144(%ebp), %eax # 4-byte Reload
+ addl -40(%ebp), %eax
+ movl %eax, -144(%ebp) # 4-byte Spill
+ movl -140(%ebp), %eax # 4-byte Reload
+ adcl -36(%ebp), %eax
+ movl %eax, -140(%ebp) # 4-byte Spill
+ movl -136(%ebp), %eax # 4-byte Reload
+ adcl -32(%ebp), %eax
+ movl %eax, -136(%ebp) # 4-byte Spill
+ movl -132(%ebp), %eax # 4-byte Reload
+ adcl -28(%ebp), %eax
+ movl %eax, -132(%ebp) # 4-byte Spill
+ movl -128(%ebp), %eax # 4-byte Reload
+ adcl -24(%ebp), %eax
+ movl %eax, -128(%ebp) # 4-byte Spill
+ adcl -20(%ebp), %edi
+ movl -148(%ebp), %eax # 4-byte Reload
+ adcl -16(%ebp), %eax
+ movl %eax, -148(%ebp) # 4-byte Spill
+ adcl %esi, -156(%ebp) # 4-byte Folded Spill
+ movl -68(%ebp), %eax
+ movl 8(%ebp), %esi
+ subl (%esi), %eax
+ movl %eax, -172(%ebp) # 4-byte Spill
+ movl -64(%ebp), %ecx
+ sbbl 4(%esi), %ecx
+ movl -60(%ebp), %eax
+ sbbl 8(%esi), %eax
+ movl %eax, -160(%ebp) # 4-byte Spill
+ movl -56(%ebp), %edx
+ sbbl 12(%esi), %edx
+ movl -52(%ebp), %ebx
+ sbbl 16(%esi), %ebx
+ movl -48(%ebp), %eax
+ sbbl 20(%esi), %eax
+ movl %eax, -164(%ebp) # 4-byte Spill
+ movl -44(%ebp), %eax
+ sbbl 24(%esi), %eax
+ movl %eax, -168(%ebp) # 4-byte Spill
+ movl 28(%esi), %eax
+ movl %eax, -176(%ebp) # 4-byte Spill
+ sbbl %eax, -144(%ebp) # 4-byte Folded Spill
+ movl 32(%esi), %eax
+ movl %eax, -180(%ebp) # 4-byte Spill
+ sbbl %eax, -140(%ebp) # 4-byte Folded Spill
+ movl 36(%esi), %eax
+ movl %eax, -184(%ebp) # 4-byte Spill
+ sbbl %eax, -136(%ebp) # 4-byte Folded Spill
+ movl 40(%esi), %eax
+ movl %eax, -188(%ebp) # 4-byte Spill
+ sbbl %eax, -132(%ebp) # 4-byte Folded Spill
+ movl 44(%esi), %eax
+ movl %eax, -192(%ebp) # 4-byte Spill
+ sbbl %eax, -128(%ebp) # 4-byte Folded Spill
+ movl 48(%esi), %eax
+ movl %eax, -196(%ebp) # 4-byte Spill
+ sbbl %eax, %edi
+ movl %edi, -152(%ebp) # 4-byte Spill
+ movl 52(%esi), %eax
+ movl %eax, -200(%ebp) # 4-byte Spill
+ movl -148(%ebp), %edi # 4-byte Reload
+ sbbl %eax, %edi
+ sbbl $0, -156(%ebp) # 4-byte Folded Spill
+ movl 56(%esi), %eax
+ movl %eax, -228(%ebp) # 4-byte Spill
+ subl %eax, -172(%ebp) # 4-byte Folded Spill
+ movl 60(%esi), %eax
+ movl %eax, -232(%ebp) # 4-byte Spill
+ sbbl %eax, %ecx
+ movl 64(%esi), %eax
+ movl %eax, -236(%ebp) # 4-byte Spill
+ sbbl %eax, -160(%ebp) # 4-byte Folded Spill
+ movl 68(%esi), %eax
+ movl %eax, -240(%ebp) # 4-byte Spill
+ sbbl %eax, %edx
+ movl 72(%esi), %eax
+ movl %eax, -244(%ebp) # 4-byte Spill
+ sbbl %eax, %ebx
+ movl 76(%esi), %eax
+ movl %eax, -248(%ebp) # 4-byte Spill
+ sbbl %eax, -164(%ebp) # 4-byte Folded Spill
+ movl 80(%esi), %eax
+ movl %eax, -252(%ebp) # 4-byte Spill
+ sbbl %eax, -168(%ebp) # 4-byte Folded Spill
+ movl 84(%esi), %eax
+ movl %eax, -256(%ebp) # 4-byte Spill
+ sbbl %eax, -144(%ebp) # 4-byte Folded Spill
+ movl 88(%esi), %eax
+ movl %eax, -208(%ebp) # 4-byte Spill
+ sbbl %eax, -140(%ebp) # 4-byte Folded Spill
+ movl 92(%esi), %eax
+ movl %eax, -212(%ebp) # 4-byte Spill
+ sbbl %eax, -136(%ebp) # 4-byte Folded Spill
+ movl 96(%esi), %eax
+ movl %eax, -216(%ebp) # 4-byte Spill
+ sbbl %eax, -132(%ebp) # 4-byte Folded Spill
+ movl 100(%esi), %eax
+ movl %eax, -220(%ebp) # 4-byte Spill
+ sbbl %eax, -128(%ebp) # 4-byte Folded Spill
+ movl 104(%esi), %eax
+ movl %eax, -224(%ebp) # 4-byte Spill
+ sbbl %eax, -152(%ebp) # 4-byte Folded Spill
+ movl 108(%esi), %eax
+ movl %eax, -204(%ebp) # 4-byte Spill
+ sbbl %eax, %edi
+ movl %edi, -148(%ebp) # 4-byte Spill
+ movl -156(%ebp), %edi # 4-byte Reload
+ sbbl $0, %edi
+ movl -172(%ebp), %eax # 4-byte Reload
+ addl -176(%ebp), %eax # 4-byte Folded Reload
+ adcl -180(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%esi)
+ movl -160(%ebp), %eax # 4-byte Reload
+ adcl -184(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%esi)
+ adcl -188(%ebp), %edx # 4-byte Folded Reload
+ movl %eax, 36(%esi)
+ adcl -192(%ebp), %ebx # 4-byte Folded Reload
+ movl %edx, 40(%esi)
+ movl -164(%ebp), %eax # 4-byte Reload
+ adcl -196(%ebp), %eax # 4-byte Folded Reload
+ movl %ebx, 44(%esi)
+ movl -168(%ebp), %ecx # 4-byte Reload
+ adcl -200(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 48(%esi)
+ movl -144(%ebp), %eax # 4-byte Reload
+ adcl -228(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 52(%esi)
+ movl -140(%ebp), %ecx # 4-byte Reload
+ adcl -232(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 56(%esi)
+ movl -136(%ebp), %eax # 4-byte Reload
+ adcl -236(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 60(%esi)
+ movl -132(%ebp), %ecx # 4-byte Reload
+ adcl -240(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 64(%esi)
+ movl -128(%ebp), %eax # 4-byte Reload
+ adcl -244(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 68(%esi)
+ movl -152(%ebp), %ecx # 4-byte Reload
+ adcl -248(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 72(%esi)
+ movl -148(%ebp), %eax # 4-byte Reload
+ adcl -252(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 76(%esi)
+ adcl -256(%ebp), %edi # 4-byte Folded Reload
+ movl %eax, 80(%esi)
+ movl %edi, 84(%esi)
+ movl -208(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 88(%esi)
+ movl -212(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 92(%esi)
+ movl -216(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 96(%esi)
+ movl -220(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 100(%esi)
+ movl -224(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 104(%esi)
+ movl -204(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 108(%esi)
+ addl $268, %esp # imm = 0x10C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end208:
+ .size mcl_fpDbl_mulPre14L, .Lfunc_end208-mcl_fpDbl_mulPre14L
+
+ .globl mcl_fpDbl_sqrPre14L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre14L,@function
+mcl_fpDbl_sqrPre14L: # @mcl_fpDbl_sqrPre14L
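+# 14-limb full squaring (no modular reduction), using the same 7-limb
+# Karatsuba split as mcl_fpDbl_mulPre14L but with both multiplicands taken
+# from the same input, so the half-sum is computed once and doubled terms
+# are formed with shifts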
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $268, %esp # imm = 0x10C
+ calll .L209$pb
+.L209$pb:
+ popl %ebx
+.Ltmp40:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp40-.L209$pb), %ebx
+ movl %ebx, -172(%ebp) # 4-byte Spill
+ movl 12(%ebp), %edi
+ movl %edi, 8(%esp)
+ movl %edi, 4(%esp)
+ movl 8(%ebp), %esi
+ movl %esi, (%esp)
+ calll mcl_fpDbl_mulPre7L@PLT
+ leal 28(%edi), %eax
+ movl %eax, 8(%esp)
+ movl %eax, 4(%esp)
+ leal 56(%esi), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre7L@PLT
+ movl 48(%edi), %eax
+ movl 44(%edi), %ecx
+ movl 36(%edi), %edx
+ movl (%edi), %esi
+ movl 4(%edi), %ebx
+ addl 28(%edi), %esi
+ adcl 32(%edi), %ebx
+ movl %ebx, -164(%ebp) # 4-byte Spill
+ adcl 8(%edi), %edx
+ movl %edx, -160(%ebp) # 4-byte Spill
+ movl 40(%edi), %edx
+ adcl 12(%edi), %edx
+ adcl 16(%edi), %ecx
+ movl %ecx, -180(%ebp) # 4-byte Spill
+ adcl 20(%edi), %eax
+ movl %eax, -176(%ebp) # 4-byte Spill
+ movl 52(%edi), %ecx
+ adcl 24(%edi), %ecx
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -184(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -152(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -148(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -144(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -140(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -136(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %edi
+ seto %al
+ lahf
+ movl %eax, %eax
+ sbbl %ebx, %ebx
+ movl %ebx, -128(%ebp) # 4-byte Spill
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB209_1
+# BB#2:
+ movl %esi, -168(%ebp) # 4-byte Spill
+ movl $0, -132(%ebp) # 4-byte Folded Spill
+ jmp .LBB209_3
+.LBB209_1:
+ leal (%esi,%esi), %eax
+ movl %esi, -168(%ebp) # 4-byte Spill
+ movl %eax, -132(%ebp) # 4-byte Spill
+.LBB209_3:
+ movl %edi, %eax
+ addb $127, %al
+ sahf
+ movl -180(%ebp), %ebx # 4-byte Reload
+ jb .LBB209_4
+# BB#5:
+ movl $0, -156(%ebp) # 4-byte Folded Spill
+ jmp .LBB209_6
+.LBB209_4:
+ movl -164(%ebp), %eax # 4-byte Reload
+ movl -168(%ebp), %esi # 4-byte Reload
+ shldl $1, %esi, %eax
+ movl %eax, -156(%ebp) # 4-byte Spill
+.LBB209_6:
+ movl -176(%ebp), %edi # 4-byte Reload
+ movl -136(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB209_7
+# BB#8:
+ movl $0, -136(%ebp) # 4-byte Folded Spill
+ jmp .LBB209_9
+.LBB209_7:
+ movl -160(%ebp), %eax # 4-byte Reload
+ movl -164(%ebp), %esi # 4-byte Reload
+ shldl $1, %esi, %eax
+ movl %eax, -136(%ebp) # 4-byte Spill
+.LBB209_9:
+ movl %ebx, %esi
+ movl -140(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB209_10
+# BB#11:
+ movl $0, -140(%ebp) # 4-byte Folded Spill
+ jmp .LBB209_12
+.LBB209_10:
+ movl %edx, %eax
+ movl -160(%ebp), %ebx # 4-byte Reload
+ shldl $1, %ebx, %eax
+ movl %eax, -140(%ebp) # 4-byte Spill
+.LBB209_12:
+ movl -144(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB209_13
+# BB#14:
+ movl $0, -144(%ebp) # 4-byte Folded Spill
+ jmp .LBB209_15
+.LBB209_13:
+ movl %esi, %eax
+ shldl $1, %edx, %eax
+ movl %eax, -144(%ebp) # 4-byte Spill
+.LBB209_15:
+ movl -148(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB209_16
+# BB#17:
+ movl $0, -148(%ebp) # 4-byte Folded Spill
+ jmp .LBB209_18
+.LBB209_16:
+ movl %edi, %eax
+ shldl $1, %esi, %eax
+ movl %eax, -148(%ebp) # 4-byte Spill
+.LBB209_18:
+ movl -152(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB209_19
+# BB#20:
+ movl $0, -152(%ebp) # 4-byte Folded Spill
+ jmp .LBB209_21
+.LBB209_19:
+ movl %ecx, %eax
+ shldl $1, %edi, %eax
+ movl %eax, -152(%ebp) # 4-byte Spill
+.LBB209_21:
+ movl -168(%ebp), %eax # 4-byte Reload
+ movl %eax, -96(%ebp)
+ movl %eax, -124(%ebp)
+ movl -164(%ebp), %eax # 4-byte Reload
+ movl %eax, -92(%ebp)
+ movl %eax, -120(%ebp)
+ movl -160(%ebp), %eax # 4-byte Reload
+ movl %eax, -88(%ebp)
+ movl %eax, -116(%ebp)
+ movl %edx, -84(%ebp)
+ movl %edx, -112(%ebp)
+ movl %esi, -80(%ebp)
+ movl %esi, -108(%ebp)
+ movl %edi, -76(%ebp)
+ movl %edi, -104(%ebp)
+ movl %ecx, -72(%ebp)
+ movl %ecx, -100(%ebp)
+ movl -184(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB209_22
+# BB#23:
+ xorl %edi, %edi
+ jmp .LBB209_24
+.LBB209_22:
+ shrl $31, %ecx
+ movl %ecx, %edi
+.LBB209_24:
+ leal -68(%ebp), %eax
+ movl %eax, (%esp)
+ leal -96(%ebp), %eax
+ movl %eax, 4(%esp)
+ leal -124(%ebp), %eax
+ movl %eax, 8(%esp)
+ movl -128(%ebp), %esi # 4-byte Reload
+ andl $1, %esi
+ movl -172(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre7L@PLT
+ movl -132(%ebp), %eax # 4-byte Reload
+ addl -40(%ebp), %eax
+ movl %eax, -132(%ebp) # 4-byte Spill
+ movl -156(%ebp), %eax # 4-byte Reload
+ adcl -36(%ebp), %eax
+ movl -136(%ebp), %ecx # 4-byte Reload
+ adcl -32(%ebp), %ecx
+ movl %ecx, -136(%ebp) # 4-byte Spill
+ movl -140(%ebp), %ecx # 4-byte Reload
+ adcl -28(%ebp), %ecx
+ movl %ecx, -140(%ebp) # 4-byte Spill
+ movl -144(%ebp), %ecx # 4-byte Reload
+ adcl -24(%ebp), %ecx
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ movl -148(%ebp), %ecx # 4-byte Reload
+ adcl -20(%ebp), %ecx
+ movl %ecx, -148(%ebp) # 4-byte Spill
+ movl -152(%ebp), %ecx # 4-byte Reload
+ adcl -16(%ebp), %ecx
+ movl %ecx, -152(%ebp) # 4-byte Spill
+ adcl %edi, %esi
+ movl %esi, -128(%ebp) # 4-byte Spill
+ movl -68(%ebp), %ecx
+ movl 8(%ebp), %esi
+ subl (%esi), %ecx
+ movl %ecx, -204(%ebp) # 4-byte Spill
+ movl -64(%ebp), %edi
+ sbbl 4(%esi), %edi
+ movl -60(%ebp), %edx
+ sbbl 8(%esi), %edx
+ movl %edx, -160(%ebp) # 4-byte Spill
+ movl -56(%ebp), %edx
+ sbbl 12(%esi), %edx
+ movl %edx, -168(%ebp) # 4-byte Spill
+ movl -52(%ebp), %ebx
+ sbbl 16(%esi), %ebx
+ movl -48(%ebp), %ecx
+ sbbl 20(%esi), %ecx
+ movl %ecx, -172(%ebp) # 4-byte Spill
+ movl -44(%ebp), %edx
+ sbbl 24(%esi), %edx
+ movl %edx, -164(%ebp) # 4-byte Spill
+ movl 28(%esi), %edx
+ movl %edx, -176(%ebp) # 4-byte Spill
+ sbbl %edx, -132(%ebp) # 4-byte Folded Spill
+ movl 32(%esi), %ecx
+ movl %ecx, -180(%ebp) # 4-byte Spill
+ sbbl %ecx, %eax
+ movl %eax, -156(%ebp) # 4-byte Spill
+ movl 36(%esi), %eax
+ movl %eax, -184(%ebp) # 4-byte Spill
+ sbbl %eax, -136(%ebp) # 4-byte Folded Spill
+ movl 40(%esi), %eax
+ movl %eax, -188(%ebp) # 4-byte Spill
+ sbbl %eax, -140(%ebp) # 4-byte Folded Spill
+ movl 44(%esi), %eax
+ movl %eax, -192(%ebp) # 4-byte Spill
+ sbbl %eax, -144(%ebp) # 4-byte Folded Spill
+ movl 48(%esi), %eax
+ movl %eax, -196(%ebp) # 4-byte Spill
+ sbbl %eax, -148(%ebp) # 4-byte Folded Spill
+ movl 52(%esi), %eax
+ movl %eax, -200(%ebp) # 4-byte Spill
+ sbbl %eax, -152(%ebp) # 4-byte Folded Spill
+ movl -128(%ebp), %ecx # 4-byte Reload
+ sbbl $0, %ecx
+ movl 56(%esi), %eax
+ movl %eax, -228(%ebp) # 4-byte Spill
+ movl -204(%ebp), %edx # 4-byte Reload
+ subl %eax, %edx
+ movl 60(%esi), %eax
+ movl %eax, -232(%ebp) # 4-byte Spill
+ sbbl %eax, %edi
+ movl 64(%esi), %eax
+ movl %eax, -236(%ebp) # 4-byte Spill
+ sbbl %eax, -160(%ebp) # 4-byte Folded Spill
+ movl 68(%esi), %eax
+ movl %eax, -240(%ebp) # 4-byte Spill
+ sbbl %eax, -168(%ebp) # 4-byte Folded Spill
+ movl 72(%esi), %eax
+ movl %eax, -244(%ebp) # 4-byte Spill
+ sbbl %eax, %ebx
+ movl 76(%esi), %eax
+ movl %eax, -248(%ebp) # 4-byte Spill
+ sbbl %eax, -172(%ebp) # 4-byte Folded Spill
+ movl 80(%esi), %eax
+ movl %eax, -252(%ebp) # 4-byte Spill
+ sbbl %eax, -164(%ebp) # 4-byte Folded Spill
+ movl 84(%esi), %eax
+ movl %eax, -256(%ebp) # 4-byte Spill
+ sbbl %eax, -132(%ebp) # 4-byte Folded Spill
+ movl 88(%esi), %eax
+ movl %eax, -204(%ebp) # 4-byte Spill
+ sbbl %eax, -156(%ebp) # 4-byte Folded Spill
+ movl 92(%esi), %eax
+ movl %eax, -208(%ebp) # 4-byte Spill
+ sbbl %eax, -136(%ebp) # 4-byte Folded Spill
+ movl 96(%esi), %eax
+ movl %eax, -212(%ebp) # 4-byte Spill
+ sbbl %eax, -140(%ebp) # 4-byte Folded Spill
+ movl 100(%esi), %eax
+ movl %eax, -216(%ebp) # 4-byte Spill
+ sbbl %eax, -144(%ebp) # 4-byte Folded Spill
+ movl 104(%esi), %eax
+ movl %eax, -220(%ebp) # 4-byte Spill
+ sbbl %eax, -148(%ebp) # 4-byte Folded Spill
+ movl 108(%esi), %eax
+ movl %eax, -224(%ebp) # 4-byte Spill
+ sbbl %eax, -152(%ebp) # 4-byte Folded Spill
+ sbbl $0, %ecx
+ movl %ecx, -128(%ebp) # 4-byte Spill
+ movl %edx, %eax
+ addl -176(%ebp), %eax # 4-byte Folded Reload
+ adcl -180(%ebp), %edi # 4-byte Folded Reload
+ movl %eax, 28(%esi)
+ movl -160(%ebp), %eax # 4-byte Reload
+ adcl -184(%ebp), %eax # 4-byte Folded Reload
+ movl %edi, 32(%esi)
+ movl -168(%ebp), %ecx # 4-byte Reload
+ adcl -188(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%esi)
+ adcl -192(%ebp), %ebx # 4-byte Folded Reload
+ movl %ecx, 40(%esi)
+ movl -172(%ebp), %eax # 4-byte Reload
+ adcl -196(%ebp), %eax # 4-byte Folded Reload
+ movl %ebx, 44(%esi)
+ movl -164(%ebp), %ecx # 4-byte Reload
+ adcl -200(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 48(%esi)
+ movl -132(%ebp), %eax # 4-byte Reload
+ adcl -228(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 52(%esi)
+ movl -156(%ebp), %edx # 4-byte Reload
+ adcl -232(%ebp), %edx # 4-byte Folded Reload
+ movl %eax, 56(%esi)
+ movl -136(%ebp), %ecx # 4-byte Reload
+ adcl -236(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 60(%esi)
+ movl -140(%ebp), %eax # 4-byte Reload
+ adcl -240(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 64(%esi)
+ movl -144(%ebp), %ecx # 4-byte Reload
+ adcl -244(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 68(%esi)
+ movl -148(%ebp), %eax # 4-byte Reload
+ adcl -248(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 72(%esi)
+ movl -152(%ebp), %ecx # 4-byte Reload
+ adcl -252(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 76(%esi)
+ movl -128(%ebp), %eax # 4-byte Reload
+ adcl -256(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 80(%esi)
+ movl %eax, 84(%esi)
+ movl -204(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 88(%esi)
+ movl -208(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 92(%esi)
+ movl -212(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 96(%esi)
+ movl -216(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 100(%esi)
+ movl -220(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 104(%esi)
+ movl -224(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 108(%esi)
+ addl $268, %esp # imm = 0x10C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end209:
+ .size mcl_fpDbl_sqrPre14L, .Lfunc_end209-mcl_fpDbl_sqrPre14L
+
+ .globl mcl_fp_mont14L
+ .align 16, 0x90
+ .type mcl_fp_mont14L,@function
+mcl_fp_mont14L: # @mcl_fp_mont14L
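+# 14-limb Montgomery multiplication: the per-modulus constant stored just
+# below p (at -4(p)) is cached at 48(%esp); each round accumulates x * y[i]
+# via .LmulPv448x32, multiplies the low limb by that constant to get the
+# reduction quotient q, and adds q * p, reducing one 32-bit word at a time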
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1900, %esp # imm = 0x76C
+ calll .L210$pb
+.L210$pb:
+ popl %ebx
+.Ltmp41:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp41-.L210$pb), %ebx
+ movl 1932(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1840(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 1840(%esp), %edi
+ movl 1844(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull %esi, %eax
+ movl 1896(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 1892(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 1888(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 1884(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 1880(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 1876(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 1872(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1868(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1864(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1860(%esp), %esi
+ movl 1856(%esp), %ebp
+ movl 1852(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 1848(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl %eax, (%esp)
+ leal 1776(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ addl 1776(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1780(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1784(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1788(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1792(%esp), %ebp
+ adcl 1796(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1800(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1804(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1808(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1812(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1816(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1820(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1824(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1828(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1832(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl 1928(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1712(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %edi
+ movl %edi, %edx
+ movl 100(%esp), %ecx # 4-byte Reload
+ addl 1712(%esp), %ecx
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1716(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1720(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1724(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ adcl 1728(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 1732(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1736(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1740(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 1744(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1748(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1752(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1756(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1760(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1764(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 1768(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1648(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ movl 100(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1648(%esp), %ebp
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1652(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1656(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1660(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1664(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 1668(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 1672(%esp), %ebp
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1676(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 1680(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1684(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1688(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1692(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1696(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1700(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 1704(%esp), %esi
+ adcl $0, %eax
+ movl %eax, %edi
+ movl 1928(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1584(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 84(%esp), %ecx # 4-byte Reload
+ addl 1584(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1588(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1592(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1596(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1600(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1604(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 1608(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1612(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1616(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1620(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1624(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1628(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1632(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 1636(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ adcl 1640(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1520(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 1520(%esp), %edi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1524(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1528(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1532(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1536(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1540(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 1544(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1548(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1552(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1556(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1560(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ebp # 4-byte Reload
+ adcl 1564(%esp), %ebp
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 1568(%esp), %esi
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 1572(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1576(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1456(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 1456(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1460(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1464(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1468(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1472(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1476(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1480(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1484(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1488(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1492(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 1496(%esp), %ebp
+ movl %ebp, 104(%esp) # 4-byte Spill
+ adcl 1500(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ adcl 1504(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1508(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1512(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1392(%esp), %ecx
+ movl 1932(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv448x32
+ andl $1, %edi
+ movl %edi, %eax
+ addl 1392(%esp), %esi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1396(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1400(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1404(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1408(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1412(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1416(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 1420(%esp), %esi
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 1424(%esp), %ebp
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 1428(%esp), %edi
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1432(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1436(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1440(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1444(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1448(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1328(%esp), %ecx
+ movl 1924(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv448x32
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 1328(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1332(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1336(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1340(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1344(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1348(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1352(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl 1356(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ adcl 1360(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1364(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1368(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1372(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1376(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1380(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 1384(%esp), %edi
+ sbbl %esi, %esi
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1264(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 1264(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1268(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1272(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1276(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1280(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 1284(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1288(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1292(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1296(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1300(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1304(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1308(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1312(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 1316(%esp), %esi
+ adcl 1320(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 1200(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 68(%esp), %eax # 4-byte Reload
+ addl 1200(%esp), %eax
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1204(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1208(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 1212(%esp), %edi
+ adcl 1216(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1220(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1224(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1228(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1232(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1236(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1240(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1244(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ adcl 1248(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1252(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1256(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %eax, %ebp
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1136(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 1136(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1140(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 1144(%esp), %ebp
+ adcl 1148(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1152(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1156(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1168(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 1172(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 1180(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1184(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1188(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1192(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 1072(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 60(%esp), %eax # 4-byte Reload
+ addl 1072(%esp), %eax
+ adcl 1076(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1080(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1084(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1088(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1092(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1096(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1100(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ adcl 1104(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 1108(%esp), %ebp
+ adcl 1112(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1116(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1120(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1124(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1128(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %eax, %edi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1008(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 1008(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 1020(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1028(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl 1036(%esp), %edi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1040(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 1044(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 1052(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 944(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 52(%esp), %eax # 4-byte Reload
+ addl 944(%esp), %eax
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 948(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 952(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 956(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 960(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %esi # 4-byte Reload
+ adcl 964(%esp), %esi
+ adcl 968(%esp), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 972(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 976(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 980(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ adcl 984(%esp), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 988(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 992(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 996(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1000(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %eax, %edi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 880(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %ebp
+ addl 880(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ adcl 896(%esp), %edi
+ adcl 900(%esp), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 924(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 928(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 932(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 936(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 816(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 816(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 824(%esp), %ebp
+ adcl 828(%esp), %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 856(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 872(%esp), %esi
+ sbbl %eax, %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 752(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ movl 56(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 752(%esp), %edi
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 756(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 760(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 764(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 768(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 772(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 776(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 780(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 784(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 788(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 792(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 796(%esp), %ebp
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 800(%esp), %edi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 804(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 808(%esp), %esi
+ adcl $0, %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 688(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 64(%esp), %ecx # 4-byte Reload
+ addl 688(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 696(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 716(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 728(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ adcl 732(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 740(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 624(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ movl %edi, %ecx
+ andl $1, %ecx
+ addl 624(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl 636(%esp), %ebp
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 648(%esp), %esi
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 652(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 560(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 560(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 568(%esp), %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 580(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ adcl 584(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 592(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 496(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 496(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl 508(%esp), %edi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 520(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 528(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 540(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 432(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 88(%esp), %ecx # 4-byte Reload
+ addl 432(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 440(%esp), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %edi # 4-byte Reload
+ adcl 444(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 452(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 472(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 368(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 368(%esp), %ebp
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 376(%esp), %esi
+ adcl 380(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 392(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 96(%esp), %ecx # 4-byte Reload
+ addl 304(%esp), %ecx
+ adcl 308(%esp), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ adcl 312(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 324(%esp), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 328(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 240(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ movl 96(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 240(%esp), %ebp
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 248(%esp), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 252(%esp), %edi
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 256(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 264(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 268(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 176(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 104(%esp), %ecx # 4-byte Reload
+ addl 176(%esp), %ecx
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 184(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ adcl 188(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 192(%esp), %edi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 200(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 112(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %ebp
+ addl 112(%esp), %esi
+ movl 100(%esp), %esi # 4-byte Reload
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 120(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ adcl 124(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ adcl 128(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 132(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl %ecx, %ebx
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 136(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 140(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 144(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 148(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 152(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 156(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 160(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 164(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 168(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl $0, %ebp
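+# Final step of mcl_fp_mont14L: subtract the modulus (pointed to by 1932(%esp))
+# from the accumulated result and, depending on the resulting borrow, select
+# either the reduced or the unreduced value before storing the 14 output words.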
+ movl 1932(%esp), %ecx
+ subl (%ecx), %eax
+ sbbl 4(%ecx), %edx
+ sbbl 8(%ecx), %esi
+ sbbl 12(%ecx), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ sbbl 16(%ecx), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ sbbl 20(%ecx), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ sbbl 24(%ecx), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 60(%esp), %ebx # 4-byte Reload
+ sbbl 28(%ecx), %ebx
+ movl 52(%esp), %edi # 4-byte Reload
+ sbbl 32(%ecx), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ sbbl 36(%ecx), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ sbbl 40(%ecx), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ sbbl 44(%ecx), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ sbbl 48(%ecx), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ sbbl 52(%ecx), %edi
+ movl %ebp, %ecx
+ movl %edi, 104(%esp) # 4-byte Spill
+ sbbl $0, %ecx
+ andl $1, %ecx
+ jne .LBB210_2
+# BB#1:
+ movl %ebx, 60(%esp) # 4-byte Spill
+.LBB210_2:
+ testb %cl, %cl
+ movl 108(%esp), %ebx # 4-byte Reload
+ jne .LBB210_4
+# BB#3:
+ movl %eax, %ebx
+.LBB210_4:
+ movl 1920(%esp), %eax
+ movl %ebx, (%eax)
+ movl 92(%esp), %edi # 4-byte Reload
+ movl 72(%esp), %ecx # 4-byte Reload
+ jne .LBB210_6
+# BB#5:
+ movl %edx, %edi
+.LBB210_6:
+ movl %edi, 4(%eax)
+ jne .LBB210_8
+# BB#7:
+ movl %esi, 100(%esp) # 4-byte Spill
+.LBB210_8:
+ movl 100(%esp), %edx # 4-byte Reload
+ movl %edx, 8(%eax)
+ jne .LBB210_10
+# BB#9:
+ movl 16(%esp), %edx # 4-byte Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+.LBB210_10:
+ movl 84(%esp), %edx # 4-byte Reload
+ movl %edx, 12(%eax)
+ jne .LBB210_12
+# BB#11:
+ movl 20(%esp), %ecx # 4-byte Reload
+.LBB210_12:
+ movl %ecx, 16(%eax)
+ movl 76(%esp), %ecx # 4-byte Reload
+ jne .LBB210_14
+# BB#13:
+ movl 24(%esp), %ecx # 4-byte Reload
+.LBB210_14:
+ movl %ecx, 20(%eax)
+ movl 68(%esp), %ecx # 4-byte Reload
+ jne .LBB210_16
+# BB#15:
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB210_16:
+ movl %ecx, 24(%eax)
+ movl 60(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ jne .LBB210_18
+# BB#17:
+ movl 32(%esp), %ecx # 4-byte Reload
+.LBB210_18:
+ movl %ecx, 32(%eax)
+ movl 56(%esp), %ecx # 4-byte Reload
+ jne .LBB210_20
+# BB#19:
+ movl 36(%esp), %ecx # 4-byte Reload
+.LBB210_20:
+ movl %ecx, 36(%eax)
+ movl 64(%esp), %ecx # 4-byte Reload
+ jne .LBB210_22
+# BB#21:
+ movl 40(%esp), %ecx # 4-byte Reload
+.LBB210_22:
+ movl %ecx, 40(%eax)
+ movl 80(%esp), %ecx # 4-byte Reload
+ jne .LBB210_24
+# BB#23:
+ movl 44(%esp), %ecx # 4-byte Reload
+.LBB210_24:
+ movl %ecx, 44(%eax)
+ movl 88(%esp), %ecx # 4-byte Reload
+ jne .LBB210_26
+# BB#25:
+ movl 48(%esp), %ecx # 4-byte Reload
+.LBB210_26:
+ movl %ecx, 48(%eax)
+ movl 96(%esp), %ecx # 4-byte Reload
+ jne .LBB210_28
+# BB#27:
+ movl 104(%esp), %ecx # 4-byte Reload
+.LBB210_28:
+ movl %ecx, 52(%eax)
+ addl $1900, %esp # imm = 0x76C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end210:
+ .size mcl_fp_mont14L, .Lfunc_end210-mcl_fp_mont14L
+
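+# mcl_fp_montNF14L: Montgomery multiplication for 14-limb (448-bit) operands on
+# 32-bit x86, generated code. It follows the same calling convention as
+# mcl_fp_mont14L above (result, two operands, modulus); the NF variant differs
+# from mcl_fp_mont14L in how the final reduction step is carried out.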
+ .globl mcl_fp_montNF14L
+ .align 16, 0x90
+ .type mcl_fp_montNF14L,@function
+mcl_fp_montNF14L: # @mcl_fp_montNF14L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1884, %esp # imm = 0x75C
+ calll .L211$pb
+.L211$pb:
+ popl %ebx
+.Ltmp42:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp42-.L211$pb), %ebx
+ movl 1916(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
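+# -4(%eax): per-word Montgomery factor (likely -p^{-1} mod 2^32, stored just
+# before the modulus in mcl's parameter layout); spilled here for reuse in each
+# reduction round below.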
+ movl 1912(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1824(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 1824(%esp), %edi
+ movl 1828(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull %esi, %eax
+ movl 1880(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 1876(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 1872(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 1868(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 1864(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1860(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1856(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1852(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1848(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 1844(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1840(%esp), %esi
+ movl 1836(%esp), %ebp
+ movl 1832(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl %eax, (%esp)
+ leal 1760(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 1760(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1764(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1768(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 1772(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ adcl 1776(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1780(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1784(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1788(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1792(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1796(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1800(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1804(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 1808(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1812(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 1816(%esp), %ebp
+ movl 1912(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1696(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 1752(%esp), %eax
+ movl 68(%esp), %edx # 4-byte Reload
+ addl 1696(%esp), %edx
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1700(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1704(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 1708(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1712(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1716(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1720(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1724(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1728(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1732(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1736(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl 1740(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 1744(%esp), %edi
+ adcl 1748(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %ebp
+ movl %edx, %eax
+ movl %edx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1632(%esp), %ecx
+ movl 1916(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv448x32
+ addl 1632(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1636(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1640(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1644(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1648(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1652(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1656(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1660(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 1664(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1668(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1672(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1676(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1680(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1684(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1688(%esp), %ebp
+ movl 1912(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1568(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 1624(%esp), %eax
+ movl 88(%esp), %edx # 4-byte Reload
+ addl 1568(%esp), %edx
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1572(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1576(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1580(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1584(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1588(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1592(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 1596(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1600(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1604(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1608(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl 1612(%esp), %edi
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1616(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 1620(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %ebp
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1504(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 1504(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1508(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1512(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1516(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1520(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1524(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1528(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1532(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1536(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1540(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 1544(%esp), %esi
+ adcl 1548(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1552(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1556(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 1560(%esp), %ebp
+ movl 1912(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1440(%esp), %ecx
+ movl 1908(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv448x32
+ movl 1496(%esp), %eax
+ movl 72(%esp), %edx # 4-byte Reload
+ addl 1440(%esp), %edx
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1444(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1448(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1452(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1456(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1460(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 1464(%esp), %edi
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1468(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1472(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl 1476(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1480(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1484(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 1488(%esp), %esi
+ adcl 1492(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %edx, %eax
+ movl %edx, %ebp
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1376(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 1376(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1380(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1384(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1388(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1392(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1396(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 1400(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1404(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1408(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1412(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1416(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1420(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1424(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1428(%esp), %eax
+ movl %eax, %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1432(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1312(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 1368(%esp), %eax
+ movl 68(%esp), %edx # 4-byte Reload
+ addl 1312(%esp), %edx
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1316(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1320(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1324(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 1328(%esp), %edi
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1332(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1336(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 1340(%esp), %esi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1344(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1348(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1352(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1356(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl 1360(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1364(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ movl %ebp, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1248(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 1248(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1252(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1256(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1260(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 1264(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1268(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 1272(%esp), %ebp
+ adcl 1276(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1280(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 1284(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1288(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1292(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1296(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 1300(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1304(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 1184(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 1240(%esp), %edx
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 1184(%esp), %ecx
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1188(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1192(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1196(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1200(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1204(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1208(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1212(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1216(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1220(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1224(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1228(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1232(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1236(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1120(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 1120(%esp), %esi
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 1124(%esp), %ebp
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 1128(%esp), %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1140(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1144(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1148(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1152(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 1156(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1168(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 1056(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 1112(%esp), %eax
+ movl %ebp, %ecx
+ addl 1056(%esp), %ecx
+ adcl 1060(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 1064(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 1068(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 1072(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 1076(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 1080(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 1084(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ adcl 1088(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 1092(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 1096(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 1100(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 1104(%esp), %ebp
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 1108(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %esi
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 992(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 992(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1000(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1004(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 1008(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1028(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1040(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 1044(%esp), %ebp
+ adcl 1048(%esp), %esi
+ movl 1912(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 928(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 984(%esp), %eax
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 928(%esp), %ecx
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 932(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 936(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 940(%esp), %edi
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 944(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 948(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 952(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 956(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 960(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 964(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 968(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 972(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 976(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ adcl 980(%esp), %esi
+ movl %esi, %ebp
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 864(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 864(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 876(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 884(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 916(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 920(%esp), %ebp
+ movl 1912(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 800(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 856(%esp), %edx
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 800(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 808(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 816(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 828(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 852(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 736(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 736(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 764(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 768(%esp), %ebp
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 772(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 780(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 672(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 728(%esp), %edx
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 672(%esp), %ecx
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 696(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 700(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ adcl 704(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 712(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 716(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 608(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 608(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 616(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 624(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 644(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 544(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 600(%esp), %edx
+ movl 52(%esp), %ecx # 4-byte Reload
+ addl 544(%esp), %ecx
+ adcl 548(%esp), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 556(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 568(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 576(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 480(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 480(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 488(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 496(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 504(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 532(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 416(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 472(%esp), %edx
+ movl 84(%esp), %ecx # 4-byte Reload
+ addl 416(%esp), %ecx
+ adcl 420(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 424(%esp), %edi
+ adcl 428(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 464(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 352(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 352(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 360(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 364(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 388(%esp), %edi
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 392(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 288(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 344(%esp), %edx
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 288(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 296(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 320(%esp), %edi
+ adcl 324(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 328(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 224(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 232(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 256(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 260(%esp), %edi
+ adcl 264(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 160(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 216(%esp), %edx
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 160(%esp), %ecx
+ adcl 164(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 168(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 192(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 96(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 96(%esp), %esi
+ movl 64(%esp), %esi # 4-byte Reload
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 104(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl %ebp, %ebx
+ adcl 108(%esp), %esi
+ adcl 112(%esp), %edi
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 116(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 120(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 124(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 128(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 132(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 136(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 140(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 144(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 148(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 152(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 1916(%esp), %ebp
+ subl (%ebp), %edx
+ sbbl 4(%ebp), %ebx
+ movl %esi, %eax
+ sbbl 8(%ebp), %eax
+ movl %edi, %ecx
+ sbbl 12(%ebp), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ sbbl 16(%ebp), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ sbbl 20(%ebp), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ sbbl 24(%ebp), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ sbbl 28(%ebp), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ sbbl 32(%ebp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ sbbl 36(%ebp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ sbbl 40(%ebp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ sbbl 44(%ebp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ sbbl 48(%ebp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ sbbl 52(%ebp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ sarl $31, %ecx
+ testl %ecx, %ecx
+ movl 92(%esp), %ebp # 4-byte Reload
+ js .LBB211_2
+# BB#1:
+ movl %edx, %ebp
+.LBB211_2:
+ movl 1904(%esp), %edx
+ movl %ebp, (%edx)
+ movl 88(%esp), %ebp # 4-byte Reload
+ js .LBB211_4
+# BB#3:
+ movl %ebx, %ebp
+.LBB211_4:
+ movl %ebp, 4(%edx)
+ js .LBB211_6
+# BB#5:
+ movl %eax, %esi
+.LBB211_6:
+ movl %esi, 8(%edx)
+ js .LBB211_8
+# BB#7:
+ movl 4(%esp), %edi # 4-byte Reload
+.LBB211_8:
+ movl %edi, 12(%edx)
+ movl 68(%esp), %eax # 4-byte Reload
+ js .LBB211_10
+# BB#9:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB211_10:
+ movl %eax, 16(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB211_12
+# BB#11:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB211_12:
+ movl %eax, 20(%edx)
+ movl 44(%esp), %eax # 4-byte Reload
+ js .LBB211_14
+# BB#13:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB211_14:
+ movl %eax, 24(%edx)
+ movl 40(%esp), %eax # 4-byte Reload
+ js .LBB211_16
+# BB#15:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB211_16:
+ movl %eax, 28(%edx)
+ movl 48(%esp), %eax # 4-byte Reload
+ js .LBB211_18
+# BB#17:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB211_18:
+ movl %eax, 32(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB211_20
+# BB#19:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB211_20:
+ movl %eax, 36(%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB211_22
+# BB#21:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB211_22:
+ movl %eax, 40(%edx)
+ movl 84(%esp), %eax # 4-byte Reload
+ js .LBB211_24
+# BB#23:
+ movl 36(%esp), %eax # 4-byte Reload
+.LBB211_24:
+ movl %eax, 44(%edx)
+ movl 76(%esp), %eax # 4-byte Reload
+ js .LBB211_26
+# BB#25:
+ movl 64(%esp), %eax # 4-byte Reload
+.LBB211_26:
+ movl %eax, 48(%edx)
+ movl 80(%esp), %eax # 4-byte Reload
+ js .LBB211_28
+# BB#27:
+ movl 72(%esp), %eax # 4-byte Reload
+.LBB211_28:
+ movl %eax, 52(%edx)
+ addl $1884, %esp # imm = 0x75C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end211:
+ .size mcl_fp_montNF14L, .Lfunc_end211-mcl_fp_montNF14L
+
+ .globl mcl_fp_montRed14L
+ .align 16, 0x90
+ .type mcl_fp_montRed14L,@function
+mcl_fp_montRed14L: # @mcl_fp_montRed14L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1068, %esp # imm = 0x42C
+ calll .L212$pb
+.L212$pb:
+ popl %eax
+.Ltmp43:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp43-.L212$pb), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1096(%esp), %edx
+ movl -4(%edx), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 1092(%esp), %ecx
+ movl (%ecx), %ebx
+ movl %ebx, 92(%esp) # 4-byte Spill
+ movl 4(%ecx), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ imull %eax, %ebx
+ movl 108(%ecx), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 104(%ecx), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 100(%ecx), %esi
+ movl %esi, 120(%esp) # 4-byte Spill
+ movl 96(%ecx), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 92(%ecx), %esi
+ movl %esi, 140(%esp) # 4-byte Spill
+ movl 88(%ecx), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 84(%ecx), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 80(%ecx), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 76(%ecx), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 72(%ecx), %esi
+ movl %esi, 136(%esp) # 4-byte Spill
+ movl 68(%ecx), %esi
+ movl %esi, 168(%esp) # 4-byte Spill
+ movl 64(%ecx), %esi
+ movl %esi, 164(%esp) # 4-byte Spill
+ movl 60(%ecx), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 56(%ecx), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 52(%ecx), %edi
+ movl %edi, 144(%esp) # 4-byte Spill
+ movl 48(%ecx), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 44(%ecx), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 40(%ecx), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ movl 36(%ecx), %ebp
+ movl 32(%ecx), %edi
+ movl 28(%ecx), %esi
+ movl 24(%ecx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 20(%ecx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 16(%ecx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 12(%ecx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 8(%ecx), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl (%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 52(%edx), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 48(%edx), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 44(%edx), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 40(%edx), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 36(%edx), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 32(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 28(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 24(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 16(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 12(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 8(%edx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 4(%edx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl %ebx, (%esp)
+ leal 1008(%esp), %ecx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ movl 92(%esp), %eax # 4-byte Reload
+ addl 1008(%esp), %eax
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1012(%esp), %ecx
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1028(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1036(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl 1040(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ adcl 1044(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl 1052(%esp), %ebp
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, 164(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ sbbl %edi, %edi
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 944(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ movl %edi, %ecx
+ andl $1, %ecx
+ addl 944(%esp), %esi
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 948(%esp), %edx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 952(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 976(%esp), %edi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 980(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 984(%esp), %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %esi # 4-byte Reload
+ adcl 1000(%esp), %esi
+ adcl $0, 164(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl %edx, %eax
+ movl %edx, %ebp
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 880(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 880(%esp), %ebp
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 884(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 908(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %edi # 4-byte Reload
+ adcl 920(%esp), %edi
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 928(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ adcl 932(%esp), %esi
+ movl %esi, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 936(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ movl 152(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 816(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 816(%esp), %esi
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 820(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 852(%esp), %edi
+ movl %edi, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 856(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 152(%esp) # 4-byte Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ movl 128(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 752(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 752(%esp), %ebp
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 756(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 780(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ movl 156(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 128(%esp) # 4-byte Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 688(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 688(%esp), %esi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 692(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 696(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 716(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 156(%esp) # 4-byte Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ movl 140(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 624(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 624(%esp), %ebp
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 628(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %ebp # 4-byte Reload
+ adcl 664(%esp), %ebp
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 140(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 560(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 560(%esp), %esi
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 564(%esp), %ecx
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ adcl 596(%esp), %ebp
+ movl %ebp, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %edi # 4-byte Reload
+ adcl 600(%esp), %edi
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ movl 120(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 496(%esp), %ecx
+ movl 1096(%esp), %eax
+ movl %eax, %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 496(%esp), %ebp
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 500(%esp), %ecx
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %ebp # 4-byte Reload
+ adcl 516(%esp), %ebp
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ adcl 532(%esp), %edi
+ movl %edi, 168(%esp) # 4-byte Spill
+ movl 136(%esp), %edi # 4-byte Reload
+ adcl 536(%esp), %edi
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 120(%esp) # 4-byte Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 432(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 432(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 440(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 444(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ adcl 448(%esp), %ebp
+ movl %ebp, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %ecx # 4-byte Reload
+ adcl 452(%esp), %ecx
+ movl %ecx, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %ebp # 4-byte Reload
+ adcl 456(%esp), %ebp
+ movl 164(%esp), %ecx # 4-byte Reload
+ adcl 460(%esp), %ecx
+ movl %ecx, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %ecx # 4-byte Reload
+ adcl 464(%esp), %ecx
+ movl %ecx, 168(%esp) # 4-byte Spill
+ adcl 468(%esp), %edi
+ movl %edi, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %ecx # 4-byte Reload
+ adcl 472(%esp), %ecx
+ movl %ecx, 148(%esp) # 4-byte Spill
+ movl 156(%esp), %ecx # 4-byte Reload
+ adcl 476(%esp), %ecx
+ movl %ecx, 156(%esp) # 4-byte Spill
+ movl 152(%esp), %ecx # 4-byte Reload
+ adcl 480(%esp), %ecx
+ movl %ecx, 152(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 484(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 140(%esp), %ecx # 4-byte Reload
+ adcl 488(%esp), %ecx
+ movl %ecx, 140(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %eax, %esi
+ movl 88(%esp), %edi # 4-byte Reload
+ imull %edi, %eax
+ movl %eax, (%esp)
+ leal 368(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 368(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 376(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %ecx # 4-byte Reload
+ adcl 380(%esp), %ecx
+ movl %ecx, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %esi # 4-byte Reload
+ adcl 384(%esp), %esi
+ adcl 388(%esp), %ebp
+ movl %ebp, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %ecx # 4-byte Reload
+ adcl 392(%esp), %ecx
+ movl %ecx, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %ecx # 4-byte Reload
+ adcl 396(%esp), %ecx
+ movl %ecx, 168(%esp) # 4-byte Spill
+ movl 136(%esp), %ecx # 4-byte Reload
+ adcl 400(%esp), %ecx
+ movl %ecx, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %ecx # 4-byte Reload
+ adcl 404(%esp), %ecx
+ movl %ecx, 148(%esp) # 4-byte Spill
+ movl 156(%esp), %ecx # 4-byte Reload
+ adcl 408(%esp), %ecx
+ movl %ecx, 156(%esp) # 4-byte Spill
+ movl 152(%esp), %ecx # 4-byte Reload
+ adcl 412(%esp), %ecx
+ movl %ecx, 152(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 416(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 140(%esp), %ecx # 4-byte Reload
+ adcl 420(%esp), %ecx
+ movl %ecx, 140(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 424(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %eax, %ebp
+ imull %edi, %eax
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 304(%esp), %ebp
+ movl 132(%esp), %edi # 4-byte Reload
+ adcl 308(%esp), %edi
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl %esi, %ebp
+ adcl 316(%esp), %ebp
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 136(%esp), %esi # 4-byte Reload
+ adcl 332(%esp), %esi
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %edi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 240(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 240(%esp), %edi
+ movl 144(%esp), %ecx # 4-byte Reload
+ adcl 244(%esp), %ecx
+ adcl 248(%esp), %ebp
+ movl %ebp, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ adcl 264(%esp), %esi
+ movl %esi, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %edi # 4-byte Reload
+ adcl 268(%esp), %edi
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 124(%esp), %ebp # 4-byte Reload
+ adcl 280(%esp), %ebp
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 176(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 176(%esp), %esi
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %ebx # 4-byte Reload
+ adcl 188(%esp), %ebx
+ movl %ebx, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 136(%esp), %edx # 4-byte Reload
+ adcl 196(%esp), %edx
+ movl %edx, 136(%esp) # 4-byte Spill
+ movl %edi, %eax
+ adcl 200(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ adcl 212(%esp), %ebp
+ movl %ebp, 124(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 232(%esp), %ecx
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ movl 172(%esp), %edi # 4-byte Reload
+ subl 16(%esp), %edi # 4-byte Folded Reload
+ movl 160(%esp), %ebp # 4-byte Reload
+ sbbl 8(%esp), %ebp # 4-byte Folded Reload
+ sbbl 12(%esp), %ebx # 4-byte Folded Reload
+ movl 168(%esp), %eax # 4-byte Reload
+ sbbl 20(%esp), %eax # 4-byte Folded Reload
+ sbbl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 148(%esp), %edx # 4-byte Reload
+ sbbl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 156(%esp), %edx # 4-byte Reload
+ sbbl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 152(%esp), %edx # 4-byte Reload
+ sbbl 36(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 124(%esp), %edx # 4-byte Reload
+ sbbl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 140(%esp), %edx # 4-byte Reload
+ sbbl 44(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 128(%esp), %edx # 4-byte Reload
+ sbbl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %edx # 4-byte Reload
+ sbbl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 116(%esp) # 4-byte Spill
+ movl 108(%esp), %edx # 4-byte Reload
+ sbbl 56(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 132(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ sbbl 60(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 144(%esp) # 4-byte Spill
+ sbbl $0, %esi
+ andl $1, %esi
+ jne .LBB212_2
+# BB#1:
+ movl %eax, 168(%esp) # 4-byte Spill
+.LBB212_2:
+ movl %esi, %edx
+ testb %dl, %dl
+ movl 172(%esp), %eax # 4-byte Reload
+ jne .LBB212_4
+# BB#3:
+ movl %edi, %eax
+.LBB212_4:
+ movl 1088(%esp), %edi
+ movl %eax, (%edi)
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ movl 160(%esp), %ecx # 4-byte Reload
+ jne .LBB212_6
+# BB#5:
+ movl %ebp, %ecx
+.LBB212_6:
+ movl %ecx, 4(%edi)
+ movl 108(%esp), %ecx # 4-byte Reload
+ movl 164(%esp), %ebp # 4-byte Reload
+ jne .LBB212_8
+# BB#7:
+ movl %ebx, %ebp
+.LBB212_8:
+ movl %ebp, 8(%edi)
+ movl 168(%esp), %ebx # 4-byte Reload
+ movl %ebx, 12(%edi)
+ movl 124(%esp), %ebp # 4-byte Reload
+ movl 136(%esp), %ebx # 4-byte Reload
+ jne .LBB212_10
+# BB#9:
+ movl 80(%esp), %ebx # 4-byte Reload
+.LBB212_10:
+ movl %ebx, 16(%edi)
+ movl 140(%esp), %ebx # 4-byte Reload
+ movl 148(%esp), %esi # 4-byte Reload
+ jne .LBB212_12
+# BB#11:
+ movl 84(%esp), %esi # 4-byte Reload
+.LBB212_12:
+ movl %esi, 20(%edi)
+ movl 128(%esp), %esi # 4-byte Reload
+ jne .LBB212_14
+# BB#13:
+ movl 88(%esp), %eax # 4-byte Reload
+.LBB212_14:
+ movl %eax, 24(%edi)
+ movl 120(%esp), %edx # 4-byte Reload
+ jne .LBB212_16
+# BB#15:
+ movl 92(%esp), %eax # 4-byte Reload
+ movl %eax, 152(%esp) # 4-byte Spill
+.LBB212_16:
+ movl 152(%esp), %eax # 4-byte Reload
+ movl %eax, 28(%edi)
+ jne .LBB212_18
+# BB#17:
+ movl 96(%esp), %ebp # 4-byte Reload
+.LBB212_18:
+ movl %ebp, 32(%edi)
+ jne .LBB212_20
+# BB#19:
+ movl 100(%esp), %ebx # 4-byte Reload
+.LBB212_20:
+ movl %ebx, 36(%edi)
+ jne .LBB212_22
+# BB#21:
+ movl 112(%esp), %esi # 4-byte Reload
+.LBB212_22:
+ movl %esi, 40(%edi)
+ jne .LBB212_24
+# BB#23:
+ movl 116(%esp), %edx # 4-byte Reload
+.LBB212_24:
+ movl %edx, 44(%edi)
+ jne .LBB212_26
+# BB#25:
+ movl 132(%esp), %ecx # 4-byte Reload
+.LBB212_26:
+ movl %ecx, 48(%edi)
+ movl 104(%esp), %eax # 4-byte Reload
+ jne .LBB212_28
+# BB#27:
+ movl 144(%esp), %eax # 4-byte Reload
+.LBB212_28:
+ movl %eax, 52(%edi)
+ addl $1068, %esp # imm = 0x42C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end212:
+ .size mcl_fp_montRed14L, .Lfunc_end212-mcl_fp_montRed14L
+
+ .globl mcl_fp_addPre14L
+ .align 16, 0x90
+ .type mcl_fp_addPre14L,@function
+mcl_fp_addPre14L: # @mcl_fp_addPre14L
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 20(%esp), %ecx
+ addl (%ecx), %edx
+ adcl 4(%ecx), %esi
+ movl 8(%eax), %edi
+ adcl 8(%ecx), %edi
+ movl 16(%esp), %ebx
+ movl %edx, (%ebx)
+ movl 12(%ecx), %edx
+ movl %esi, 4(%ebx)
+ movl 16(%ecx), %esi
+ adcl 12(%eax), %edx
+ adcl 16(%eax), %esi
+ movl %edi, 8(%ebx)
+ movl 20(%eax), %edi
+ movl %edx, 12(%ebx)
+ movl 20(%ecx), %edx
+ adcl %edi, %edx
+ movl 24(%eax), %edi
+ movl %esi, 16(%ebx)
+ movl 24(%ecx), %esi
+ adcl %edi, %esi
+ movl 28(%eax), %edi
+ movl %edx, 20(%ebx)
+ movl 28(%ecx), %edx
+ adcl %edi, %edx
+ movl 32(%eax), %edi
+ movl %esi, 24(%ebx)
+ movl 32(%ecx), %esi
+ adcl %edi, %esi
+ movl 36(%eax), %edi
+ movl %edx, 28(%ebx)
+ movl 36(%ecx), %edx
+ adcl %edi, %edx
+ movl 40(%eax), %edi
+ movl %esi, 32(%ebx)
+ movl 40(%ecx), %esi
+ adcl %edi, %esi
+ movl 44(%eax), %edi
+ movl %edx, 36(%ebx)
+ movl 44(%ecx), %edx
+ adcl %edi, %edx
+ movl 48(%eax), %edi
+ movl %esi, 40(%ebx)
+ movl 48(%ecx), %esi
+ adcl %edi, %esi
+ movl %edx, 44(%ebx)
+ movl %esi, 48(%ebx)
+ movl 52(%eax), %eax
+ movl 52(%ecx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 52(%ebx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end213:
+ .size mcl_fp_addPre14L, .Lfunc_end213-mcl_fp_addPre14L
+
+ .globl mcl_fp_subPre14L
+ .align 16, 0x90
+ .type mcl_fp_subPre14L,@function
+mcl_fp_subPre14L: # @mcl_fp_subPre14L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %eax, %eax
+ movl 28(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edx), %ebx
+ movl 20(%esp), %ebp
+ movl %esi, (%ebp)
+ movl 12(%ecx), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ebp)
+ movl 16(%ecx), %edi
+ sbbl 16(%edx), %edi
+ movl %ebx, 8(%ebp)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%ebp)
+ movl 20(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %edi, 16(%ebp)
+ movl 24(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 28(%edx), %ebx
+ movl %esi, 20(%ebp)
+ movl 28(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 32(%edx), %ebx
+ movl %edi, 24(%ebp)
+ movl 32(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 36(%edx), %ebx
+ movl %esi, 28(%ebp)
+ movl 36(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 40(%edx), %ebx
+ movl %edi, 32(%ebp)
+ movl 40(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 44(%edx), %ebx
+ movl %esi, 36(%ebp)
+ movl 44(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 48(%edx), %ebx
+ movl %edi, 40(%ebp)
+ movl 48(%ecx), %edi
+ sbbl %ebx, %edi
+ movl %esi, 44(%ebp)
+ movl %edi, 48(%ebp)
+ movl 52(%edx), %edx
+ movl 52(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 52(%ebp)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end214:
+ .size mcl_fp_subPre14L, .Lfunc_end214-mcl_fp_subPre14L
+
+ .globl mcl_fp_shr1_14L
+ .align 16, 0x90
+ .type mcl_fp_shr1_14L,@function
+mcl_fp_shr1_14L: # @mcl_fp_shr1_14L
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl 8(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 8(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 4(%ecx)
+ movl 12(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 8(%ecx)
+ movl 16(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 12(%ecx)
+ movl 20(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 16(%ecx)
+ movl 24(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 20(%ecx)
+ movl 28(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 24(%ecx)
+ movl 32(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 28(%ecx)
+ movl 36(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 32(%ecx)
+ movl 40(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 36(%ecx)
+ movl 44(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 40(%ecx)
+ movl 48(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 44(%ecx)
+ movl 52(%eax), %eax
+ shrdl $1, %eax, %edx
+ movl %edx, 48(%ecx)
+ shrl %eax
+ movl %eax, 52(%ecx)
+ popl %esi
+ retl
+.Lfunc_end215:
+ .size mcl_fp_shr1_14L, .Lfunc_end215-mcl_fp_shr1_14L
+
+ .globl mcl_fp_add14L
+ .align 16, 0x90
+ .type mcl_fp_add14L,@function
+mcl_fp_add14L: # @mcl_fp_add14L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $44, %esp
+ movl 72(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %ecx
+ movl 68(%esp), %ebp
+ addl (%ebp), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl 4(%ebp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 8(%eax), %ecx
+ adcl 8(%ebp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 12(%ebp), %edx
+ movl 16(%ebp), %ecx
+ adcl 12(%eax), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl 16(%eax), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 20(%ebp), %ecx
+ adcl 20(%eax), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 24(%ebp), %ecx
+ adcl 24(%eax), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 28(%ebp), %ecx
+ adcl 28(%eax), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 32(%ebp), %ecx
+ adcl 32(%eax), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 36(%ebp), %ecx
+ adcl 36(%eax), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 40(%ebp), %edx
+ adcl 40(%eax), %edx
+ movl %edx, (%esp) # 4-byte Spill
+ movl 44(%ebp), %ebx
+ adcl 44(%eax), %ebx
+ movl 48(%ebp), %esi
+ adcl 48(%eax), %esi
+ movl 52(%ebp), %edi
+ adcl 52(%eax), %edi
+ movl 64(%esp), %eax
+ movl 4(%esp), %ebp # 4-byte Reload
+ movl %ebp, (%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 8(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 16(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 20(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 8(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl %edx, 40(%eax)
+ movl %ebx, 44(%eax)
+ movl %esi, 48(%eax)
+ movl %edi, 52(%eax)
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl 76(%esp), %edx
+ subl (%edx), %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ sbbl 4(%edx), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ebp # 4-byte Reload
+ sbbl 8(%edx), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %ebp # 4-byte Reload
+ sbbl 12(%edx), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %ebp # 4-byte Reload
+ sbbl 16(%edx), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl 24(%esp), %ebp # 4-byte Reload
+ sbbl 20(%edx), %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 20(%esp), %ebp # 4-byte Reload
+ sbbl 24(%edx), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 16(%esp), %ebp # 4-byte Reload
+ sbbl 28(%edx), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %ebp # 4-byte Reload
+ sbbl 32(%edx), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 8(%esp), %ebp # 4-byte Reload
+ sbbl 36(%edx), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl (%esp), %ebp # 4-byte Reload
+ sbbl 40(%edx), %ebp
+ sbbl 44(%edx), %ebx
+ sbbl 48(%edx), %esi
+ sbbl 52(%edx), %edi
+ sbbl $0, %ecx
+ testb $1, %cl
+ jne .LBB216_2
+# BB#1: # %nocarry
+ movl 4(%esp), %ecx # 4-byte Reload
+ movl %ecx, (%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 8(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 16(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 20(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 8(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl %ebp, 40(%eax)
+ movl %ebx, 44(%eax)
+ movl %esi, 48(%eax)
+ movl %edi, 52(%eax)
+.LBB216_2: # %carry
+ addl $44, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end216:
+ .size mcl_fp_add14L, .Lfunc_end216-mcl_fp_add14L
+
+ .globl mcl_fp_addNF14L
+ .align 16, 0x90
+ .type mcl_fp_addNF14L,@function
+mcl_fp_addNF14L: # @mcl_fp_addNF14L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $112, %esp
+ movl 140(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 136(%esp), %ecx
+ addl (%ecx), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ adcl 4(%ecx), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 52(%eax), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 48(%eax), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 44(%eax), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 40(%eax), %ebp
+ movl 36(%eax), %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 32(%eax), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 28(%eax), %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl 24(%eax), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 20(%eax), %ebx
+ movl 16(%eax), %edi
+ movl 12(%eax), %esi
+ movl 8(%eax), %edx
+ adcl 8(%ecx), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 12(%ecx), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ adcl 16(%ecx), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 20(%ecx), %ebx
+ movl %ebx, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 24(%ecx), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 28(%ecx), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 32(%ecx), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 36(%ecx), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 40(%ecx), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 44(%ecx), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl 48(%ecx), %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 52(%ecx), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 144(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ subl (%ecx), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ sbbl 4(%ecx), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ sbbl 8(%ecx), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ sbbl 12(%ecx), %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ sbbl 16(%ecx), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ sbbl 20(%ecx), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ movl %edx, %eax
+ sbbl 24(%ecx), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ sbbl 28(%ecx), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ sbbl 32(%ecx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ sbbl 36(%ecx), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ sbbl 40(%ecx), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ movl %eax, %esi
+ movl %eax, %ebp
+ sbbl 44(%ecx), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ movl %eax, %esi
+ sbbl 48(%ecx), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ movl %eax, %edi
+ sbbl 52(%ecx), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl %edi, %ecx
+ sarl $31, %ecx
+ testl %ecx, %ecx
+ movl 72(%esp), %ecx # 4-byte Reload
+ js .LBB217_2
+# BB#1:
+ movl (%esp), %ecx # 4-byte Reload
+.LBB217_2:
+ movl 132(%esp), %edi
+ movl %ecx, (%edi)
+ movl 76(%esp), %eax # 4-byte Reload
+ js .LBB217_4
+# BB#3:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB217_4:
+ movl %eax, 4(%edi)
+ movl %edx, %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ movl 56(%esp), %edx # 4-byte Reload
+ js .LBB217_6
+# BB#5:
+ movl 8(%esp), %edx # 4-byte Reload
+.LBB217_6:
+ movl %edx, 8(%edi)
+ movl %ebp, %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ movl 60(%esp), %ebp # 4-byte Reload
+ js .LBB217_8
+# BB#7:
+ movl 12(%esp), %ebp # 4-byte Reload
+.LBB217_8:
+ movl %ebp, 12(%edi)
+ movl 100(%esp), %ebp # 4-byte Reload
+ js .LBB217_10
+# BB#9:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB217_10:
+ movl %eax, 16(%edi)
+ movl 80(%esp), %esi # 4-byte Reload
+ js .LBB217_12
+# BB#11:
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+.LBB217_12:
+ movl 68(%esp), %eax # 4-byte Reload
+ movl %eax, 20(%edi)
+ js .LBB217_14
+# BB#13:
+ movl 24(%esp), %ecx # 4-byte Reload
+.LBB217_14:
+ movl %ecx, 24(%edi)
+ js .LBB217_16
+# BB#15:
+ movl 28(%esp), %eax # 4-byte Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+.LBB217_16:
+ movl 108(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%edi)
+ js .LBB217_18
+# BB#17:
+ movl 32(%esp), %ebp # 4-byte Reload
+.LBB217_18:
+ movl %ebp, 32(%edi)
+ js .LBB217_20
+# BB#19:
+ movl 36(%esp), %ebx # 4-byte Reload
+.LBB217_20:
+ movl %ebx, 36(%edi)
+ js .LBB217_22
+# BB#21:
+ movl 40(%esp), %esi # 4-byte Reload
+.LBB217_22:
+ movl %esi, 40(%edi)
+ movl 96(%esp), %eax # 4-byte Reload
+ js .LBB217_24
+# BB#23:
+ movl 44(%esp), %edx # 4-byte Reload
+.LBB217_24:
+ movl %edx, 44(%edi)
+ movl 92(%esp), %ecx # 4-byte Reload
+ js .LBB217_26
+# BB#25:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB217_26:
+ movl %eax, 48(%edi)
+ js .LBB217_28
+# BB#27:
+ movl 52(%esp), %ecx # 4-byte Reload
+.LBB217_28:
+ movl %ecx, 52(%edi)
+ addl $112, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end217:
+ .size mcl_fp_addNF14L, .Lfunc_end217-mcl_fp_addNF14L
+
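+ # mcl_fp_sub14L: appears to compute z = x - y over 14 limbs; if the
+ # subtraction borrows, the %carry branch below adds the modulus back so
+ # the result lands in [0, p). Inferred from the name and the %carry /
+ # %nocarry labels in the code.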
+ .globl mcl_fp_sub14L
+ .align 16, 0x90
+ .type mcl_fp_sub14L,@function
+mcl_fp_sub14L: # @mcl_fp_sub14L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $52, %esp
+ movl 76(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+ xorl %ebx, %ebx
+ movl 80(%esp), %edi
+ subl (%edi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ sbbl 4(%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ sbbl 12(%edi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ sbbl 16(%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ sbbl 20(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ sbbl 24(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 28(%esi), %eax
+ sbbl 28(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 32(%esi), %eax
+ sbbl 32(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 36(%esi), %edx
+ sbbl 36(%edi), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 40(%esi), %ecx
+ sbbl 40(%edi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 44(%esi), %eax
+ sbbl 44(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 48(%esi), %ebp
+ sbbl 48(%edi), %ebp
+ movl 52(%esi), %esi
+ sbbl 52(%edi), %esi
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 72(%esp), %ebx
+ movl 44(%esp), %edi # 4-byte Reload
+ movl %edi, (%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 48(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl 40(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%ebx)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%ebx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%ebx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 28(%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 32(%ebx)
+ movl %edx, 36(%ebx)
+ movl %ecx, 40(%ebx)
+ movl %eax, 44(%ebx)
+ movl %ebp, 48(%ebx)
+ movl %esi, 52(%ebx)
+ je .LBB218_2
+# BB#1: # %carry
+ movl %esi, (%esp) # 4-byte Spill
+ movl 84(%esp), %esi
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl (%esi), %ecx
+ movl %ecx, (%ebx)
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 8(%esi), %edi
+ movl 12(%esi), %eax
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %edi, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl 24(%esi), %ecx
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl 28(%esi), %eax
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%ebx)
+ movl 32(%esi), %ecx
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+ movl 36(%esi), %eax
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%ebx)
+ movl 40(%esi), %ecx
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%ebx)
+ movl 44(%esi), %eax
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 40(%ebx)
+ movl %eax, 44(%ebx)
+ movl 48(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 48(%ebx)
+ movl 52(%esi), %eax
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%ebx)
+.LBB218_2: # %nocarry
+ addl $52, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end218:
+ .size mcl_fp_sub14L, .Lfunc_end218-mcl_fp_sub14L
+
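+ # mcl_fp_subNF14L: appears to be the branch-free variant of the 14-limb
+ # subtraction: the sign of the final borrow (sarl $31) becomes a mask
+ # that is ANDed with each modulus limb, and the masked modulus is added
+ # back in a single carry chain. Inferred from the name and code.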
+ .globl mcl_fp_subNF14L
+ .align 16, 0x90
+ .type mcl_fp_subNF14L,@function
+mcl_fp_subNF14L: # @mcl_fp_subNF14L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $88, %esp
+ movl 112(%esp), %ecx
+ movl 52(%ecx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl (%ecx), %edx
+ movl 4(%ecx), %eax
+ movl 116(%esp), %edi
+ subl (%edi), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ sbbl 4(%edi), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%ecx), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 44(%ecx), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 40(%ecx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 32(%ecx), %ebp
+ movl 28(%ecx), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 24(%ecx), %ebx
+ movl 20(%ecx), %esi
+ movl 16(%ecx), %edx
+ movl 12(%ecx), %eax
+ movl 8(%ecx), %ecx
+ sbbl 8(%edi), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ sbbl 12(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl 16(%edi), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ sbbl 20(%edi), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ sbbl 24(%edi), %ebx
+ movl %ebx, 48(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ sbbl 28(%edi), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ sbbl 32(%edi), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ sbbl 36(%edi), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ sbbl 44(%edi), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ sbbl 48(%edi), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ sbbl 52(%edi), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %eax, %esi
+ sarl $31, %esi
+ movl %esi, %ecx
+ addl %ecx, %ecx
+ movl %esi, %ebp
+ adcl %ebp, %ebp
+ shrl $31, %eax
+ orl %ecx, %eax
+ movl 120(%esp), %edi
+ andl 4(%edi), %ebp
+ andl (%edi), %eax
+ movl 52(%edi), %ecx
+ andl %esi, %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 48(%edi), %ecx
+ andl %esi, %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 44(%edi), %ecx
+ andl %esi, %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 40(%edi), %ecx
+ andl %esi, %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 36(%edi), %ecx
+ andl %esi, %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 32(%edi), %ecx
+ andl %esi, %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 28(%edi), %ecx
+ andl %esi, %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 24(%edi), %ecx
+ andl %esi, %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 20(%edi), %ebx
+ andl %esi, %ebx
+ movl 16(%edi), %edx
+ andl %esi, %edx
+ movl 12(%edi), %ecx
+ andl %esi, %ecx
+ andl 8(%edi), %esi
+ addl 56(%esp), %eax # 4-byte Folded Reload
+ adcl 60(%esp), %ebp # 4-byte Folded Reload
+ movl 108(%esp), %edi
+ movl %eax, (%edi)
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %ebp, 4(%edi)
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %esi, 8(%edi)
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 12(%edi)
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %edx, 16(%edi)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %ebx, 20(%edi)
+ movl 4(%esp), %ecx # 4-byte Reload
+ adcl 84(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 24(%edi)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 28(%edi)
+ movl 12(%esp), %ecx # 4-byte Reload
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 32(%edi)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 36(%edi)
+ movl 20(%esp), %ecx # 4-byte Reload
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 40(%edi)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 44(%edi)
+ movl %eax, 48(%edi)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%edi)
+ addl $88, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end219:
+ .size mcl_fp_subNF14L, .Lfunc_end219-mcl_fp_subNF14L
+
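+ # mcl_fpDbl_add14L: appears to add two double-width (28-limb / 896-bit)
+ # values, store the lower 14 limbs directly, and conditionally subtract
+ # the modulus from the upper 14 limbs only. Inferred from the name and
+ # code.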
+ .globl mcl_fpDbl_add14L
+ .align 16, 0x90
+ .type mcl_fpDbl_add14L,@function
+mcl_fpDbl_add14L: # @mcl_fpDbl_add14L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $100, %esp
+ movl 128(%esp), %ecx
+ movl 124(%esp), %esi
+ movl 12(%esi), %edi
+ movl 16(%esi), %edx
+ movl 8(%ecx), %ebx
+ movl (%ecx), %ebp
+ addl (%esi), %ebp
+ movl 120(%esp), %eax
+ movl %ebp, (%eax)
+ movl 4(%ecx), %ebp
+ adcl 4(%esi), %ebp
+ adcl 8(%esi), %ebx
+ adcl 12(%ecx), %edi
+ adcl 16(%ecx), %edx
+ movl %ebp, 4(%eax)
+ movl 64(%ecx), %ebp
+ movl %ebx, 8(%eax)
+ movl 20(%ecx), %ebx
+ movl %edi, 12(%eax)
+ movl 20(%esi), %edi
+ adcl %ebx, %edi
+ movl 24(%ecx), %ebx
+ movl %edx, 16(%eax)
+ movl 24(%esi), %edx
+ adcl %ebx, %edx
+ movl 28(%ecx), %ebx
+ movl %edi, 20(%eax)
+ movl 28(%esi), %edi
+ adcl %ebx, %edi
+ movl 32(%ecx), %ebx
+ movl %edx, 24(%eax)
+ movl 32(%esi), %edx
+ adcl %ebx, %edx
+ movl 36(%ecx), %ebx
+ movl %edi, 28(%eax)
+ movl 36(%esi), %edi
+ adcl %ebx, %edi
+ movl 40(%ecx), %ebx
+ movl %edx, 32(%eax)
+ movl 40(%esi), %edx
+ adcl %ebx, %edx
+ movl 44(%ecx), %ebx
+ movl %edi, 36(%eax)
+ movl 44(%esi), %edi
+ adcl %ebx, %edi
+ movl 48(%ecx), %ebx
+ movl %edx, 40(%eax)
+ movl 48(%esi), %edx
+ adcl %ebx, %edx
+ movl 52(%ecx), %ebx
+ movl %edi, 44(%eax)
+ movl 52(%esi), %edi
+ adcl %ebx, %edi
+ movl 56(%ecx), %ebx
+ movl %edx, 48(%eax)
+ movl 56(%esi), %edx
+ adcl %ebx, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 60(%ecx), %edx
+ movl %edi, 52(%eax)
+ movl 60(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 64(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%ecx), %edx
+ movl 68(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%ecx), %edx
+ movl 72(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 76(%ecx), %edx
+ movl 76(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%ecx), %edx
+ movl 80(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%ecx), %edx
+ movl 84(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 88(%ecx), %edx
+ movl 88(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 92(%ecx), %edx
+ movl 92(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 96(%ecx), %edx
+ movl 96(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 100(%ecx), %edx
+ movl 100(%esi), %edi
+ adcl %edx, %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 104(%ecx), %edx
+ movl 104(%esi), %ebx
+ adcl %edx, %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 108(%ecx), %ecx
+ movl 108(%esi), %esi
+ adcl %ecx, %esi
+ sbbl %edx, %edx
+ andl $1, %edx
+ movl 132(%esp), %ebp
+ movl 72(%esp), %ecx # 4-byte Reload
+ subl (%ebp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ sbbl 4(%ebp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ sbbl 8(%ebp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ sbbl 12(%ebp), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ sbbl 16(%ebp), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ sbbl 20(%ebp), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ sbbl 24(%ebp), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ sbbl 28(%ebp), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ sbbl 32(%ebp), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ sbbl 36(%ebp), %ecx
+ sbbl 40(%ebp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl 44(%ebp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ movl %esi, %ebx
+ sbbl 48(%ebp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ sbbl 52(%ebp), %esi
+ sbbl $0, %edx
+ andl $1, %edx
+ jne .LBB220_2
+# BB#1:
+ movl %esi, %ebx
+.LBB220_2:
+ testb %dl, %dl
+ movl 72(%esp), %eax # 4-byte Reload
+ movl 68(%esp), %edx # 4-byte Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ movl 60(%esp), %ebp # 4-byte Reload
+ jne .LBB220_4
+# BB#3:
+ movl %ecx, %edx
+ movl (%esp), %edi # 4-byte Reload
+ movl 4(%esp), %ebp # 4-byte Reload
+ movl 8(%esp), %eax # 4-byte Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB220_4:
+ movl 120(%esp), %esi
+ movl %eax, 56(%esi)
+ movl 76(%esp), %eax # 4-byte Reload
+ movl %eax, 60(%esi)
+ movl 80(%esp), %eax # 4-byte Reload
+ movl %eax, 64(%esi)
+ movl 84(%esp), %eax # 4-byte Reload
+ movl %eax, 68(%esi)
+ movl 88(%esp), %eax # 4-byte Reload
+ movl %eax, 72(%esi)
+ movl 92(%esp), %eax # 4-byte Reload
+ movl %eax, 76(%esi)
+ movl 96(%esp), %eax # 4-byte Reload
+ movl %eax, 80(%esi)
+ movl %ebp, 84(%esi)
+ movl %edi, 88(%esi)
+ movl %edx, 92(%esi)
+ movl 52(%esp), %edx # 4-byte Reload
+ movl 48(%esp), %eax # 4-byte Reload
+ jne .LBB220_6
+# BB#5:
+ movl 36(%esp), %eax # 4-byte Reload
+.LBB220_6:
+ movl %eax, 96(%esi)
+ movl 56(%esp), %ecx # 4-byte Reload
+ jne .LBB220_8
+# BB#7:
+ movl 40(%esp), %edx # 4-byte Reload
+.LBB220_8:
+ movl %edx, 100(%esi)
+ jne .LBB220_10
+# BB#9:
+ movl 44(%esp), %ecx # 4-byte Reload
+.LBB220_10:
+ movl %ecx, 104(%esi)
+ movl %ebx, 108(%esi)
+ addl $100, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end220:
+ .size mcl_fpDbl_add14L, .Lfunc_end220-mcl_fpDbl_add14L
+
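+ # mcl_fpDbl_sub14L: appears to subtract two double-width (28-limb)
+ # values and, on borrow, add the modulus back into the upper 14 limbs
+ # using the modulus-or-zero limbs selected in the branches below.
+ # Inferred from the name and code.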
+ .globl mcl_fpDbl_sub14L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub14L,@function
+mcl_fpDbl_sub14L: # @mcl_fpDbl_sub14L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $96, %esp
+ movl 120(%esp), %ebx
+ movl (%ebx), %eax
+ movl 4(%ebx), %edx
+ movl 124(%esp), %ebp
+ subl (%ebp), %eax
+ sbbl 4(%ebp), %edx
+ movl 8(%ebx), %esi
+ sbbl 8(%ebp), %esi
+ movl 116(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 12(%ebx), %eax
+ sbbl 12(%ebp), %eax
+ movl %edx, 4(%ecx)
+ movl 16(%ebx), %edx
+ sbbl 16(%ebp), %edx
+ movl %esi, 8(%ecx)
+ movl 20(%ebp), %esi
+ movl %eax, 12(%ecx)
+ movl 20(%ebx), %eax
+ sbbl %esi, %eax
+ movl 24(%ebp), %esi
+ movl %edx, 16(%ecx)
+ movl 24(%ebx), %edx
+ sbbl %esi, %edx
+ movl 28(%ebp), %esi
+ movl %eax, 20(%ecx)
+ movl 28(%ebx), %eax
+ sbbl %esi, %eax
+ movl 32(%ebp), %esi
+ movl %edx, 24(%ecx)
+ movl 32(%ebx), %edx
+ sbbl %esi, %edx
+ movl 36(%ebp), %esi
+ movl %eax, 28(%ecx)
+ movl 36(%ebx), %eax
+ sbbl %esi, %eax
+ movl 40(%ebp), %esi
+ movl %edx, 32(%ecx)
+ movl 40(%ebx), %edx
+ sbbl %esi, %edx
+ movl 44(%ebp), %esi
+ movl %eax, 36(%ecx)
+ movl 44(%ebx), %eax
+ sbbl %esi, %eax
+ movl 48(%ebp), %esi
+ movl %edx, 40(%ecx)
+ movl 48(%ebx), %edx
+ sbbl %esi, %edx
+ movl 52(%ebp), %esi
+ movl %eax, 44(%ecx)
+ movl 52(%ebx), %eax
+ sbbl %esi, %eax
+ movl 56(%ebp), %esi
+ movl %edx, 48(%ecx)
+ movl 56(%ebx), %edx
+ sbbl %esi, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 60(%ebp), %edx
+ movl %eax, 52(%ecx)
+ movl 60(%ebx), %eax
+ sbbl %edx, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 64(%ebp), %eax
+ movl 64(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 68(%ebp), %eax
+ movl 68(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 72(%ebp), %eax
+ movl 72(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 76(%ebp), %eax
+ movl 76(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 80(%ebp), %eax
+ movl 80(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 84(%ebp), %eax
+ movl 84(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 88(%ebp), %eax
+ movl 88(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 92(%ebp), %eax
+ movl 92(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 96(%ebp), %eax
+ movl 96(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 100(%ebp), %eax
+ movl 100(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 104(%ebp), %eax
+ movl 104(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 108(%ebp), %eax
+ movl 108(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 128(%esp), %ebp
+ jne .LBB221_1
+# BB#2:
+ movl $0, 56(%esp) # 4-byte Folded Spill
+ jmp .LBB221_3
+.LBB221_1:
+ movl 52(%ebp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+.LBB221_3:
+ testb %al, %al
+ jne .LBB221_4
+# BB#5:
+ movl $0, 24(%esp) # 4-byte Folded Spill
+ movl $0, 20(%esp) # 4-byte Folded Spill
+ jmp .LBB221_6
+.LBB221_4:
+ movl (%ebp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 4(%ebp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+.LBB221_6:
+ jne .LBB221_7
+# BB#8:
+ movl $0, 32(%esp) # 4-byte Folded Spill
+ jmp .LBB221_9
+.LBB221_7:
+ movl 48(%ebp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+.LBB221_9:
+ jne .LBB221_10
+# BB#11:
+ movl $0, 28(%esp) # 4-byte Folded Spill
+ jmp .LBB221_12
+.LBB221_10:
+ movl 44(%ebp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+.LBB221_12:
+ jne .LBB221_13
+# BB#14:
+ movl $0, 16(%esp) # 4-byte Folded Spill
+ jmp .LBB221_15
+.LBB221_13:
+ movl 40(%ebp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+.LBB221_15:
+ jne .LBB221_16
+# BB#17:
+ movl $0, 12(%esp) # 4-byte Folded Spill
+ jmp .LBB221_18
+.LBB221_16:
+ movl 36(%ebp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+.LBB221_18:
+ jne .LBB221_19
+# BB#20:
+ movl $0, 8(%esp) # 4-byte Folded Spill
+ jmp .LBB221_21
+.LBB221_19:
+ movl 32(%ebp), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+.LBB221_21:
+ jne .LBB221_22
+# BB#23:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB221_24
+.LBB221_22:
+ movl 28(%ebp), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+.LBB221_24:
+ jne .LBB221_25
+# BB#26:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB221_27
+.LBB221_25:
+ movl 24(%ebp), %eax
+ movl %eax, (%esp) # 4-byte Spill
+.LBB221_27:
+ jne .LBB221_28
+# BB#29:
+ movl $0, %esi
+ jmp .LBB221_30
+.LBB221_28:
+ movl 20(%ebp), %esi
+.LBB221_30:
+ jne .LBB221_31
+# BB#32:
+ movl $0, %edi
+ jmp .LBB221_33
+.LBB221_31:
+ movl 16(%ebp), %edi
+.LBB221_33:
+ jne .LBB221_34
+# BB#35:
+ movl $0, %ebx
+ jmp .LBB221_36
+.LBB221_34:
+ movl 12(%ebp), %ebx
+.LBB221_36:
+ jne .LBB221_37
+# BB#38:
+ xorl %ebp, %ebp
+ jmp .LBB221_39
+.LBB221_37:
+ movl 8(%ebp), %ebp
+.LBB221_39:
+ movl 20(%esp), %edx # 4-byte Reload
+ addl 44(%esp), %edx # 4-byte Folded Reload
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 56(%ecx)
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 60(%ecx)
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, 64(%ecx)
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 68(%ecx)
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %edi, 72(%ecx)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %esi, 76(%ecx)
+ movl 4(%esp), %edx # 4-byte Reload
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 80(%ecx)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 84(%ecx)
+ movl 12(%esp), %edx # 4-byte Reload
+ adcl 76(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 88(%ecx)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 92(%ecx)
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl 84(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 96(%ecx)
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 100(%ecx)
+ movl %eax, 104(%ecx)
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%ecx)
+ addl $96, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end221:
+ .size mcl_fpDbl_sub14L, .Lfunc_end221-mcl_fpDbl_sub14L
+
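+ # .LmulPv480x32: local helper that appears to multiply the 15-limb
+ # (480-bit) operand addressed by %edx by the 32-bit scalar passed on the
+ # stack, writing a 16-limb product to the buffer in %ecx through a chain
+ # of mull/adcl. Inferred from the name and code.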
+ .align 16, 0x90
+ .type .LmulPv480x32,@function
+.LmulPv480x32: # @mulPv480x32
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $104, %esp
+ movl %edx, %ebp
+ movl 124(%esp), %esi
+ movl %esi, %eax
+ mull 56(%ebp)
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 52(%ebp)
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 48(%ebp)
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 44(%ebp)
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 40(%ebp)
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 36(%ebp)
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 32(%ebp)
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 28(%ebp)
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 24(%ebp)
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 20(%ebp)
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 16(%ebp)
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 12(%ebp)
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 8(%ebp)
+ movl %edx, %edi
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 4(%ebp)
+ movl %edx, %ebx
+ movl %eax, (%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull (%ebp)
+ movl %eax, (%ecx)
+ addl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%ecx)
+ adcl 4(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 8(%ecx)
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%ecx)
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%ecx)
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%ecx)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%ecx)
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%ecx)
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%ecx)
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%ecx)
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%ecx)
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 60(%ecx)
+ movl %ecx, %eax
+ addl $104, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end222:
+ .size .LmulPv480x32, .Lfunc_end222-.LmulPv480x32
+
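+ # mcl_fp_mulUnitPre15L: appears to multiply a 15-limb (480-bit) operand
+ # by a single 32-bit word by delegating to .LmulPv480x32 and copying the
+ # 16-limb result to the destination. Inferred from the name and code.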
+ .globl mcl_fp_mulUnitPre15L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre15L,@function
+mcl_fp_mulUnitPre15L: # @mcl_fp_mulUnitPre15L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $124, %esp
+ calll .L223$pb
+.L223$pb:
+ popl %ebx
+.Ltmp44:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp44-.L223$pb), %ebx
+ movl 152(%esp), %eax
+ movl %eax, (%esp)
+ leal 56(%esp), %ecx
+ movl 148(%esp), %edx
+ calll .LmulPv480x32
+ movl 116(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 112(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 108(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 104(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 88(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 80(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp
+ movl 72(%esp), %ebx
+ movl 68(%esp), %edi
+ movl 64(%esp), %esi
+ movl 56(%esp), %edx
+ movl 60(%esp), %ecx
+ movl 144(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %edi, 12(%eax)
+ movl %ebx, 16(%eax)
+ movl %ebp, 20(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%eax)
+ movl 48(%esp), %ecx # 4-byte Reload
+ movl %ecx, 56(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ movl %ecx, 60(%eax)
+ addl $124, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end223:
+ .size mcl_fp_mulUnitPre15L, .Lfunc_end223-mcl_fp_mulUnitPre15L
+
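+ # mcl_fpDbl_mulPre15L: appears to be a schoolbook full multiplication of
+ # two 15-limb operands into a 30-limb product: one .LmulPv480x32 call
+ # per word of the second operand, with the partial products accumulated
+ # through the adcl chains below. Inferred from the name and code.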
+ .globl mcl_fpDbl_mulPre15L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre15L,@function
+mcl_fpDbl_mulPre15L: # @mcl_fpDbl_mulPre15L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1084, %esp # imm = 0x43C
+ calll .L224$pb
+.L224$pb:
+ popl %esi
+.Ltmp45:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp45-.L224$pb), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 1112(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1016(%esp), %ecx
+ movl 1108(%esp), %edi
+ movl %edi, %edx
+ movl %esi, %ebx
+ calll .LmulPv480x32
+ movl 1076(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 1072(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 1068(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 1060(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 1056(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1052(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1044(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1040(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1036(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 1032(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 1028(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 1024(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1016(%esp), %eax
+ movl 1020(%esp), %ebp
+ movl 1104(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 1112(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 952(%esp), %ecx
+ movl %edi, %edx
+ movl %esi, %ebx
+ calll .LmulPv480x32
+ addl 952(%esp), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 1012(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1008(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 1004(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1000(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 996(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 992(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 988(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 984(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 980(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 976(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 972(%esp), %edi
+ movl 968(%esp), %esi
+ movl 964(%esp), %edx
+ movl 956(%esp), %eax
+ movl 960(%esp), %ecx
+ movl 1104(%esp), %ebp
+ movl 16(%esp), %ebx # 4-byte Reload
+ movl %ebx, 4(%ebp)
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 888(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 72(%esp), %eax # 4-byte Reload
+ addl 888(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 948(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 944(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 940(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 936(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 932(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 928(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 924(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 920(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 916(%esp), %ebx
+ movl 912(%esp), %edi
+ movl 908(%esp), %esi
+ movl 904(%esp), %edx
+ movl 900(%esp), %ecx
+ movl 892(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 896(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 72(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%eax)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 112(%esp) # 4-byte Folded Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 68(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 824(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 824(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 884(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 880(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 876(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 872(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 868(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 864(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 860(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 856(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 852(%esp), %ebx
+ movl 848(%esp), %edi
+ movl 844(%esp), %esi
+ movl 840(%esp), %edx
+ movl 836(%esp), %ecx
+ movl 828(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 832(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 760(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 760(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 820(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 816(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 812(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 808(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 804(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 800(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 796(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 792(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 788(%esp), %ebx
+ movl 784(%esp), %edi
+ movl 780(%esp), %esi
+ movl 776(%esp), %edx
+ movl 772(%esp), %ecx
+ movl 764(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 768(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 60(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%eax)
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 696(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 696(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 756(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 752(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 748(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 744(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 740(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 736(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 732(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 724(%esp), %ebx
+ movl 720(%esp), %edi
+ movl 716(%esp), %esi
+ movl 712(%esp), %edx
+ movl 708(%esp), %ecx
+ movl 700(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 704(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 632(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 692(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 688(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 684(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 680(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 676(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 672(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 668(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 664(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 660(%esp), %ebx
+ movl 656(%esp), %edi
+ movl 652(%esp), %esi
+ movl 648(%esp), %edx
+ movl 644(%esp), %ecx
+ movl 636(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 640(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%eax)
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 64(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 568(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 628(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 624(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 620(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 616(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 612(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 608(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 604(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 600(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 596(%esp), %ebx
+ movl 592(%esp), %edi
+ movl 588(%esp), %esi
+ movl 584(%esp), %edx
+ movl 580(%esp), %ecx
+ movl 572(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 576(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 504(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 564(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 560(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 556(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 552(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 548(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 544(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 540(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 536(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 532(%esp), %ebx
+ movl 528(%esp), %edi
+ movl 524(%esp), %esi
+ movl 520(%esp), %edx
+ movl 516(%esp), %ecx
+ movl 508(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 512(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 32(%eax)
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 440(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 440(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 500(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 496(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 492(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 488(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 484(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 480(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 476(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 472(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 468(%esp), %ebx
+ movl 464(%esp), %edi
+ movl 460(%esp), %esi
+ movl 456(%esp), %edx
+ movl 452(%esp), %ecx
+ movl 444(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 448(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 36(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 376(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 376(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 436(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 432(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 428(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 424(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 420(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 416(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 412(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 408(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 404(%esp), %ebx
+ movl 400(%esp), %edi
+ movl 396(%esp), %esi
+ movl 392(%esp), %edx
+ movl 388(%esp), %ecx
+ movl 380(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 384(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 40(%eax)
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 312(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 364(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 360(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 356(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 352(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 348(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 344(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 340(%esp), %ebx
+ movl 336(%esp), %edi
+ movl 332(%esp), %esi
+ movl 328(%esp), %edx
+ movl 324(%esp), %ecx
+ movl 316(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 320(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 44(%eax)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl %eax, 108(%esp) # 4-byte Folded Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 48(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 248(%esp), %ecx
+ movl 1108(%esp), %eax
+ movl %eax, %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 108(%esp), %eax # 4-byte Reload
+ addl 248(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 308(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 304(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 300(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 296(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 292(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 288(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 284(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 280(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 276(%esp), %ebx
+ movl 272(%esp), %edi
+ movl 268(%esp), %edx
+ movl 264(%esp), %ecx
+ movl 260(%esp), %eax
+ movl 252(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 256(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ movl 1104(%esp), %ebp
+ movl %esi, 48(%ebp)
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 20(%esp), %esi # 4-byte Reload
+ adcl %esi, 36(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 32(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 184(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 244(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 240(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 236(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 232(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 228(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 224(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 220(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 212(%esp), %ebx
+ movl 208(%esp), %edx
+ movl 204(%esp), %ecx
+ movl 200(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 196(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 188(%esp), %eax
+ movl 192(%esp), %esi
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl 1104(%esp), %edi
+ movl %ebp, 52(%edi)
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl %esi, %ebp
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl 24(%esp), %edi # 4-byte Reload
+ adcl %edi, 40(%esp) # 4-byte Folded Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 52(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 64(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 56(%eax), %eax
+ movl %eax, (%esp)
+ leal 120(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 120(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 124(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ adcl 128(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 176(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 168(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 164(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 156(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 152(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 148(%esp), %ebp
+ movl 144(%esp), %edi
+ movl 140(%esp), %esi
+ movl 136(%esp), %edx
+ movl 132(%esp), %ecx
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebx # 4-byte Reload
+ movl %ebx, 56(%eax)
+ movl 32(%esp), %ebx # 4-byte Reload
+ movl %ebx, 60(%eax)
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl 72(%esp), %ebx # 4-byte Reload
+ movl %ebx, 64(%eax)
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 68(%eax)
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 72(%eax)
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 76(%eax)
+ adcl 76(%esp), %ebp # 4-byte Folded Reload
+ movl %edi, 80(%eax)
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %ebp, 84(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 84(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 88(%eax)
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 92(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 92(%eax)
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 104(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 96(%eax)
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 108(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 100(%eax)
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 104(%eax)
+ movl %ecx, 108(%eax)
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 112(%eax)
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 116(%eax)
+ addl $1084, %esp # imm = 0x43C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end224:
+ .size mcl_fpDbl_mulPre15L, .Lfunc_end224-mcl_fpDbl_mulPre15L
+
+ .globl mcl_fpDbl_sqrPre15L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre15L,@function
+mcl_fpDbl_sqrPre15L: # @mcl_fpDbl_sqrPre15L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1084, %esp # imm = 0x43C
+ calll .L225$pb
+.L225$pb:
+ popl %ebx
+.Ltmp46:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp46-.L225$pb), %ebx
+ movl %ebx, 116(%esp) # 4-byte Spill
+ movl 1108(%esp), %edx
+ movl (%edx), %eax
+ movl %eax, (%esp)
+ leal 1016(%esp), %ecx
+ movl %edx, %edi
+ movl %ebx, %esi
+ calll .LmulPv480x32
+ movl 1076(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 1072(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 1068(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 1060(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 1056(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1052(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1044(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1040(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1036(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 1032(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 1028(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 1024(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1016(%esp), %eax
+ movl 1020(%esp), %ebp
+ movl 1104(%esp), %ecx
+ movl %eax, (%ecx)
+ movl %edi, %edx
+ movl 4(%edx), %eax
+ movl %eax, (%esp)
+ leal 952(%esp), %ecx
+ movl %esi, %ebx
+ calll .LmulPv480x32
+ addl 952(%esp), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 1012(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1008(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 1004(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1000(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 996(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 992(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 988(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 984(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 980(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 976(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 972(%esp), %edi
+ movl 968(%esp), %esi
+ movl 964(%esp), %edx
+ movl 956(%esp), %eax
+ movl 960(%esp), %ecx
+ movl 1104(%esp), %ebp
+ movl 16(%esp), %ebx # 4-byte Reload
+ movl %ebx, 4(%ebp)
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 8(%edx), %eax
+ movl %eax, (%esp)
+ leal 888(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 72(%esp), %eax # 4-byte Reload
+ addl 888(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 948(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 944(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 940(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 936(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 932(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 928(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 924(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 920(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 916(%esp), %ebx
+ movl 912(%esp), %edi
+ movl 908(%esp), %esi
+ movl 904(%esp), %edx
+ movl 900(%esp), %ecx
+ movl 892(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 896(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 72(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%eax)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 112(%esp) # 4-byte Folded Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 68(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 12(%edx), %eax
+ movl %eax, (%esp)
+ leal 824(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 824(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 884(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 880(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 876(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 872(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 868(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 864(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 860(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 856(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 852(%esp), %ebx
+ movl 848(%esp), %edi
+ movl 844(%esp), %esi
+ movl 840(%esp), %edx
+ movl 836(%esp), %ecx
+ movl 828(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 832(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 16(%edx), %eax
+ movl %eax, (%esp)
+ leal 760(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 760(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 820(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 816(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 812(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 808(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 804(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 800(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 796(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 792(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 788(%esp), %ebx
+ movl 784(%esp), %edi
+ movl 780(%esp), %esi
+ movl 776(%esp), %edx
+ movl 772(%esp), %ecx
+ movl 764(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 768(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 60(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%eax)
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 20(%edx), %eax
+ movl %eax, (%esp)
+ leal 696(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 696(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 756(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 752(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 748(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 744(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 740(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 736(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 732(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 724(%esp), %ebx
+ movl 720(%esp), %edi
+ movl 716(%esp), %esi
+ movl 712(%esp), %edx
+ movl 708(%esp), %ecx
+ movl 700(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 704(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 24(%edx), %eax
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 632(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 692(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 688(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 684(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 680(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 676(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 672(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 668(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 664(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 660(%esp), %ebx
+ movl 656(%esp), %edi
+ movl 652(%esp), %esi
+ movl 648(%esp), %edx
+ movl 644(%esp), %ecx
+ movl 636(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 640(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%eax)
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 64(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 28(%edx), %eax
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 568(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 628(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 624(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 620(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 616(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 612(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 608(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 604(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 600(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 596(%esp), %ebx
+ movl 592(%esp), %edi
+ movl 588(%esp), %esi
+ movl 584(%esp), %edx
+ movl 580(%esp), %ecx
+ movl 572(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 576(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 32(%edx), %eax
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 504(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 564(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 560(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 556(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 552(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 548(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 544(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 540(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 536(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 532(%esp), %ebx
+ movl 528(%esp), %edi
+ movl 524(%esp), %esi
+ movl 520(%esp), %edx
+ movl 516(%esp), %ecx
+ movl 508(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 512(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 32(%eax)
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 36(%edx), %eax
+ movl %eax, (%esp)
+ leal 440(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 440(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 500(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 496(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 492(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 488(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 484(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 480(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 476(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 472(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 468(%esp), %ebx
+ movl 464(%esp), %edi
+ movl 460(%esp), %esi
+ movl 456(%esp), %edx
+ movl 452(%esp), %ecx
+ movl 444(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 448(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 36(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 40(%edx), %eax
+ movl %eax, (%esp)
+ leal 376(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 376(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 436(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 432(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 428(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 424(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 420(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 416(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 412(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 408(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 404(%esp), %ebx
+ movl 400(%esp), %edi
+ movl 396(%esp), %esi
+ movl 392(%esp), %edx
+ movl 388(%esp), %ecx
+ movl 380(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 384(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 40(%eax)
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 44(%edx), %eax
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 312(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 364(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 360(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 356(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 352(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 348(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 344(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 340(%esp), %ebx
+ movl 336(%esp), %edi
+ movl 332(%esp), %esi
+ movl 328(%esp), %edx
+ movl 324(%esp), %ecx
+ movl 316(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 320(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 44(%eax)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl %eax, 108(%esp) # 4-byte Folded Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 48(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 48(%edx), %eax
+ movl %eax, (%esp)
+ leal 248(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 108(%esp), %eax # 4-byte Reload
+ addl 248(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 308(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 304(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 300(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 296(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 292(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 288(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 284(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 280(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 276(%esp), %ebx
+ movl 272(%esp), %edi
+ movl 268(%esp), %edx
+ movl 264(%esp), %ecx
+ movl 260(%esp), %eax
+ movl 252(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 256(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ movl 1104(%esp), %ebp
+ movl %esi, 48(%ebp)
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 20(%esp), %esi # 4-byte Reload
+ adcl %esi, 36(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 32(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 52(%edx), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 184(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 244(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 240(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 236(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 232(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 228(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 224(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 220(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 212(%esp), %ebx
+ movl 208(%esp), %edx
+ movl 204(%esp), %ecx
+ movl 200(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 196(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 188(%esp), %eax
+ movl 192(%esp), %esi
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl 1104(%esp), %edi
+ movl %ebp, 52(%edi)
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl %esi, %ebp
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl 24(%esp), %edi # 4-byte Reload
+ adcl %edi, 40(%esp) # 4-byte Folded Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 52(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 64(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 56(%edx), %eax
+ movl %eax, (%esp)
+ leal 120(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 120(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 124(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ adcl 128(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 176(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 168(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 164(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 156(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 152(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 148(%esp), %ebp
+ movl 144(%esp), %edi
+ movl 140(%esp), %esi
+ movl 136(%esp), %edx
+ movl 132(%esp), %ecx
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebx # 4-byte Reload
+ movl %ebx, 56(%eax)
+ movl 32(%esp), %ebx # 4-byte Reload
+ movl %ebx, 60(%eax)
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl 72(%esp), %ebx # 4-byte Reload
+ movl %ebx, 64(%eax)
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 68(%eax)
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 72(%eax)
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 76(%eax)
+ adcl 76(%esp), %ebp # 4-byte Folded Reload
+ movl %edi, 80(%eax)
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %ebp, 84(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 84(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 88(%eax)
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 92(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 92(%eax)
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 104(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 96(%eax)
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 108(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 100(%eax)
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 104(%eax)
+ movl %ecx, 108(%eax)
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 112(%eax)
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 116(%eax)
+ addl $1084, %esp # imm = 0x43C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end225:
+ .size mcl_fpDbl_sqrPre15L, .Lfunc_end225-mcl_fpDbl_sqrPre15L
+
+ .globl mcl_fp_mont15L
+ .align 16, 0x90
+ .type mcl_fp_mont15L,@function
+mcl_fp_mont15L: # @mcl_fp_mont15L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $2044, %esp # imm = 0x7FC
+ calll .L226$pb
+.L226$pb:
+ popl %ebx
+.Ltmp47:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp47-.L226$pb), %ebx
+ movl 2076(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1976(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 1976(%esp), %ebp
+ movl 1980(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 2036(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 2032(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 2028(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 2024(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 2020(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 2016(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 2012(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 2008(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 2004(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 2000(%esp), %edi
+ movl 1996(%esp), %esi
+ movl 1992(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 1988(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 1984(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl %eax, (%esp)
+ leal 1912(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ addl 1912(%esp), %ebp
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1916(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1920(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1924(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1928(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1932(%esp), %esi
+ adcl 1936(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1940(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1944(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1948(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1952(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1956(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1960(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1964(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1968(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ebp # 4-byte Reload
+ adcl 1972(%esp), %ebp
+ sbbl %eax, %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1848(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 116(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ movl 88(%esp), %edx # 4-byte Reload
+ addl 1848(%esp), %edx
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1852(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1856(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1860(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 1864(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl 1868(%esp), %edi
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1872(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1876(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1880(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1884(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1888(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1892(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1896(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1900(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ adcl 1904(%esp), %ebp
+ movl %ebp, 108(%esp) # 4-byte Spill
+ adcl 1908(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %edx, %eax
+ movl %edx, %esi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1784(%esp), %ecx
+ movl 2076(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv480x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 1784(%esp), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1788(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1792(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1796(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1800(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 1804(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1808(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 1812(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1816(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1820(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1824(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1828(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1832(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 1836(%esp), %esi
+ movl 108(%esp), %ebp # 4-byte Reload
+ adcl 1840(%esp), %ebp
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1844(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1720(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 96(%esp), %ecx # 4-byte Reload
+ addl 1720(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1724(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1728(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1732(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1736(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1740(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1744(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1748(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1752(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 1756(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1760(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1764(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 1768(%esp), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ adcl 1772(%esp), %ebp
+ movl %ebp, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1776(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 1780(%esp), %esi
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1656(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ addl 1656(%esp), %eax
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1660(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1664(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1668(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1672(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1676(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1680(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1684(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 1688(%esp), %ebp
+ adcl 1692(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1696(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1700(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1704(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1708(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl 1712(%esp), %edi
+ adcl 1716(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1592(%esp), %ecx
+ movl 2068(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv480x32
+ movl 84(%esp), %ecx # 4-byte Reload
+ addl 1592(%esp), %ecx
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1596(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1600(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1604(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1608(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1612(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1616(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 1620(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1624(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 1628(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1632(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1636(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1640(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 1644(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1648(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1652(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1528(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 1528(%esp), %ebp
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1532(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1536(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1540(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 1544(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1548(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1552(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1556(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1560(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1564(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %edi # 4-byte Reload
+ adcl 1568(%esp), %edi
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 1572(%esp), %esi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1576(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1580(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1584(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1588(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1464(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 92(%esp), %ecx # 4-byte Reload
+ addl 1464(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1468(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1472(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1476(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1480(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 1484(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1488(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1492(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1496(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 1500(%esp), %edi
+ movl %edi, 112(%esp) # 4-byte Spill
+ adcl 1504(%esp), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1508(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %esi # 4-byte Reload
+ adcl 1512(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1516(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1520(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1524(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1400(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ movl 92(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1400(%esp), %edi
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1404(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1408(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1412(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1416(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 1420(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1424(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1428(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 1432(%esp), %edi
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1436(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1440(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1444(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ adcl 1448(%esp), %esi
+ movl %esi, %ebp
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 1452(%esp), %esi
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1456(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1460(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 1336(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 1336(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1340(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1344(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1348(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1352(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1356(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1360(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1364(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1368(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1372(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1376(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 1380(%esp), %ebp
+ movl %ebp, 116(%esp) # 4-byte Spill
+ adcl 1384(%esp), %esi
+ movl %esi, %ebp
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1388(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 1392(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1396(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1272(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ movl 80(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1272(%esp), %edi
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1276(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1280(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1284(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1288(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1292(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1296(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1300(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1304(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1308(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1312(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1316(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ adcl 1320(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1324(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl 1328(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1332(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %edi
+ movl 2072(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 1208(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 64(%esp), %ecx # 4-byte Reload
+ addl 1208(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1212(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1216(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1220(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1224(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1228(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 1232(%esp), %ebp
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1236(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1240(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 1244(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1248(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1252(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1256(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1260(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1264(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1268(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1144(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ andl $1, %edi
+ movl %edi, %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ addl 1144(%esp), %eax
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1148(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1152(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 1156(%esp), %edi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1168(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 1180(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1184(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1188(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1192(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 1196(%esp), %ebp
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1200(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1204(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 1080(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 1080(%esp), %ecx
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1084(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1088(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 1092(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1096(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1100(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1104(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1108(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1112(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1116(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1120(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1124(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 1128(%esp), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1140(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1016(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 1016(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 1028(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 1032(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1040(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 1044(%esp), %esi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl 1060(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1068(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1072(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1076(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 952(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 952(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 964(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 976(%esp), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 980(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 984(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 992(%esp), %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1000(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1004(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 888(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ movl %ebp, %eax
+ andl $1, %eax
+ addl 888(%esp), %esi
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 892(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 896(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 900(%esp), %edi
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 904(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 908(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 912(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 916(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 920(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 924(%esp), %ebp
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 928(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 932(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 936(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 940(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 944(%esp), %esi
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 948(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 824(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 68(%esp), %ecx # 4-byte Reload
+ addl 824(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 832(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 856(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 864(%esp), %ebp
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 872(%esp), %edi
+ adcl 876(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 760(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ movl 68(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 760(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 776(%esp), %esi
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 780(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 800(%esp), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 804(%esp), %ebp
+ adcl 808(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 816(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 696(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 696(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 708(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 716(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 736(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 748(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 752(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 632(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl 656(%esp), %edi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 672(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 688(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 568(%esp), %ecx
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 588(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl 596(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 604(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 504(%esp), %esi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl 516(%esp), %edi
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 520(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 532(%esp), %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 560(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 440(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 100(%esp), %ecx # 4-byte Reload
+ addl 440(%esp), %ecx
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 448(%esp), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ adcl 452(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ adcl 460(%esp), %edi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 492(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 376(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 376(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 388(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 396(%esp), %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 404(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 416(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 112(%esp), %ecx # 4-byte Reload
+ addl 312(%esp), %ecx
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 320(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 336(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 348(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 352(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 248(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 248(%esp), %edi
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 252(%esp), %esi
+ movl 108(%esp), %edi # 4-byte Reload
+ adcl 256(%esp), %edi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 288(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 56(%eax), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl %esi, %ecx
+ movl 96(%esp), %esi # 4-byte Reload
+ addl 184(%esp), %ecx
+ adcl 188(%esp), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 196(%esp), %ebp
+ adcl 200(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 120(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ movl 104(%esp), %ebx # 4-byte Reload
+ andl $1, %ebx
+ addl 120(%esp), %edi
+ movl %ebp, %edi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 124(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 128(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ adcl 132(%esp), %edi
+ adcl 136(%esp), %esi
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 140(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 144(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 148(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 152(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 156(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 160(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 164(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 168(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 172(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 176(%esp), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %edx # 4-byte Reload
+ adcl 180(%esp), %edx
+ movl %edx, 112(%esp) # 4-byte Spill
+ adcl $0, %ebx
+ movl %ebx, 104(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 2076(%esp), %ebp
+ subl (%ebp), %edx
+ sbbl 4(%ebp), %ecx
+ movl %edi, %eax
+ sbbl 8(%ebp), %eax
+ movl %esi, %ebx
+ sbbl 12(%ebp), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 84(%esp), %ebx # 4-byte Reload
+ sbbl 16(%ebp), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 92(%esp), %ebx # 4-byte Reload
+ sbbl 20(%ebp), %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 80(%esp), %ebx # 4-byte Reload
+ sbbl 24(%ebp), %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 64(%esp), %ebx # 4-byte Reload
+ sbbl 28(%ebp), %ebx
+ movl %ebx, 32(%esp) # 4-byte Spill
+ movl 56(%esp), %ebx # 4-byte Reload
+ sbbl 32(%ebp), %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 60(%esp), %ebx # 4-byte Reload
+ sbbl 36(%ebp), %ebx
+ movl %ebx, 40(%esp) # 4-byte Spill
+ movl 68(%esp), %ebx # 4-byte Reload
+ sbbl 40(%ebp), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %ebx # 4-byte Reload
+ sbbl 44(%ebp), %ebx
+ movl %ebx, 48(%esp) # 4-byte Spill
+ movl 76(%esp), %ebx # 4-byte Reload
+ sbbl 48(%ebp), %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 100(%esp), %ebx # 4-byte Reload
+ sbbl 52(%ebp), %ebx
+ movl %ebx, 88(%esp) # 4-byte Spill
+ movl 112(%esp), %ebx # 4-byte Reload
+ sbbl 56(%ebp), %ebx
+ movl %ebx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ebx # 4-byte Reload
+ movl 108(%esp), %ebp # 4-byte Reload
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB226_2
+# BB#1:
+ movl %edx, %ebp
+.LBB226_2:
+ movl 2064(%esp), %edx
+ movl %ebp, (%edx)
+ testb %bl, %bl
+ movl 116(%esp), %ebp # 4-byte Reload
+ jne .LBB226_4
+# BB#3:
+ movl %ecx, %ebp
+.LBB226_4:
+ movl %ebp, 4(%edx)
+ jne .LBB226_6
+# BB#5:
+ movl %eax, %edi
+.LBB226_6:
+ movl %edi, 8(%edx)
+ jne .LBB226_8
+# BB#7:
+ movl 16(%esp), %esi # 4-byte Reload
+.LBB226_8:
+ movl %esi, 12(%edx)
+ movl 84(%esp), %eax # 4-byte Reload
+ jne .LBB226_10
+# BB#9:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB226_10:
+ movl %eax, 16(%edx)
+ movl 92(%esp), %eax # 4-byte Reload
+ jne .LBB226_12
+# BB#11:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB226_12:
+ movl %eax, 20(%edx)
+ movl 80(%esp), %eax # 4-byte Reload
+ jne .LBB226_14
+# BB#13:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB226_14:
+ movl %eax, 24(%edx)
+ movl 64(%esp), %eax # 4-byte Reload
+ jne .LBB226_16
+# BB#15:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB226_16:
+ movl %eax, 28(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ jne .LBB226_18
+# BB#17:
+ movl 36(%esp), %eax # 4-byte Reload
+.LBB226_18:
+ movl %eax, 32(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ jne .LBB226_20
+# BB#19:
+ movl 40(%esp), %eax # 4-byte Reload
+.LBB226_20:
+ movl %eax, 36(%edx)
+ movl 68(%esp), %eax # 4-byte Reload
+ jne .LBB226_22
+# BB#21:
+ movl 44(%esp), %eax # 4-byte Reload
+.LBB226_22:
+ movl %eax, 40(%edx)
+ movl 72(%esp), %eax # 4-byte Reload
+ jne .LBB226_24
+# BB#23:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB226_24:
+ movl %eax, 44(%edx)
+ movl 76(%esp), %eax # 4-byte Reload
+ jne .LBB226_26
+# BB#25:
+ movl 52(%esp), %eax # 4-byte Reload
+.LBB226_26:
+ movl %eax, 48(%edx)
+ movl 100(%esp), %eax # 4-byte Reload
+ jne .LBB226_28
+# BB#27:
+ movl 88(%esp), %eax # 4-byte Reload
+.LBB226_28:
+ movl %eax, 52(%edx)
+ movl 112(%esp), %eax # 4-byte Reload
+ jne .LBB226_30
+# BB#29:
+ movl 96(%esp), %eax # 4-byte Reload
+.LBB226_30:
+ movl %eax, 56(%edx)
+ addl $2044, %esp # imm = 0x7FC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end226:
+ .size mcl_fp_mont15L, .Lfunc_end226-mcl_fp_mont15L
+
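+# mcl_fp_mont15L above and mcl_fp_montNF15L below appear to be LLVM-generated
+# 32-bit x86 Montgomery multiplication routines over 15 limbs (15 x 32 bits =
+# 480 bits, matching the .LmulPv480x32 helper they call). Each outer step seems
+# to load the next 32-bit word of the multiplier, call .LmulPv480x32 to
+# accumulate a 480x32-bit partial product into the stack-resident running sum,
+# then multiply the low accumulator word by what looks like the precomputed
+# Montgomery constant held 4 bytes below the modulus pointer and call
+# .LmulPv480x32 again against the modulus to fold in one word of reduction;
+# the trailing sbbl/jne ladder performs the final conditional subtraction.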
+ .globl mcl_fp_montNF15L
+ .align 16, 0x90
+ .type mcl_fp_montNF15L,@function
+mcl_fp_montNF15L: # @mcl_fp_montNF15L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $2028, %esp # imm = 0x7EC
+ calll .L227$pb
+.L227$pb:
+ popl %ebx
+.Ltmp48:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp48-.L227$pb), %ebx
+ movl 2060(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1960(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 1960(%esp), %ebp
+ movl 1964(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 2020(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 2016(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 2012(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 2008(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 2004(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 2000(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1996(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1992(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1988(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1984(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1980(%esp), %esi
+ movl 1976(%esp), %edi
+ movl 1972(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 1968(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl %eax, (%esp)
+ leal 1896(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 1896(%esp), %ebp
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1900(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1904(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1908(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 1912(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ adcl 1916(%esp), %esi
+ movl %esi, %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1920(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1924(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1928(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1932(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1936(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1940(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 1944(%esp), %ebp
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 1948(%esp), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1952(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1956(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1832(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 1892(%esp), %eax
+ movl 92(%esp), %edx # 4-byte Reload
+ addl 1832(%esp), %edx
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1836(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1840(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1844(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 1848(%esp), %edi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1852(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1856(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1860(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1864(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1868(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1872(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl 1876(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ adcl 1880(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl 1884(%esp), %ebp
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1888(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl %edx, %eax
+ movl %edx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1768(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 1768(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1772(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1776(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1780(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1784(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1788(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1792(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1796(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1800(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 1804(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1808(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1812(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1816(%esp), %eax
+ movl %eax, %esi
+ adcl 1820(%esp), %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 1824(%esp), %ebp
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1828(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1704(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 1764(%esp), %eax
+ movl 68(%esp), %edx # 4-byte Reload
+ addl 1704(%esp), %edx
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1708(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1712(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1716(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1720(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1724(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1728(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1732(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 1736(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1740(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 1744(%esp), %edi
+ adcl 1748(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1752(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl 1756(%esp), %ebp
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1760(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edx, %eax
+ movl %edx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1640(%esp), %ecx
+ movl 2060(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv480x32
+ addl 1640(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1644(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1648(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1652(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1656(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1660(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1664(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1668(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1672(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1676(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1680(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1684(%esp), %eax
+ movl %eax, %esi
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 1688(%esp), %edi
+ adcl 1692(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1696(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 1700(%esp), %ebp
+ movl 2056(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1576(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 1636(%esp), %eax
+ movl 88(%esp), %edx # 4-byte Reload
+ addl 1576(%esp), %edx
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1580(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1584(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1588(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1592(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1596(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1600(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1604(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1608(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1612(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 1616(%esp), %esi
+ adcl 1620(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1624(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1628(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 1632(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1512(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 1512(%esp), %edi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1516(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1520(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1524(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1528(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 1532(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1536(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1540(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1544(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 1548(%esp), %ebp
+ adcl 1552(%esp), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1556(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1560(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1564(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1568(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1572(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1448(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 1508(%esp), %eax
+ movl 72(%esp), %edx # 4-byte Reload
+ addl 1448(%esp), %edx
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1452(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1456(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1460(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 1464(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1468(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 1472(%esp), %edi
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1476(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl 1480(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ adcl 1484(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1488(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1492(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1496(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1500(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 1504(%esp), %ebp
+ adcl $0, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %edx, %eax
+ movl %edx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1384(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 1384(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1388(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1392(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1396(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1400(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1404(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1408(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1412(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1416(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1420(%esp), %eax
+ movl %eax, %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1424(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1428(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1432(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1436(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 1440(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1444(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 1320(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 1380(%esp), %edx
+ movl 40(%esp), %ecx # 4-byte Reload
+ addl 1320(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 1324(%esp), %ebp
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 1328(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1332(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1336(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1340(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1344(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1348(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 1352(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1356(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1360(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1364(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 1368(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1372(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1376(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1256(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 1256(%esp), %eax
+ adcl 1260(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ adcl 1264(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1268(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 1272(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1276(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1280(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1284(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1288(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1292(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 1296(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1300(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1304(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1308(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 1312(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1316(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 1192(%esp), %ecx
+ movl 2052(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv480x32
+ movl 1252(%esp), %eax
+ movl 48(%esp), %edx # 4-byte Reload
+ addl 1192(%esp), %edx
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1196(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1200(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 1204(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1208(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 1212(%esp), %ebp
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1216(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1220(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1224(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl 1228(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1232(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1236(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1240(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl 1244(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 1248(%esp), %esi
+ adcl $0, %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1128(%esp), %ecx
+ movl 2060(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv480x32
+ addl 1128(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 1140(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1144(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1148(%esp), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1152(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1156(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 1168(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1180(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1184(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 1188(%esp), %esi
+ movl 2056(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 1064(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 1124(%esp), %eax
+ movl 44(%esp), %edx # 4-byte Reload
+ addl 1064(%esp), %edx
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1068(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 1072(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1076(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1080(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 1084(%esp), %edi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1088(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1092(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1096(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ adcl 1100(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1104(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1108(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1112(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1116(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 1120(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %esi
+ movl %edx, %ebp
+ movl %ebp, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1000(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 1000(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1004(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 1012(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1020(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 1028(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1040(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1044(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 1060(%esp), %esi
+ movl 2056(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 936(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 996(%esp), %eax
+ movl 52(%esp), %edx # 4-byte Reload
+ addl 936(%esp), %edx
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 940(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 944(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 948(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 952(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 956(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl 960(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 964(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 968(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 972(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 976(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 980(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 984(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 988(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 992(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %esi
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 872(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 872(%esp), %edi
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 876(%esp), %ebp
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 880(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 928(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 932(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 808(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 868(%esp), %eax
+ movl %ebp, %ecx
+ addl 808(%esp), %ecx
+ adcl 812(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 816(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 820(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 824(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ adcl 828(%esp), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 832(%esp), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 836(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 840(%esp), %edi
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 844(%esp), %esi
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 848(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 852(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 856(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 860(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 864(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 744(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 744(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload